Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig                     |   28
-rw-r--r--  drivers/char/Makefile                    |    7
-rw-r--r--  drivers/char/adsprpc.c                   | 3269
-rw-r--r--  drivers/char/adsprpc_compat.c            |  543
-rw-r--r--  drivers/char/adsprpc_compat.h            |   26
-rw-r--r--  drivers/char/adsprpc_shared.h            |  251
-rw-r--r--  drivers/char/diag/Kconfig                |   33
-rw-r--r--  drivers/char/diag/Makefile               |    5
-rw-r--r--  drivers/char/diag/diag_dci.c             | 3312
-rw-r--r--  drivers/char/diag/diag_dci.h             |  329
-rw-r--r--  drivers/char/diag/diag_debugfs.c         | 1216
-rw-r--r--  drivers/char/diag/diag_debugfs.h         |   19
-rw-r--r--  drivers/char/diag/diag_ipc_logging.h     |   44
-rw-r--r--  drivers/char/diag/diag_masks.c           | 2289
-rw-r--r--  drivers/char/diag/diag_masks.h           |  179
-rw-r--r--  drivers/char/diag/diag_memorydevice.c    |  483
-rw-r--r--  drivers/char/diag/diag_memorydevice.h    |   60
-rw-r--r--  drivers/char/diag/diag_mux.c             |  290
-rw-r--r--  drivers/char/diag/diag_mux.h             |   76
-rw-r--r--  drivers/char/diag/diag_usb.c             |  686
-rw-r--r--  drivers/char/diag/diag_usb.h             |  110
-rw-r--r--  drivers/char/diag/diagchar.h             |  690
-rw-r--r--  drivers/char/diag/diagchar_core.c        | 3963
-rw-r--r--  drivers/char/diag/diagchar_hdlc.c        |  267
-rw-r--r--  drivers/char/diag/diagchar_hdlc.h        |   66
-rw-r--r--  drivers/char/diag/diagfwd.c              | 1932
-rw-r--r--  drivers/char/diag/diagfwd.h              |   52
-rw-r--r--  drivers/char/diag/diagfwd_bridge.c       |  330
-rw-r--r--  drivers/char/diag/diagfwd_bridge.h       |   67
-rw-r--r--  drivers/char/diag/diagfwd_cntl.c         | 1696
-rw-r--r--  drivers/char/diag/diagfwd_cntl.h         |  332
-rw-r--r--  drivers/char/diag/diagfwd_glink.c        |  830
-rw-r--r--  drivers/char/diag/diagfwd_glink.h        |   57
-rw-r--r--  drivers/char/diag/diagfwd_hsic.c         |  453
-rw-r--r--  drivers/char/diag/diagfwd_hsic.h         |   47
-rw-r--r--  drivers/char/diag/diagfwd_mhi.c          |  759
-rw-r--r--  drivers/char/diag/diagfwd_mhi.h          |   88
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.c   | 2070
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.h   |  133
-rw-r--r--  drivers/char/diag/diagfwd_smd.c          |  898
-rw-r--r--  drivers/char/diag/diagfwd_smd.h          |   50
-rw-r--r--  drivers/char/diag/diagfwd_smux.c         |  330
-rw-r--r--  drivers/char/diag/diagfwd_smux.h         |   43
-rw-r--r--  drivers/char/diag/diagfwd_socket.c       | 1238
-rw-r--r--  drivers/char/diag/diagfwd_socket.h       |  110
-rw-r--r--  drivers/char/diag/diagmem.c              |  294
-rw-r--r--  drivers/char/diag/diagmem.h              |   63
-rw-r--r--  drivers/char/hw_random/Kconfig           |   17
-rw-r--r--  drivers/char/hw_random/Makefile          |    1
-rw-r--r--  drivers/char/hw_random/msm-rng.c         |    3
-rw-r--r--  drivers/char/hw_random/msm_rng.c         |  482
-rw-r--r--  drivers/char/misc.c                      |    2
-rw-r--r--  drivers/char/msm_smd_pkt.c               | 1397
-rw-r--r--  drivers/char/rdbg.c                      | 1173
54 files changed, 33185 insertions(+), 3 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3143db57ce44..db8eb7ccd744 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -48,6 +48,8 @@ config SGI_MBCS
source "drivers/tty/serial/Kconfig"
+source "drivers/char/diag/Kconfig"
+
config TTY_PRINTK
tristate "TTY driver to output user messages via printk"
depends on EXPERT && TTY
@@ -592,6 +594,16 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
+config MSM_SMD_PKT
+ bool "Enable device interface for some SMD packet ports"
+ default n
+ depends on MSM_SMD
+ help
+ The smd_pkt driver provides the interface for userspace clients
+ to communicate over SMD via device nodes. This enables userspace
+ clients to read from and write to some SMD packet channels on
+ MSM chipsets.
+
config TILE_SROM
bool "Character-device access via hypervisor to the Tilera SPI ROM"
depends on TILE
@@ -605,5 +617,21 @@ config TILE_SROM
source "drivers/char/xillybus/Kconfig"
+config MSM_ADSPRPC
+ tristate "QTI ADSP RPC driver"
+ depends on MSM_SMD
+ help
+ Provides a communication mechanism that allows clients to make
+ remote method invocations across the processor boundary to the
+ applications DSP processor. Say M if you want to enable this
+ module.
+
+config MSM_RDBG
+ tristate "QTI Remote debug driver"
+ help
+ Implements a shared-memory-based transport mechanism that allows
+ a debugger running on a host PC to communicate with a remote
+ stub running on peripheral subsystems such as the ADSP, modem, etc.
+
endmenu
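
For context, the SMD packet interface added by CONFIG_MSM_SMD_PKT above is
consumed from userspace through ordinary device-node I/O. A minimal client
sketch follows; the node name /dev/smdpkt_loopback is an assumption for
illustration (the real names are board-specific and not defined in this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	/* hypothetical SMD packet node; real names are board-specific */
	int fd = open("/dev/smdpkt_loopback", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "ping", 4) < 0)	/* each write sends one packet */
		perror("write");
	n = read(fd, buf, sizeof(buf));	/* blocks until a packet arrives */
	if (n > 0)
		printf("received %zd bytes\n", n);
	close(fd);
	return 0;
}
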
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d8a7579300d2..77697b8c42c0 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
+obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
@@ -59,4 +60,10 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
obj-$(CONFIG_TILE_SROM) += tile-srom.o
+obj-$(CONFIG_DIAG_CHAR) += diag/
obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_MSM_ADSPRPC) += adsprpc.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o
+endif
+obj-$(CONFIG_MSM_RDBG) += rdbg.o
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
new file mode 100644
index 000000000000..165c5707a9f7
--- /dev/null
+++ b/drivers/char/adsprpc.c
@@ -0,0 +1,3269 @@
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/msm_ion.h>
+#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/scatterlist.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+#include <linux/kref.h>
+#include <linux/sort.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <asm/dma-iommu.h>
+#include <soc/qcom/scm.h>
+#include "adsprpc_compat.h"
+#include "adsprpc_shared.h"
+#include <soc/qcom/ramdump.h>
+#include <linux/debugfs.h>
+
+#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
+#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
+#define TZ_PIL_AUTH_QDSP6_PROC 1
+#define ADSP_MMAP_HEAP_ADDR 4
+#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
+#define FASTRPC_ENOSUCH 39
+#define VMID_SSC_Q6 38
+#define VMID_ADSP_Q6 6
+#define AC_VM_ADSP_HEAP_SHARED 33
+#define DEBUGFS_SIZE 1024
+
+#define RPC_TIMEOUT (5 * HZ)
+#define BALIGN 128
+#define NUM_CHANNELS 4 /* adsp,sdsp,mdsp,cdsp */
+#define NUM_SESSIONS 9 /* 8 compute, 1 cpz */
+#define FASTRPC_CTX_MAGIC (0xbeeddeed)
+#define FASTRPC_CTX_MAX (256)
+#define FASTRPC_CTXID_MASK (0xFF0)
+
+#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
+
+#define FASTRPC_LINK_STATE_DOWN (0x0)
+#define FASTRPC_LINK_STATE_UP (0x1)
+#define FASTRPC_LINK_DISCONNECTED (0x0)
+#define FASTRPC_LINK_CONNECTING (0x1)
+#define FASTRPC_LINK_CONNECTED (0x3)
+#define FASTRPC_LINK_DISCONNECTING (0x7)
+
+#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
+#define FASTRPC_STATIC_HANDLE_LISTENER (3)
+#define FASTRPC_STATIC_HANDLE_MAX (20)
+
+#define PERF_END (void)0
+
+#define PERF(enb, cnt, ff) \
+ {\
+ struct timespec startT = {0};\
+ if (enb) {\
+ getnstimeofday(&startT);\
+ } \
+ ff ;\
+ if (enb) {\
+ cnt += getnstimediff(&startT);\
+ } \
+ }
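+
+/*
+ * Illustrative usage of the PERF()/PERF_END pair (this is the pattern
+ * used later in this file, e.g. around get_args()):
+ *
+ * PERF(fl->profile, fl->perf.getargs,
+ * VERIFY(err, 0 == get_args(kernel, ctx));
+ * PERF_END);
+ *
+ * When profiling is enabled, the wrapped statement is timed with
+ * getnstimeofday() and the elapsed nanoseconds are accumulated into
+ * the given struct fastrpc_perf counter.
+ */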
+
+static int fastrpc_glink_open(int cid);
+static void fastrpc_glink_close(void *chan, int cid);
+static struct dentry *debugfs_root;
+static struct dentry *debugfs_global_file;
+
+static inline uint64_t buf_page_start(uint64_t buf)
+{
+ uint64_t start = (uint64_t) buf & PAGE_MASK;
+ return start;
+}
+
+static inline uint64_t buf_page_offset(uint64_t buf)
+{
+ uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
+ return offset;
+}
+
+static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
+{
+ uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
+ uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ uint64_t nPages = end - start + 1;
+ return nPages;
+}
+
+static inline uint64_t buf_page_size(uint32_t size)
+{
+ uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ return sz > PAGE_SIZE ? sz : PAGE_SIZE;
+}
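+
+/*
+ * Worked example for the page helpers above, assuming 4K pages
+ * (PAGE_SIZE == 0x1000): for buf == 0x1234 and len == 0x2000,
+ * buf_page_start() returns 0x1000, buf_page_offset() returns 0x234,
+ * and buf_num_pages() returns 3, since the range 0x1234..0x3233
+ * touches the pages at 0x1000, 0x2000 and 0x3000. buf_page_size()
+ * rounds a size up to a whole number of pages with a floor of one
+ * page: buf_page_size(0x2000) == 0x2000, buf_page_size(1) == PAGE_SIZE.
+ */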
+
+static inline void *uint64_to_ptr(uint64_t addr)
+{
+ void *ptr = (void *)((uintptr_t)addr);
+ return ptr;
+}
+
+static inline uint64_t ptr_to_uint64(void *ptr)
+{
+ uint64_t addr = (uint64_t)((uintptr_t)ptr);
+ return addr;
+}
+
+struct fastrpc_file;
+
+struct fastrpc_buf {
+ struct hlist_node hn;
+ struct fastrpc_file *fl;
+ void *virt;
+ uint64_t phys;
+ size_t size;
+};
+
+struct fastrpc_ctx_lst;
+
+struct overlap {
+ uintptr_t start;
+ uintptr_t end;
+ int raix;
+ uintptr_t mstart;
+ uintptr_t mend;
+ uintptr_t offset;
+};
+
+struct smq_invoke_ctx {
+ struct hlist_node hn;
+ struct completion work;
+ int retval;
+ int pid;
+ int tgid;
+ remote_arg_t *lpra;
+ remote_arg64_t *rpra;
+ int *fds;
+ unsigned *attrs;
+ struct fastrpc_mmap **maps;
+ struct fastrpc_buf *buf;
+ size_t used;
+ struct fastrpc_file *fl;
+ uint32_t sc;
+ struct overlap *overs;
+ struct overlap **overps;
+ struct smq_msg msg;
+ unsigned int magic;
+ uint64_t ctxid;
+};
+
+struct fastrpc_ctx_lst {
+ struct hlist_head pending;
+ struct hlist_head interrupted;
+};
+
+struct fastrpc_smmu {
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ int cb;
+ int enabled;
+ int faults;
+ int secure;
+ int coherent;
+};
+
+struct fastrpc_session_ctx {
+ struct device *dev;
+ struct fastrpc_smmu smmu;
+ int used;
+};
+
+struct fastrpc_glink_info {
+ int link_state;
+ int port_state;
+ struct glink_open_config cfg;
+ struct glink_link_info link_info;
+ void *link_notify_handle;
+};
+
+struct fastrpc_channel_ctx {
+ char *name;
+ char *subsys;
+ void *chan;
+ struct device *dev;
+ struct fastrpc_session_ctx session[NUM_SESSIONS];
+ struct completion work;
+ struct completion workport;
+ struct notifier_block nb;
+ struct kref kref;
+ int channel;
+ int sesscount;
+ int ssrcount;
+ void *handle;
+ int prevssrcount;
+ int issubsystemup;
+ int vmid;
+ int heap_vmid;
+ int ramdumpenabled;
+ void *remoteheap_ramdump_dev;
+ struct fastrpc_glink_info link;
+};
+
+struct fastrpc_apps {
+ struct fastrpc_channel_ctx *channel;
+ struct cdev cdev;
+ struct class *class;
+ struct mutex smd_mutex;
+ struct smq_phy_page range;
+ struct hlist_head maps;
+ uint32_t staticpd_flags;
+ dev_t dev_no;
+ int compat;
+ struct hlist_head drivers;
+ spinlock_t hlock;
+ struct ion_client *client;
+ struct device *dev;
+ bool glink;
+ spinlock_t ctxlock;
+ struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
+};
+
+struct fastrpc_mmap {
+ struct hlist_node hn;
+ struct fastrpc_file *fl;
+ struct fastrpc_apps *apps;
+ int fd;
+ uint32_t flags;
+ struct dma_buf *buf;
+ struct sg_table *table;
+ struct dma_buf_attachment *attach;
+ struct ion_handle *handle;
+ uint64_t phys;
+ size_t size;
+ uintptr_t va;
+ size_t len;
+ int refs;
+ uintptr_t raddr;
+ int uncached;
+ int secure;
+ uintptr_t attr;
+};
+
+struct fastrpc_perf {
+ int64_t count;
+ int64_t flush;
+ int64_t map;
+ int64_t copy;
+ int64_t link;
+ int64_t getargs;
+ int64_t putargs;
+ int64_t invargs;
+ int64_t invoke;
+};
+
+struct fastrpc_file {
+ struct hlist_node hn;
+ spinlock_t hlock;
+ struct hlist_head maps;
+ struct hlist_head bufs;
+ struct fastrpc_ctx_lst clst;
+ struct fastrpc_session_ctx *sctx;
+ struct fastrpc_session_ctx *secsctx;
+ uint32_t mode;
+ uint32_t profile;
+ int tgid;
+ int cid;
+ int ssrcount;
+ int pd;
+ int file_close;
+ struct fastrpc_apps *apps;
+ struct fastrpc_perf perf;
+ struct dentry *debugfs_file;
+ struct mutex map_mutex;
+};
+
+static struct fastrpc_apps gfa;
+
+static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
+ {
+ .name = "adsprpc-smd",
+ .subsys = "adsp",
+ .channel = SMD_APPS_QDSP,
+ .link.link_info.edge = "lpass",
+ .link.link_info.transport = "smem",
+ },
+ {
+ .name = "mdsprpc-smd",
+ .subsys = "modem",
+ .channel = SMD_APPS_MODEM,
+ .link.link_info.edge = "mpss",
+ .link.link_info.transport = "smem",
+ },
+ {
+ .name = "sdsprpc-smd",
+ .subsys = "slpi",
+ .channel = SMD_APPS_DSPS,
+ .link.link_info.edge = "dsps",
+ .link.link_info.transport = "smem",
+ .vmid = VMID_SSC_Q6,
+ },
+ {
+ .name = "cdsprpc-smd",
+ .subsys = "cdsp",
+ .link.link_info.edge = "cdsp",
+ .link.link_info.transport = "smem",
+ },
+};
+
+static inline int64_t getnstimediff(struct timespec *start)
+{
+ int64_t ns;
+ struct timespec ts, b;
+
+ getnstimeofday(&ts);
+ b = timespec_sub(ts, *start);
+ ns = timespec_to_ns(&b);
+ return ns;
+}
+
+static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
+{
+ struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
+ int vmid;
+
+ if (!fl)
+ return;
+ if (cache) {
+ spin_lock(&fl->hlock);
+ hlist_add_head(&buf->hn, &fl->bufs);
+ spin_unlock(&fl->hlock);
+ return;
+ }
+ if (!IS_ERR_OR_NULL(buf->virt)) {
+ int destVM[1] = {VMID_HLOS};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ if (fl->sctx->smmu.cb)
+ buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
+ vmid = fl->apps->channel[fl->cid].vmid;
+ if (vmid) {
+ int srcVM[2] = {VMID_HLOS, vmid};
+
+ hyp_assign_phys(buf->phys, buf_page_size(buf->size),
+ srcVM, 2, destVM, destVMperm, 1);
+ }
+ dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
+ buf->phys);
+ }
+ kfree(buf);
+}
+
+static void fastrpc_buf_list_free(struct fastrpc_file *fl)
+{
+ struct fastrpc_buf *buf, *free;
+ do {
+ struct hlist_node *n;
+
+ free = NULL;
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+ hlist_del_init(&buf->hn);
+ free = buf;
+ break;
+ }
+ spin_unlock(&fl->hlock);
+ if (free)
+ fastrpc_buf_free(free, 0);
+ } while (free);
+}
+
+static void fastrpc_mmap_add(struct fastrpc_mmap *map)
+{
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ struct fastrpc_apps *me = &gfa;
+
+ spin_lock(&me->hlock);
+ hlist_add_head(&map->hn, &me->maps);
+ spin_unlock(&me->hlock);
+ } else {
+ struct fastrpc_file *fl = map->fl;
+
+ spin_lock(&fl->hlock);
+ hlist_add_head(&map->hn, &fl->maps);
+ spin_unlock(&fl->hlock);
+ }
+}
+
+static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
+ size_t len, int mflags, struct fastrpc_mmap **ppmap)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_mmap *match = NULL, *map = NULL;
+ struct hlist_node *n;
+
+ if ((va + len) < va)
+ return -EOVERFLOW;
+ if (mflags == ADSP_MMAP_HEAP_ADDR ||
+ mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ spin_lock(&me->hlock);
+ hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+ if (va >= map->va &&
+ va + len <= map->va + map->len &&
+ map->fd == fd) {
+ map->refs++;
+ match = map;
+ break;
+ }
+ }
+ spin_unlock(&me->hlock);
+ } else {
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+ if (va >= map->va &&
+ va + len <= map->va + map->len &&
+ map->fd == fd) {
+ map->refs++;
+ match = map;
+ break;
+ }
+ }
+ spin_unlock(&fl->hlock);
+ }
+ if (match) {
+ *ppmap = match;
+ return 0;
+ }
+ return -ENOTTY;
+}
+
+static int dma_alloc_memory(phys_addr_t *region_start, size_t size)
+{
+ struct fastrpc_apps *me = &gfa;
+ void *vaddr = NULL;
+ DEFINE_DMA_ATTRS(attrs);
+
+ if (me->dev == NULL) {
+ pr_err("device adsprpc-mem is not initialized\n");
+ return -ENODEV;
+ }
+ dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ vaddr = dma_alloc_attrs(me->dev, size, region_start, GFP_KERNEL,
+ &attrs);
+ if (!vaddr) {
+ pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
+ (unsigned int)size);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
+ size_t len, struct fastrpc_mmap **ppmap)
+{
+ struct fastrpc_mmap *match = NULL, *map;
+ struct hlist_node *n;
+ struct fastrpc_apps *me = &gfa;
+
+ spin_lock(&me->hlock);
+ hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+ if (map->raddr == va &&
+ map->raddr + map->len == va + len &&
+ map->refs == 1) {
+ match = map;
+ hlist_del_init(&map->hn);
+ break;
+ }
+ }
+ spin_unlock(&me->hlock);
+ if (match) {
+ *ppmap = match;
+ return 0;
+ }
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+ if (map->raddr == va &&
+ map->raddr + map->len == va + len &&
+ map->refs == 1) {
+ match = map;
+ hlist_del_init(&map->hn);
+ break;
+ }
+ }
+ spin_unlock(&fl->hlock);
+ if (match) {
+ *ppmap = match;
+ return 0;
+ }
+ return -ENOTTY;
+}
+
+static void fastrpc_mmap_free(struct fastrpc_mmap *map)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_file *fl;
+ int vmid;
+ struct fastrpc_session_ctx *sess;
+
+ if (!map)
+ return;
+ fl = map->fl;
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ spin_lock(&me->hlock);
+ map->refs--;
+ if (!map->refs)
+ hlist_del_init(&map->hn);
+ spin_unlock(&me->hlock);
+ } else {
+ spin_lock(&fl->hlock);
+ map->refs--;
+ if (!map->refs)
+ hlist_del_init(&map->hn);
+ spin_unlock(&fl->hlock);
+ }
+ if (map->refs > 0)
+ return;
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ DEFINE_DMA_ATTRS(attrs);
+
+ if (me->dev == NULL) {
+ pr_err("failed to free remote heap allocation\n");
+ return;
+ }
+ if (map->phys) {
+ dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ dma_free_attrs(me->dev, map->size,
+ &(map->va), map->phys, &attrs);
+ }
+ } else {
+ int destVM[1] = {VMID_HLOS};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ if (map->secure)
+ sess = fl->secsctx;
+ else
+ sess = fl->sctx;
+
+ if (!IS_ERR_OR_NULL(map->handle))
+ ion_free(fl->apps->client, map->handle);
+ if (sess && sess->smmu.enabled) {
+ if (map->size || map->phys)
+ msm_dma_unmap_sg(sess->smmu.dev,
+ map->table->sgl,
+ map->table->nents, DMA_BIDIRECTIONAL,
+ map->buf);
+ }
+ vmid = fl->apps->channel[fl->cid].vmid;
+ if (vmid && map->phys) {
+ int srcVM[2] = {VMID_HLOS, vmid};
+
+ hyp_assign_phys(map->phys, buf_page_size(map->size),
+ srcVM, 2, destVM, destVMperm, 1);
+ }
+
+ if (!IS_ERR_OR_NULL(map->table))
+ dma_buf_unmap_attachment(map->attach, map->table,
+ DMA_BIDIRECTIONAL);
+ if (!IS_ERR_OR_NULL(map->attach))
+ dma_buf_detach(map->buf, map->attach);
+ if (!IS_ERR_OR_NULL(map->buf))
+ dma_buf_put(map->buf);
+ }
+ kfree(map);
+}
+
+static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
+ struct fastrpc_session_ctx **session);
+
+static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
+ uintptr_t va, size_t len, int mflags, struct fastrpc_mmap **ppmap)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_session_ctx *sess;
+ struct fastrpc_apps *apps = fl->apps;
+ int cid = fl->cid;
+ struct fastrpc_channel_ctx *chan = &apps->channel[cid];
+ struct fastrpc_mmap *map = NULL;
+ struct dma_attrs attrs;
+ phys_addr_t region_start = 0;
+ unsigned long flags;
+ int err = 0, vmid;
+
+ if (!fastrpc_mmap_find(fl, fd, va, len, mflags, ppmap))
+ return 0;
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ VERIFY(err, !IS_ERR_OR_NULL(map));
+ if (err)
+ goto bail;
+ INIT_HLIST_NODE(&map->hn);
+ map->flags = mflags;
+ map->refs = 1;
+ map->fl = fl;
+ map->fd = fd;
+ map->attr = attr;
+ if (mflags == ADSP_MMAP_HEAP_ADDR ||
+ mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ map->apps = me;
+ map->fl = NULL;
+ VERIFY(err, !dma_alloc_memory(&region_start, len));
+ if (err)
+ goto bail;
+ map->phys = (uintptr_t)region_start;
+ map->size = len;
+ map->va = (uintptr_t)map->phys;
+ } else {
+ VERIFY(err, !IS_ERR_OR_NULL(map->handle =
+ ion_import_dma_buf(fl->apps->client, fd)));
+ if (err)
+ goto bail;
+ VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
+ &flags));
+ if (err)
+ goto bail;
+
+ map->uncached = !ION_IS_CACHED(flags);
+ if (map->attr & FASTRPC_ATTR_NOVA)
+ map->uncached = 1;
+
+ map->secure = flags & ION_FLAG_SECURE;
+ if (map->secure) {
+ if (!fl->secsctx)
+ err = fastrpc_session_alloc(chan, 1,
+ &fl->secsctx);
+ if (err)
+ goto bail;
+ }
+ if (map->secure)
+ sess = fl->secsctx;
+ else
+ sess = fl->sctx;
+
+ VERIFY(err, !IS_ERR_OR_NULL(sess));
+ if (err)
+ goto bail;
+ VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
+ if (err)
+ goto bail;
+ VERIFY(err, !IS_ERR_OR_NULL(map->attach =
+ dma_buf_attach(map->buf, sess->smmu.dev)));
+ if (err)
+ goto bail;
+ VERIFY(err, !IS_ERR_OR_NULL(map->table =
+ dma_buf_map_attachment(map->attach,
+ DMA_BIDIRECTIONAL)));
+ if (err)
+ goto bail;
+ if (sess->smmu.enabled) {
+ init_dma_attrs(&attrs);
+ dma_set_attr(DMA_ATTR_EXEC_MAPPING, &attrs);
+
+ if ((map->attr & FASTRPC_ATTR_NON_COHERENT) ||
+ (sess->smmu.coherent && map->uncached))
+ dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT,
+ &attrs);
+ else if (map->attr & FASTRPC_ATTR_COHERENT)
+ dma_set_attr(DMA_ATTR_FORCE_COHERENT, &attrs);
+
+ VERIFY(err, map->table->nents ==
+ msm_dma_map_sg_attrs(sess->smmu.dev,
+ map->table->sgl, map->table->nents,
+ DMA_BIDIRECTIONAL, map->buf, &attrs));
+ if (err)
+ goto bail;
+ } else {
+ VERIFY(err, map->table->nents == 1);
+ if (err)
+ goto bail;
+ }
+ map->phys = sg_dma_address(map->table->sgl);
+ if (sess->smmu.cb) {
+ map->phys += ((uint64_t)sess->smmu.cb << 32);
+ map->size = sg_dma_len(map->table->sgl);
+ } else {
+ map->size = buf_page_size(len);
+ }
+ vmid = fl->apps->channel[fl->cid].vmid;
+ if (vmid) {
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[2] = {VMID_HLOS, vmid};
+ int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC,
+ PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ VERIFY(err, !hyp_assign_phys(map->phys,
+ buf_page_size(map->size),
+ srcVM, 1, destVM, destVMperm, 2));
+ if (err)
+ goto bail;
+ }
+ map->va = va;
+ }
+ map->len = len;
+
+ fastrpc_mmap_add(map);
+ *ppmap = map;
+
+bail:
+ if (err && map)
+ fastrpc_mmap_free(map);
+ return err;
+}
+
+static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
+ struct fastrpc_buf **obuf)
+{
+ int err = 0, vmid;
+ struct fastrpc_buf *buf = NULL, *fr = NULL;
+ struct hlist_node *n;
+
+ VERIFY(err, size > 0);
+ if (err)
+ goto bail;
+
+ /* find the smallest buffer that fits in the cache */
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+ if (buf->size >= size && (!fr || fr->size > buf->size))
+ fr = buf;
+ }
+ if (fr)
+ hlist_del_init(&fr->hn);
+ spin_unlock(&fl->hlock);
+ if (fr) {
+ *obuf = fr;
+ return 0;
+ }
+ buf = NULL;
+ VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
+ if (err)
+ goto bail;
+ INIT_HLIST_NODE(&buf->hn);
+ buf->fl = fl;
+ buf->virt = NULL;
+ buf->phys = 0;
+ buf->size = size;
+ buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
+ (void *)&buf->phys, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(buf->virt)) {
+ /* free cache and retry */
+ fastrpc_buf_list_free(fl);
+ buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
+ (void *)&buf->phys, GFP_KERNEL);
+ VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
+ }
+ if (err)
+ goto bail;
+ if (fl->sctx->smmu.cb)
+ buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
+ vmid = fl->apps->channel[fl->cid].vmid;
+ if (vmid) {
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[2] = {VMID_HLOS, vmid};
+ int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC,
+ PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
+ srcVM, 1, destVM, destVMperm, 2));
+ if (err)
+ goto bail;
+ }
+
+ *obuf = buf;
+ bail:
+ if (err && buf)
+ fastrpc_buf_free(buf, 0);
+ return err;
+}
+
+
+static int context_restore_interrupted(struct fastrpc_file *fl,
+ struct fastrpc_ioctl_invoke_attrs *inv,
+ struct smq_invoke_ctx **po)
+{
+ int err = 0;
+ struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
+ struct hlist_node *n;
+ struct fastrpc_ioctl_invoke *invoke = &inv->inv;
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
+ if (ictx->pid == current->pid) {
+ if (invoke->sc != ictx->sc || ictx->fl != fl)
+ err = -1;
+ else {
+ ctx = ictx;
+ hlist_del_init(&ctx->hn);
+ hlist_add_head(&ctx->hn, &fl->clst.pending);
+ }
+ break;
+ }
+ }
+ spin_unlock(&fl->hlock);
+ if (ctx)
+ *po = ctx;
+ return err;
+}
+
+#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
+static int overlap_ptr_cmp(const void *a, const void *b)
+{
+ struct overlap *pa = *((struct overlap **)a);
+ struct overlap *pb = *((struct overlap **)b);
+ /* sort with lowest starting buffer first */
+ int st = CMP(pa->start, pb->start);
+ /* sort with highest ending buffer first */
+ int ed = CMP(pb->end, pa->end);
+ return st == 0 ? ed : st;
+}
+
+static int context_build_overlap(struct smq_invoke_ctx *ctx)
+{
+ int i, err = 0;
+ remote_arg_t *lpra = ctx->lpra;
+ int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+ int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
+ int nbufs = inbufs + outbufs;
+ struct overlap max;
+ for (i = 0; i < nbufs; ++i) {
+ ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
+ ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
+ if (lpra[i].buf.len) {
+ VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
+ if (err)
+ goto bail;
+ }
+ ctx->overs[i].raix = i;
+ ctx->overps[i] = &ctx->overs[i];
+ }
+ sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
+ max.start = 0;
+ max.end = 0;
+ for (i = 0; i < nbufs; ++i) {
+ if (ctx->overps[i]->start < max.end) {
+ ctx->overps[i]->mstart = max.end;
+ ctx->overps[i]->mend = ctx->overps[i]->end;
+ ctx->overps[i]->offset = max.end -
+ ctx->overps[i]->start;
+ if (ctx->overps[i]->end > max.end) {
+ max.end = ctx->overps[i]->end;
+ } else {
+ ctx->overps[i]->mend = 0;
+ ctx->overps[i]->mstart = 0;
+ }
+ } else {
+ ctx->overps[i]->mend = ctx->overps[i]->end;
+ ctx->overps[i]->mstart = ctx->overps[i]->start;
+ ctx->overps[i]->offset = 0;
+ max = *ctx->overps[i];
+ }
+ }
+bail:
+ return err;
+}
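+
+/*
+ * Worked example of the overlap coalescing above (addresses are
+ * illustrative): for buffers A = [0x1000, 0x1800), B = [0x1400,
+ * 0x2000) and C = [0x1500, 0x1600), the sort orders them A, B, C.
+ * A is disjoint from everything seen so far, so its copy region
+ * (mstart/mend) covers all of A. B starts inside A, so its copy
+ * region is trimmed to [0x1800, 0x2000) with offset 0x400. C lies
+ * entirely inside memory that is already covered, so mstart == mend
+ * == 0 and get_args() copies nothing extra for it.
+ */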
+
+#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
+ do {\
+ if (!(kernel))\
+ VERIFY(err, 0 == copy_from_user((dst),\
+ (void const __user *)(src),\
+ (size)));\
+ else\
+ memmove((dst), (src), (size));\
+ } while (0)
+
+#define K_COPY_TO_USER(err, kernel, dst, src, size) \
+ do {\
+ if (!(kernel))\
+ VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
+ (src), (size)));\
+ else\
+ memmove((dst), (src), (size));\
+ } while (0)
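+
+/*
+ * These helpers pick the copy primitive by caller context: user
+ * invocations go through copy_from_user()/copy_to_user(), with any
+ * failure recorded in err via VERIFY(), while kernel-initiated
+ * invocations (kernel != 0), whose buffers already live in kernel
+ * space, use a plain memmove().
+ */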
+
+
+static void context_free(struct smq_invoke_ctx *ctx);
+
+static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
+ struct fastrpc_ioctl_invoke_attrs *invokefd,
+ struct smq_invoke_ctx **po)
+{
+ int err = 0, bufs, ii, size = 0;
+ struct fastrpc_apps *me = &gfa;
+ struct smq_invoke_ctx *ctx = NULL;
+ struct fastrpc_ctx_lst *clst = &fl->clst;
+ struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
+
+ bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
+ size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
+ sizeof(*ctx->fds) * (bufs) +
+ sizeof(*ctx->attrs) * (bufs) +
+ sizeof(*ctx->overs) * (bufs) +
+ sizeof(*ctx->overps) * (bufs);
+
+ VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
+ if (err)
+ goto bail;
+
+ INIT_HLIST_NODE(&ctx->hn);
+ hlist_add_fake(&ctx->hn);
+ ctx->fl = fl;
+ ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
+ ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
+ ctx->fds = (int *)(&ctx->lpra[bufs]);
+ ctx->attrs = (unsigned *)(&ctx->fds[bufs]);
+ ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
+ ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
+
+ K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
+ bufs * sizeof(*ctx->lpra));
+ if (err)
+ goto bail;
+
+ if (invokefd->fds) {
+ K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
+ bufs * sizeof(*ctx->fds));
+ if (err)
+ goto bail;
+ }
+ if (invokefd->attrs) {
+ K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
+ bufs * sizeof(*ctx->attrs));
+ if (err)
+ goto bail;
+ }
+
+ ctx->sc = invoke->sc;
+ if (bufs) {
+ VERIFY(err, 0 == context_build_overlap(ctx));
+ if (err)
+ goto bail;
+ }
+ ctx->retval = -1;
+ ctx->pid = current->pid;
+ ctx->tgid = current->tgid;
+ init_completion(&ctx->work);
+ ctx->magic = FASTRPC_CTX_MAGIC;
+
+ spin_lock(&fl->hlock);
+ hlist_add_head(&ctx->hn, &clst->pending);
+ spin_unlock(&fl->hlock);
+
+ spin_lock(&me->ctxlock);
+ for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
+ if (!me->ctxtable[ii]) {
+ me->ctxtable[ii] = ctx;
+ ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
+ break;
+ }
+ }
+ spin_unlock(&me->ctxlock);
+ VERIFY(err, ii < FASTRPC_CTX_MAX);
+ if (err) {
+ pr_err("adsprpc: out of context memory\n");
+ goto bail;
+ }
+
+ *po = ctx;
+bail:
+ if (ctx && err)
+ context_free(ctx);
+ return err;
+}
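+
+/*
+ * Sketch of the ctxid encoding built above (the pointer value is
+ * hypothetical): for a ctx allocated at 0xffffffc012345000 and stored
+ * in ctxtable slot ii == 5, ctxid == (ptr & ~0xFFF) | (5 << 4) ==
+ * 0xffffffc012345050. The send path ORs fl->pd into bit 0, and the
+ * response handlers recover the slot index with
+ * (rsp.ctx & FASTRPC_CTXID_MASK) >> 4, then match the stored ctxid
+ * against (rsp.ctx & ~1).
+ */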
+
+static void context_save_interrupted(struct smq_invoke_ctx *ctx)
+{
+ struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
+ spin_lock(&ctx->fl->hlock);
+ hlist_del_init(&ctx->hn);
+ hlist_add_head(&ctx->hn, &clst->interrupted);
+ spin_unlock(&ctx->fl->hlock);
+ /* free the cache on power collapse */
+ fastrpc_buf_list_free(ctx->fl);
+}
+
+static void context_free(struct smq_invoke_ctx *ctx)
+{
+ int i;
+ struct fastrpc_apps *me = &gfa;
+ int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
+ REMOTE_SCALARS_OUTBUFS(ctx->sc);
+ spin_lock(&ctx->fl->hlock);
+ hlist_del_init(&ctx->hn);
+ spin_unlock(&ctx->fl->hlock);
+ for (i = 0; i < nbufs; ++i)
+ fastrpc_mmap_free(ctx->maps[i]);
+ fastrpc_buf_free(ctx->buf, 1);
+ ctx->magic = 0;
+ ctx->ctxid = 0;
+
+ spin_lock(&me->ctxlock);
+ for (i = 0; i < FASTRPC_CTX_MAX; i++) {
+ if (me->ctxtable[i] == ctx) {
+ me->ctxtable[i] = NULL;
+ break;
+ }
+ }
+ spin_unlock(&me->ctxlock);
+
+ kfree(ctx);
+}
+
+static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
+{
+ ctx->retval = retval;
+ complete(&ctx->work);
+}
+
+
+static void fastrpc_notify_users(struct fastrpc_file *me)
+{
+ struct smq_invoke_ctx *ictx;
+ struct hlist_node *n;
+ spin_lock(&me->hlock);
+ hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
+ complete(&ictx->work);
+ }
+ hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
+ complete(&ictx->work);
+ }
+ spin_unlock(&me->hlock);
+
+}
+
+static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
+{
+ struct fastrpc_file *fl;
+ struct hlist_node *n;
+ spin_lock(&me->hlock);
+ hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
+ if (fl->cid == cid)
+ fastrpc_notify_users(fl);
+ }
+ spin_unlock(&me->hlock);
+
+}
+static void context_list_ctor(struct fastrpc_ctx_lst *me)
+{
+ INIT_HLIST_HEAD(&me->interrupted);
+ INIT_HLIST_HEAD(&me->pending);
+}
+
+static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
+{
+ struct fastrpc_ctx_lst *clst = &fl->clst;
+ struct smq_invoke_ctx *ictx = NULL, *ctxfree;
+ struct hlist_node *n;
+ do {
+ ctxfree = NULL;
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
+ hlist_del_init(&ictx->hn);
+ ctxfree = ictx;
+ break;
+ }
+ spin_unlock(&fl->hlock);
+ if (ctxfree)
+ context_free(ctxfree);
+ } while (ctxfree);
+ do {
+ ctxfree = NULL;
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
+ hlist_del_init(&ictx->hn);
+ ctxfree = ictx;
+ break;
+ }
+ spin_unlock(&fl->hlock);
+ if (ctxfree)
+ context_free(ctxfree);
+ } while (ctxfree);
+}
+
+static int fastrpc_file_free(struct fastrpc_file *fl);
+static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
+{
+ struct fastrpc_file *fl, *free;
+ struct hlist_node *n;
+ do {
+ free = NULL;
+ spin_lock(&me->hlock);
+ hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
+ hlist_del_init(&fl->hn);
+ free = fl;
+ break;
+ }
+ spin_unlock(&me->hlock);
+ if (free)
+ fastrpc_file_free(free);
+ } while (free);
+}
+
+static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
+{
+ remote_arg64_t *rpra;
+ remote_arg_t *lpra = ctx->lpra;
+ struct smq_invoke_buf *list;
+ struct smq_phy_page *pages, *ipage;
+ uint32_t sc = ctx->sc;
+ int inbufs = REMOTE_SCALARS_INBUFS(sc);
+ int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+ int bufs = inbufs + outbufs;
+ uintptr_t args;
+ size_t rlen = 0, copylen = 0, metalen = 0;
+ int i, inh, oix;
+ int err = 0;
+ int mflags = 0;
+
+ /* calculate size of the metadata */
+ rpra = NULL;
+ list = smq_invoke_buf_start(rpra, sc);
+ pages = smq_phy_page_start(sc, list);
+ ipage = pages;
+
+ for (i = 0; i < bufs; ++i) {
+ uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
+ size_t len = lpra[i].buf.len;
+
+ if (ctx->fds[i] && (ctx->fds[i] != -1))
+ fastrpc_mmap_create(ctx->fl, ctx->fds[i],
+ ctx->attrs[i], buf, len,
+ mflags, &ctx->maps[i]);
+ ipage += 1;
+ }
+ metalen = copylen = (size_t)&ipage[0];
+ /* calculate the length required for copying */
+ for (oix = 0; oix < inbufs + outbufs; ++oix) {
+ int i = ctx->overps[oix]->raix;
+ uintptr_t mstart, mend;
+ size_t len = lpra[i].buf.len;
+
+ if (!len)
+ continue;
+ if (ctx->maps[i])
+ continue;
+ if (ctx->overps[oix]->offset == 0)
+ copylen = ALIGN(copylen, BALIGN);
+ mstart = ctx->overps[oix]->mstart;
+ mend = ctx->overps[oix]->mend;
+ VERIFY(err, (mend - mstart) <= LONG_MAX);
+ if (err)
+ goto bail;
+ copylen += mend - mstart;
+ VERIFY(err, copylen >= 0);
+ if (err)
+ goto bail;
+ }
+ ctx->used = copylen;
+
+ /* allocate new buffer */
+ if (copylen) {
+ VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
+ if (err)
+ goto bail;
+ }
+ VERIFY(err, ctx->buf->virt != NULL);
+ if (err)
+ goto bail;
+ if (metalen <= copylen)
+ memset(ctx->buf->virt, 0, metalen);
+
+ /* copy metadata */
+ rpra = ctx->buf->virt;
+ ctx->rpra = rpra;
+ list = smq_invoke_buf_start(rpra, sc);
+ pages = smq_phy_page_start(sc, list);
+ ipage = pages;
+ args = (uintptr_t)ctx->buf->virt + metalen;
+ for (i = 0; i < bufs; ++i) {
+ size_t len = lpra[i].buf.len;
+ list[i].num = 0;
+ list[i].pgidx = 0;
+ if (!len)
+ continue;
+ list[i].num = 1;
+ list[i].pgidx = ipage - pages;
+ ipage++;
+ }
+ /* map ion buffers */
+ PERF(ctx->fl->profile, ctx->fl->perf.map,
+ for (i = 0; i < inbufs + outbufs; ++i) {
+ struct fastrpc_mmap *map = ctx->maps[i];
+ uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
+ size_t len = lpra[i].buf.len;
+ rpra[i].buf.pv = 0;
+ rpra[i].buf.len = len;
+ if (!len)
+ continue;
+ if (map) {
+ struct vm_area_struct *vma;
+ uintptr_t offset;
+ uint64_t num = buf_num_pages(buf, len);
+ int idx = list[i].pgidx;
+
+ if (map->attr & FASTRPC_ATTR_NOVA) {
+ offset = 0;
+ } else {
+ down_read(&current->mm->mmap_sem);
+ VERIFY(err, NULL != (vma = find_vma(current->mm,
+ map->va)));
+ if (err) {
+ up_read(&current->mm->mmap_sem);
+ goto bail;
+ }
+ offset = buf_page_start(buf) - vma->vm_start;
+ up_read(&current->mm->mmap_sem);
+ VERIFY(err, offset < (uintptr_t)map->size);
+ if (err)
+ goto bail;
+ }
+ pages[idx].addr = map->phys + offset;
+ pages[idx].size = num << PAGE_SHIFT;
+ }
+ rpra[i].buf.pv = buf;
+ }
+ PERF_END);
+
+ /* copy non-ion buffers */
+ PERF(ctx->fl->profile, ctx->fl->perf.copy,
+ rlen = copylen - metalen;
+ for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
+ int i = ctx->overps[oix]->raix;
+ struct fastrpc_mmap *map = ctx->maps[i];
+ size_t mlen;
+ uint64_t buf;
+ size_t len = lpra[i].buf.len;
+
+ if (!len)
+ continue;
+ if (map)
+ continue;
+ if (ctx->overps[oix]->offset == 0) {
+ rlen -= ALIGN(args, BALIGN) - args;
+ args = ALIGN(args, BALIGN);
+ }
+ mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
+ VERIFY(err, rlen >= mlen);
+ if (err)
+ goto bail;
+ rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
+ pages[list[i].pgidx].addr = ctx->buf->phys -
+ ctx->overps[oix]->offset +
+ (copylen - rlen);
+ pages[list[i].pgidx].addr =
+ buf_page_start(pages[list[i].pgidx].addr);
+ buf = rpra[i].buf.pv;
+ pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
+ if (i < inbufs) {
+ K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
+ lpra[i].buf.pv, len);
+ if (err)
+ goto bail;
+ }
+ args = args + mlen;
+ rlen -= mlen;
+ }
+ PERF_END);
+
+ PERF(ctx->fl->profile, ctx->fl->perf.flush,
+ for (oix = 0; oix < inbufs + outbufs; ++oix) {
+ int i = ctx->overps[oix]->raix;
+ struct fastrpc_mmap *map = ctx->maps[i];
+
+ if (map && map->uncached)
+ continue;
+ if (ctx->fl->sctx->smmu.coherent &&
+ !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+ continue;
+ if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+ continue;
+
+ if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
+ if (map && map->handle)
+ msm_ion_do_cache_op(ctx->fl->apps->client,
+ map->handle,
+ uint64_to_ptr(rpra[i].buf.pv),
+ rpra[i].buf.len,
+ ION_IOC_CLEAN_INV_CACHES);
+ else
+ dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
+ uint64_to_ptr(rpra[i].buf.pv
+ + rpra[i].buf.len));
+ }
+ }
+ PERF_END);
+
+ inh = inbufs + outbufs;
+ for (i = 0; rpra && i < REMOTE_SCALARS_INHANDLES(sc); i++) {
+ rpra[inh + i].buf.pv = ptr_to_uint64(ctx->lpra[inh + i].buf.pv);
+ rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
+ rpra[inh + i].h = ctx->lpra[inh + i].h;
+ }
+
+ bail:
+ return err;
+}
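+
+/*
+ * Resulting layout of ctx->buf after get_args() (sketch):
+ *
+ * [ remote_arg64_t rpra[] ][ smq_invoke_buf list[] ][ smq_phy_page pages[] ]
+ * [ BALIGN-aligned copies of the non-ion input/output buffers ........... ]
+ *
+ * The metadata length (metalen) is computed by walking the
+ * smq_invoke_buf_start()/smq_phy_page_start() helpers over a NULL
+ * base, so (size_t)&ipage[0] is the metadata size in bytes, and the
+ * copied argument data starts at ctx->buf->virt + metalen.
+ */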
+
+static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
+ remote_arg_t *upra)
+{
+ uint32_t sc = ctx->sc;
+ remote_arg64_t *rpra = ctx->rpra;
+ int i, inbufs, outbufs, outh, size;
+ int err = 0;
+
+ inbufs = REMOTE_SCALARS_INBUFS(sc);
+ outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+ for (i = inbufs; i < inbufs + outbufs; ++i) {
+ if (!ctx->maps[i]) {
+ K_COPY_TO_USER(err, kernel,
+ ctx->lpra[i].buf.pv,
+ uint64_to_ptr(rpra[i].buf.pv),
+ rpra[i].buf.len);
+ if (err)
+ goto bail;
+ } else {
+ fastrpc_mmap_free(ctx->maps[i]);
+ ctx->maps[i] = NULL;
+ }
+ }
+ size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
+ if (size) {
+ outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
+ K_COPY_TO_USER(err, kernel, &upra[outh], &rpra[outh], size);
+ if (err)
+ goto bail;
+ }
+ bail:
+ return err;
+}
+
+static void inv_args_pre(struct smq_invoke_ctx *ctx)
+{
+ int i, inbufs, outbufs;
+ uint32_t sc = ctx->sc;
+ remote_arg64_t *rpra = ctx->rpra;
+ uintptr_t end;
+
+ inbufs = REMOTE_SCALARS_INBUFS(sc);
+ outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+ for (i = inbufs; i < inbufs + outbufs; ++i) {
+ struct fastrpc_mmap *map = ctx->maps[i];
+
+ if (map && map->uncached)
+ continue;
+ if (!rpra[i].buf.len)
+ continue;
+ if (ctx->fl->sctx->smmu.coherent &&
+ !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+ continue;
+ if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+ continue;
+
+ if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
+ buf_page_start(rpra[i].buf.pv))
+ continue;
+ if (!IS_CACHE_ALIGNED((uintptr_t)
+ uint64_to_ptr(rpra[i].buf.pv))) {
+ if (map && map->handle)
+ msm_ion_do_cache_op(ctx->fl->apps->client,
+ map->handle,
+ uint64_to_ptr(rpra[i].buf.pv),
+ sizeof(uintptr_t),
+ ION_IOC_CLEAN_INV_CACHES);
+ else
+ dmac_flush_range(
+ uint64_to_ptr(rpra[i].buf.pv), (char *)
+ uint64_to_ptr(rpra[i].buf.pv + 1));
+ }
+
+ end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
+ rpra[i].buf.len);
+ if (!IS_CACHE_ALIGNED(end)) {
+ if (map && map->handle)
+ msm_ion_do_cache_op(ctx->fl->apps->client,
+ map->handle,
+ uint64_to_ptr(end),
+ sizeof(uintptr_t),
+ ION_IOC_CLEAN_INV_CACHES);
+ else
+ dmac_flush_range((char *)end,
+ (char *)end + 1);
+ }
+ }
+}
+
+static void inv_args(struct smq_invoke_ctx *ctx)
+{
+ int i, inbufs, outbufs;
+ uint32_t sc = ctx->sc;
+ remote_arg64_t *rpra = ctx->rpra;
+ int inv = 0;
+
+ inbufs = REMOTE_SCALARS_INBUFS(sc);
+ outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+ for (i = inbufs; i < inbufs + outbufs; ++i) {
+ struct fastrpc_mmap *map = ctx->maps[i];
+
+ if (map && map->uncached)
+ continue;
+ if (!rpra[i].buf.len)
+ continue;
+ if (ctx->fl->sctx->smmu.coherent &&
+ !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+ continue;
+ if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+ continue;
+
+ if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
+ buf_page_start(rpra[i].buf.pv)) {
+ inv = 1;
+ continue;
+ }
+ if (map && map->handle)
+ msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
+ (char *)uint64_to_ptr(rpra[i].buf.pv),
+ rpra[i].buf.len, ION_IOC_INV_CACHES);
+ else
+ dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
+ (char *)uint64_to_ptr(rpra[i].buf.pv
+ + rpra[i].buf.len));
+ }
+
+}
+
+static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
+ uint32_t kernel, uint32_t handle)
+{
+ struct smq_msg *msg = &ctx->msg;
+ struct fastrpc_file *fl = ctx->fl;
+ struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
+ int err = 0, len;
+
+ VERIFY(err, NULL != channel_ctx->chan);
+ if (err)
+ goto bail;
+ msg->pid = current->tgid;
+ msg->tid = current->pid;
+ if (kernel)
+ msg->pid = 0;
+ msg->invoke.header.ctx = ctx->ctxid | fl->pd;
+ msg->invoke.header.handle = handle;
+ msg->invoke.header.sc = ctx->sc;
+ msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
+ msg->invoke.page.size = buf_page_size(ctx->used);
+
+ if (fl->apps->glink) {
+ if (fl->ssrcount != channel_ctx->ssrcount) {
+ err = -ECONNRESET;
+ goto bail;
+ }
+ VERIFY(err, channel_ctx->link.port_state ==
+ FASTRPC_LINK_CONNECTED);
+ if (err)
+ goto bail;
+ err = glink_tx(channel_ctx->chan,
+ (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
+ GLINK_TX_REQ_INTENT);
+ } else {
+ spin_lock(&fl->apps->hlock);
+ len = smd_write((smd_channel_t *)
+ channel_ctx->chan,
+ msg, sizeof(*msg));
+ spin_unlock(&fl->apps->hlock);
+ VERIFY(err, len == sizeof(*msg));
+ }
+ bail:
+ return err;
+}
+
+static void fastrpc_smd_read_handler(int cid)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct smq_invoke_rsp rsp = {0};
+ int ret = 0, err = 0;
+ uint32_t index;
+
+ do {
+ ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
+ sizeof(rsp));
+ if (ret != sizeof(rsp))
+ break;
+ index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
+ VERIFY(err, index < FASTRPC_CTX_MAX);
+ if (err)
+ goto bail;
+
+ VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
+ if (err)
+ goto bail;
+
+ VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~1)) &&
+ me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
+ if (err)
+ goto bail;
+
+ context_notify_user(me->ctxtable[index], rsp.retval);
+ } while (ret == sizeof(rsp));
+
+bail:
+ if (err)
+ pr_err("adsprpc: invalid response or context\n");
+}
+
+static void smd_event_handler(void *priv, unsigned event)
+{
+ struct fastrpc_apps *me = &gfa;
+ int cid = (int)(uintptr_t)priv;
+
+ switch (event) {
+ case SMD_EVENT_OPEN:
+ complete(&me->channel[cid].workport);
+ break;
+ case SMD_EVENT_CLOSE:
+ fastrpc_notify_drivers(me, cid);
+ break;
+ case SMD_EVENT_DATA:
+ fastrpc_smd_read_handler(cid);
+ break;
+ }
+}
+
+static void fastrpc_init(struct fastrpc_apps *me)
+{
+ int i;
+ INIT_HLIST_HEAD(&me->drivers);
+ INIT_HLIST_HEAD(&me->maps);
+ spin_lock_init(&me->hlock);
+ spin_lock_init(&me->ctxlock);
+ mutex_init(&me->smd_mutex);
+ me->channel = &gcinfo[0];
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ init_completion(&me->channel[i].work);
+ init_completion(&me->channel[i].workport);
+ me->channel[i].sesscount = 0;
+ }
+}
+
+static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
+
+static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
+ uint32_t kernel,
+ struct fastrpc_ioctl_invoke_attrs *inv)
+{
+ struct smq_invoke_ctx *ctx = NULL;
+ struct fastrpc_ioctl_invoke *invoke = &inv->inv;
+ int cid = fl->cid;
+ int interrupted = 0;
+ int err = 0;
+ struct timespec invoket = {0};
+
+ if (fl->profile)
+ getnstimeofday(&invoket);
+
+ VERIFY(err, fl->sctx != NULL);
+ if (err)
+ goto bail;
+ VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ if (!kernel) {
+ VERIFY(err, 0 == context_restore_interrupted(fl, inv,
+ &ctx));
+ if (err)
+ goto bail;
+ if (fl->sctx->smmu.faults)
+ err = FASTRPC_ENOSUCH;
+ if (err)
+ goto bail;
+ if (ctx)
+ goto wait;
+ }
+
+ VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
+ if (err)
+ goto bail;
+
+ if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
+ PERF(fl->profile, fl->perf.getargs,
+ VERIFY(err, 0 == get_args(kernel, ctx));
+ PERF_END);
+ if (err)
+ goto bail;
+ }
+
+ PERF(fl->profile, fl->perf.invargs,
+ inv_args_pre(ctx);
+ if (mode == FASTRPC_MODE_SERIAL)
+ inv_args(ctx);
+ PERF_END);
+
+ PERF(fl->profile, fl->perf.link,
+ VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
+ PERF_END);
+
+ if (err)
+ goto bail;
+
+ PERF(fl->profile, fl->perf.invargs,
+ if (mode == FASTRPC_MODE_PARALLEL)
+ inv_args(ctx);
+ PERF_END);
+ wait:
+ if (kernel)
+ wait_for_completion(&ctx->work);
+ else {
+ interrupted = wait_for_completion_interruptible(&ctx->work);
+ VERIFY(err, 0 == (err = interrupted));
+ if (err)
+ goto bail;
+ }
+ VERIFY(err, 0 == (err = ctx->retval));
+ if (err)
+ goto bail;
+
+ PERF(fl->profile, fl->perf.putargs,
+ VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
+ PERF_END);
+ if (err)
+ goto bail;
+ bail:
+ if (ctx && interrupted == -ERESTARTSYS)
+ context_save_interrupted(ctx);
+ else if (ctx)
+ context_free(ctx);
+ if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
+ err = ECONNRESET;
+
+ if (fl->profile && !interrupted) {
+ if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
+ fl->perf.invoke += getnstimediff(&invoket);
+ if (!(invoke->handle >= 0 &&
+ invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
+ fl->perf.count++;
+ }
+ return err;
+}
+
+static int fastrpc_channel_open(struct fastrpc_file *fl);
+static int fastrpc_init_process(struct fastrpc_file *fl,
+ struct fastrpc_ioctl_init_attrs *uproc)
+{
+ int err = 0;
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct fastrpc_ioctl_init *init = &uproc->init;
+ struct smq_phy_page pages[1];
+ struct fastrpc_mmap *file = NULL, *mem = NULL;
+ char *proc_name = NULL;
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[1] = {gcinfo[0].heap_vmid};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+ int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
+ if (err)
+ goto bail;
+ if (init->flags == FASTRPC_INIT_ATTACH) {
+ remote_arg_t ra[1];
+ int tgid = current->tgid;
+ ra[0].buf.pv = (void *)&tgid;
+ ra[0].buf.len = sizeof(tgid);
+ ioctl.inv.handle = 1;
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
+ ioctl.inv.pra = ra;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
+ fl->pd = 0;
+ VERIFY(err, !(err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ if (err)
+ goto bail;
+ } else if (init->flags == FASTRPC_INIT_CREATE) {
+ remote_arg_t ra[6];
+ int fds[6];
+ int mflags = 0;
+ struct {
+ int pgid;
+ unsigned int namelen;
+ unsigned int filelen;
+ unsigned int pageslen;
+ int attrs;
+ int siglen;
+ } inbuf;
+ inbuf.pgid = current->tgid;
+ inbuf.namelen = strlen(current->comm) + 1;
+ inbuf.filelen = init->filelen;
+ fl->pd = 1;
+
+ if (!access_ok(0, (void const __user *)init->file,
+ init->filelen))
+ goto bail;
+ if (init->filelen) {
+ VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
+ init->file, init->filelen, mflags, &file));
+ if (err)
+ goto bail;
+ }
+ if (!access_ok(1, (void const __user *)init->mem,
+ init->memlen))
+ goto bail;
+ inbuf.pageslen = 1;
+ VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
+ init->mem, init->memlen, mflags, &mem));
+ if (err)
+ goto bail;
+ inbuf.pageslen = 1;
+ ra[0].buf.pv = (void *)&inbuf;
+ ra[0].buf.len = sizeof(inbuf);
+ fds[0] = 0;
+
+ ra[1].buf.pv = (void *)current->comm;
+ ra[1].buf.len = inbuf.namelen;
+ fds[1] = 0;
+
+ ra[2].buf.pv = (void *)init->file;
+ ra[2].buf.len = inbuf.filelen;
+ fds[2] = init->filefd;
+
+ pages[0].addr = mem->phys;
+ pages[0].size = mem->size;
+ ra[3].buf.pv = (void *)pages;
+ ra[3].buf.len = 1 * sizeof(*pages);
+ fds[3] = 0;
+
+ inbuf.attrs = uproc->attrs;
+ ra[4].buf.pv = (void *)&(inbuf.attrs);
+ ra[4].buf.len = sizeof(inbuf.attrs);
+ fds[4] = 0;
+
+ inbuf.siglen = uproc->siglen;
+ ra[5].buf.pv = (void *)&(inbuf.siglen);
+ ra[5].buf.len = sizeof(inbuf.siglen);
+ fds[5] = 0;
+
+ ioctl.inv.handle = 1;
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
+ if (uproc->attrs)
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
+ ioctl.inv.pra = ra;
+ ioctl.fds = fds;
+ ioctl.attrs = NULL;
+ VERIFY(err, !(err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ if (err)
+ goto bail;
+ } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
+ remote_arg_t ra[3];
+ uint64_t phys = 0;
+ size_t size = 0;
+ int fds[3];
+ struct {
+ int pgid;
+ unsigned int namelen;
+ unsigned int pageslen;
+ } inbuf;
+
+ if (!init->filelen)
+ goto bail;
+ VERIFY(err, proc_name = kzalloc(init->filelen, GFP_KERNEL));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == copy_from_user(proc_name,
+ (unsigned char *)init->file, init->filelen));
+ if (err)
+ goto bail;
+ inbuf.pgid = current->tgid;
+ inbuf.namelen = init->filelen;
+ inbuf.pageslen = 0;
+ if (!me->staticpd_flags) {
+ inbuf.pageslen = 1;
+ VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
+ init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
+ &mem));
+ if (err)
+ goto bail;
+ phys = mem->phys;
+ size = mem->size;
+ VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
+ srcVM, 1, destVM, destVMperm, 1));
+ if (err) {
+ pr_err("ADSPRPC: hyp_assign_phys fail err %d",
+ err);
+ pr_err("map->phys %llx, map->size %d\n",
+ phys, (int)size);
+ goto bail;
+ }
+ me->staticpd_flags = 1;
+ }
+
+ ra[0].buf.pv = (void *)&inbuf;
+ ra[0].buf.len = sizeof(inbuf);
+ fds[0] = 0;
+
+ ra[1].buf.pv = (void *)proc_name;
+ ra[1].buf.len = inbuf.namelen;
+ fds[1] = 0;
+
+ pages[0].addr = phys;
+ pages[0].size = size;
+
+ ra[2].buf.pv = (void *)pages;
+ ra[2].buf.len = sizeof(*pages);
+ fds[2] = 0;
+ ioctl.inv.handle = 1;
+
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
+ ioctl.inv.pra = ra;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
+ VERIFY(err, !(err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ if (err)
+ goto bail;
+ } else {
+ err = -ENOTTY;
+ }
+bail:
+ kfree(proc_name);
+ if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
+ me->staticpd_flags = 0;
+ if (mem && err) {
+ if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+ hyp_assign_phys(mem->phys, (uint64_t)mem->size,
+ destVM, 1, srcVM, hlosVMperm, 1);
+ fastrpc_mmap_free(mem);
+ }
+ if (file)
+ fastrpc_mmap_free(file);
+ return err;
+}
+
+static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
+{
+ int err = 0;
+ struct fastrpc_ioctl_invoke_attrs ioctl;
+ remote_arg_t ra[1];
+ int tgid = 0;
+
+ VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
+ if (err)
+ goto bail;
+ tgid = fl->tgid;
+ ra[0].buf.pv = (void *)&tgid;
+ ra[0].buf.len = sizeof(tgid);
+ ioctl.inv.handle = 1;
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
+ ioctl.inv.pra = ra;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+bail:
+ return err;
+}
+
+static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
+ struct fastrpc_mmap *map)
+{
+ struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct smq_phy_page page;
+ int num = 1;
+ remote_arg_t ra[3];
+ int err = 0;
+ struct {
+ int pid;
+ uint32_t flags;
+ uintptr_t vaddrin;
+ int num;
+ } inargs;
+
+ struct {
+ uintptr_t vaddrout;
+ } routargs;
+ inargs.pid = current->tgid;
+ inargs.vaddrin = (uintptr_t)map->va;
+ inargs.flags = flags;
+ inargs.num = fl->apps->compat ? num * sizeof(page) : num;
+ ra[0].buf.pv = (void *)&inargs;
+ ra[0].buf.len = sizeof(inargs);
+ page.addr = map->phys;
+ page.size = map->size;
+ ra[1].buf.pv = (void *)&page;
+ ra[1].buf.len = num * sizeof(page);
+
+ ra[2].buf.pv = (void *)&routargs;
+ ra[2].buf.len = sizeof(routargs);
+
+ ioctl.inv.handle = 1;
+ if (fl->apps->compat)
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
+ else
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
+ ioctl.inv.pra = ra;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ map->raddr = (uintptr_t)routargs.vaddrout;
+ if (err)
+ goto bail;
+ if (flags == ADSP_MMAP_HEAP_ADDR) {
+ struct scm_desc desc = {0};
+
+ desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
+ desc.args[1] = map->phys;
+ desc.args[2] = map->size;
+ desc.arginfo = SCM_ARGS(3);
+ err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+ TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
+ } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[1] = {gcinfo[0].heap_vmid};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+ srcVM, 1, destVM, destVMperm, 1));
+ if (err)
+ goto bail;
+ }
+bail:
+ return err;
+}
+
+static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
+ struct fastrpc_mmap *map)
+{
+ int err = 0;
+ int srcVM[1] = {gcinfo[0].heap_vmid};
+ int destVM[1] = {VMID_HLOS};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+ struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct scm_desc desc = {0};
+ remote_arg_t ra[1];
+ int err = 0;
+ struct {
+ uint8_t skey;
+ } routargs;
+
+ ra[0].buf.pv = (void *)&routargs;
+ ra[0].buf.len = sizeof(routargs);
+
+ ioctl.inv.handle = 1;
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
+ ioctl.inv.pra = ra;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
+ if (fl == NULL)
+ goto bail;
+
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ if (err)
+ goto bail;
+ desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
+ desc.args[1] = map->phys;
+ desc.args[2] = map->size;
+ desc.args[3] = routargs.skey;
+ desc.arginfo = SCM_ARGS(4);
+ err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+ TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
+ } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+ srcVM, 1, destVM, destVMperm, 1));
+ if (err)
+ goto bail;
+ }
+
+bail:
+ return err;
+}
+
+static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
+ struct fastrpc_mmap *map)
+{
+ struct fastrpc_ioctl_invoke_attrs ioctl;
+ remote_arg_t ra[1];
+ int err = 0;
+ struct {
+ int pid;
+ uintptr_t vaddrout;
+ size_t size;
+ } inargs;
+
+ inargs.pid = current->tgid;
+ inargs.size = map->size;
+ inargs.vaddrout = map->raddr;
+ ra[0].buf.pv = (void *)&inargs;
+ ra[0].buf.len = sizeof(inargs);
+
+ ioctl.inv.handle = 1;
+ if (fl->apps->compat)
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
+ else
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
+ ioctl.inv.pra = ra;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ if (err)
+ goto bail;
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
+ if (err)
+ goto bail;
+ }
+bail:
+ return err;
+}
+
+static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
+{
+ struct fastrpc_mmap *match = NULL, *map = NULL;
+ struct hlist_node *n = NULL;
+ int err = 0, ret = 0;
+ struct fastrpc_apps *me = &gfa;
+ struct ramdump_segment *ramdump_segments_rh = NULL;
+
+ do {
+ match = NULL;
+ spin_lock(&me->hlock);
+ hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+ match = map;
+ hlist_del_init(&map->hn);
+ break;
+ }
+ spin_unlock(&me->hlock);
+
+ if (match) {
+ VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
+ if (err)
+ goto bail;
+ if (me->channel[0].ramdumpenabled) {
+ ramdump_segments_rh = kcalloc(1,
+ sizeof(struct ramdump_segment), GFP_KERNEL);
+ if (ramdump_segments_rh) {
+ ramdump_segments_rh->address =
+ match->phys;
+ ramdump_segments_rh->size = match->size;
+ ret = do_elf_ramdump(
+ me->channel[0].remoteheap_ramdump_dev,
+ ramdump_segments_rh, 1);
+ if (ret < 0)
+ pr_err("ADSPRPC: unable to dump heap");
+ kfree(ramdump_segments_rh);
+ }
+ }
+ fastrpc_mmap_free(match);
+ }
+ } while (match);
+bail:
+ if (err && match)
+ fastrpc_mmap_add(match);
+ return err;
+}
+
+static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
+ size_t len, struct fastrpc_mmap **ppmap);
+
+static void fastrpc_mmap_add(struct fastrpc_mmap *map);
+
+static int fastrpc_internal_munmap(struct fastrpc_file *fl,
+ struct fastrpc_ioctl_munmap *ud)
+{
+ int err = 0;
+ struct fastrpc_mmap *map = NULL;
+
+ mutex_lock(&fl->map_mutex);
+ VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
+ if (err)
+ goto bail;
+ VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
+ if (err)
+ goto bail;
+ fastrpc_mmap_free(map);
+bail:
+ if (err && map)
+ fastrpc_mmap_add(map);
+ mutex_unlock(&fl->map_mutex);
+ return err;
+}
+
+static int fastrpc_internal_mmap(struct fastrpc_file *fl,
+ struct fastrpc_ioctl_mmap *ud)
+{
+
+ struct fastrpc_mmap *map = NULL;
+ int err = 0;
+
+ mutex_lock(&fl->map_mutex);
+ if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
+			 ud->flags, &map)) {
+ mutex_unlock(&fl->map_mutex);
+ return 0;
+ }
+ VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
+ (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
+ if (err)
+ goto bail;
+ ud->vaddrout = map->raddr;
+bail:
+ if (err && map)
+ fastrpc_mmap_free(map);
+ mutex_unlock(&fl->map_mutex);
+ return err;
+}
+
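+/*
+ * kref release callback for a channel. Invoked through kref_put_mutex() with
+ * smd_mutex already held, which is why the mutex is only unlocked here.
+ */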
+static void fastrpc_channel_close(struct kref *kref)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_channel_ctx *ctx;
+ int cid;
+
+ ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
+ cid = ctx - &gcinfo[0];
+ if (!me->glink)
+ smd_close(ctx->chan);
+ else
+ fastrpc_glink_close(ctx->chan, cid);
+
+ ctx->chan = NULL;
+ mutex_unlock(&me->smd_mutex);
+ pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
+ MAJOR(me->dev_no), cid);
+}
+
+static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
+
+static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
+ int secure, struct fastrpc_session_ctx **session)
+{
+ struct fastrpc_apps *me = &gfa;
+ int idx = 0, err = 0;
+
+ if (chan->sesscount) {
+ for (idx = 0; idx < chan->sesscount; ++idx) {
+ if (!chan->session[idx].used &&
+ chan->session[idx].smmu.secure == secure) {
+ chan->session[idx].used = 1;
+ break;
+ }
+ }
+ VERIFY(err, idx < chan->sesscount);
+ if (err)
+ goto bail;
+ chan->session[idx].smmu.faults = 0;
+ } else {
+ VERIFY(err, me->dev != NULL);
+ if (err)
+ goto bail;
+ chan->session[0].dev = me->dev;
+ chan->session[0].smmu.dev = me->dev;
+ }
+
+ *session = &chan->session[idx];
+bail:
+ return err;
+}
+
+static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
+ size_t size)
+{
+ if (glink_queue_rx_intent(h, NULL, size))
+ return false;
+ return true;
+}
+
+static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr)
+{
+}
+
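+/*
+ * RX path: the remote side echoes the invoke context in rsp->ctx. Bits [4..]
+ * carry an index into the global context table; the index, context id (low
+ * bit ignored) and magic are all validated before the waiting caller is
+ * signalled, so a stale or corrupt response cannot reference freed memory.
+ */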
+static void fastrpc_glink_notify_rx(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr, size_t size)
+{
+ struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
+ struct fastrpc_apps *me = &gfa;
+ uint32_t index;
+ int err = 0;
+
+ VERIFY(err, (rsp && size >= sizeof(*rsp)));
+ if (err)
+ goto bail;
+
+ index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
+ VERIFY(err, index < FASTRPC_CTX_MAX);
+ if (err)
+ goto bail;
+
+ VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
+ if (err)
+ goto bail;
+
+ VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~1)) &&
+ me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
+ if (err)
+ goto bail;
+
+ context_notify_user(me->ctxtable[index], rsp->retval);
+bail:
+ if (err)
+ pr_err("adsprpc: invalid response or context\n");
+ glink_rx_done(handle, ptr, true);
+}
+
+static void fastrpc_glink_notify_state(void *handle, const void *priv,
+ unsigned int event)
+{
+ struct fastrpc_apps *me = &gfa;
+ int cid = (int)(uintptr_t)priv;
+ struct fastrpc_glink_info *link;
+
+ if (cid < 0 || cid >= NUM_CHANNELS)
+ return;
+ link = &me->channel[cid].link;
+ switch (event) {
+ case GLINK_CONNECTED:
+ link->port_state = FASTRPC_LINK_CONNECTED;
+ complete(&me->channel[cid].workport);
+ break;
+ case GLINK_LOCAL_DISCONNECTED:
+ link->port_state = FASTRPC_LINK_DISCONNECTED;
+ break;
+ case GLINK_REMOTE_DISCONNECTED:
+ if (me->channel[cid].chan) {
+ fastrpc_glink_close(me->channel[cid].chan, cid);
+			me->channel[cid].chan = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
+ struct fastrpc_session_ctx **session)
+{
+ int err = 0;
+ struct fastrpc_apps *me = &gfa;
+
+ mutex_lock(&me->smd_mutex);
+ if (!*session)
+ err = fastrpc_session_alloc_locked(chan, secure, session);
+ mutex_unlock(&me->smd_mutex);
+ return err;
+}
+
+static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
+ struct fastrpc_session_ctx *session)
+{
+ struct fastrpc_apps *me = &gfa;
+
+ mutex_lock(&me->smd_mutex);
+ session->used = 0;
+ mutex_unlock(&me->smd_mutex);
+}
+
+static int fastrpc_file_free(struct fastrpc_file *fl)
+{
+ struct hlist_node *n;
+ struct fastrpc_mmap *map = NULL;
+ int cid;
+
+ if (!fl)
+ return 0;
+ cid = fl->cid;
+
+ (void)fastrpc_release_current_dsp_process(fl);
+
+ spin_lock(&fl->apps->hlock);
+ hlist_del_init(&fl->hn);
+ spin_unlock(&fl->apps->hlock);
+
+	if (!fl->sctx)
+		goto bail;
+
+ spin_lock(&fl->hlock);
+ fl->file_close = 1;
+ spin_unlock(&fl->hlock);
+ fastrpc_context_list_dtor(fl);
+ fastrpc_buf_list_free(fl);
+ hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+ fastrpc_mmap_free(map);
+ }
+ if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
+ kref_put_mutex(&fl->apps->channel[cid].kref,
+ fastrpc_channel_close, &fl->apps->smd_mutex);
+ if (fl->sctx)
+ fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
+ if (fl->secsctx)
+ fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
+bail:
+ mutex_destroy(&fl->map_mutex);
+ kfree(fl);
+ return 0;
+}
+
+static int fastrpc_device_release(struct inode *inode, struct file *file)
+{
+ struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
+
+ if (fl) {
+ if (fl->debugfs_file != NULL)
+ debugfs_remove(fl->debugfs_file);
+
+ fastrpc_file_free(fl);
+ file->private_data = NULL;
+ }
+ return 0;
+}
+
+static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
+ void *priv)
+{
+ struct fastrpc_apps *me = &gfa;
+ int cid = (int)((uintptr_t)priv);
+ struct fastrpc_glink_info *link;
+
+ if (cid < 0 || cid >= NUM_CHANNELS)
+ return;
+
+ link = &me->channel[cid].link;
+ switch (cb_info->link_state) {
+ case GLINK_LINK_STATE_UP:
+ link->link_state = FASTRPC_LINK_STATE_UP;
+ complete(&me->channel[cid].work);
+ break;
+ case GLINK_LINK_STATE_DOWN:
+ link->link_state = FASTRPC_LINK_STATE_DOWN;
+ break;
+ default:
+ pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
+ break;
+ }
+}
+
+static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
+{
+ int err = 0;
+ struct fastrpc_glink_info *link;
+
+ VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+ if (err)
+ goto bail;
+
+ link = &me->channel[cid].link;
+ if (link->link_notify_handle != NULL)
+ goto bail;
+
+ link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
+ link->link_notify_handle = glink_register_link_state_cb(
+ &link->link_info,
+ (void *)((uintptr_t)cid));
+ VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
+ if (err) {
+ link->link_notify_handle = NULL;
+ goto bail;
+ }
+ VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
+ RPC_TIMEOUT));
+bail:
+ return err;
+}
+
+static void fastrpc_glink_close(void *chan, int cid)
+{
+ int err = 0;
+ struct fastrpc_glink_info *link;
+
+ VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+ if (err)
+ return;
+ link = &gfa.channel[cid].link;
+
+ if (link->port_state == FASTRPC_LINK_CONNECTED) {
+ link->port_state = FASTRPC_LINK_DISCONNECTING;
+ glink_close(chan);
+ }
+}
+
+static int fastrpc_glink_open(int cid)
+{
+ int err = 0;
+ void *handle = NULL;
+ struct fastrpc_apps *me = &gfa;
+ struct glink_open_config *cfg;
+ struct fastrpc_glink_info *link;
+
+ VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+ if (err)
+ goto bail;
+ link = &me->channel[cid].link;
+ cfg = &me->channel[cid].link.cfg;
+ VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
+ if (err)
+ goto bail;
+
+ VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
+ if (err)
+ goto bail;
+
+ link->port_state = FASTRPC_LINK_CONNECTING;
+ cfg->priv = (void *)(uintptr_t)cid;
+ cfg->edge = gcinfo[cid].link.link_info.edge;
+ cfg->transport = gcinfo[cid].link.link_info.transport;
+ cfg->name = FASTRPC_GLINK_GUID;
+ cfg->notify_rx = fastrpc_glink_notify_rx;
+ cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
+ cfg->notify_state = fastrpc_glink_notify_state;
+ cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
+ handle = glink_open(cfg);
+ VERIFY(err, !IS_ERR_OR_NULL(handle));
+ if (err)
+ goto bail;
+ me->channel[cid].chan = handle;
+bail:
+ return err;
+}
+
+static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *position)
+{
+ struct fastrpc_file *fl = filp->private_data;
+ struct hlist_node *n;
+ struct fastrpc_buf *buf = NULL;
+ struct fastrpc_mmap *map = NULL;
+ struct smq_invoke_ctx *ictx = NULL;
+ struct fastrpc_channel_ctx *chan;
+ struct fastrpc_session_ctx *sess;
+ unsigned int len = 0;
+ int i, j, ret = 0;
+ char *fileinfo = NULL;
+
+ fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
+ if (!fileinfo)
+ goto bail;
+ if (fl == NULL) {
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ chan = &gcinfo[i];
+ len += scnprintf(fileinfo + len,
+ DEBUGFS_SIZE - len, "%s\n\n",
+ chan->name);
+ len += scnprintf(fileinfo + len,
+ DEBUGFS_SIZE - len, "%s %d\n",
+ "sesscount:", chan->sesscount);
+ for (j = 0; j < chan->sesscount; j++) {
+ sess = &chan->session[j];
+ len += scnprintf(fileinfo + len,
+ DEBUGFS_SIZE - len,
+ "%s%d\n\n", "SESSION", j);
+ len += scnprintf(fileinfo + len,
+ DEBUGFS_SIZE - len,
+ "%s %d\n", "sid:",
+ sess->smmu.cb);
+ len += scnprintf(fileinfo + len,
+ DEBUGFS_SIZE - len,
+ "%s %d\n", "SECURE:",
+ sess->smmu.secure);
+ }
+ }
+ } else {
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %d\n\n",
+ "PROCESS_ID:", fl->tgid);
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %d\n\n",
+ "CHANNEL_ID:", fl->cid);
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %d\n\n",
+ "SSRCOUNT:", fl->ssrcount);
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s\n",
+ "LIST OF BUFS:");
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %pK %s %pK %s %llx\n", "buf:",
+ buf, "buf->virt:", buf->virt,
+ "buf->phys:", buf->phys);
+ }
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "\n%s\n",
+ "LIST OF MAPS:");
+ hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %pK %s %lx %s %llx\n",
+ "map:", map,
+ "map->va:", map->va,
+ "map->phys:", map->phys);
+ }
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "\n%s\n",
+ "LIST OF PENDING SMQCONTEXTS:");
+ hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %pK %s %u %s %u %s %u\n",
+ "smqcontext:", ictx,
+ "sc:", ictx->sc,
+ "tid:", ictx->pid,
+				"handle:", ictx->rpra->h);
+ }
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "\n%s\n",
+ "LIST OF INTERRUPTED SMQCONTEXTS:");
+ hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %pK %s %u %s %u %s %u\n",
+ "smqcontext:", ictx,
+ "sc:", ictx->sc,
+ "tid:", ictx->pid,
+				"handle:", ictx->rpra->h);
+ }
+ spin_unlock(&fl->hlock);
+ }
+ if (len > DEBUGFS_SIZE)
+ len = DEBUGFS_SIZE;
+ ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
+ kfree(fileinfo);
+bail:
+ return ret;
+}
+
+static const struct file_operations debugfs_fops = {
+ .open = fastrpc_debugfs_open,
+ .read = fastrpc_debugfs_read,
+};
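+
+/*
+ * Open (or re-open after an SSR) the transport for the file's channel, either
+ * a G-Link port or an SMD edge. The channel is reference counted under
+ * smd_mutex, so concurrent openers share a single connection.
+ */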
+static int fastrpc_channel_open(struct fastrpc_file *fl)
+{
+ struct fastrpc_apps *me = &gfa;
+ int cid, err = 0;
+
+ mutex_lock(&me->smd_mutex);
+
+ VERIFY(err, fl && fl->sctx);
+ if (err)
+ goto bail;
+ cid = fl->cid;
+ VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ if (me->channel[cid].ssrcount !=
+ me->channel[cid].prevssrcount) {
+ if (!me->channel[cid].issubsystemup) {
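+			/* subsystem is down: fail the open until it restarts */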
+ VERIFY(err, 0);
+ if (err)
+ goto bail;
+ }
+ }
+ fl->ssrcount = me->channel[cid].ssrcount;
+ if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
+ (me->channel[cid].chan == NULL)) {
+ if (me->glink) {
+ VERIFY(err, 0 == fastrpc_glink_register(cid, me));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == fastrpc_glink_open(cid));
+ } else {
+ VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
+ gcinfo[cid].channel,
+ (smd_channel_t **)&me->channel[cid].chan,
+ (void *)(uintptr_t)cid,
+ smd_event_handler));
+ }
+ if (err)
+ goto bail;
+
+ VERIFY(err,
+ wait_for_completion_timeout(&me->channel[cid].workport,
+ RPC_TIMEOUT));
+ if (err) {
+ me->channel[cid].chan = NULL;
+ goto bail;
+ }
+ kref_init(&me->channel[cid].kref);
+ pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
+ MAJOR(me->dev_no), cid);
+
+ if (me->glink) {
+ err = glink_queue_rx_intent(me->channel[cid].chan,
+ NULL, 16);
+ err |= glink_queue_rx_intent(me->channel[cid].chan,
+ NULL, 64);
+ if (err)
+ pr_warn("adsprpc: intent fail for %d err %d\n",
+ cid, err);
+ }
+ if (cid == 0 && me->channel[cid].ssrcount !=
+ me->channel[cid].prevssrcount) {
+ if (fastrpc_mmap_remove_ssr(fl))
+ pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
+ me->channel[cid].prevssrcount =
+ me->channel[cid].ssrcount;
+ }
+ }
+
+bail:
+ mutex_unlock(&me->smd_mutex);
+ return err;
+}
+
+static int fastrpc_device_open(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ struct dentry *debugfs_file;
+ struct fastrpc_file *fl = NULL;
+ struct fastrpc_apps *me = &gfa;
+
+ VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
+ if (err)
+ return err;
+ debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
+ fl, &debugfs_fops);
+ context_list_ctor(&fl->clst);
+ spin_lock_init(&fl->hlock);
+ INIT_HLIST_HEAD(&fl->maps);
+ INIT_HLIST_HEAD(&fl->bufs);
+ INIT_HLIST_NODE(&fl->hn);
+ fl->tgid = current->tgid;
+ fl->apps = me;
+ fl->mode = FASTRPC_MODE_SERIAL;
+ fl->cid = -1;
+ if (debugfs_file != NULL)
+ fl->debugfs_file = debugfs_file;
+ memset(&fl->perf, 0, sizeof(fl->perf));
+ filp->private_data = fl;
+ mutex_init(&fl->map_mutex);
+ spin_lock(&me->hlock);
+ hlist_add_head(&fl->hn, &me->drivers);
+ spin_unlock(&me->hlock);
+ return 0;
+}
+
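+/*
+ * The first FASTRPC_IOCTL_GETINFO call binds the file to a channel id and
+ * allocates a non-secure SMMU session for it; *info is updated to report
+ * whether the session has an SMMU enabled.
+ */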
+static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
+{
+ int err = 0;
+ uint32_t cid;
+
+ VERIFY(err, fl != NULL);
+ if (err)
+ goto bail;
+ if (fl->cid == -1) {
+ cid = *info;
+ VERIFY(err, cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ fl->cid = cid;
+ fl->ssrcount = fl->apps->channel[cid].ssrcount;
+ VERIFY(err, !fastrpc_session_alloc_locked(
+ &fl->apps->channel[cid], 0, &fl->sctx));
+ if (err)
+ goto bail;
+ }
+ if (fl->sctx)
+ *info = (fl->sctx->smmu.enabled ? 1 : 0);
+bail:
+ return err;
+}
+
+static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ union {
+ struct fastrpc_ioctl_invoke_attrs inv;
+ struct fastrpc_ioctl_mmap mmap;
+ struct fastrpc_ioctl_munmap munmap;
+ struct fastrpc_ioctl_init_attrs init;
+ struct fastrpc_ioctl_perf perf;
+ } p;
+	void *param = (void *)ioctl_param;
+ struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
+ int size = 0, err = 0;
+ uint32_t info;
+
+ p.inv.fds = NULL;
+ p.inv.attrs = NULL;
+ spin_lock(&fl->hlock);
+ if (fl->file_close == 1) {
+		err = -EBADF;
+		pr_warn("ADSPRPC: fastrpc_device_release in progress, not sending new requests to DSP\n");
+ spin_unlock(&fl->hlock);
+ goto bail;
+ }
+ spin_unlock(&fl->hlock);
+
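+	/*
+	 * The three INVOKE variants share one handler: `size` records which
+	 * layout to copy from userspace, and the cases below deliberately
+	 * fall through to the common copy and invoke.
+	 */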
+ switch (ioctl_num) {
+ case FASTRPC_IOCTL_INVOKE:
+		size = sizeof(struct fastrpc_ioctl_invoke);
+		/* fall through */
+	case FASTRPC_IOCTL_INVOKE_FD:
+ if (!size)
+ size = sizeof(struct fastrpc_ioctl_invoke_fd);
+ /* fall through */
+ case FASTRPC_IOCTL_INVOKE_ATTRS:
+ if (!size)
+ size = sizeof(struct fastrpc_ioctl_invoke_attrs);
+ K_COPY_FROM_USER(err, 0, &p.inv, param, size);
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
+ 0, &p.inv)));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_MMAP:
+ K_COPY_FROM_USER(err, 0, &p.mmap, param,
+ sizeof(p.mmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+ if (err)
+ goto bail;
+ K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_MUNMAP:
+ K_COPY_FROM_USER(err, 0, &p.munmap, param,
+ sizeof(p.munmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
+ &p.munmap)));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_MMAP_64:
+ K_COPY_FROM_USER(err, 0, &p.mmap, param,
+ sizeof(p.mmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+ if (err)
+ goto bail;
+ K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_MUNMAP_64:
+ K_COPY_FROM_USER(err, 0, &p.munmap, param,
+ sizeof(p.munmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
+ &p.munmap)));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_SETMODE:
+ switch ((uint32_t)ioctl_param) {
+ case FASTRPC_MODE_PARALLEL:
+ case FASTRPC_MODE_SERIAL:
+ fl->mode = (uint32_t)ioctl_param;
+ break;
+ case FASTRPC_MODE_PROFILE:
+ fl->profile = (uint32_t)ioctl_param;
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+ break;
+ case FASTRPC_IOCTL_GETPERF:
+ K_COPY_FROM_USER(err, 0, &p.perf,
+ param, sizeof(p.perf));
+ if (err)
+ goto bail;
+ p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
+ if (p.perf.keys) {
+ char *keys = PERF_KEYS;
+
+ K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
+ keys, strlen(keys)+1);
+ if (err)
+ goto bail;
+ }
+ if (p.perf.data) {
+ K_COPY_TO_USER(err, 0, (void *)p.perf.data,
+ &fl->perf, sizeof(fl->perf));
+ }
+ K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_GETINFO:
+ K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
+ if (err)
+ goto bail;
+ K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_INIT:
+ p.init.attrs = 0;
+ p.init.siglen = 0;
+ size = sizeof(struct fastrpc_ioctl_init);
+ /* fall through */
+ case FASTRPC_IOCTL_INIT_ATTRS:
+ if (!size)
+ size = sizeof(struct fastrpc_ioctl_init_attrs);
+ K_COPY_FROM_USER(err, 0, &p.init, param, size);
+ if (err)
+ goto bail;
+ VERIFY(err, p.init.init.filelen >= 0 &&
+ p.init.init.memlen >= 0);
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
+ if (err)
+ goto bail;
+ break;
+
+ default:
+ err = -ENOTTY;
+		pr_info("bad ioctl: %u\n", ioctl_num);
+ break;
+ }
+bail:
+ return err;
+}
+
+static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
+ unsigned long code,
+ void *data)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_channel_ctx *ctx;
+ struct notif_data *notifdata = data;
+ int cid;
+
+ ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
+ cid = ctx - &me->channel[0];
+ if (code == SUBSYS_BEFORE_SHUTDOWN) {
+ mutex_lock(&me->smd_mutex);
+ ctx->ssrcount++;
+ ctx->issubsystemup = 0;
+ if (ctx->chan) {
+ if (me->glink)
+ fastrpc_glink_close(ctx->chan, cid);
+ else
+ smd_close(ctx->chan);
+
+			ctx->chan = NULL;
+ pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
+ gcinfo[cid].name, MAJOR(me->dev_no), cid);
+ }
+ mutex_unlock(&me->smd_mutex);
+ if (cid == 0)
+ me->staticpd_flags = 0;
+ fastrpc_notify_drivers(me, cid);
+ } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
+ if (me->channel[0].remoteheap_ramdump_dev &&
+ notifdata->enable_ramdump) {
+ me->channel[0].ramdumpenabled = 1;
+ }
+ } else if (code == SUBSYS_AFTER_POWERUP) {
+ ctx->issubsystemup = 1;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int fastrpc_smmu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct fastrpc_session_ctx *sess = (struct fastrpc_session_ctx *)token;
+ int err = 0;
+
+ VERIFY(err, sess != NULL);
+ if (err)
+ return err;
+ sess->smmu.faults++;
+	dev_err(dev, "ADSPRPC context fault: iova=0x%08lx, cb = %d, faults=%d\n",
+		iova, sess->smmu.cb, sess->smmu.faults);
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .open = fastrpc_device_open,
+ .release = fastrpc_device_release,
+ .unlocked_ioctl = fastrpc_device_ioctl,
+ .compat_ioctl = compat_fastrpc_device_ioctl,
+};
+
+static const struct of_device_id fastrpc_match_table[] = {
+ { .compatible = "qcom,msm-fastrpc-adsp", },
+ { .compatible = "qcom,msm-fastrpc-compute-cb", },
+ { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
+ { .compatible = "qcom,msm-adsprpc-mem-region", },
+ {}
+};
+
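+/*
+ * Probe one compute context-bank device. Each context bank contributes a
+ * session with its own ARM IOMMU mapping (0x70000000 bytes, based at
+ * 0x80000000, or at 0x60000000 for secure context banks) and a fault handler
+ * that only counts and logs faults.
+ */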
+static int fastrpc_cb_probe(struct device *dev)
+{
+ struct fastrpc_channel_ctx *chan;
+ struct fastrpc_session_ctx *sess;
+ struct of_phandle_args iommuspec;
+ const char *name;
+ unsigned int start = 0x80000000;
+ int err = 0, i;
+ int secure_vmid = VMID_CP_PIXEL;
+
+ VERIFY(err, NULL != (name = of_get_property(dev->of_node,
+ "label", NULL)));
+ if (err)
+ goto bail;
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!gcinfo[i].name)
+ continue;
+ if (!strcmp(name, gcinfo[i].name))
+ break;
+ }
+ VERIFY(err, i < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ chan = &gcinfo[i];
+ VERIFY(err, chan->sesscount < NUM_SESSIONS);
+ if (err)
+ goto bail;
+
+ VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells", 0, &iommuspec));
+ if (err)
+ goto bail;
+ sess = &chan->session[chan->sesscount];
+ sess->smmu.cb = iommuspec.args[0];
+ sess->used = 0;
+ sess->smmu.coherent = of_property_read_bool(dev->of_node,
+ "dma-coherent");
+ sess->smmu.secure = of_property_read_bool(dev->of_node,
+ "qcom,secure-context-bank");
+ if (sess->smmu.secure)
+ start = 0x60000000;
+ VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
+ arm_iommu_create_mapping(&platform_bus_type,
+ start, 0x70000000)));
+ if (err)
+ goto bail;
+ iommu_set_fault_handler(sess->smmu.mapping->domain,
+ fastrpc_smmu_fault_handler, sess);
+ if (sess->smmu.secure)
+ iommu_domain_set_attr(sess->smmu.mapping->domain,
+ DOMAIN_ATTR_SECURE_VMID,
+ &secure_vmid);
+
+ VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
+ if (err)
+ goto bail;
+ sess->smmu.dev = dev;
+ sess->smmu.enabled = 1;
+ chan->sesscount++;
+ debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
+ NULL, &debugfs_fops);
+
+bail:
+ return err;
+}
+
+static int fastrpc_cb_legacy_probe(struct device *dev)
+{
+ struct device_node *domains_child_node = NULL;
+ struct device_node *ctx_node = NULL;
+ struct fastrpc_channel_ctx *chan;
+ struct fastrpc_session_ctx *first_sess, *sess;
+ const char *name;
+ unsigned int *range = NULL, range_size = 0;
+ unsigned int *sids = NULL, sids_size = 0;
+ int err = 0, ret = 0, i;
+
+ VERIFY(err, 0 != (domains_child_node = of_get_child_by_name(
+ dev->of_node,
+ "qcom,msm_fastrpc_compute_cb")));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 != (ctx_node = of_parse_phandle(
+ domains_child_node,
+ "qcom,adsp-shared-phandle", 0)));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 != of_get_property(domains_child_node,
+ "qcom,adsp-shared-sids", &sids_size));
+ if (err)
+ goto bail;
+ VERIFY(err, sids = kzalloc(sids_size, GFP_KERNEL));
+ if (err)
+ goto bail;
+ ret = of_property_read_u32_array(domains_child_node,
+ "qcom,adsp-shared-sids",
+ sids,
+ sids_size/sizeof(unsigned int));
+	if (ret) {
+		err = ret;
+		goto bail;
+	}
+ VERIFY(err, 0 != (name = of_get_property(ctx_node, "label", NULL)));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 != of_get_property(domains_child_node,
+ "qcom,virtual-addr-pool", &range_size));
+ if (err)
+ goto bail;
+ VERIFY(err, range = kzalloc(range_size, GFP_KERNEL));
+ if (err)
+ goto bail;
+ ret = of_property_read_u32_array(domains_child_node,
+ "qcom,virtual-addr-pool",
+ range,
+ range_size/sizeof(unsigned int));
+	if (ret) {
+		err = ret;
+		goto bail;
+	}
+
+ chan = &gcinfo[0];
+ VERIFY(err, chan->sesscount < NUM_SESSIONS);
+ if (err)
+ goto bail;
+ first_sess = &chan->session[chan->sesscount];
+ first_sess->smmu.dev = msm_iommu_get_ctx(name);
+ VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
+ arm_iommu_create_mapping(
+ msm_iommu_get_bus(first_sess->smmu.dev),
+ range[0], range[1])));
+ if (err)
+ goto bail;
+	VERIFY(err, !arm_iommu_attach_device(first_sess->smmu.dev,
+			first_sess->smmu.mapping));
+ if (err)
+ goto bail;
+ for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
+ VERIFY(err, chan->sesscount < NUM_SESSIONS);
+ if (err)
+ goto bail;
+ sess = &chan->session[chan->sesscount];
+ sess->smmu.cb = sids[i];
+ sess->smmu.dev = first_sess->smmu.dev;
+ sess->smmu.enabled = 1;
+ sess->smmu.mapping = first_sess->smmu.mapping;
+ chan->sesscount++;
+ }
+bail:
+ kfree(sids);
+ kfree(range);
+ return err;
+}
+
+static int fastrpc_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct fastrpc_apps *me = &gfa;
+ struct device *dev = &pdev->dev;
+
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,msm-fastrpc-compute-cb"))
+ return fastrpc_cb_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,msm-fastrpc-legacy-compute-cb"))
+ return fastrpc_cb_legacy_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,msm-adsprpc-mem-region")) {
+ me->dev = dev;
+ me->channel[0].remoteheap_ramdump_dev =
+ create_ramdump_device("adsp_rh", dev);
+ if (IS_ERR_OR_NULL(me->channel[0].remoteheap_ramdump_dev)) {
+ pr_err("ADSPRPC: Unable to create adsp-remoteheap ramdump device.\n");
+ me->channel[0].remoteheap_ramdump_dev = NULL;
+ }
+ return 0;
+ }
+ if (of_property_read_bool(dev->of_node,
+ "qcom,fastrpc-vmid-heap-shared"))
+ gcinfo[0].heap_vmid = AC_VM_ADSP_HEAP_SHARED;
+ else
+ gcinfo[0].heap_vmid = VMID_ADSP_Q6;
+ pr_info("ADSPRPC: gcinfo[0].heap_vmid %d\n", gcinfo[0].heap_vmid);
+ me->glink = of_property_read_bool(dev->of_node, "qcom,fastrpc-glink");
+ VERIFY(err, !of_platform_populate(pdev->dev.of_node,
+ fastrpc_match_table,
+ NULL, &pdev->dev));
+ if (err)
+ goto bail;
+bail:
+ return err;
+}
+
+static void fastrpc_deinit(void)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_channel_ctx *chan = gcinfo;
+ int i, j;
+
+ for (i = 0; i < NUM_CHANNELS; i++, chan++) {
+ if (chan->chan) {
+ kref_put_mutex(&chan->kref,
+ fastrpc_channel_close, &me->smd_mutex);
+ chan->chan = NULL;
+ }
+ for (j = 0; j < NUM_SESSIONS; j++) {
+			struct fastrpc_session_ctx *sess = &chan->session[j];
+
+			if (sess->smmu.dev) {
+ arm_iommu_detach_device(sess->smmu.dev);
+ sess->smmu.dev = NULL;
+ }
+ if (sess->smmu.mapping) {
+ arm_iommu_release_mapping(sess->smmu.mapping);
+ sess->smmu.mapping = NULL;
+ }
+ }
+ }
+}
+
+static struct platform_driver fastrpc_driver = {
+ .probe = fastrpc_probe,
+ .driver = {
+ .name = "fastrpc",
+ .owner = THIS_MODULE,
+ .of_match_table = fastrpc_match_table,
+ },
+};
+
+static int __init fastrpc_device_init(void)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct device *dev = NULL;
+ int err = 0, i;
+
+ memset(me, 0, sizeof(*me));
+
+ fastrpc_init(me);
+ me->dev = NULL;
+ VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
+ if (err)
+ goto register_bail;
+ VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
+ DEVICE_NAME));
+ if (err)
+ goto alloc_chrdev_bail;
+ cdev_init(&me->cdev, &fops);
+ me->cdev.owner = THIS_MODULE;
+ VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
+ 1));
+ if (err)
+ goto cdev_init_bail;
+ me->class = class_create(THIS_MODULE, "fastrpc");
+ VERIFY(err, !IS_ERR(me->class));
+ if (err)
+ goto class_create_bail;
+ me->compat = (NULL == fops.compat_ioctl) ? 0 : 1;
+ dev = device_create(me->class, NULL,
+ MKDEV(MAJOR(me->dev_no), 0),
+ NULL, gcinfo[0].name);
+ VERIFY(err, !IS_ERR_OR_NULL(dev));
+ if (err)
+ goto device_create_bail;
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ me->channel[i].dev = dev;
+ me->channel[i].ssrcount = 0;
+ me->channel[i].prevssrcount = 0;
+ me->channel[i].issubsystemup = 1;
+ me->channel[i].ramdumpenabled = 0;
+ me->channel[i].remoteheap_ramdump_dev = NULL;
+ me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
+ me->channel[i].handle = subsys_notif_register_notifier(
+ gcinfo[i].subsys,
+ &me->channel[i].nb);
+ }
+
+ me->client = msm_ion_client_create(DEVICE_NAME);
+ VERIFY(err, !IS_ERR_OR_NULL(me->client));
+ if (err)
+ goto device_create_bail;
+ debugfs_root = debugfs_create_dir("adsprpc", NULL);
+ return 0;
+device_create_bail:
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (me->channel[i].handle)
+ subsys_notif_unregister_notifier(me->channel[i].handle,
+ &me->channel[i].nb);
+ }
+ if (!IS_ERR_OR_NULL(dev))
+ device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
+ class_destroy(me->class);
+class_create_bail:
+ cdev_del(&me->cdev);
+cdev_init_bail:
+ unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
+alloc_chrdev_bail:
+register_bail:
+ fastrpc_deinit();
+ return err;
+}
+
+static void __exit fastrpc_device_exit(void)
+{
+ struct fastrpc_apps *me = &gfa;
+ int i;
+
+ fastrpc_file_list_dtor(me);
+ fastrpc_deinit();
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!gcinfo[i].name)
+ continue;
+ device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
+ subsys_notif_unregister_notifier(me->channel[i].handle,
+ &me->channel[i].nb);
+ }
+ class_destroy(me->class);
+ cdev_del(&me->cdev);
+ unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
+ ion_client_destroy(me->client);
+ debugfs_remove_recursive(debugfs_root);
+}
+
+late_initcall(fastrpc_device_init);
+module_exit(fastrpc_device_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
new file mode 100644
index 000000000000..e1e061748f22
--- /dev/null
+++ b/drivers/char/adsprpc_compat.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/msm_ion.h>
+
+#include "adsprpc_compat.h"
+#include "adsprpc_shared.h"
+
+#define COMPAT_FASTRPC_IOCTL_INVOKE \
+ _IOWR('R', 1, struct compat_fastrpc_ioctl_invoke)
+#define COMPAT_FASTRPC_IOCTL_MMAP \
+ _IOWR('R', 2, struct compat_fastrpc_ioctl_mmap)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP \
+ _IOWR('R', 3, struct compat_fastrpc_ioctl_munmap)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_FD \
+ _IOWR('R', 4, struct compat_fastrpc_ioctl_invoke_fd)
+#define COMPAT_FASTRPC_IOCTL_INIT \
+ _IOWR('R', 6, struct compat_fastrpc_ioctl_init)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS \
+ _IOWR('R', 7, struct compat_fastrpc_ioctl_invoke_attrs)
+#define COMPAT_FASTRPC_IOCTL_GETPERF \
+ _IOWR('R', 9, struct compat_fastrpc_ioctl_perf)
+#define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
+ _IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
+#define COMPAT_FASTRPC_IOCTL_MMAP_64 \
+ _IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
+ _IOWR('R', 15, struct compat_fastrpc_ioctl_munmap_64)
+
+
+struct compat_remote_buf {
+ compat_uptr_t pv; /* buffer pointer */
+ compat_size_t len; /* length of buffer */
+};
+
+union compat_remote_arg {
+ struct compat_remote_buf buf;
+ compat_uint_t h;
+};
+
+struct compat_fastrpc_ioctl_invoke {
+ compat_uint_t handle; /* remote handle */
+ compat_uint_t sc; /* scalars describing the data */
+ compat_uptr_t pra; /* remote arguments list */
+};
+
+struct compat_fastrpc_ioctl_invoke_fd {
+ struct compat_fastrpc_ioctl_invoke inv;
+ compat_uptr_t fds; /* fd list */
+};
+
+struct compat_fastrpc_ioctl_invoke_attrs {
+ struct compat_fastrpc_ioctl_invoke inv;
+ compat_uptr_t fds; /* fd list */
+ compat_uptr_t attrs; /* attribute list */
+};
+
+struct compat_fastrpc_ioctl_mmap {
+ compat_int_t fd; /* ion fd */
+ compat_uint_t flags; /* flags for dsp to map with */
+ compat_uptr_t vaddrin; /* optional virtual address */
+ compat_size_t size; /* size */
+ compat_uptr_t vaddrout; /* dsps virtual address */
+};
+
+struct compat_fastrpc_ioctl_mmap_64 {
+ compat_int_t fd; /* ion fd */
+ compat_uint_t flags; /* flags for dsp to map with */
+ compat_u64 vaddrin; /* optional virtual address */
+ compat_size_t size; /* size */
+ compat_u64 vaddrout; /* dsps virtual address */
+};
+
+struct compat_fastrpc_ioctl_munmap {
+ compat_uptr_t vaddrout; /* address to unmap */
+ compat_size_t size; /* size */
+};
+
+struct compat_fastrpc_ioctl_munmap_64 {
+ compat_u64 vaddrout; /* address to unmap */
+ compat_size_t size; /* size */
+};
+
+struct compat_fastrpc_ioctl_init {
+ compat_uint_t flags; /* one of FASTRPC_INIT_* macros */
+ compat_uptr_t file; /* pointer to elf file */
+ compat_int_t filelen; /* elf file length */
+ compat_int_t filefd; /* ION fd for the file */
+ compat_uptr_t mem; /* mem for the PD */
+ compat_int_t memlen; /* mem length */
+ compat_int_t memfd; /* ION fd for the mem */
+};
+
+struct compat_fastrpc_ioctl_init_attrs {
+ struct compat_fastrpc_ioctl_init init;
+ compat_int_t attrs; /* attributes to init process */
+ compat_int_t siglen; /* test signature file length */
+};
+
+struct compat_fastrpc_ioctl_perf { /* kernel performance data */
+ compat_uptr_t data;
+ compat_int_t numkeys;
+ compat_uptr_t keys;
+};
+
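+/*
+ * Compat shim pattern used throughout this file: allocate the native 64-bit
+ * structure on the user stack with compat_alloc_user_space(), widen each
+ * 32-bit field into it with get_user()/put_user() pairs, then hand the
+ * native pointer to the regular unlocked_ioctl handler.
+ */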
+static int compat_get_fastrpc_ioctl_invoke(
+ struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
+ struct fastrpc_ioctl_invoke_attrs __user **inva,
+ unsigned int cmd)
+{
+ compat_uint_t u, sc;
+ compat_size_t s;
+ compat_uptr_t p;
+ struct fastrpc_ioctl_invoke_attrs *inv;
+ union compat_remote_arg *pra32;
+ union remote_arg *pra;
+ int err, len, num, j;
+
+ err = get_user(sc, &inv32->inv.sc);
+ if (err)
+ return err;
+
+ len = REMOTE_SCALARS_LENGTH(sc);
+ VERIFY(err, NULL != (inv = compat_alloc_user_space(
+ sizeof(*inv) + len * sizeof(*pra))));
+ if (err)
+ return -EFAULT;
+
+ pra = (union remote_arg *)(inv + 1);
+ err = put_user(pra, &inv->inv.pra);
+ err |= put_user(sc, &inv->inv.sc);
+ err |= get_user(u, &inv32->inv.handle);
+ err |= put_user(u, &inv->inv.handle);
+ err |= get_user(p, &inv32->inv.pra);
+ if (err)
+ return err;
+
+ pra32 = compat_ptr(p);
+ num = REMOTE_SCALARS_INBUFS(sc) + REMOTE_SCALARS_OUTBUFS(sc);
+ for (j = 0; j < num; j++) {
+ err |= get_user(p, &pra32[j].buf.pv);
+ err |= put_user(p, (uintptr_t *)&pra[j].buf.pv);
+ err |= get_user(s, &pra32[j].buf.len);
+ err |= put_user(s, &pra[j].buf.len);
+ }
+ for (j = 0; j < REMOTE_SCALARS_INHANDLES(sc); j++) {
+ err |= get_user(u, &pra32[num + j].h);
+ err |= put_user(u, &pra[num + j].h);
+ }
+
+ err |= put_user(NULL, &inv->fds);
+ if (cmd != COMPAT_FASTRPC_IOCTL_INVOKE) {
+ err |= get_user(p, &inv32->fds);
+ err |= put_user(p, (compat_uptr_t *)&inv->fds);
+ }
+ err |= put_user(NULL, &inv->attrs);
+ if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) {
+ err |= get_user(p, &inv32->attrs);
+ err |= put_user(p, (compat_uptr_t *)&inv->attrs);
+ }
+
+ *inva = inv;
+ return err;
+}
+
+static int compat_put_fastrpc_ioctl_invoke(
+ struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
+ struct fastrpc_ioctl_invoke_attrs __user *inv)
+{
+ compat_uptr_t p;
+ compat_uint_t u, h;
+ union compat_remote_arg *pra32;
+ union remote_arg *pra;
+ int err, i, num;
+
+ err = get_user(u, &inv32->inv.sc);
+ err |= get_user(p, &inv32->inv.pra);
+ if (err)
+ return err;
+
+ pra32 = compat_ptr(p);
+ pra = (union remote_arg *)(inv + 1);
+ num = REMOTE_SCALARS_INBUFS(u) + REMOTE_SCALARS_OUTBUFS(u)
+ + REMOTE_SCALARS_INHANDLES(u);
+ for (i = 0; i < REMOTE_SCALARS_OUTHANDLES(u); i++) {
+ err |= get_user(h, &pra[num + i].h);
+ err |= put_user(h, &pra32[num + i].h);
+ }
+
+ return err;
+}
+
+static int compat_get_fastrpc_ioctl_mmap(
+ struct compat_fastrpc_ioctl_mmap __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_uint_t u;
+ compat_int_t i;
+ compat_size_t s;
+ compat_uptr_t p;
+ int err;
+
+ err = get_user(i, &map32->fd);
+ err |= put_user(i, &map->fd);
+ err |= get_user(u, &map32->flags);
+ err |= put_user(u, &map->flags);
+ err |= get_user(p, &map32->vaddrin);
+ err |= put_user(p, (uintptr_t *)&map->vaddrin);
+ err |= get_user(s, &map32->size);
+ err |= put_user(s, &map->size);
+
+ return err;
+}
+
+static int compat_get_fastrpc_ioctl_mmap_64(
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_uint_t u;
+ compat_int_t i;
+ compat_size_t s;
+ compat_u64 p;
+ int err;
+
+ err = get_user(i, &map32->fd);
+ err |= put_user(i, &map->fd);
+ err |= get_user(u, &map32->flags);
+ err |= put_user(u, &map->flags);
+ err |= get_user(p, &map32->vaddrin);
+ err |= put_user(p, &map->vaddrin);
+ err |= get_user(s, &map32->size);
+ err |= put_user(s, &map->size);
+
+ return err;
+}
+
+static int compat_put_fastrpc_ioctl_mmap(
+ struct compat_fastrpc_ioctl_mmap __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_uptr_t p;
+ int err;
+
+ err = get_user(p, &map->vaddrout);
+ err |= put_user(p, &map32->vaddrout);
+
+ return err;
+}
+
+static int compat_put_fastrpc_ioctl_mmap_64(
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_u64 p;
+ int err;
+
+ err = get_user(p, &map->vaddrout);
+ err |= put_user(p, &map32->vaddrout);
+
+ return err;
+}
+
+static int compat_get_fastrpc_ioctl_munmap(
+ struct compat_fastrpc_ioctl_munmap __user *unmap32,
+ struct fastrpc_ioctl_munmap __user *unmap)
+{
+ compat_uptr_t p;
+ compat_size_t s;
+ int err;
+
+ err = get_user(p, &unmap32->vaddrout);
+ err |= put_user(p, &unmap->vaddrout);
+ err |= get_user(s, &unmap32->size);
+ err |= put_user(s, &unmap->size);
+
+ return err;
+}
+
+static int compat_get_fastrpc_ioctl_munmap_64(
+ struct compat_fastrpc_ioctl_munmap_64 __user *unmap32,
+ struct fastrpc_ioctl_munmap __user *unmap)
+{
+ compat_u64 p;
+ compat_size_t s;
+ int err;
+
+ err = get_user(p, &unmap32->vaddrout);
+ err |= put_user(p, &unmap->vaddrout);
+ err |= get_user(s, &unmap32->size);
+ err |= put_user(s, &unmap->size);
+
+ return err;
+}
+
+static int compat_get_fastrpc_ioctl_perf(
+ struct compat_fastrpc_ioctl_perf __user *perf32,
+ struct fastrpc_ioctl_perf __user *perf)
+{
+ compat_uptr_t p;
+ int err;
+
+ err = get_user(p, &perf32->data);
+ err |= put_user(p, &perf->data);
+ err |= get_user(p, &perf32->keys);
+ err |= put_user(p, &perf->keys);
+
+ return err;
+}
+
+static int compat_get_fastrpc_ioctl_init(
+ struct compat_fastrpc_ioctl_init_attrs __user *init32,
+ struct fastrpc_ioctl_init_attrs __user *init,
+ unsigned int cmd)
+{
+ compat_uint_t u;
+ compat_uptr_t p;
+ compat_int_t i;
+ int err;
+
+ err = get_user(u, &init32->init.flags);
+ err |= put_user(u, &init->init.flags);
+ err |= get_user(p, &init32->init.file);
+ err |= put_user(p, &init->init.file);
+ err |= get_user(i, &init32->init.filelen);
+ err |= put_user(i, &init->init.filelen);
+ err |= get_user(i, &init32->init.filefd);
+ err |= put_user(i, &init->init.filefd);
+ err |= get_user(p, &init32->init.mem);
+ err |= put_user(p, &init->init.mem);
+ err |= get_user(i, &init32->init.memlen);
+ err |= put_user(i, &init->init.memlen);
+ err |= get_user(i, &init32->init.memfd);
+ err |= put_user(i, &init->init.memfd);
+
+ err |= put_user(0, &init->attrs);
+ if (cmd == COMPAT_FASTRPC_IOCTL_INIT_ATTRS) {
+ err |= get_user(i, &init32->attrs);
+ err |= put_user(i, (compat_uptr_t *)&init->attrs);
+ }
+
+ err |= put_user(0, &init->siglen);
+ if (cmd == COMPAT_FASTRPC_IOCTL_INIT_ATTRS) {
+ err |= get_user(i, &init32->siglen);
+ err |= put_user(i, (compat_uptr_t *)&init->siglen);
+ }
+
+ return err;
+}
+
+long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case COMPAT_FASTRPC_IOCTL_INVOKE:
+ case COMPAT_FASTRPC_IOCTL_INVOKE_FD:
+ case COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS:
+ {
+ struct compat_fastrpc_ioctl_invoke_attrs __user *inv32;
+ struct fastrpc_ioctl_invoke_attrs __user *inv;
+ long ret;
+
+ inv32 = compat_ptr(arg);
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(inv32,
+ &inv, cmd));
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp,
+ FASTRPC_IOCTL_INVOKE_ATTRS, (unsigned long)inv);
+ if (ret)
+ return ret;
+ VERIFY(err, 0 == compat_put_fastrpc_ioctl_invoke(inv32, inv));
+ return err;
+ }
+ case COMPAT_FASTRPC_IOCTL_MMAP:
+ {
+ struct compat_fastrpc_ioctl_mmap __user *map32;
+ struct fastrpc_ioctl_mmap __user *map;
+ long ret;
+
+ map32 = compat_ptr(arg);
+ VERIFY(err, NULL != (map = compat_alloc_user_space(
+ sizeof(*map))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap(map32, map));
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP,
+ (unsigned long)map);
+ if (ret)
+ return ret;
+ VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map));
+ return err;
+ }
+ case COMPAT_FASTRPC_IOCTL_MMAP_64:
+ {
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32;
+ struct fastrpc_ioctl_mmap __user *map;
+ long ret;
+
+ map32 = compat_ptr(arg);
+ VERIFY(err, NULL != (map = compat_alloc_user_space(
+ sizeof(*map))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map));
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP_64,
+ (unsigned long)map);
+ if (ret)
+ return ret;
+ VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map));
+ return err;
+ }
+ case COMPAT_FASTRPC_IOCTL_MUNMAP:
+ {
+ struct compat_fastrpc_ioctl_munmap __user *unmap32;
+ struct fastrpc_ioctl_munmap __user *unmap;
+
+ unmap32 = compat_ptr(arg);
+ VERIFY(err, NULL != (unmap = compat_alloc_user_space(
+ sizeof(*unmap))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap(unmap32,
+ unmap));
+ if (err)
+ return err;
+ return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP,
+ (unsigned long)unmap);
+ }
+ case COMPAT_FASTRPC_IOCTL_MUNMAP_64:
+ {
+ struct compat_fastrpc_ioctl_munmap_64 __user *unmap32;
+ struct fastrpc_ioctl_munmap __user *unmap;
+
+ unmap32 = compat_ptr(arg);
+ VERIFY(err, NULL != (unmap = compat_alloc_user_space(
+ sizeof(*unmap))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32,
+ unmap));
+ if (err)
+ return err;
+ return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP_64,
+ (unsigned long)unmap);
+ }
+ case COMPAT_FASTRPC_IOCTL_INIT:
+ /* fall through */
+ case COMPAT_FASTRPC_IOCTL_INIT_ATTRS:
+ {
+ struct compat_fastrpc_ioctl_init_attrs __user *init32;
+ struct fastrpc_ioctl_init_attrs __user *init;
+
+ init32 = compat_ptr(arg);
+ VERIFY(err, NULL != (init = compat_alloc_user_space(
+ sizeof(*init))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_init(init32,
+ init, cmd));
+ if (err)
+ return err;
+ return filp->f_op->unlocked_ioctl(filp,
+ FASTRPC_IOCTL_INIT_ATTRS, (unsigned long)init);
+ }
+ case FASTRPC_IOCTL_GETINFO:
+ {
+ compat_uptr_t __user *info32;
+ uint32_t __user *info;
+ compat_uint_t u;
+ long ret;
+
+ info32 = compat_ptr(arg);
+ VERIFY(err, NULL != (info = compat_alloc_user_space(
+ sizeof(*info))));
+ if (err)
+ return -EFAULT;
+ err = get_user(u, info32);
+ err |= put_user(u, info);
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_GETINFO,
+ (unsigned long)info);
+ if (ret)
+ return ret;
+ err = get_user(u, info);
+ err |= put_user(u, info32);
+ return err;
+ }
+ case FASTRPC_IOCTL_SETMODE:
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ case COMPAT_FASTRPC_IOCTL_GETPERF:
+ {
+ struct compat_fastrpc_ioctl_perf __user *perf32;
+ struct fastrpc_ioctl_perf *perf;
+ compat_uint_t u;
+ long ret;
+
+ perf32 = compat_ptr(arg);
+ VERIFY(err, NULL != (perf = compat_alloc_user_space(
+ sizeof(*perf))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_perf(perf32,
+ perf));
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_GETPERF,
+ (unsigned long)perf);
+ if (ret)
+ return ret;
+ err = get_user(u, &perf->numkeys);
+ err |= put_user(u, &perf32->numkeys);
+ return err;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
diff --git a/drivers/char/adsprpc_compat.h b/drivers/char/adsprpc_compat.h
new file mode 100644
index 000000000000..12506bbb3f06
--- /dev/null
+++ b/drivers/char/adsprpc_compat.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef ADSPRPC_COMPAT_H
+#define ADSPRPC_COMPAT_H
+
+#ifdef CONFIG_COMPAT
+
+long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+
+#define compat_fastrpc_device_ioctl NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* ADSPRPC_COMPAT_H */
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
new file mode 100644
index 000000000000..a88c668440c7
--- /dev/null
+++ b/drivers/char/adsprpc_shared.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef ADSPRPC_SHARED_H
+#define ADSPRPC_SHARED_H
+
+#include <linux/types.h>
+
+#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke)
+#define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap)
+#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 3, struct fastrpc_ioctl_munmap)
+#define FASTRPC_IOCTL_MMAP_64 _IOWR('R', 14, struct fastrpc_ioctl_mmap_64)
+#define FASTRPC_IOCTL_MUNMAP_64 _IOWR('R', 15, struct fastrpc_ioctl_munmap_64)
+#define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd)
+#define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t)
+#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init)
+#define FASTRPC_IOCTL_INVOKE_ATTRS \
+ _IOWR('R', 7, struct fastrpc_ioctl_invoke_attrs)
+#define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t)
+#define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf)
+#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
+
+#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
+#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
+#define DEVICE_NAME "adsprpc-smd"
+
+/* Set for buffers that have no virtual mapping in userspace */
+#define FASTRPC_ATTR_NOVA 0x1
+
+/* Set for buffers that are NOT dma coherent */
+#define FASTRPC_ATTR_NON_COHERENT 0x2
+
+/* Set for buffers that are dma coherent */
+#define FASTRPC_ATTR_COHERENT 0x4
+
+/* Driver should operate in parallel with the co-processor */
+#define FASTRPC_MODE_PARALLEL 0
+
+/* Driver should operate in serial mode with the co-processor */
+#define FASTRPC_MODE_SERIAL 1
+
+/* Driver should operate in profile mode with the co-processor */
+#define FASTRPC_MODE_PROFILE 2
+
+/* INIT a new process or attach to guestos */
+#define FASTRPC_INIT_ATTACH 0
+#define FASTRPC_INIT_CREATE 1
+#define FASTRPC_INIT_CREATE_STATIC 2
+
+/* Retrieves number of input buffers from the scalars parameter */
+#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
+
+/* Retrieves number of output buffers from the scalars parameter */
+#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
+
+/* Retrieves number of input handles from the scalars parameter */
+#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
+
+/* Retrieves number of output handles from the scalars parameter */
+#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
+
+#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) +\
+ REMOTE_SCALARS_OUTBUFS(sc) +\
+ REMOTE_SCALARS_INHANDLES(sc) +\
+ REMOTE_SCALARS_OUTHANDLES(sc))
+
+#define REMOTE_SCALARS_MAKEX(attr, method, in, out, oin, oout) \
+ ((((uint32_t) (attr) & 0x7) << 29) | \
+ (((uint32_t) (method) & 0x1f) << 24) | \
+ (((uint32_t) (in) & 0xff) << 16) | \
+ (((uint32_t) (out) & 0xff) << 8) | \
+ (((uint32_t) (oin) & 0x0f) << 4) | \
+ ((uint32_t) (oout) & 0x0f))
+
+#define REMOTE_SCALARS_MAKE(method, in, out) \
+ REMOTE_SCALARS_MAKEX(0, method, in, out, 0, 0)
+
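+/*
+ * Worked example: REMOTE_SCALARS_MAKE(3, 1, 0) encodes method 3 with one
+ * input buffer and no output buffers or handles, i.e. 0x03010000.
+ */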
+
+#ifndef VERIFY_PRINT_ERROR
+#define VERIFY_EPRINTF(format, args) (void)0
+#endif
+
+#ifndef VERIFY_PRINT_INFO
+#define VERIFY_IPRINTF(args) (void)0
+#endif
+
+#ifndef VERIFY
+#define __STR__(x) #x ":"
+#define __TOSTR__(x) __STR__(x)
+#define __FILE_LINE__ __FILE__ ":" __TOSTR__(__LINE__)
+
+#define VERIFY(err, val) \
+do {\
+ VERIFY_IPRINTF(__FILE_LINE__"info: calling: " #val "\n");\
+ if (0 == (val)) {\
+ (err) = (err) == 0 ? -1 : (err);\
+ VERIFY_EPRINTF(__FILE_LINE__"error: %d: " #val "\n", (err));\
+ } else {\
+ VERIFY_IPRINTF(__FILE_LINE__"info: passed: " #val "\n");\
+ } \
+} while (0)
+#endif
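+
+/*
+ * VERIFY() evaluates the predicate and, on failure, sets err to -1 unless a
+ * non-zero error is already pending. Typical usage in this driver:
+ *
+ *	VERIFY(err, ptr != NULL);
+ *	if (err)
+ *		goto bail;
+ */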
+
+#define remote_arg64_t union remote_arg64
+
+struct remote_buf64 {
+ uint64_t pv;
+ uint64_t len;
+};
+
+union remote_arg64 {
+ struct remote_buf64 buf;
+ uint32_t h;
+};
+
+#define remote_arg_t union remote_arg
+
+struct remote_buf {
+ void *pv; /* buffer pointer */
+ size_t len; /* length of buffer */
+};
+
+union remote_arg {
+ struct remote_buf buf; /* buffer info */
+ uint32_t h; /* remote handle */
+};
+
+struct fastrpc_ioctl_invoke {
+ uint32_t handle; /* remote handle */
+ uint32_t sc; /* scalars describing the data */
+ remote_arg_t *pra; /* remote arguments list */
+};
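+
+/*
+ * Illustrative userspace usage (not part of this driver): a caller with one
+ * input buffer would set sc = REMOTE_SCALARS_MAKE(method, 1, 0), point
+ * pra[0].buf at the payload and issue FASTRPC_IOCTL_INVOKE on the device fd.
+ */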
+
+struct fastrpc_ioctl_invoke_fd {
+ struct fastrpc_ioctl_invoke inv;
+ int *fds; /* fd list */
+};
+
+struct fastrpc_ioctl_invoke_attrs {
+ struct fastrpc_ioctl_invoke inv;
+ int *fds; /* fd list */
+	unsigned int *attrs;	/* attribute list */
+};
+
+struct fastrpc_ioctl_init {
+ uint32_t flags; /* one of FASTRPC_INIT_* macros */
+ uintptr_t file; /* pointer to elf file */
+ uint32_t filelen; /* elf file length */
+ int32_t filefd; /* ION fd for the file */
+ uintptr_t mem; /* mem for the PD */
+ uint32_t memlen; /* mem length */
+ int32_t memfd; /* ION fd for the mem */
+};
+
+struct fastrpc_ioctl_init_attrs {
+ struct fastrpc_ioctl_init init;
+ int attrs;
+ unsigned int siglen;
+};
+
+struct fastrpc_ioctl_munmap {
+ uintptr_t vaddrout; /* address to unmap */
+ size_t size; /* size */
+};
+
+struct fastrpc_ioctl_munmap_64 {
+ uint64_t vaddrout; /* address to unmap */
+ size_t size; /* size */
+};
+
+struct fastrpc_ioctl_mmap {
+ int fd; /* ion fd */
+ uint32_t flags; /* flags for dsp to map with */
+ uintptr_t vaddrin; /* optional virtual address */
+ size_t size; /* size */
+ uintptr_t vaddrout; /* dsps virtual address */
+};
+
+
+struct fastrpc_ioctl_mmap_64 {
+ int fd; /* ion fd */
+ uint32_t flags; /* flags for dsp to map with */
+ uint64_t vaddrin; /* optional virtual address */
+ size_t size; /* size */
+ uint64_t vaddrout; /* dsps virtual address */
+};
+
+struct fastrpc_ioctl_perf { /* kernel performance data */
+ uintptr_t data;
+ uint32_t numkeys;
+ uintptr_t keys;
+};
+
+struct smq_null_invoke {
+ uint64_t ctx; /* invoke caller context */
+ uint32_t handle; /* handle to invoke */
+ uint32_t sc; /* scalars structure describing the data */
+};
+
+struct smq_phy_page {
+ uint64_t addr; /* physical address */
+ uint64_t size; /* size of contiguous region */
+};
+
+struct smq_invoke_buf {
+ int num; /* number of contiguous regions */
+ int pgidx; /* index to start of contiguous region */
+};
+
+struct smq_invoke {
+ struct smq_null_invoke header;
+ struct smq_phy_page page; /* remote arg and list of pages address */
+};
+
+struct smq_msg {
+ uint32_t pid; /* process group id */
+ uint32_t tid; /* thread id */
+ struct smq_invoke invoke;
+};
+
+struct smq_invoke_rsp {
+ uint64_t ctx; /* invoke caller context */
+ int retval; /* invoke return value */
+};
+
+static inline struct smq_invoke_buf *smq_invoke_buf_start(remote_arg64_t *pra,
+ uint32_t sc)
+{
+ unsigned int len = REMOTE_SCALARS_LENGTH(sc);
+
+ return (struct smq_invoke_buf *)(&pra[len]);
+}
+
+static inline struct smq_phy_page *smq_phy_page_start(uint32_t sc,
+ struct smq_invoke_buf *buf)
+{
+	uint64_t nTotal = REMOTE_SCALARS_INBUFS(sc) + REMOTE_SCALARS_OUTBUFS(sc);
+
+	return (struct smq_phy_page *)(&buf[nTotal]);
+}
+
+#endif
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
new file mode 100644
index 000000000000..ba629abdd36e
--- /dev/null
+++ b/drivers/char/diag/Kconfig
@@ -0,0 +1,33 @@
+menu "Diag Support"
+
+config DIAG_CHAR
+ tristate "char driver interface and diag forwarding to/from modem"
+ default m
+ depends on USB_CONFIGFS_F_DIAG || USB_FUNCTION_DIAG || USB_QCOM_MAEMO
+ depends on ARCH_QCOM
+ depends on POWER_RESET_QCOM
+ select CRC_CCITT
+ help
+	  Char driver interface for diag user space clients and for forwarding
+	  diag traffic to and from the modem ARM. This enables diagchar for
+	  either the maemo USB gadget or the android USB gadget, depending on
+	  the selected config.
+endmenu
+
+menu "DIAG traffic over USB"
+
+config DIAG_OVER_USB
+ bool "Enable DIAG traffic to go over USB"
+ depends on ARCH_QCOM
+ default y
+ help
+ This feature helps segregate code required for DIAG traffic to go over USB.
+endmenu
+
+menu "HSIC/SMUX support for DIAG"
+
+config DIAGFWD_BRIDGE_CODE
+ depends on USB_QCOM_DIAG_BRIDGE || MSM_MHI
+ default y
+ bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
+ help
+ SMUX/HSIC Transport Layer for DIAG Router
+endmenu
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
new file mode 100644
index 000000000000..d57ebd8d671e
--- /dev/null
+++ b/drivers/char/diag/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_DIAG_CHAR) := diagchar.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
+obj-$(CONFIG_MSM_MHI) += diagfwd_mhi.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_glink.o diagfwd_peripheral.o diagfwd_smd.o diagfwd_socket.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
new file mode 100644
index 000000000000..b0b36d00415d
--- /dev/null
+++ b/drivers/char/diag/diag_dci.c
@@ -0,0 +1,3312 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/reboot.h>
+#include <asm/current.h>
+#include <soc/qcom/restart.h>
+#include <linux/vmalloc.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+static struct timer_list dci_drain_timer;
+static int dci_timer_in_progress;
+static struct work_struct dci_data_drain_work;
+
+struct diag_dci_partial_pkt_t partial_pkt;
+
+unsigned int dci_max_reg = 100;
+unsigned int dci_max_clients = 10;
+struct mutex dci_log_mask_mutex;
+struct mutex dci_event_mask_mutex;
+
+/*
+ * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
+ * connection status again.
+ *
+ * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
+ * connection status
+ */
+#define DCI_HANDSHAKE_RETRY_TIME 500000
+#define DCI_HANDSHAKE_WAIT_TIME 200
+
+spinlock_t ws_lock;
+unsigned long ws_lock_flags;
+
+struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
+ {
+ .ctx = 0,
+ .send_log_mask = diag_send_dci_log_mask,
+ .send_event_mask = diag_send_dci_event_mask,
+ .peripheral_status = 0,
+ .mempool = 0,
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .ctx = DIAGFWD_MDM_DCI,
+ .send_log_mask = diag_send_dci_log_mask_remote,
+ .send_event_mask = diag_send_dci_event_mask_remote,
+ .peripheral_status = 0,
+ .mempool = POOL_TYPE_MDM_DCI_WRITE,
+ }
+#endif
+};
+
+struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
+ {
+ .id = 0,
+ .open = 0,
+ .retry_count = 0
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = DIAGFWD_MDM_DCI,
+ .open = 0,
+ .retry_count = 0
+ }
+#endif
+};
+
+/* Number of milliseconds anticipated to process the DCI data */
+#define DCI_WAKEUP_TIMEOUT 1
+
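+/*
+ * A DCI buffer may be queued to a client's write list only when it has
+ * backing storage, is not already being consumed and holds some data.
+ */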
+#define DCI_CAN_ADD_BUF_TO_LIST(buf) \
+ ((buf) && (buf)->data && !(buf)->in_busy && (buf)->data_len > 0)
+
+#ifdef CONFIG_DEBUG_FS
+struct diag_dci_data_info *dci_traffic;
+struct mutex dci_stat_mutex;
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral, uint8_t proc)
+{
+ static int curr_dci_data;
+ static unsigned long iteration;
+ struct diag_dci_data_info *temp_data = dci_traffic;
+ if (!temp_data)
+ return;
+ mutex_lock(&dci_stat_mutex);
+ if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
+ curr_dci_data = 0;
+ temp_data += curr_dci_data;
+ temp_data->iteration = iteration + 1;
+ temp_data->data_size = read_bytes;
+ temp_data->peripheral = peripheral;
+ temp_data->ch_type = ch_type;
+ temp_data->proc = proc;
+ diag_get_timestamp(temp_data->time_stamp);
+ curr_dci_data++;
+ iteration++;
+ mutex_unlock(&dci_stat_mutex);
+}
+#else
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral, uint8_t proc) { }
+#endif
+
+static int check_peripheral_dci_support(int peripheral_id, int dci_proc_id)
+{
+ int dci_peripheral_list = 0;
+
+ if (dci_proc_id < 0 || dci_proc_id >= NUM_DCI_PROC) {
+ pr_err("diag:In %s,not a supported DCI proc id\n", __func__);
+ return 0;
+ }
+ if (peripheral_id < 0 || peripheral_id >= NUM_PERIPHERALS) {
+ pr_err("diag:In %s,not a valid peripheral id\n", __func__);
+ return 0;
+ }
+ dci_peripheral_list = dci_ops_tbl[dci_proc_id].peripheral_status;
+
+ if (dci_peripheral_list <= 0 || dci_peripheral_list > DIAG_CON_ALL) {
+ pr_err("diag:In %s,not a valid dci peripheral mask\n",
+ __func__);
+ return 0;
+ }
+ /* Remove APSS bit mask information */
+ dci_peripheral_list = dci_peripheral_list >> 1;
+
+ if ((1 << peripheral_id) & (dci_peripheral_list))
+ return 1;
+ else
+ return 0;
+}
+
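+/*
+ * The DCI log mask is laid out as one block per equipment ID: 1 byte
+ * for the equip ID, 1 dirty byte and DCI_MAX_ITEMS_PER_LOG_CODE mask
+ * bytes, i.e. 514 bytes per block and DCI_MAX_LOG_CODES blocks in
+ * total. The hard coded strides of 514 used throughout this file walk
+ * these blocks.
+ */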
+static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
+{
+ unsigned char *temp = mask;
+ uint8_t i;
+
+ if (!mask)
+ return;
+
+ /* create hard coded table for log mask with 16 categories */
+ for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+ *temp = i;
+ temp++;
+ *temp = dirty ? 1 : 0;
+ temp++;
+ memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
+ temp += DCI_MAX_ITEMS_PER_LOG_CODE;
+ }
+}
+
+static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
+{
+ if (tbl_buf)
+ memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
+}
+
+void dci_drain_data(unsigned long data)
+{
+ queue_work(driver->diag_dci_wq, &dci_data_drain_work);
+}
+
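+/*
+ * DCI data is drained in batches: the first packet arms a 200 ms
+ * timer, and when it expires dci_data_drain_work flushes every
+ * client's pending buffers and wakes the clients in one pass.
+ */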
+static void dci_check_drain_timer(void)
+{
+ if (!dci_timer_in_progress) {
+ dci_timer_in_progress = 1;
+ mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
+ }
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void dci_handshake_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ int max_retries = 5;
+
+ struct dci_channel_status_t *status = container_of(work,
+ struct dci_channel_status_t,
+ handshake_work);
+
+ if (status->open) {
+ pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
+ __func__, status->id);
+ return;
+ }
+
+ if (status->retry_count == max_retries) {
+ status->retry_count = 0;
+ pr_info("diag: dci channel connection handshake timed out, id: %d\n",
+ status->id);
+ err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
+ if (err) {
+ pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
+ __func__, status->id, err);
+ }
+ return;
+ }
+ status->retry_count++;
+ /*
+ * Sleep for some time before checking the connection status again.
+ * The value should allow for the round-trip time of a small packet
+ * to the remote processor.
+ */
+ usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
+ mod_timer(&status->wait_time,
+ jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+}
+
+static void dci_chk_handshake(unsigned long data)
+{
+ int index = (int)data;
+
+ if (index < 0 || index >= NUM_DCI_PROC)
+ return;
+
+ queue_work(driver->diag_dci_wq,
+ &dci_channel_status[index].handshake_work);
+}
+#endif
+
+static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
+{
+ if (!buffer || buffer->data)
+ return -EINVAL;
+
+ switch (type) {
+ case DCI_BUF_PRIMARY:
+ buffer->capacity = IN_BUF_SIZE;
+ buffer->data = vzalloc(buffer->capacity);
+ if (!buffer->data)
+ return -ENOMEM;
+ break;
+ case DCI_BUF_SECONDARY:
+ buffer->data = NULL;
+ buffer->capacity = IN_BUF_SIZE;
+ break;
+ case DCI_BUF_CMD:
+ buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
+ buffer->data = vzalloc(buffer->capacity);
+ if (!buffer->data)
+ return -ENOMEM;
+ break;
+ default:
+ pr_err("diag: In %s, unknown type %d", __func__, type);
+ return -EINVAL;
+ }
+
+ buffer->data_len = 0;
+ buffer->in_busy = 0;
+ buffer->buf_type = type;
+ mutex_init(&buffer->data_mutex);
+
+ return 0;
+}
+
+static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
+{
+ if (!buf)
+ return -EINVAL;
+
+ /* Return 1 if the buffer is not busy and can hold new data */
+ if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
+ return 1;
+
+ return 0;
+}
+
+static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
+ struct diag_dci_buffer_t *buf)
+{
+ if (!buf || !client || !buf->data)
+ return;
+
+ if (buf->in_list || buf->data_len == 0)
+ return;
+
+ mutex_lock(&client->write_buf_mutex);
+ list_add_tail(&buf->buf_track, &client->list_write_buf);
+ /*
+ * In the case of DCI, there can be multiple packets in one read. To
+ * calculate the wakeup source reference count, we must account for each
+ * packet in a single read.
+ */
+ diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
+ mutex_lock(&buf->data_mutex);
+ buf->in_busy = 1;
+ buf->in_list = 1;
+ mutex_unlock(&buf->data_mutex);
+ mutex_unlock(&client->write_buf_mutex);
+}
+
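+/*
+ * Pick a buffer that can take len more bytes for this data source:
+ * reuse the current buffer if it still fits, fall back to the primary
+ * buffer, or carve a secondary buffer out of the DCI mempool as a
+ * last resort. On success buf_curr points to the chosen buffer.
+ */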
+static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
+ int data_source, int len)
+{
+ struct diag_dci_buffer_t *buf_primary = NULL;
+ struct diag_dci_buffer_t *buf_temp = NULL;
+ struct diag_dci_buffer_t *curr = NULL;
+
+ if (!client)
+ return -EINVAL;
+ if (len < 0 || len > IN_BUF_SIZE)
+ return -EINVAL;
+
+ curr = client->buffers[data_source].buf_curr;
+ buf_primary = client->buffers[data_source].buf_primary;
+
+ if (curr && diag_dci_check_buffer(curr, len) == 1)
+ return 0;
+
+ dci_add_buffer_to_list(client, curr);
+ client->buffers[data_source].buf_curr = NULL;
+
+ if (diag_dci_check_buffer(buf_primary, len) == 1) {
+ client->buffers[data_source].buf_curr = buf_primary;
+ return 0;
+ }
+
+ buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
+ if (!buf_temp)
+ return -EIO;
+
+ if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
+ buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
+ POOL_TYPE_DCI);
+ if (!buf_temp->data) {
+ kfree(buf_temp);
+ buf_temp = NULL;
+ return -ENOMEM;
+ }
+ client->buffers[data_source].buf_curr = buf_temp;
+ return 0;
+ }
+
+ kfree(buf_temp);
+ buf_temp = NULL;
+ return -EIO;
+}
+
+void diag_dci_wakeup_clients(void)
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+
+ /*
+ * Don't wake up the client when there is no pending buffer to
+ * write or when it is writing to user space
+ */
+ if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+ mutex_lock(&entry->write_buf_mutex);
+ entry->in_service = 1;
+ mutex_unlock(&entry->write_buf_mutex);
+ diag_update_sleeping_process(entry->client->tgid,
+ DCI_DATA_TYPE);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+}
+
+void dci_data_drain_work_fn(struct work_struct *work)
+{
+ int i;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ struct diag_dci_buffer_t *buf_temp = NULL;
+
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ for (i = 0; i < entry->num_buffers; i++) {
+ proc_buf = &entry->buffers[i];
+
+ mutex_lock(&proc_buf->buf_mutex);
+ buf_temp = proc_buf->buf_primary;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+ dci_add_buffer_to_list(entry, buf_temp);
+
+ buf_temp = proc_buf->buf_cmd;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+ dci_add_buffer_to_list(entry, buf_temp);
+
+ buf_temp = proc_buf->buf_curr;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
+ dci_add_buffer_to_list(entry, buf_temp);
+ proc_buf->buf_curr = NULL;
+ }
+ mutex_unlock(&proc_buf->buf_mutex);
+ }
+ if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+ mutex_lock(&entry->write_buf_mutex);
+ entry->in_service = 1;
+ mutex_unlock(&entry->write_buf_mutex);
+ diag_update_sleeping_process(entry->client->tgid,
+ DCI_DATA_TYPE);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+ dci_timer_in_progress = 0;
+}
+
+static int diag_process_single_dci_pkt(unsigned char *buf, int len,
+ int data_source, int token)
+{
+ uint8_t cmd_code = 0;
+
+ if (!buf || len < 0) {
+ pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ cmd_code = *(uint8_t *)buf;
+
+ switch (cmd_code) {
+ case LOG_CMD_CODE:
+ extract_dci_log(buf, len, data_source, token, NULL);
+ break;
+ case EVENT_CMD_CODE:
+ extract_dci_events(buf, len, data_source, token, NULL);
+ break;
+ case EXT_HDR_CMD_CODE:
+ extract_dci_ext_pkt(buf, len, data_source, token);
+ break;
+ case DCI_PKT_RSP_CODE:
+ case DCI_DELAYED_RSP_CODE:
+ extract_dci_pkt_rsp(buf, len, data_source, token);
+ break;
+ case DCI_CONTROL_PKT_CODE:
+ extract_dci_ctrl_pkt(buf, len, token);
+ break;
+ default:
+ pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
+ cmd_code, data_source);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Process the data read from apps userspace client */
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
+{
+ int err = 0;
+
+ if (!buf) {
+ pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
+ return;
+ }
+
+ if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
+ && data_type != DCI_PKT_TYPE) {
+ pr_err("diag: In %s, unsupported data_type: 0x%x\n",
+ __func__, (unsigned int)data_type);
+ return;
+ }
+
+ err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
+ DCI_LOCAL_PROC);
+ if (err)
+ return;
+
+ /* wake up all sleeping DCI clients which have some data */
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+}
+
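+/*
+ * Data from a remote processor can arrive split across reads. A packet
+ * that does not fit in the current read is parked in partial_pkt and
+ * completed from subsequent reads before being processed.
+ */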
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
+{
+ int read_bytes = 0, err = 0;
+ uint16_t dci_pkt_len;
+ struct diag_dci_header_t *header = NULL;
+ int header_len = sizeof(struct diag_dci_header_t);
+ int token = BRIDGE_TO_TOKEN(index);
+
+ if (!buf)
+ return;
+
+ diag_dci_record_traffic(recd_bytes, 0, 0, token);
+
+ if (!partial_pkt.processing)
+ goto start;
+
+ if (partial_pkt.remaining > recd_bytes) {
+ if ((partial_pkt.read_len + recd_bytes) >
+ (MAX_DCI_PACKET_SZ)) {
+ pr_err("diag: Invalid length %d, %d received in %s\n",
+ partial_pkt.read_len, recd_bytes, __func__);
+ goto end;
+ }
+ memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+ recd_bytes);
+ read_bytes += recd_bytes;
+ buf += read_bytes;
+ partial_pkt.read_len += recd_bytes;
+ partial_pkt.remaining -= recd_bytes;
+ } else {
+ if ((partial_pkt.read_len + partial_pkt.remaining) >
+ (MAX_DCI_PACKET_SZ)) {
+ pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+ partial_pkt.read_len,
+ partial_pkt.remaining, __func__);
+ goto end;
+ }
+ memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+ partial_pkt.remaining);
+ read_bytes += partial_pkt.remaining;
+ buf += read_bytes;
+ partial_pkt.read_len += partial_pkt.remaining;
+ partial_pkt.remaining = 0;
+ }
+
+ if (partial_pkt.remaining == 0) {
+ /*
+ * Process the DCI packet past the 4 byte header: start (1 byte) +
+ * version (1 byte) + length (2 bytes).
+ */
+ diag_process_single_dci_pkt(partial_pkt.data + 4,
+ partial_pkt.read_len - header_len,
+ DCI_REMOTE_DATA, token);
+ partial_pkt.read_len = 0;
+ partial_pkt.total_len = 0;
+ partial_pkt.processing = 0;
+ goto start;
+ }
+ goto end;
+
+start:
+ while (read_bytes < recd_bytes) {
+ header = (struct diag_dci_header_t *)buf;
+ dci_pkt_len = header->length;
+
+ if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
+ driver->num_dci_client == 0) {
+ read_bytes += header_len + dci_pkt_len;
+ buf += header_len + dci_pkt_len;
+ continue;
+ }
+
+ if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
+ pr_err("diag: Invalid length in the dci packet field %d\n",
+ dci_pkt_len);
+ break;
+ }
+
+ if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
+ partial_pkt.read_len = recd_bytes - read_bytes;
+ partial_pkt.total_len = dci_pkt_len + header_len;
+ partial_pkt.remaining = partial_pkt.total_len -
+ partial_pkt.read_len;
+ partial_pkt.processing = 1;
+ memcpy(partial_pkt.data, buf, partial_pkt.read_len);
+ break;
+ }
+ /*
+ * Process the DCI packet past the 4 byte header: start (1 byte) +
+ * version (1 byte) + length (2 bytes).
+ */
+ err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+ DCI_REMOTE_DATA, token);
+ if (err)
+ break;
+ read_bytes += header_len + dci_pkt_len;
+ buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
+ }
+end:
+ if (err)
+ return;
+ /* wake up all sleeping DCI clients which have some data */
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+ return;
+}
+
+/* Process the data read from the peripheral dci channels */
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+ int recd_bytes)
+{
+ int read_bytes = 0, err = 0;
+ uint16_t dci_pkt_len;
+ struct diag_dci_pkt_header_t *header = NULL;
+ uint8_t recv_pkt_cmd_code;
+
+ if (!buf || !p_info)
+ return;
+
+ /*
+ * Release wakeup source when there are no more clients to
+ * process DCI data
+ */
+ if (driver->num_dci_client == 0) {
+ diag_ws_reset(DIAG_WS_DCI);
+ return;
+ }
+
+ diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
+ DCI_LOCAL_PROC);
+ while (read_bytes < recd_bytes) {
+ header = (struct diag_dci_pkt_header_t *)buf;
+ recv_pkt_cmd_code = header->pkt_code;
+ dci_pkt_len = header->len;
+
+ /*
+ * Check if the length of the current packet is less than the
+ * remaining bytes in the received buffer. This includes space
+ * for the start byte (1), version byte (1), length bytes (2)
+ * and end byte (1).
+ */
+ if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
+ pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
+ __func__, recd_bytes, dci_pkt_len);
+ diag_ws_release();
+ return;
+ }
+ /*
+ * Process the DCI packet past the 4 byte header: start (1 byte) +
+ * version (1 byte) + length (2 bytes).
+ */
+ err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+ (int)p_info->peripheral,
+ DCI_LOCAL_PROC);
+ if (err) {
+ diag_ws_release();
+ break;
+ }
+ read_bytes += 5 + dci_pkt_len;
+ buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
+ }
+
+ if (err)
+ return;
+ /* wake up all sleeping DCI clients which have some data */
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+ return;
+}
+
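+/*
+ * Look up log_code in the client's log mask: the equipment ID selects
+ * the 514 byte block and the item number selects the bit, skipping
+ * the equip ID and dirty bytes at the head of the block.
+ */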
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+ uint16_t log_code)
+{
+ uint16_t item_num;
+ uint8_t equip_id, *log_mask_ptr, byte_mask;
+ int byte_index, offset;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+
+ equip_id = LOG_GET_EQUIP_ID(log_code);
+ item_num = LOG_GET_ITEM_NUM(log_code);
+ byte_index = item_num/8 + 2;
+ byte_mask = 0x01 << (item_num % 8);
+ offset = equip_id * 514;
+
+ if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
+ pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
+ __func__, offset, log_code, byte_index);
+ return 0;
+ }
+
+ log_mask_ptr = entry->dci_log_mask;
+ log_mask_ptr = log_mask_ptr + offset + byte_index;
+ return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+
+}
+
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+ uint16_t event_id)
+{
+ uint8_t *event_mask_ptr, byte_mask;
+ int byte_index, bit_index;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+
+ byte_index = event_id/8;
+ bit_index = event_id % 8;
+ byte_mask = 0x1 << bit_index;
+
+ if (byte_index >= DCI_EVENT_MASK_SIZE) {
+ pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
+ __func__, event_id, byte_index);
+ return 0;
+ }
+
+ event_mask_ptr = entry->dci_event_mask;
+ event_mask_ptr = event_mask_ptr + byte_index;
+ return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+}
+
+static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
+{
+ if (!header)
+ return -ENOMEM;
+
+ switch (header->cmd_code) {
+ case 0x7d: /* Msg Mask Configuration */
+ case 0x73: /* Log Mask Configuration */
+ case 0x81: /* Event Mask Configuration */
+ case 0x82: /* Event Mask Change */
+ case 0x60: /* Event Mask Toggle */
+ return 1;
+ }
+
+ if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
+ switch (header->subsys_cmd_code) {
+ case 0x60: /* Extended Event Mask Config */
+ case 0x61: /* Extended Msg Mask Config */
+ case 0x62: /* Extended Log Mask Config */
+ case 0x20C: /* Set current Preset ID */
+ case 0x20D: /* Get current Preset ID */
+ case 0x218: /* HDLC Disabled Command */
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
+ int client_id)
+{
+ struct dci_pkt_req_entry_t *entry = NULL;
+ entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ driver->dci_tag++;
+ entry->client_id = client_id;
+ entry->uid = uid;
+ entry->tag = driver->dci_tag;
+ pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
+ entry->client_id, entry->uid, entry->tag);
+ list_add_tail(&entry->track, &driver->dci_req_list);
+
+ return entry;
+}
+
+static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
+{
+ struct list_head *start, *temp;
+ struct dci_pkt_req_entry_t *entry = NULL;
+ list_for_each_safe(start, temp, &driver->dci_req_list) {
+ entry = list_entry(start, struct dci_pkt_req_entry_t, track);
+ if (entry->tag == tag)
+ return entry;
+ }
+ return NULL;
+}
+
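+/*
+ * Drop a request entry once no further responses are expected.
+ * Returns 1 if the entry was deleted, 0 if more delayed responses are
+ * pending and a negative error code on invalid input.
+ */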
+static int diag_dci_remove_req_entry(unsigned char *buf, int len,
+ struct dci_pkt_req_entry_t *entry)
+{
+ uint16_t rsp_count = 0, delayed_rsp_id = 0;
+ if (!buf || len <= 0 || !entry) {
+ pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
+ __func__, buf, len, entry);
+ return -EIO;
+ }
+
+ /* It is an immediate response, delete it from the table */
+ if (*buf != 0x80) {
+ list_del(&entry->track);
+ kfree(entry);
+ entry = NULL;
+ return 1;
+ }
+
+ /* It is a delayed response. Check if the length is valid */
+ if (len < MIN_DELAYED_RSP_LEN) {
+ pr_err("diag: Invalid delayed rsp packet length %d\n", len);
+ return -EINVAL;
+ }
+
+ /*
+ * If the delayed response id field (uint16_t at byte 8) is 0 then
+ * there is only one response and we can remove the request entry.
+ */
+ delayed_rsp_id = *(uint16_t *)(buf + 8);
+ if (delayed_rsp_id == 0) {
+ list_del(&entry->track);
+ kfree(entry);
+ entry = NULL;
+ return 1;
+ }
+
+ /*
+ * Check the response count field (uint16_t at byte 10). The request
+ * entry can be deleted if it is the last response in the sequence.
+ * It is the last response in the sequence if the response count
+ * is 1 or if the sign bit has been dropped.
+ */
+ rsp_count = *(uint16_t *)(buf + 10);
+ if (rsp_count > 0 && rsp_count < 0x1000) {
+ list_del(&entry->track);
+ kfree(entry);
+ entry = NULL;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
+{
+ struct diag_ctrl_dci_status *header = NULL;
+ unsigned char *temp = buf;
+ uint32_t read_len = 0;
+ uint8_t i;
+ int peripheral_mask, status;
+
+ if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
+ pr_err("diag: In %s, invalid buf %pK or length: %d\n",
+ __func__, buf, len);
+ return;
+ }
+
+ if (!VALID_DCI_TOKEN(token)) {
+ pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+ return;
+ }
+
+ header = (struct diag_ctrl_dci_status *)temp;
+ temp += sizeof(struct diag_ctrl_dci_status);
+ read_len += sizeof(struct diag_ctrl_dci_status);
+
+ for (i = 0; i < header->count; i++) {
+ if (read_len > (len - 2)) {
+ pr_err("diag: In %s, Invalid length len: %d\n",
+ __func__, len);
+ return;
+ }
+
+ switch (*(uint8_t *)temp) {
+ case PERIPHERAL_MODEM:
+ peripheral_mask = DIAG_CON_MPSS;
+ break;
+ case PERIPHERAL_LPASS:
+ peripheral_mask = DIAG_CON_LPASS;
+ break;
+ case PERIPHERAL_WCNSS:
+ peripheral_mask = DIAG_CON_WCNSS;
+ break;
+ case PERIPHERAL_SENSORS:
+ peripheral_mask = DIAG_CON_SENSORS;
+ break;
+ default:
+ pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
+ __func__, *(uint8_t *)temp);
+ return;
+ }
+ temp += sizeof(uint8_t);
+ read_len += sizeof(uint8_t);
+
+ status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
+ DIAG_STATUS_CLOSED;
+ temp += sizeof(uint8_t);
+ read_len += sizeof(uint8_t);
+ diag_dci_notify_client(peripheral_mask, status, token);
+ }
+}
+
+static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
+ int token)
+{
+ struct diag_ctrl_dci_handshake_pkt *header = NULL;
+ unsigned char *temp = buf;
+ int err = 0;
+
+ if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
+ return;
+
+ if (!VALID_DCI_TOKEN(token))
+ return;
+
+ header = (struct diag_ctrl_dci_handshake_pkt *)temp;
+ if (header->magic == DCI_MAGIC) {
+ dci_channel_status[token].open = 1;
+ err = dci_ops_tbl[token].send_log_mask(token);
+ if (err) {
+ pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
+ __func__, token, err);
+ }
+ err = dci_ops_tbl[token].send_event_mask(token);
+ if (err) {
+ pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
+ __func__, token, err);
+ }
+ }
+}
+
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
+{
+ unsigned char *temp = buf;
+ uint32_t ctrl_pkt_id;
+
+ diag_ws_on_read(DIAG_WS_DCI, len);
+ if (!buf) {
+ pr_err("diag: Invalid buffer in %s\n", __func__);
+ goto err;
+ }
+
+ if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
+ pr_err("diag: In %s, invalid length %d\n", __func__, len);
+ goto err;
+ }
+
+ /* Skip the Control packet command code */
+ temp += sizeof(uint8_t);
+ len -= sizeof(uint8_t);
+ ctrl_pkt_id = *(uint32_t *)temp;
+ switch (ctrl_pkt_id) {
+ case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
+ dci_process_ctrl_status(temp, len, token);
+ break;
+ case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
+ dci_process_ctrl_handshake_pkt(temp, len, token);
+ break;
+ default:
+ pr_debug("diag: In %s, unknown control pkt %d\n",
+ __func__, ctrl_pkt_id);
+ break;
+ }
+
+err:
+ /*
+ * DCI control packets are not consumed by the clients. Mimic client
+ * consumption by setting and clearing the wakeup source copy_count
+ * explicitly.
+ */
+ diag_ws_on_copy_fail(DIAG_WS_DCI);
+}
+
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+ int token)
+{
+ int tag;
+ struct diag_dci_client_tbl *entry = NULL;
+ void *temp_buf = NULL;
+ uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
+ uint32_t rsp_len = 0;
+ struct diag_dci_buffer_t *rsp_buf = NULL;
+ struct dci_pkt_req_entry_t *req_entry = NULL;
+ unsigned char *temp = buf;
+ int save_req_uid = 0;
+ struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
+
+ if (!buf) {
+ pr_err("diag: Invalid pointer in %s\n", __func__);
+ return;
+ }
+ dci_cmd_code = *(uint8_t *)(temp);
+ if (dci_cmd_code == DCI_PKT_RSP_CODE) {
+ cmd_code_len = sizeof(uint8_t);
+ } else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
+ cmd_code_len = sizeof(uint32_t);
+ } else {
+ pr_err("diag: In %s, invalid command code %d\n", __func__,
+ dci_cmd_code);
+ return;
+ }
+ temp += cmd_code_len;
+ tag = *(int *)temp;
+ temp += sizeof(int);
+
+ /*
+ * The size of the response is the total length minus the length of
+ * the command code and the tag (int).
+ */
+ rsp_len = len - (cmd_code_len + sizeof(int));
+ if ((rsp_len == 0) || (rsp_len > (len - 5))) {
+ pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
+ __func__, len, rsp_len);
+ return;
+ }
+
+ mutex_lock(&driver->dci_mutex);
+ req_entry = diag_dci_get_request_entry(tag);
+ if (!req_entry) {
+ pr_err_ratelimited("diag: No matching client for DCI data\n");
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+
+ entry = diag_dci_get_client_entry(req_entry->client_id);
+ if (!entry) {
+ pr_err("diag: In %s, couldn't find client entry, id:%d\n",
+ __func__, req_entry->client_id);
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+
+ save_req_uid = req_entry->uid;
+ /* Remove the headers and send only the response to this function */
+ delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
+ if (delete_flag < 0) {
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+
+ mutex_lock(&entry->buffers[data_source].buf_mutex);
+ rsp_buf = entry->buffers[data_source].buf_cmd;
+
+ mutex_lock(&rsp_buf->data_mutex);
+ /*
+ * Check if we can fit the data in the rsp buffer. The total length
+ * of the rsp is the rsp length (rsp_len) + DCI_PKT_RSP_TYPE header
+ * (int) + field for length (int) + delete_flag (uint8_t).
+ */
+ if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
+ pr_alert("diag: create capacity for pkt rsp\n");
+ rsp_buf->capacity += 9 + rsp_len;
+ temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
+ GFP_KERNEL);
+ if (!temp_buf) {
+ pr_err("diag: DCI realloc failed\n");
+ mutex_unlock(&rsp_buf->data_mutex);
+ mutex_unlock(&entry->buffers[data_source].buf_mutex);
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ } else {
+ rsp_buf->data = temp_buf;
+ }
+ }
+
+ /* Fill in packet response header information */
+ pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
+ /* Packet Length = Response Length + Length of uid field (int) */
+ pkt_rsp_header.length = rsp_len + sizeof(int);
+ pkt_rsp_header.delete_flag = delete_flag;
+ pkt_rsp_header.uid = save_req_uid;
+ memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
+ sizeof(struct diag_dci_pkt_rsp_header_t));
+ rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
+ memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
+ rsp_buf->data_len += rsp_len;
+ rsp_buf->data_source = data_source;
+
+ mutex_unlock(&rsp_buf->data_mutex);
+
+ /*
+ * Add directly to the list for writing responses to the
+ * userspace as these shouldn't be buffered and shouldn't wait
+ * for log and event buffers to be full
+ */
+ dci_add_buffer_to_list(entry, rsp_buf);
+ mutex_unlock(&entry->buffers[data_source].buf_mutex);
+ mutex_unlock(&driver->dci_mutex);
+}
+
+static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
+{
+ if (!data_buffer) {
+ pr_err("diag: In %s, data buffer is NULL", __func__);
+ return;
+ }
+
+ *(int *)(data_buffer->data + data_buffer->data_len) =
+ DCI_EXT_HDR_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
+ EXT_HDR_LEN);
+ data_buffer->data_len += EXT_HDR_LEN;
+}
+
+static void copy_dci_event(unsigned char *buf, int len,
+ struct diag_dci_client_tbl *client, int data_source,
+ void *ext_hdr)
+{
+ struct diag_dci_buffer_t *data_buffer = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ int err = 0, total_len = 0;
+
+ if (!buf || !client) {
+ pr_err("diag: Invalid pointers in %s", __func__);
+ return;
+ }
+
+ total_len = sizeof(int) + len;
+ if (ext_hdr)
+ total_len += sizeof(int) + EXT_HDR_LEN;
+
+ proc_buf = &client->buffers[data_source];
+ mutex_lock(&proc_buf->buf_mutex);
+ mutex_lock(&proc_buf->health_mutex);
+ err = diag_dci_get_buffer(client, data_source, total_len);
+ if (err) {
+ if (err == -ENOMEM)
+ proc_buf->health.dropped_events++;
+ else
+ pr_err("diag: In %s, invalid packet\n", __func__);
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+ return;
+ }
+
+ data_buffer = proc_buf->buf_curr;
+
+ proc_buf->health.received_events++;
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+
+ mutex_lock(&data_buffer->data_mutex);
+ if (ext_hdr)
+ copy_ext_hdr(data_buffer, ext_hdr);
+
+ *(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, buf, len);
+ data_buffer->data_len += len;
+ data_buffer->data_source = data_source;
+ mutex_unlock(&data_buffer->data_mutex);
+
+}
+
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+ int token, void *ext_hdr)
+{
+ uint16_t event_id, event_id_packet, length, temp_len;
+ uint8_t payload_len, payload_len_field;
+ uint8_t timestamp[8] = {0}, timestamp_len;
+ unsigned char event_data[MAX_EVENT_SIZE];
+ unsigned int total_event_len;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ if (!buf) {
+ pr_err("diag: In %s buffer is NULL\n", __func__);
+ return;
+ }
+ /*
+ * 1 byte for the event code and 2 bytes for the length field.
+ * The length field gives the total length excluding the cmd_code
+ * and the length field itself; event parsing should then run to
+ * the end of the packet.
+ */
+ if (len < 3) {
+ pr_err("diag: In %s invalid len: %d\n", __func__, len);
+ return;
+ }
+ length = *(uint16_t *)(buf + 1); /* total length of event series */
+ if ((length == 0) || (len != (length + 3))) {
+ pr_err("diag: Incoming dci event length: %d is invalid\n",
+ length);
+ return;
+ }
+ /*
+ * Move directly to the start of the event series.
+ * The event parsing should happen from start of event
+ * series till the end.
+ */
+ temp_len = 3;
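+ /*
+ * Each event starts with a 16 bit event_id_packet: bits 0-11
+ * carry the event ID, bits 13-14 the payload length (a value of
+ * 3 means a separate length byte follows) and bit 15 indicates
+ * a truncated, 2 byte timestamp.
+ */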
+ while (temp_len < length) {
+ event_id_packet = *(uint16_t *)(buf + temp_len);
+ event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
+ if (event_id_packet & 0x8000) {
+ /*
+ * The packet carries only the two least significant
+ * bytes of the timestamp.
+ */
+ timestamp_len = 2;
+ } else {
+ /* The packet has the full timestamp. The first event
+ * will always have full timestamp. Save it in the
+ * timestamp buffer and use it for subsequent events if
+ * necessary.
+ */
+ timestamp_len = 8;
+ if ((temp_len + timestamp_len + 2) <= len)
+ memcpy(timestamp, buf + temp_len + 2,
+ timestamp_len);
+ else {
+ pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+ __func__, len, temp_len);
+ return;
+ }
+ }
+ /* 13th and 14th bit represent the payload length */
+ if (((event_id_packet & 0x6000) >> 13) == 3) {
+ payload_len_field = 1;
+ if ((temp_len + timestamp_len + 3) <= len) {
+ payload_len = *(uint8_t *)
+ (buf + temp_len + 2 + timestamp_len);
+ } else {
+ pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+ __func__, len, temp_len);
+ return;
+ }
+ if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
+ ((temp_len + timestamp_len + payload_len + 3) <= len)) {
+ /*
+ * Copy the payload length and the payload
+ * after skipping temp_len bytes for already
+ * parsed packet, timestamp_len for timestamp
+ * buffer, 2 bytes for event_id_packet.
+ */
+ memcpy(event_data + 12, buf + temp_len + 2 +
+ timestamp_len, 1);
+ memcpy(event_data + 13, buf + temp_len + 2 +
+ timestamp_len + 1, payload_len);
+ } else {
+ pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+ (MAX_EVENT_SIZE - 13), payload_len, temp_len);
+ return;
+ }
+ } else {
+ payload_len_field = 0;
+ payload_len = (event_id_packet & 0x6000) >> 13;
+ /*
+ * Copy the payload after skipping temp_len bytes
+ * for already parsed packet, timestamp_len for
+ * timestamp buffer, 2 bytes for event_id_packet.
+ */
+ if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
+ ((temp_len + timestamp_len + payload_len + 2) <= len))
+ memcpy(event_data + 12, buf + temp_len + 2 +
+ timestamp_len, payload_len);
+ else {
+ pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+ (MAX_EVENT_SIZE - 12), payload_len, temp_len);
+ return;
+ }
+ }
+
+ /* Before copying the data to userspace, check if we are still
+ * within the buffer limit. This is an error case, don't count
+ * it towards the health statistics.
+ *
+ * Here, the offset of 2 bytes(uint16_t) is for the
+ * event_id_packet length
+ */
+ temp_len += sizeof(uint16_t) + timestamp_len +
+ payload_len_field + payload_len;
+ if (temp_len > len) {
+ pr_err("diag: Invalid length in %s, len: %d, read: %d",
+ __func__, len, temp_len);
+ return;
+ }
+
+ /*
+ * 2 bytes for the event ID; the timestamp length is hard coded
+ * to 8 as individual events carry a full timestamp.
+ */
+ *(uint16_t *)(event_data) = 10 +
+ payload_len_field + payload_len;
+ *(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
+ memcpy(event_data + 4, timestamp, 8);
+ /*
+ * 2 bytes for the event length field which is added to the
+ * event data.
+ */
+ total_event_len = 2 + 10 + payload_len_field + payload_len;
+ /* parse through event mask tbl of each client and check mask */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl,
+ track);
+ if (entry->client_info.token != token)
+ continue;
+ if (diag_dci_query_event_mask(entry, event_id)) {
+ /* copy to client buffer */
+ copy_dci_event(event_data, total_event_len,
+ entry, data_source, ext_hdr);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+ }
+}
+
+static void copy_dci_log(unsigned char *buf, int len,
+ struct diag_dci_client_tbl *client, int data_source,
+ void *ext_hdr)
+{
+ uint16_t log_length = 0;
+ struct diag_dci_buffer_t *data_buffer = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ int err = 0, total_len = 0;
+
+ if (!buf || !client) {
+ pr_err("diag: Invalid pointers in %s", __func__);
+ return;
+ }
+
+ log_length = *(uint16_t *)(buf + 2);
+ if (log_length > USHRT_MAX - 4) {
+ pr_err("diag: Integer overflow in %s, log_len: %d",
+ __func__, log_length);
+ return;
+ }
+ total_len = sizeof(int) + log_length;
+ if (ext_hdr)
+ total_len += sizeof(int) + EXT_HDR_LEN;
+
+ /*
+ * Check that we are within len. The check includes the first
+ * 4 bytes for the log code (2) and the length bytes (2).
+ */
+ if ((log_length + sizeof(uint16_t) + 2) > len) {
+ pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
+ __func__, log_length, len);
+ return;
+ }
+
+ proc_buf = &client->buffers[data_source];
+ mutex_lock(&proc_buf->buf_mutex);
+ mutex_lock(&proc_buf->health_mutex);
+ err = diag_dci_get_buffer(client, data_source, total_len);
+ if (err) {
+ if (err == -ENOMEM)
+ proc_buf->health.dropped_logs++;
+ else
+ pr_err("diag: In %s, invalid packet\n", __func__);
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+ return;
+ }
+
+ data_buffer = proc_buf->buf_curr;
+ proc_buf->health.received_logs++;
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+
+ mutex_lock(&data_buffer->data_mutex);
+ if (!data_buffer->data) {
+ mutex_unlock(&data_buffer->data_mutex);
+ return;
+ }
+ if (ext_hdr)
+ copy_ext_hdr(data_buffer, ext_hdr);
+
+ *(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
+ log_length);
+ data_buffer->data_len += log_length;
+ data_buffer->data_source = data_source;
+ mutex_unlock(&data_buffer->data_mutex);
+}
+
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+ void *ext_hdr)
+{
+ uint16_t log_code, read_bytes = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ if (!buf) {
+ pr_err("diag: In %s buffer is NULL\n", __func__);
+ return;
+ }
+ /*
+ * The first eight bytes for the incoming log packet contains
+ * Command code (2), the length of the packet (2), the length
+ * of the log (2) and log code (2)
+ */
+ if (len < 8) {
+ pr_err("diag: In %s invalid len: %d\n", __func__, len);
+ return;
+ }
+
+ log_code = *(uint16_t *)(buf + 6);
+ read_bytes += sizeof(uint16_t) + 6;
+
+ /* parse through log mask table of each client and check mask */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ if (diag_dci_query_log_mask(entry, log_code)) {
+ pr_debug("\t log code %x needed by client %d",
+ log_code, entry->client->tgid);
+ /* copy to client buffer */
+ copy_dci_log(buf, len, entry, data_source, ext_hdr);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+}
+
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+ int token)
+{
+ uint8_t version, pkt_cmd_code = 0;
+ unsigned char *pkt = NULL;
+
+ if (!buf) {
+ pr_err("diag: In %s buffer is NULL\n", __func__);
+ return;
+ }
+ if (len < (EXT_HDR_LEN + sizeof(uint8_t))) {
+ pr_err("diag: In %s invalid len: %d\n", __func__, len);
+ return;
+ }
+
+ version = *(uint8_t *)(buf + 1); /* version byte follows the cmd code */
+ if (version < EXT_HDR_VERSION) {
+ pr_err("diag: %s, Extended header with invalid version: %d\n",
+ __func__, version);
+ return;
+ }
+
+ pkt = buf + EXT_HDR_LEN;
+ pkt_cmd_code = *(uint8_t *)pkt;
+ len -= EXT_HDR_LEN;
+
+ switch (pkt_cmd_code) {
+ case LOG_CMD_CODE:
+ extract_dci_log(pkt, len, data_source, token, buf);
+ break;
+ case EVENT_CMD_CODE:
+ extract_dci_events(pkt, len, data_source, token, buf);
+ break;
+ default:
+ pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
+ __func__, pkt_cmd_code, data_source);
+ return;
+ }
+}
+
+void diag_dci_channel_open_work(struct work_struct *work)
+{
+ int i, j;
+ char dirty_bits[16];
+ uint8_t *client_log_mask_ptr;
+ uint8_t *log_mask_ptr;
+ int ret;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ /* Update apps and peripheral(s) with the dci log and event masks */
+ memset(dirty_bits, 0, 16 * sizeof(uint8_t));
+
+ /*
+ * From the log entries used by each client, determine which
+ * entries in the cumulative log mask need to be updated on
+ * the peripheral.
+ */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != DCI_LOCAL_PROC)
+ continue;
+ client_log_mask_ptr = entry->dci_log_mask;
+ for (j = 0; j < 16; j++) {
+ if (*(client_log_mask_ptr+1))
+ dirty_bits[j] = 1;
+ client_log_mask_ptr += 514;
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+
+ mutex_lock(&dci_log_mask_mutex);
+ /* Update the appropriate dirty bits in the cumulative mask */
+ log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+ for (i = 0; i < 16; i++) {
+ if (dirty_bits[i])
+ *(log_mask_ptr+1) = dirty_bits[i];
+
+ log_mask_ptr += 514;
+ }
+ mutex_unlock(&dci_log_mask_mutex);
+
+ /* Send updated mask to userspace clients */
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* Send updated log mask to peripherals */
+ ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);
+
+ /* Send updated event mask to userspace clients */
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* Send updated event mask to peripheral */
+ ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
+}
+
+void diag_dci_notify_client(int peripheral_mask, int data, int proc)
+{
+ int stat = 0;
+ struct siginfo info;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ struct pid *pid_struct = NULL;
+ struct task_struct *dci_task = NULL;
+
+ memset(&info, 0, sizeof(struct siginfo));
+ info.si_code = SI_QUEUE;
+ info.si_int = (peripheral_mask | data);
+ if (data == DIAG_STATUS_OPEN)
+ dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
+ else
+ dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
+
+ /* Notify the DCI process that the peripheral DCI Channel is up */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != proc)
+ continue;
+ if (entry->client_info.notification_list & peripheral_mask) {
+ info.si_signo = entry->client_info.signal_type;
+ pid_struct = find_get_pid(entry->tgid);
+ if (pid_struct) {
+ dci_task = get_pid_task(pid_struct,
+ PIDTYPE_PID);
+ if (!dci_task) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: dci client with pid = %d Exited..\n",
+ entry->tgid);
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+ if (entry->client &&
+ entry->tgid == dci_task->tgid) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "entry tgid = %d, dci client tgid = %d\n",
+ entry->tgid, dci_task->tgid);
+ stat = send_sig_info(
+ entry->client_info.signal_type,
+ &info, dci_task);
+ if (stat)
+ pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
+ info.si_int, stat);
+ } else
+ pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
+ info.si_int, stat);
+ }
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+}
+
+static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
+ unsigned char *buf, int len, int tag)
+{
+ int i, status = DIAG_DCI_NO_ERROR;
+ uint32_t write_len = 0;
+ struct diag_dci_pkt_header_t header;
+
+ if (!entry)
+ return -EIO;
+
+ if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
+ pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
+ __func__, len, DIAG_MAX_REQ_SIZE);
+ return -EIO;
+ }
+
+ if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
+ pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
+ __func__, len,
+ (int)(DCI_BUF_SIZE - sizeof(header) - sizeof(uint8_t)));
+ return -EIO;
+ }
+
+ mutex_lock(&driver->dci_mutex);
+ /* prepare DCI packet */
+ header.start = CONTROL_CHAR;
+ header.version = 1;
+ header.len = len + sizeof(int) + sizeof(uint8_t);
+ header.pkt_code = DCI_PKT_RSP_CODE;
+ header.tag = tag;
+ memcpy(driver->apps_dci_buf, &header, sizeof(header));
+ write_len += sizeof(header);
+ memcpy(driver->apps_dci_buf + write_len, buf, len);
+ write_len += len;
+ *(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
+ write_len += sizeof(uint8_t);
+
+ /* This command is registered locally on the Apps */
+ if (entry->proc == APPS_DATA) {
+ diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
+ DCI_PKT_TYPE);
+ diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_ERROR;
+ }
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ if (entry->proc == i) {
+ status = 1;
+ break;
+ }
+
+ if (status) {
+ status = diag_dci_write_proc(entry->proc,
+ DIAG_DATA_TYPE,
+ driver->apps_dci_buf,
+ write_len);
+ } else {
+ pr_err("diag: Cannot send packet to peripheral %d",
+ entry->proc);
+ status = DIAG_DCI_SEND_DATA_FAIL;
+ }
+ mutex_unlock(&driver->dci_mutex);
+ return status;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+unsigned char *dci_get_buffer_from_bridge(int token)
+{
+ uint8_t retries = 0, max_retries = 3;
+ unsigned char *buf = NULL;
+
+ do {
+ buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+ dci_ops_tbl[token].mempool);
+ if (!buf) {
+ usleep_range(5000, 5100);
+ retries++;
+ } else
+ break;
+ } while (retries < max_retries);
+
+ return buf;
+}
+
+int diag_dci_write_bridge(int token, unsigned char *buf, int len)
+{
+ return diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, len);
+}
+
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
+{
+ int token = BRIDGE_TO_TOKEN(index);
+ if (!VALID_DCI_TOKEN(token)) {
+ pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
+ return -EINVAL;
+ }
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
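+/*
+ * Frame a DCI request for a remote processor: CONTROL_CHAR | version |
+ * length (2 bytes) | cmd code | tag (int) | payload | CONTROL_CHAR,
+ * where length covers the cmd code, the tag and the payload.
+ */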
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+ int token)
+{
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int ret = DIAG_DCI_NO_ERROR;
+ uint32_t write_len = 0;
+
+ if (!data)
+ return -EIO;
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ /*
+ * The Length of the DCI packet = length of the command + tag (int) +
+ * the command code size (uint8_t)
+ */
+ dci_header.length = len + sizeof(int) + sizeof(uint8_t);
+ dci_header.cmd_code = DCI_PKT_RSP_CODE;
+
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ *(int *)(buf + write_len) = tag;
+ write_len += sizeof(int);
+ memcpy(buf + write_len, data, len);
+ write_len += len;
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+
+ ret = diag_dci_write_bridge(token, buf, write_len);
+ if (ret) {
+ pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
+ token, ret);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ } else {
+ ret = DIAG_DCI_NO_ERROR;
+ }
+
+ return ret;
+}
+#else
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+ int token)
+{
+ return DIAG_DCI_NO_ERROR;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_dci_send_handshake_pkt(int index)
+{
+ int err = 0;
+ int token = BRIDGE_TO_TOKEN(index);
+ int write_len = 0;
+ struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+
+ if (!VALID_DCI_TOKEN(token)) {
+ pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+ return -EINVAL;
+ }
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ /* Include the cmd code (uint8_t) in the length */
+ dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+ memcpy(buf, &dci_header, sizeof(dci_header));
+ write_len += sizeof(dci_header);
+
+ ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
+ /*
+ * The control packet data length accounts for the version (uint32_t)
+ * of the packet and the magic number (uint32_t).
+ */
+ ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
+ ctrl_pkt.version = 1;
+ ctrl_pkt.magic = DCI_MAGIC;
+ memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
+ write_len += sizeof(ctrl_pkt);
+
+ *(uint8_t *)(buf + write_len) = CONTROL_CHAR;
+ write_len += sizeof(uint8_t);
+
+ err = diag_dci_write_bridge(token, buf, write_len);
+ if (err) {
+ pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
+ token, err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ return err;
+ }
+
+ mod_timer(&(dci_channel_status[token].wait_time),
+ jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+
+ return 0;
+}
+#else
+int diag_dci_send_handshake_pkt(int index)
+{
+ return 0;
+}
+#endif
+
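+/*
+ * Service commands that the apps processor answers itself (download
+ * mode, version, extended build ID, log on demand, poll and a few
+ * subsystem commands) and frame the result as a DCI packet response.
+ */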
+static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
+ unsigned char *req_buf, int req_len,
+ int tag)
+{
+ uint8_t cmd_code, subsys_id, i, goto_download = 0;
+ uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
+ uint16_t ss_cmd_code;
+ uint32_t write_len = 0;
+ unsigned char *dest_buf = driver->apps_dci_buf;
+ unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
+ struct diag_dci_pkt_header_t dci_header;
+
+ if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
+ return -EIO;
+
+ cmd_code = pkt_header->cmd_code;
+ subsys_id = pkt_header->subsys_id;
+ ss_cmd_code = pkt_header->subsys_cmd_code;
+
+ if (cmd_code == DIAG_CMD_DOWNLOAD) {
+ *payload_ptr = DIAG_CMD_DOWNLOAD;
+ write_len = sizeof(uint8_t);
+ goto_download = 1;
+ goto fill_buffer;
+ } else if (cmd_code == DIAG_CMD_VERSION) {
+ if (chk_polling_response()) {
+ for (i = 0; i < 55; i++, write_len++, payload_ptr++)
+ *(payload_ptr) = 0;
+ goto fill_buffer;
+ }
+ } else if (cmd_code == DIAG_CMD_EXT_BUILD) {
+ if (chk_polling_response()) {
+ *payload_ptr = DIAG_CMD_EXT_BUILD;
+ write_len = sizeof(uint8_t);
+ payload_ptr += sizeof(uint8_t);
+ for (i = 0; i < 8; i++, write_len++, payload_ptr++)
+ *(payload_ptr) = 0;
+ *(int *)(payload_ptr) = chk_config_get_id();
+ write_len += sizeof(int);
+ goto fill_buffer;
+ }
+ } else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
+ write_len = diag_cmd_log_on_demand(req_buf, req_len,
+ payload_ptr,
+ APPS_BUF_SIZE - header_len);
+ goto fill_buffer;
+ } else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
+ return DIAG_DCI_TABLE_ERR;
+ }
+
+ if (subsys_id == DIAG_SS_DIAG) {
+ if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(uint32_t *)(payload_ptr + write_len) =
+ DIAG_MAX_REQ_SIZE;
+ write_len += sizeof(uint32_t);
+ } else if (ss_cmd_code == DIAG_DIAG_STM) {
+ write_len = diag_process_stm_cmd(req_buf, payload_ptr);
+ }
+ } else if (subsys_id == DIAG_SS_PARAMS) {
+ if (ss_cmd_code == DIAG_DIAG_POLL) {
+ if (chk_polling_response()) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ payload_ptr += write_len;
+ for (i = 0; i < 12; i++, write_len++) {
+ *(payload_ptr) = 0;
+ payload_ptr++;
+ }
+ }
+ } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(int *)(payload_ptr + write_len) = wrap_enabled;
+ write_len += sizeof(int);
+ } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
+ wrap_enabled = true;
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(uint16_t *)(payload_ptr + write_len) = wrap_count;
+ write_len += sizeof(uint16_t);
+ } else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
+ write_len = diag_cmd_get_mobile_id(req_buf, req_len,
+ payload_ptr,
+ APPS_BUF_SIZE - header_len);
+ }
+ }
+
+fill_buffer:
+ if (write_len > 0) {
+ /* Check if we are within the range of the buffer*/
+ if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
+ pr_err("diag: In %s, invalid length %d\n", __func__,
+ write_len + header_len);
+ return -ENOMEM;
+ }
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ /*
+ * Length of the rsp pkt = actual data len + pkt rsp code
+ * (uint8_t) + tag (int)
+ */
+ dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
+ dci_header.pkt_code = DCI_PKT_RSP_CODE;
+ dci_header.tag = tag;
+ driver->in_busy_dcipktdata = 1;
+ memcpy(dest_buf, &dci_header, header_len);
+ diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
+ dci_header.len);
+ driver->in_busy_dcipktdata = 0;
+
+ if (goto_download) {
+ /*
+ * Sleep for some time so that the response reaches the
+ * client. The value of 5000 us was chosen empirically as an
+ * optimum time for the response to reach the client.
+ */
+ usleep_range(5000, 5100);
+ /* call download API */
+ msm_set_restart_mode(RESTART_DLOAD);
+ pr_alert("diag: download mode set, Rebooting SoC..\n");
+ kernel_restart(NULL);
+ }
+ return DIAG_DCI_NO_ERROR;
+ }
+
+ return DIAG_DCI_TABLE_ERR;
+}
+
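+/*
+ * Handle a packet request from a DCI client: validate the client,
+ * reject mask configuration commands, register a transaction tag for
+ * matching the response, then route the request to the remote
+ * processor, the apps handler above, or the peripheral that
+ * registered the command.
+ */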
+static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
+{
+ int ret = DIAG_DCI_TABLE_ERR;
+ int common_cmd = 0;
+ struct diag_pkt_header_t *header = NULL;
+ unsigned char *temp = buf;
+ unsigned char *req_buf = NULL;
+ uint8_t retry_count = 0, max_retries = 3;
+ uint32_t read_len = 0, req_len = len;
+ struct dci_pkt_req_entry_t *req_entry = NULL;
+ struct diag_dci_client_tbl *dci_entry = NULL;
+ struct dci_pkt_req_t req_hdr;
+ struct diag_cmd_reg_t *reg_item;
+ struct diag_cmd_reg_entry_t reg_entry;
+ struct diag_cmd_reg_entry_t *temp_entry;
+
+ if (!buf)
+ return -EIO;
+
+ if (len <= sizeof(struct dci_pkt_req_t) || len > DCI_REQ_BUF_SIZE) {
+ pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
+ return -EIO;
+ }
+
+ req_hdr = *(struct dci_pkt_req_t *)temp;
+ temp += sizeof(struct dci_pkt_req_t);
+ read_len += sizeof(struct dci_pkt_req_t);
+ req_len -= sizeof(struct dci_pkt_req_t);
+ req_buf = temp; /* Start of the Request */
+ header = (struct diag_pkt_header_t *)temp;
+ temp += sizeof(struct diag_pkt_header_t);
+ read_len += sizeof(struct diag_pkt_header_t);
+ if (read_len >= DCI_REQ_BUF_SIZE) {
+ pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
+ read_len);
+ return -EIO;
+ }
+
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
+ if (!dci_entry) {
+ pr_err("diag: Invalid client %d in %s\n",
+ req_hdr.client_id, __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_REG;
+ }
+
+ /* Check if the command is allowed on DCI */
+ if (diag_dci_filter_commands(header)) {
+ pr_debug("diag: command not supported %d %d %d",
+ header->cmd_code, header->subsys_id,
+ header->subsys_cmd_code);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_SEND_DATA_FAIL;
+ }
+
+ common_cmd = diag_check_common_cmd(header);
+ if (common_cmd < 0) {
+ pr_debug("diag: error in checking common command, %d\n",
+ common_cmd);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_SEND_DATA_FAIL;
+ }
+
+ /*
+ * Previous packet is yet to be consumed by the client. Wait
+ * till the buffer is free.
+ */
+ while (retry_count < max_retries) {
+ retry_count++;
+ if (driver->in_busy_dcipktdata)
+ usleep_range(10000, 10100);
+ else
+ break;
+ }
+ /* The buffer is still busy */
+ if (driver->in_busy_dcipktdata) {
+ pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -EAGAIN;
+ }
+
+ /* Register this new DCI packet */
+ req_entry = diag_register_dci_transaction(req_hdr.uid,
+ req_hdr.client_id);
+ if (!req_entry) {
+ pr_alert("diag: registering new DCI transaction failed\n");
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_REG;
+ }
+ mutex_unlock(&driver->dci_mutex);
+
+ /*
+ * If the client has registered for remote data, route the packet to the
+ * remote processor
+ */
+ if (dci_entry->client_info.token > 0) {
+ ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
+ dci_entry->client_info.token);
+ return ret;
+ }
+
+ /* Check if it is a dedicated Apps command */
+ ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
+ req_entry->tag);
+ if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
+ return ret;
+
+ reg_entry.cmd_code = header->cmd_code;
+ reg_entry.subsys_id = header->subsys_id;
+ reg_entry.cmd_code_hi = header->subsys_cmd_code;
+ reg_entry.cmd_code_lo = header->subsys_cmd_code;
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
+ if (temp_entry) {
+ reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+ entry);
+ ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
+ req_entry->tag);
+ } else {
+ DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
+ reg_entry.cmd_code, reg_entry.subsys_id,
+ reg_entry.cmd_code_hi);
+ }
+ mutex_unlock(&driver->cmd_reg_mutex);
+
+ return ret;
+}
+
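+/*
+ * Entry point for DCI transactions from userspace. The first int of
+ * the buffer selects the type: a positive value is a packet
+ * request/response transaction, DCI_LOG_TYPE updates the client's log
+ * mask and DCI_EVENT_TYPE updates its event mask.
+ */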
+int diag_process_dci_transaction(unsigned char *buf, int len)
+{
+ unsigned char *temp = buf;
+ uint16_t log_code, item_num;
+ int ret = -1, found = 0, client_id = 0, client_token = 0;
+ int count, set_mask, num_codes, bit_index, event_id, offset = 0;
+ unsigned int byte_index, read_len = 0;
+ uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
+ uint8_t *event_mask_ptr;
+ struct diag_dci_client_tbl *dci_entry = NULL;
+
+ if (!temp) {
+ pr_err("diag: Invalid buffer in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* This is Pkt request/response transaction */
+ if (*(int *)temp > 0) {
+ return diag_process_dci_pkt_rsp(buf, len);
+ } else if (*(int *)temp == DCI_LOG_TYPE) {
+ /*
+ * The minimum length of a log mask config is 12 bytes, plus
+ * 2 bytes for at least one log code to be set or reset.
+ */
+ if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length in %s\n", __func__);
+ return -EIO;
+ }
+
+ /* Extract each log code and put in client table */
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ client_id = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ set_mask = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ num_codes = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+
+ /* find client table entry */
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = diag_dci_get_client_entry(client_id);
+ if (!dci_entry) {
+ pr_err("diag: In %s, invalid client\n", __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ client_token = dci_entry->client_info.token;
+
+ if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+ pr_err("diag: dci: Invalid number of log codes %d\n",
+ num_codes);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+
+ head_log_mask_ptr = dci_entry->dci_log_mask;
+ if (!head_log_mask_ptr) {
+ pr_err("diag: dci: Invalid Log mask pointer in %s\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -ENOMEM;
+ }
+ pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
+ count = 0; /* iterator for extracting log codes */
+
+ while (count < num_codes) {
+ if (read_len >= USER_SPACE_DATA) {
+				pr_err("diag: dci: Invalid length for log type in %s\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+ log_code = *(uint16_t *)temp;
+ equip_id = LOG_GET_EQUIP_ID(log_code);
+ item_num = LOG_GET_ITEM_NUM(log_code);
+ byte_index = item_num/8 + 2;
+ if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
+ pr_err("diag: dci: Log type, invalid byte index\n");
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ byte_mask = 0x01 << (item_num % 8);
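+			/*
+			 * Worked example (assuming the usual 4-bit equip id,
+			 * 12-bit item number split of a log code): log code
+			 * 0x11A6 gives equip_id 0x1 and item_num 422, so
+			 * byte_index = 422/8 + 2 = 54 and
+			 * byte_mask = 1 << (422 % 8) = 0x40.
+			 */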
+			/*
+			 * Parse through the log mask table and find the
+			 * relevant equip id range; each range spans 514 bytes
+			 * (1 equip id byte + 1 dirty byte + 512 mask bytes).
+			 */
+ log_mask_ptr = head_log_mask_ptr;
+ found = 0;
+ offset = 0;
+ while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
+ if (*log_mask_ptr == equip_id) {
+ found = 1;
+					pr_debug("diag: found equip id = %x at %pK\n",
+ equip_id, log_mask_ptr);
+ break;
+ } else {
+					pr_debug("diag: equip id = %x does not match range equip id = %d\n",
+						 equip_id, *log_mask_ptr);
+ log_mask_ptr += 514;
+ offset += 514;
+ }
+ }
+ if (!found) {
+ pr_err("diag: dci equip id not found\n");
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ *(log_mask_ptr+1) = 1; /* set the dirty byte */
+ log_mask_ptr = log_mask_ptr + byte_index;
+ if (set_mask)
+ *log_mask_ptr |= byte_mask;
+ else
+ *log_mask_ptr &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_log_mask(
+ offset, byte_index,
+ byte_mask, client_token);
+ temp += 2;
+ read_len += 2;
+ count++;
+ ret = DIAG_DCI_NO_ERROR;
+ }
+ /* send updated mask to userspace clients */
+ if (client_token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* send updated mask to peripherals */
+ ret = dci_ops_tbl[client_token].send_log_mask(client_token);
+ mutex_unlock(&driver->dci_mutex);
+ } else if (*(int *)temp == DCI_EVENT_TYPE) {
+		/*
+		 * The minimum length of an event mask config is 12 bytes, plus
+		 * 4 bytes for at least one event id to be set or reset.
+		 */
+ if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length in %s\n", __func__);
+ return -EIO;
+ }
+
+ /* Extract each event id and put in client table */
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ client_id = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ set_mask = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ num_codes = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+
+ /* find client table entry */
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = diag_dci_get_client_entry(client_id);
+ if (!dci_entry) {
+ pr_err("diag: In %s, invalid client\n", __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ client_token = dci_entry->client_info.token;
+
+		/*
+		 * Check for a positive number of event ids. The event ids
+		 * must also fit in the buffer along with set_mask and
+		 * num_codes, which are 4 bytes each.
+		 */
+ if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+ pr_err("diag: dci: Invalid number of event ids %d\n",
+ num_codes);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+
+ event_mask_ptr = dci_entry->dci_event_mask;
+ if (!event_mask_ptr) {
+ pr_err("diag: dci: Invalid event mask pointer in %s\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -ENOMEM;
+ }
+ pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
+		count = 0; /* iterator for extracting event ids */
+ while (count < num_codes) {
+ if (read_len >= USER_SPACE_DATA) {
+				pr_err("diag: dci: Invalid length for event type in %s\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+ event_id = *(int *)temp;
+ byte_index = event_id/8;
+ if (byte_index >= DCI_EVENT_MASK_SIZE) {
+ pr_err("diag: dci: Event type, invalid byte index\n");
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ bit_index = event_id % 8;
+ byte_mask = 0x1 << bit_index;
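+			/*
+			 * Worked example: event_id 90 gives byte_index 11,
+			 * bit_index 2 and byte_mask 0x04.
+			 */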
+ /*
+ * Parse through event mask table and set
+ * relevant byte & bit combination
+ */
+ if (set_mask)
+ *(event_mask_ptr + byte_index) |= byte_mask;
+ else
+ *(event_mask_ptr + byte_index) &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_event_mask(byte_index, byte_mask,
+ client_token);
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ count++;
+ ret = DIAG_DCI_NO_ERROR;
+ }
+ /* send updated mask to userspace clients */
+ if (dci_entry->client_info.token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* send updated mask to peripherals */
+ ret = dci_ops_tbl[client_token].send_event_mask(client_token);
+ mutex_unlock(&driver->dci_mutex);
+ } else {
+ pr_alert("diag: Incorrect DCI transaction\n");
+ }
+ return ret;
+}
+
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.client_id == client_id)
+ return entry;
+ }
+ return NULL;
+}
+
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ struct pid *pid_struct = NULL;
+ struct task_struct *task_s = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ pid_struct = find_get_pid(entry->tgid);
+ if (!pid_struct) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid pid doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+ if (!task_s) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid task doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ if (task_s == entry->client)
+ if (entry->client->tgid == tgid)
+ return entry;
+ }
+ return NULL;
+}
+
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
+{
+ uint8_t *event_mask_ptr, *update_ptr = NULL;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ bool is_set = false;
+
+ mutex_lock(&dci_event_mask_mutex);
+ update_ptr = dci_ops_tbl[token].event_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return;
+ }
+ update_ptr += offset;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ event_mask_ptr = entry->dci_event_mask;
+ event_mask_ptr += offset;
+ if ((*event_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the event mask set */
+ break;
+ }
+ }
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
+ mutex_unlock(&dci_event_mask_mutex);
+}
+
+void diag_dci_invalidate_cumulative_event_mask(int token)
+{
+ int i = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ uint8_t *event_mask_ptr, *update_ptr = NULL;
+
+ mutex_lock(&dci_event_mask_mutex);
+ update_ptr = dci_ops_tbl[token].event_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return;
+ }
+
+ create_dci_event_mask_tbl(update_ptr);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ event_mask_ptr = entry->dci_event_mask;
+ for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
+ *(update_ptr+i) |= *(event_mask_ptr+i);
+ }
+ mutex_unlock(&dci_event_mask_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_event_mask_remote(int token)
+{
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+ struct diag_ctrl_event_mask event_mask;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int event_header_size = sizeof(struct diag_ctrl_event_mask);
+ int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+ unsigned char *event_mask_ptr = NULL;
+ uint32_t write_len = 0;
+
+ mutex_lock(&dci_event_mask_mutex);
+ event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
+ if (!event_mask_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return -EINVAL;
+ }
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ mutex_unlock(&dci_event_mask_mutex);
+ return -EAGAIN;
+ }
+
+ /* Frame the DCI header */
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+ event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+ event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+ event_mask.stream_id = DCI_MASK_STREAM;
+ event_mask.status = DIAG_CTRL_MASK_VALID;
+ event_mask.event_config = 0; /* event config */
+ event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
+ for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+ if (event_mask_ptr[i] != 0) {
+ event_mask.event_config = 1;
+ break;
+ }
+ }
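+	/*
+	 * Assembled frame: DCI header | event mask control header |
+	 * DCI_EVENT_MASK_SIZE mask bytes | CONTROL_CHAR terminator.
+	 */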
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ memcpy(buf + write_len, &event_mask, event_header_size);
+ write_len += event_header_size;
+ memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+ write_len += DCI_EVENT_MASK_SIZE;
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+ err = diag_dci_write_bridge(token, buf, write_len);
+ if (err) {
+ pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
+ token, err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ ret = err;
+ } else {
+ ret = DIAG_DCI_NO_ERROR;
+ }
+ mutex_unlock(&dci_event_mask_mutex);
+ return ret;
+}
+#endif
+
+int diag_send_dci_event_mask(int token)
+{
+ void *buf = event_mask.update_buf;
+ struct diag_ctrl_event_mask header;
+ int header_size = sizeof(struct diag_ctrl_event_mask);
+ int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
+ unsigned char *event_mask_ptr = NULL;
+
+ mutex_lock(&dci_event_mask_mutex);
+ event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
+ if (!event_mask_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return -EINVAL;
+ }
+
+ mutex_lock(&event_mask.lock);
+ /* send event mask update */
+ header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+ header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+ header.stream_id = DCI_MASK_STREAM;
+ header.status = DIAG_CTRL_MASK_VALID;
+ header.event_config = 0; /* event config */
+ header.event_mask_size = DCI_EVENT_MASK_SIZE;
+ for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+ if (event_mask_ptr[i] != 0) {
+ header.event_config = 1;
+ break;
+ }
+ }
+ memcpy(buf, &header, header_size);
+ memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ /*
+ * Don't send to peripheral if its regular channel
+ * is down. It may also mean that the peripheral doesn't
+ * support DCI.
+ */
+ if (check_peripheral_dci_support(i, DCI_LOCAL_PROC)) {
+ err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+ header_size + DCI_EVENT_MASK_SIZE);
+ if (err != DIAG_DCI_NO_ERROR)
+ ret = DIAG_DCI_SEND_DATA_FAIL;
+ }
+ }
+
+ mutex_unlock(&event_mask.lock);
+ mutex_unlock(&dci_event_mask_mutex);
+
+ return ret;
+}
+
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+ uint8_t byte_mask, int token)
+{
+ uint8_t *log_mask_ptr, *update_ptr = NULL;
+ bool is_set = false;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ mutex_lock(&dci_log_mask_mutex);
+ update_ptr = dci_ops_tbl[token].log_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return;
+ }
+
+ update_ptr += offset;
+ /* update the dirty bit */
+ *(update_ptr+1) = 1;
+ update_ptr = update_ptr + byte_index;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ log_mask_ptr = entry->dci_log_mask;
+ log_mask_ptr = log_mask_ptr + offset + byte_index;
+ if ((*log_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the log mask set */
+ break;
+ }
+ }
+
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
+ mutex_unlock(&dci_log_mask_mutex);
+}
+
+void diag_dci_invalidate_cumulative_log_mask(int token)
+{
+ int i = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ uint8_t *log_mask_ptr, *update_ptr = NULL;
+
+ /* Clear the composite mask and redo all the masks */
+ mutex_lock(&dci_log_mask_mutex);
+ update_ptr = dci_ops_tbl[token].log_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return;
+ }
+
+ create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ log_mask_ptr = entry->dci_log_mask;
+ for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
+ *(update_ptr+i) |= *(log_mask_ptr+i);
+ }
+ mutex_unlock(&dci_log_mask_mutex);
+}
+
+static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
+{
+ struct diag_ctrl_log_mask header;
+ int header_len = sizeof(struct diag_ctrl_log_mask);
+
+ header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+ header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
+ header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
+ header.stream_id = DCI_MASK_STREAM;
+ header.status = 3;
+ header.equip_id = *src_ptr;
+ header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
+ memcpy(dest_ptr, &header, header_len);
+ memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);
+
+ return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token)
+{
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int log_header_size = sizeof(struct diag_ctrl_log_mask);
+ uint8_t *log_mask_ptr = NULL;
+ int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+ int updated;
+ uint32_t write_len = 0;
+
+ mutex_lock(&dci_log_mask_mutex);
+ log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
+ if (!log_mask_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return -EINVAL;
+ }
+
+ /* DCI header is common to all equipment IDs */
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+ for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+ updated = 1;
+ write_len = 0;
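+		/* Skip equip ids whose dirty byte is not set */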
+ if (!*(log_mask_ptr + 1)) {
+ log_mask_ptr += 514;
+ continue;
+ }
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ mutex_unlock(&dci_log_mask_mutex);
+ return -EAGAIN;
+ }
+
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+ err = diag_dci_write_bridge(token, buf, write_len);
+ if (err) {
+ pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
+ i, token, err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ updated = 0;
+ }
+ if (updated)
+ *(log_mask_ptr + 1) = 0; /* clear dirty byte */
+ log_mask_ptr += 514;
+ }
+ mutex_unlock(&dci_log_mask_mutex);
+ return ret;
+}
+#endif
+
+int diag_send_dci_log_mask(int token)
+{
+ void *buf = log_mask.update_buf;
+ int write_len = 0;
+ uint8_t *log_mask_ptr = NULL;
+ int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+ int updated;
+
+ mutex_lock(&dci_log_mask_mutex);
+ log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+ if (!log_mask_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return -EINVAL;
+ }
+
+ mutex_lock(&log_mask.lock);
+	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+ updated = 1;
+		/* If the dirty byte is not set, skip this equip id's mask */
+ if (!(*(log_mask_ptr + 1))) {
+ log_mask_ptr += 514;
+ continue;
+ }
+ write_len = dci_fill_log_mask(buf, log_mask_ptr);
+ for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
+ if (check_peripheral_dci_support(j, DCI_LOCAL_PROC)) {
+ err = diag_dci_write_proc(j, DIAG_CNTL_TYPE,
+ buf, write_len);
+ if (err != DIAG_DCI_NO_ERROR) {
+ updated = 0;
+ ret = DIAG_DCI_SEND_DATA_FAIL;
+ }
+ }
+ }
+ if (updated)
+ *(log_mask_ptr+1) = 0; /* clear dirty byte */
+ log_mask_ptr += 514;
+ }
+ mutex_unlock(&log_mask.lock);
+ mutex_unlock(&dci_log_mask_mutex);
+ return ret;
+}
+
+static int diag_dci_init_local(void)
+{
+ struct dci_ops_tbl_t *temp = &dci_ops_tbl[DCI_LOCAL_PROC];
+
+ create_dci_log_mask_tbl(temp->log_mask_composite, DCI_LOG_MASK_CLEAN);
+ create_dci_event_mask_tbl(temp->event_mask_composite);
+ temp->peripheral_status |= DIAG_CON_APSS;
+
+ return 0;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_dci_init_handshake_remote(void)
+{
+ int i;
+ struct dci_channel_status_t *temp = NULL;
+
+ for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
+ temp = &dci_channel_status[i];
+ temp->id = i;
+ setup_timer(&temp->wait_time, dci_chk_handshake, i);
+ INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
+ }
+}
+
+static int diag_dci_init_remote(void)
+{
+ int i;
+ struct dci_ops_tbl_t *temp = NULL;
+
+ diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);
+
+ for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
+ temp = &dci_ops_tbl[i];
+ create_dci_log_mask_tbl(temp->log_mask_composite,
+ DCI_LOG_MASK_CLEAN);
+ create_dci_event_mask_tbl(temp->event_mask_composite);
+ }
+
+ partial_pkt.data = vzalloc(MAX_DCI_PACKET_SZ);
+ if (!partial_pkt.data) {
+ pr_err("diag: Unable to create partial pkt data\n");
+ return -ENOMEM;
+ }
+
+ partial_pkt.total_len = 0;
+ partial_pkt.read_len = 0;
+ partial_pkt.remaining = 0;
+ partial_pkt.processing = 0;
+
+ diag_dci_init_handshake_remote();
+
+ return 0;
+}
+#else
+static int diag_dci_init_remote(void)
+{
+ return 0;
+}
+#endif
+
+static int diag_dci_init_ops_tbl(void)
+{
+ int err = 0;
+
+ err = diag_dci_init_local();
+ if (err)
+ goto err;
+ err = diag_dci_init_remote();
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ return -ENOMEM;
+}
+
+int diag_dci_init(void)
+{
+ int ret = 0;
+
+ driver->dci_tag = 0;
+ driver->dci_client_id = 0;
+ driver->num_dci_client = 0;
+ mutex_init(&driver->dci_mutex);
+ mutex_init(&dci_log_mask_mutex);
+ mutex_init(&dci_event_mask_mutex);
+ spin_lock_init(&ws_lock);
+
+ ret = diag_dci_init_ops_tbl();
+ if (ret)
+ goto err;
+
+ if (driver->apps_dci_buf == NULL) {
+ driver->apps_dci_buf = vzalloc(DCI_BUF_SIZE);
+ if (driver->apps_dci_buf == NULL)
+ goto err;
+ }
+ INIT_LIST_HEAD(&driver->dci_client_list);
+ INIT_LIST_HEAD(&driver->dci_req_list);
+
+ driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+ if (!driver->diag_dci_wq)
+ goto err;
+
+ INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
+
+ setup_timer(&dci_drain_timer, dci_drain_data, 0);
+ return DIAG_DCI_NO_ERROR;
+err:
+	pr_err("diag: Could not initialize diag DCI buffers\n");
+ vfree(driver->apps_dci_buf);
+ driver->apps_dci_buf = NULL;
+
+ if (driver->diag_dci_wq)
+ destroy_workqueue(driver->diag_dci_wq);
+ vfree(partial_pkt.data);
+ partial_pkt.data = NULL;
+ mutex_destroy(&driver->dci_mutex);
+ mutex_destroy(&dci_log_mask_mutex);
+ mutex_destroy(&dci_event_mask_mutex);
+ return DIAG_DCI_NO_REG;
+}
+
+void diag_dci_channel_init(void)
+{
+ uint8_t peripheral;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ diagfwd_open(peripheral, TYPE_DCI);
+ diagfwd_open(peripheral, TYPE_DCI_CMD);
+ }
+}
+
+void diag_dci_exit(void)
+{
+ vfree(partial_pkt.data);
+ partial_pkt.data = NULL;
+ vfree(driver->apps_dci_buf);
+ driver->apps_dci_buf = NULL;
+ mutex_destroy(&driver->dci_mutex);
+ mutex_destroy(&dci_log_mask_mutex);
+ mutex_destroy(&dci_event_mask_mutex);
+ destroy_workqueue(driver->diag_dci_wq);
+}
+
+int diag_dci_clear_log_mask(int client_id)
+{
+ int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+ uint8_t *update_ptr;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ entry = diag_dci_get_client_entry(client_id);
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return DIAG_DCI_TABLE_ERR;
+ }
+ token = entry->client_info.token;
+ update_ptr = dci_ops_tbl[token].log_mask_composite;
+
+ create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+ diag_dci_invalidate_cumulative_log_mask(token);
+
+ /*
+ * Send updated mask to userspace clients only if the client
+ * is registered on the local processor
+ */
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* Send updated mask to peripherals */
+ err = dci_ops_tbl[token].send_log_mask(token);
+ return err;
+}
+
+int diag_dci_clear_event_mask(int client_id)
+{
+ int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+ uint8_t *update_ptr;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ entry = diag_dci_get_client_entry(client_id);
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return DIAG_DCI_TABLE_ERR;
+ }
+ token = entry->client_info.token;
+ update_ptr = dci_ops_tbl[token].event_mask_composite;
+
+ create_dci_event_mask_tbl(entry->dci_event_mask);
+ diag_dci_invalidate_cumulative_event_mask(token);
+
+	/*
+	 * Send updated mask to userspace clients only if the client is
+	 * registered on the local processor
+	 */
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* Send updated mask to peripherals */
+ err = dci_ops_tbl[token].send_event_mask(token);
+ return err;
+}
+
+uint8_t diag_dci_get_cumulative_real_time(int token)
+{
+ uint8_t real_time = MODE_NONREALTIME;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->real_time == MODE_REALTIME &&
+ entry->client_info.token == token) {
+			real_time = MODE_REALTIME;
+ break;
+ }
+ }
+ return real_time;
+}
+
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
+{
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+ entry->real_time = real_time;
+ return 1;
+}
+
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
+{
+ int i, err = 0;
+ struct diag_dci_client_tbl *new_entry = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+
+ if (!reg_entry)
+ return DIAG_DCI_NO_REG;
+ if (!VALID_DCI_TOKEN(reg_entry->token)) {
+ pr_alert("diag: Invalid DCI client token, %d\n",
+ reg_entry->token);
+ return DIAG_DCI_NO_REG;
+ }
+
+ if (driver->dci_state == DIAG_DCI_NO_REG)
+ return DIAG_DCI_NO_REG;
+
+ if (driver->num_dci_client >= MAX_DCI_CLIENTS)
+ return DIAG_DCI_NO_REG;
+
+ new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+ if (new_entry == NULL) {
+ pr_err("diag: unable to alloc memory\n");
+ return DIAG_DCI_NO_REG;
+ }
+
+ mutex_lock(&driver->dci_mutex);
+
+ new_entry->client = current;
+ new_entry->tgid = current->tgid;
+ new_entry->client_info.notification_list =
+ reg_entry->notification_list;
+ new_entry->client_info.signal_type =
+ reg_entry->signal_type;
+ new_entry->client_info.token = reg_entry->token;
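+	/*
+	 * Local clients get one buffer set per DCI peripheral; remote
+	 * (MDM) clients share a single buffer set.
+	 */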
+ switch (reg_entry->token) {
+ case DCI_LOCAL_PROC:
+ new_entry->num_buffers = NUM_DCI_PERIPHERALS;
+ break;
+ case DCI_MDM_PROC:
+ new_entry->num_buffers = 1;
+ break;
+ }
+
+ new_entry->buffers = NULL;
+ new_entry->real_time = MODE_REALTIME;
+ new_entry->in_service = 0;
+ INIT_LIST_HEAD(&new_entry->list_write_buf);
+ mutex_init(&new_entry->write_buf_mutex);
+ new_entry->dci_log_mask = vzalloc(DCI_LOG_MASK_SIZE);
+ if (!new_entry->dci_log_mask) {
+		pr_err("diag: Unable to create log mask for client, %d\n",
+ driver->dci_client_id);
+ goto fail_alloc;
+ }
+ create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+
+ new_entry->dci_event_mask = vzalloc(DCI_EVENT_MASK_SIZE);
+ if (!new_entry->dci_event_mask) {
+		pr_err("diag: Unable to create event mask for client, %d\n",
+ driver->dci_client_id);
+ goto fail_alloc;
+ }
+ create_dci_event_mask_tbl(new_entry->dci_event_mask);
+
+ new_entry->buffers = kzalloc(new_entry->num_buffers *
+ sizeof(struct diag_dci_buf_peripheral_t),
+ GFP_KERNEL);
+ if (!new_entry->buffers) {
+ pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
+ __func__);
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < new_entry->num_buffers; i++) {
+ proc_buf = &new_entry->buffers[i];
+ if (!proc_buf)
+ goto fail_alloc;
+
+ mutex_init(&proc_buf->health_mutex);
+ mutex_init(&proc_buf->buf_mutex);
+ proc_buf->health.dropped_events = 0;
+ proc_buf->health.dropped_logs = 0;
+ proc_buf->health.received_events = 0;
+ proc_buf->health.received_logs = 0;
+ proc_buf->buf_primary = kzalloc(
+ sizeof(struct diag_dci_buffer_t),
+ GFP_KERNEL);
+ if (!proc_buf->buf_primary)
+ goto fail_alloc;
+ proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
+ GFP_KERNEL);
+ if (!proc_buf->buf_cmd)
+ goto fail_alloc;
+ err = diag_dci_init_buffer(proc_buf->buf_primary,
+ DCI_BUF_PRIMARY);
+ if (err)
+ goto fail_alloc;
+ err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
+ if (err)
+ goto fail_alloc;
+ proc_buf->buf_curr = proc_buf->buf_primary;
+ }
+
+ list_add_tail(&new_entry->track, &driver->dci_client_list);
+ driver->dci_client_id++;
+ new_entry->client_info.client_id = driver->dci_client_id;
+ reg_entry->client_id = driver->dci_client_id;
+ driver->num_dci_client++;
+ if (driver->num_dci_client == 1)
+ diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+ mutex_unlock(&driver->dci_mutex);
+
+ return driver->dci_client_id;
+
+fail_alloc:
+ if (new_entry) {
+ for (i = 0; ((i < new_entry->num_buffers) &&
+ new_entry->buffers); i++) {
+ proc_buf = &new_entry->buffers[i];
+ if (proc_buf) {
+ mutex_destroy(&proc_buf->health_mutex);
+ if (proc_buf->buf_primary) {
+ vfree(proc_buf->buf_primary->data);
+ proc_buf->buf_primary->data = NULL;
+ mutex_destroy(
+ &proc_buf->buf_primary->data_mutex);
+ }
+ kfree(proc_buf->buf_primary);
+ proc_buf->buf_primary = NULL;
+ if (proc_buf->buf_cmd) {
+ vfree(proc_buf->buf_cmd->data);
+ proc_buf->buf_cmd->data = NULL;
+ mutex_destroy(
+ &proc_buf->buf_cmd->data_mutex);
+ }
+ kfree(proc_buf->buf_cmd);
+ proc_buf->buf_cmd = NULL;
+ }
+ }
+ vfree(new_entry->dci_event_mask);
+ new_entry->dci_event_mask = NULL;
+ vfree(new_entry->dci_log_mask);
+ new_entry->dci_log_mask = NULL;
+ kfree(new_entry->buffers);
+ new_entry->buffers = NULL;
+ kfree(new_entry);
+ new_entry = NULL;
+ }
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_REG;
+}
+
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
+{
+ int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ struct diag_dci_buffer_t *buf_entry, *temp;
+ struct list_head *start, *req_temp;
+ struct dci_pkt_req_entry_t *req_entry = NULL;
+ int token = DCI_LOCAL_PROC;
+
+ if (!entry)
+ return DIAG_DCI_NOT_SUPPORTED;
+
+ token = entry->client_info.token;
+ /*
+ * Remove the entry from the list before freeing the buffers
+ * to ensure that we don't have any invalid access.
+ */
+ if (!list_empty(&entry->track))
+ list_del(&entry->track);
+ driver->num_dci_client--;
+ /*
+ * Clear the client's log and event masks, update the cumulative
+ * masks and send the masks to peripherals
+ */
+ vfree(entry->dci_log_mask);
+ entry->dci_log_mask = NULL;
+ diag_dci_invalidate_cumulative_log_mask(token);
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ ret = dci_ops_tbl[token].send_log_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+ vfree(entry->dci_event_mask);
+ entry->dci_event_mask = NULL;
+ diag_dci_invalidate_cumulative_event_mask(token);
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ ret = dci_ops_tbl[token].send_event_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+
+ list_for_each_safe(start, req_temp, &driver->dci_req_list) {
+ req_entry = list_entry(start, struct dci_pkt_req_entry_t,
+ track);
+ if (req_entry->client_id == entry->client_info.client_id) {
+ if (!list_empty(&req_entry->track))
+ list_del(&req_entry->track);
+ kfree(req_entry);
+ req_entry = NULL;
+ }
+ }
+
+ /* Clean up any buffer that is pending write */
+ mutex_lock(&entry->write_buf_mutex);
+ list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+ buf_track) {
+ if (!list_empty(&buf_entry->buf_track))
+ list_del(&buf_entry->buf_track);
+ if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+ mutex_lock(&buf_entry->data_mutex);
+ diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+ buf_entry->data = NULL;
+ mutex_unlock(&buf_entry->data_mutex);
+ kfree(buf_entry);
+ buf_entry = NULL;
+ } else if (buf_entry->buf_type == DCI_BUF_CMD) {
+ peripheral = buf_entry->data_source;
+ if (peripheral == APPS_DATA)
+ continue;
+ }
+ /*
+ * These are buffers that can't be written to the client which
+ * means that the copy cannot be completed. Make sure that we
+ * remove those references in DCI wakeup source.
+ */
+ diag_ws_on_copy_fail(DIAG_WS_DCI);
+ }
+ mutex_unlock(&entry->write_buf_mutex);
+
+ for (i = 0; i < entry->num_buffers; i++) {
+ proc_buf = &entry->buffers[i];
+ buf_entry = proc_buf->buf_curr;
+ mutex_lock(&proc_buf->buf_mutex);
+ /* Clean up secondary buffer from mempool that is active */
+ if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
+ mutex_lock(&buf_entry->data_mutex);
+ diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+ buf_entry->data = NULL;
+ mutex_unlock(&buf_entry->data_mutex);
+ mutex_destroy(&buf_entry->data_mutex);
+ kfree(buf_entry);
+ buf_entry = NULL;
+ }
+
+ mutex_lock(&proc_buf->buf_primary->data_mutex);
+ vfree(proc_buf->buf_primary->data);
+ proc_buf->buf_primary->data = NULL;
+ mutex_unlock(&proc_buf->buf_primary->data_mutex);
+
+ mutex_lock(&proc_buf->buf_cmd->data_mutex);
+ vfree(proc_buf->buf_cmd->data);
+ proc_buf->buf_cmd->data = NULL;
+ mutex_unlock(&proc_buf->buf_cmd->data_mutex);
+
+ mutex_destroy(&proc_buf->health_mutex);
+ mutex_destroy(&proc_buf->buf_primary->data_mutex);
+ mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+
+ kfree(proc_buf->buf_primary);
+ proc_buf->buf_primary = NULL;
+ kfree(proc_buf->buf_cmd);
+ proc_buf->buf_cmd = NULL;
+ mutex_unlock(&proc_buf->buf_mutex);
+ }
+ mutex_destroy(&entry->write_buf_mutex);
+
+ kfree(entry->buffers);
+ entry->buffers = NULL;
+ kfree(entry);
+ entry = NULL;
+
+ if (driver->num_dci_client == 0) {
+ diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
+ } else {
+ real_time = diag_dci_get_cumulative_real_time(token);
+ diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
+ }
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+
+ return DIAG_DCI_NO_ERROR;
+}
+
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
+{
+ uint8_t dest_channel = TYPE_DATA;
+ int err = 0;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
+ !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
+ buf, peripheral, len,
+ driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
+ return -EINVAL;
+ }
+
+ if (pkt_type == DIAG_DATA_TYPE) {
+ dest_channel = TYPE_DCI_CMD;
+ } else if (pkt_type == DIAG_CNTL_TYPE) {
+ dest_channel = TYPE_CNTL;
+ } else {
+		pr_err("diag: Invalid DCI pkt type in %s\n", __func__);
+ return -EINVAL;
+ }
+
+ err = diagfwd_write(peripheral, dest_channel, buf, len);
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, dest_channel, len, err);
+ } else {
+ err = DIAG_DCI_NO_ERROR;
+ }
+
+ return err;
+}
+
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
+{
+ struct diag_dci_client_tbl *entry = NULL;
+ struct diag_dci_health_t *health = NULL;
+ struct diag_dci_health_stats *stats = NULL;
+ int i, proc;
+
+ if (!stats_proc)
+ return -EINVAL;
+
+ stats = &stats_proc->health;
+ proc = stats_proc->proc;
+ if (proc < ALL_PROC || proc > APPS_DATA)
+ return -EINVAL;
+
+ entry = diag_dci_get_client_entry(stats_proc->client_id);
+ if (!entry)
+ return DIAG_DCI_NOT_SUPPORTED;
+
+ /*
+ * If the client has registered for remote processor, the
+ * proc field doesn't have any effect as they have only one buffer.
+ */
+ if (entry->client_info.token)
+ proc = 0;
+
+ stats->stats.dropped_logs = 0;
+ stats->stats.dropped_events = 0;
+ stats->stats.received_logs = 0;
+ stats->stats.received_events = 0;
+
+ if (proc != ALL_PROC) {
+ health = &entry->buffers[proc].health;
+ stats->stats.dropped_logs = health->dropped_logs;
+ stats->stats.dropped_events = health->dropped_events;
+ stats->stats.received_logs = health->received_logs;
+ stats->stats.received_events = health->received_events;
+ if (stats->reset_status) {
+ mutex_lock(&entry->buffers[proc].health_mutex);
+ health->dropped_logs = 0;
+ health->dropped_events = 0;
+ health->received_logs = 0;
+ health->received_events = 0;
+ mutex_unlock(&entry->buffers[proc].health_mutex);
+ }
+ return DIAG_DCI_NO_ERROR;
+ }
+
+ for (i = 0; i < entry->num_buffers; i++) {
+ health = &entry->buffers[i].health;
+ stats->stats.dropped_logs += health->dropped_logs;
+ stats->stats.dropped_events += health->dropped_events;
+ stats->stats.received_logs += health->received_logs;
+ stats->stats.received_events += health->received_events;
+ if (stats->reset_status) {
+ mutex_lock(&entry->buffers[i].health_mutex);
+ health->dropped_logs = 0;
+ health->dropped_events = 0;
+ health->received_logs = 0;
+ health->received_events = 0;
+ mutex_unlock(&entry->buffers[i].health_mutex);
+ }
+ }
+ return DIAG_DCI_NO_ERROR;
+}
+
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
+{
+ if (!support_list)
+ return -ENOMEM;
+
+ if (!VALID_DCI_TOKEN(support_list->proc))
+ return -EIO;
+
+ support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
+ return DIAG_DCI_NO_ERROR;
+}
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
new file mode 100644
index 000000000000..c2a8ac1e3854
--- /dev/null
+++ b/drivers/char/diag/diag_dci.h
@@ -0,0 +1,329 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_DCI_H
+#define DIAG_DCI_H
+
+#define MAX_DCI_CLIENTS 10
+#define DCI_PKT_RSP_CODE 0x93
+#define DCI_DELAYED_RSP_CODE 0x94
+#define DCI_CONTROL_PKT_CODE 0x9A
+#define EXT_HDR_CMD_CODE 0x98
+#define LOG_CMD_CODE 0x10
+#define EVENT_CMD_CODE 0x60
+#define DCI_PKT_RSP_TYPE 0
+#define DCI_LOG_TYPE -1
+#define DCI_EVENT_TYPE -2
+#define DCI_EXT_HDR_TYPE -3
+#define SET_LOG_MASK 1
+#define DISABLE_LOG_MASK 0
+#define MAX_EVENT_SIZE 512
+#define DCI_CLIENT_INDEX_INVALID -1
+#define DCI_LOG_CON_MIN_LEN 14
+#define DCI_EVENT_CON_MIN_LEN 16
+
+#define EXT_HDR_LEN 8
+#define EXT_HDR_VERSION 1
+
+#define DCI_BUF_PRIMARY 1
+#define DCI_BUF_SECONDARY 2
+#define DCI_BUF_CMD 3
+
+#ifdef CONFIG_DEBUG_FS
+#define DIAG_DCI_DEBUG_CNT 100
+#define DIAG_DCI_DEBUG_LEN 100
+#endif
+
+/*
+ * 16 log code categories, each has:
+ * 1 byte equip id + 1 dirty byte + 512 byte max log mask
+ */
+#define DCI_LOG_MASK_SIZE (16*514)
+#define DCI_EVENT_MASK_SIZE 512
+#define DCI_MASK_STREAM 2
+#define DCI_MAX_LOG_CODES 16
+#define DCI_MAX_ITEMS_PER_LOG_CODE 512
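+/*
+ * Example: within one 514-byte range, byte 0 holds the equip id, byte 1 the
+ * dirty flag, and bytes 2..513 hold one mask bit per log item (so up to
+ * 4096 items per equip id).
+ */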
+
+#define DCI_LOG_MASK_CLEAN 0
+#define DCI_LOG_MASK_DIRTY 1
+
+#define MIN_DELAYED_RSP_LEN 12
+/*
+ * Maximum data size that peripherals send = 8.5K log +
+ * DCI header + footer (6 bytes)
+ */
+#define MAX_DCI_PACKET_SZ 8710
+
+extern unsigned int dci_max_reg;
+extern unsigned int dci_max_clients;
+
+#define DCI_LOCAL_PROC 0
+#define DCI_REMOTE_BASE 1
+#define DCI_MDM_PROC DCI_REMOTE_BASE
+#define DCI_REMOTE_LAST (DCI_REMOTE_BASE + 1)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DCI_PROC 1
+#else
+#define NUM_DCI_PROC DCI_REMOTE_LAST
+#endif
+
+#define DCI_REMOTE_DATA 0
+
+#define VALID_DCI_TOKEN(x)	(((x) >= 0 && (x) < NUM_DCI_PROC) ? 1 : 0)
+#define BRIDGE_TO_TOKEN(x)	((x) - DIAGFWD_MDM_DCI + DCI_REMOTE_BASE)
+#define TOKEN_TO_BRIDGE(x)	(dci_ops_tbl[(x)].ctx)
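+/* e.g. BRIDGE_TO_TOKEN(DIAGFWD_MDM_DCI) == DCI_REMOTE_BASE == DCI_MDM_PROC */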
+
+#define DCI_MAGIC (0xAABB1122)
+
+struct dci_pkt_req_t {
+ int uid;
+ int client_id;
+} __packed;
+
+struct dci_stream_req_t {
+ int type;
+ int client_id;
+ int set_flag;
+ int count;
+} __packed;
+
+struct dci_pkt_req_entry_t {
+ int client_id;
+ int uid;
+ int tag;
+ struct list_head track;
+} __packed;
+
+struct diag_dci_reg_tbl_t {
+ int client_id;
+ uint16_t notification_list;
+ int signal_type;
+ int token;
+} __packed;
+
+struct diag_dci_health_t {
+ int dropped_logs;
+ int dropped_events;
+ int received_logs;
+ int received_events;
+};
+
+struct diag_dci_partial_pkt_t {
+ unsigned char *data;
+ uint32_t total_len;
+ uint32_t read_len;
+ uint32_t remaining;
+ uint8_t processing;
+} __packed;
+
+struct diag_dci_buffer_t {
+ unsigned char *data;
+ unsigned int data_len;
+ struct mutex data_mutex;
+ uint8_t in_busy;
+ uint8_t buf_type;
+ int data_source;
+ int capacity;
+ uint8_t in_list;
+ struct list_head buf_track;
+};
+
+struct diag_dci_buf_peripheral_t {
+ struct diag_dci_buffer_t *buf_curr;
+ struct diag_dci_buffer_t *buf_primary;
+ struct diag_dci_buffer_t *buf_cmd;
+ struct diag_dci_health_t health;
+ struct mutex health_mutex;
+ struct mutex buf_mutex;
+};
+
+struct diag_dci_client_tbl {
+ int tgid;
+ struct diag_dci_reg_tbl_t client_info;
+ struct task_struct *client;
+ unsigned char *dci_log_mask;
+ unsigned char *dci_event_mask;
+ uint8_t real_time;
+ struct list_head track;
+ struct diag_dci_buf_peripheral_t *buffers;
+ uint8_t num_buffers;
+ uint8_t in_service;
+ struct list_head list_write_buf;
+ struct mutex write_buf_mutex;
+};
+
+struct diag_dci_health_stats {
+ struct diag_dci_health_t stats;
+ int reset_status;
+};
+
+struct diag_dci_health_stats_proc {
+ int client_id;
+ struct diag_dci_health_stats health;
+ int proc;
+} __packed;
+
+struct diag_dci_peripherals_t {
+ int proc;
+ uint16_t list;
+} __packed;
+
+/* This is used for querying the DCI log or event mask */
+struct diag_log_event_stats {
+ int client_id;
+ uint16_t code;
+ int is_set;
+} __packed;
+
+struct diag_dci_pkt_rsp_header_t {
+ int type;
+ int length;
+ uint8_t delete_flag;
+ int uid;
+} __packed;
+
+struct diag_dci_pkt_header_t {
+ uint8_t start;
+ uint8_t version;
+ uint16_t len;
+ uint8_t pkt_code;
+ int tag;
+} __packed;
+
+struct diag_dci_header_t {
+ uint8_t start;
+ uint8_t version;
+ uint16_t length;
+ uint8_t cmd_code;
+} __packed;
+
+struct dci_ops_tbl_t {
+ int ctx;
+ int mempool;
+ unsigned char log_mask_composite[DCI_LOG_MASK_SIZE];
+ unsigned char event_mask_composite[DCI_EVENT_MASK_SIZE];
+ int (*send_log_mask)(int token);
+ int (*send_event_mask)(int token);
+ uint16_t peripheral_status;
+} __packed;
+
+struct dci_channel_status_t {
+ int id;
+ int open;
+ int retry_count;
+ struct timer_list wait_time;
+ struct work_struct handshake_work;
+} __packed;
+
+extern struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC];
+
+enum {
+ DIAG_DCI_NO_ERROR = 1001, /* No error */
+ DIAG_DCI_NO_REG, /* Could not register */
+ DIAG_DCI_NO_MEM, /* Failed memory allocation */
+ DIAG_DCI_NOT_SUPPORTED, /* This particular client is not supported */
+ DIAG_DCI_HUGE_PACKET, /* Request/Response Packet too huge */
+ DIAG_DCI_SEND_DATA_FAIL,/* writing to kernel or peripheral fails */
+ DIAG_DCI_TABLE_ERR /* Error dealing with registration tables */
+};
+
+#define DCI_HDR_SIZE \
+ ((sizeof(struct diag_dci_pkt_header_t) > \
+ sizeof(struct diag_dci_header_t)) ? \
+ (sizeof(struct diag_dci_pkt_header_t) + 1) : \
+	 (sizeof(struct diag_dci_header_t) + 1))
+
+#define DCI_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_HDR_SIZE)
+
+#define DCI_REQ_HDR_SIZE \
+ ((sizeof(struct dci_pkt_req_t) > \
+ sizeof(struct dci_stream_req_t)) ? \
+ (sizeof(struct dci_pkt_req_t)) : \
+	 (sizeof(struct dci_stream_req_t)))
+
+#define DCI_REQ_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_REQ_HDR_SIZE)
+
+#ifdef CONFIG_DEBUG_FS
+/* To collect debug information during each smd read */
+struct diag_dci_data_info {
+ unsigned long iteration;
+ int data_size;
+ char time_stamp[DIAG_TS_SIZE];
+ uint8_t peripheral;
+ uint8_t ch_type;
+ uint8_t proc;
+};
+
+extern struct diag_dci_data_info *dci_traffic;
+extern struct mutex dci_stat_mutex;
+#endif
+
+int diag_dci_init(void);
+void diag_dci_channel_init(void);
+void diag_dci_exit(void);
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry);
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry);
+void diag_dci_channel_open_work(struct work_struct *);
+void diag_dci_notify_client(int peripheral_mask, int data, int proc);
+void diag_dci_wakeup_clients(void);
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes);
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+ int recd_bytes);
+int diag_process_dci_transaction(unsigned char *buf, int len);
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+ int token);
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token);
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id);
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid);
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes);
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list);
+/* DCI Log streaming functions */
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+ uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_log_mask(int token);
+int diag_send_dci_log_mask(int token);
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+ void *ext_hdr);
+int diag_dci_clear_log_mask(int client_id);
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+ uint16_t log_code);
+/* DCI event streaming functions */
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_event_mask(int token);
+int diag_send_dci_event_mask(int token);
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+ int token, void *ext_hdr);
+/* DCI extended header handling functions */
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+ int token);
+int diag_dci_clear_event_mask(int client_id);
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+ uint16_t event_id);
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral, uint8_t proc);
+uint8_t diag_dci_get_cumulative_real_time(int token);
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry,
+ uint8_t real_time);
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc);
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len);
+void dci_drain_data(unsigned long data);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token);
+int diag_send_dci_event_mask_remote(int token);
+unsigned char *dci_get_buffer_from_bridge(int token);
+int diag_dci_write_bridge(int token, unsigned char *buf, int len);
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
+int diag_dci_send_handshake_pkt(int index);
+#endif
+
+#endif
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
new file mode 100644
index 000000000000..3d916e790814
--- /dev/null
+++ b/drivers/char/diag/diag_debugfs.c
@@ -0,0 +1,1216 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#endif
+#ifdef CONFIG_MSM_MHI
+#include "diagfwd_mhi.h"
+#endif
+#include "diagmem.h"
+#include "diag_dci.h"
+#include "diag_usb.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_smd.h"
+#include "diagfwd_socket.h"
+#include "diagfwd_glink.h"
+#include "diag_debugfs.h"
+#include "diag_ipc_logging.h"
+
+#define DEBUG_BUF_SIZE 4096
+static struct dentry *diag_dbgfs_dent;
+static int diag_dbgfs_table_index;
+static int diag_dbgfs_mempool_index;
+static int diag_dbgfs_usbinfo_index;
+static int diag_dbgfs_smdinfo_index;
+static int diag_dbgfs_socketinfo_index;
+static int diag_dbgfs_glinkinfo_index;
+static int diag_dbgfs_hsicinfo_index;
+static int diag_dbgfs_mhiinfo_index;
+static int diag_dbgfs_bridgeinfo_index;
+static int diag_dbgfs_finished;
+static int diag_dbgfs_dci_data_index;
+static int diag_dbgfs_dci_finished;
+static struct mutex diag_dci_dbgfs_mutex;
+
+static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int ret, i;
+	unsigned int buf_size;
+
+	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+ buf_size = ksize(buf);
+ ret = scnprintf(buf, buf_size,
+ "CPU Tools ID: %d\n"
+ "Check Polling Response: %d\n"
+ "Polling Registered: %d\n"
+ "Uses Device Tree: %d\n"
+ "Apps Supports Separate CMDRSP: %d\n"
+ "Apps Supports HDLC Encoding: %d\n"
+ "Apps Supports Header Untagging: %d\n"
+ "Apps Supports Sockets: %d\n"
+ "Logging Mode: %d\n"
+ "RSP Buffer is Busy: %d\n"
+ "HDLC Disabled: %d\n"
+ "Time Sync Enabled: %d\n"
+ "MD session mode: %d\n"
+ "MD session mask: %d\n"
+ "Uses Time API: %d\n"
+ "Supports PD buffering: %d\n",
+ chk_config_get_id(),
+ chk_polling_response(),
+ driver->polling_reg_flag,
+ driver->use_device_tree,
+ driver->supports_separate_cmdrsp,
+ driver->supports_apps_hdlc_encoding,
+ driver->supports_apps_header_untagging,
+ driver->supports_sockets,
+ driver->logging_mode,
+ driver->rsp_buf_busy,
+ driver->hdlc_disabled,
+ driver->time_sync_enabled,
+ driver->md_session_mode,
+ driver->md_session_mask,
+ driver->uses_time_api,
+ driver->supports_pd_buffering);
+
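+	/* Upper-case flag char: feature supported; lower-case: not supported */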
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ ret += scnprintf(buf+ret, buf_size-ret,
+ "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c%c|\n",
+ PERIPHERAL_STRING(i),
+ driver->feature[i].feature_mask[0],
+ driver->feature[i].feature_mask[1],
+ driver->feature[i].rcvd_feature_mask ? 'F':'f',
+ driver->feature[i].peripheral_buffering ? 'B':'b',
+ driver->feature[i].separate_cmd_rsp ? 'C':'c',
+ driver->feature[i].encode_hdlc ? 'H':'h',
+ driver->feature[i].mask_centralization ? 'M':'m',
+ driver->feature[i].pd_buffering ? 'P':'p',
+ driver->feature[i].stm_support ? 'Q':'q',
+ driver->feature[i].sockets_enabled ? 'S':'s',
+ driver->feature[i].sent_feature_mask ? 'T':'t',
+ driver->feature[i].untag_header ? 'U':'u');
+ }
+
+#ifdef CONFIG_DIAG_OVER_USB
+ ret += scnprintf(buf+ret, buf_size-ret,
+ "USB Connected: %d\n",
+ driver->usb_connected);
+#endif
+
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ ret += scnprintf(buf+ret, buf_size-ret,
+ "Real Time Mode: %d: %d\n", i,
+ driver->real_time_mode[i]);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_dcistats(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ unsigned int bytes_remaining, bytes_written = 0;
+ unsigned int bytes_in_buf = 0, i = 0;
+ struct diag_dci_data_info *temp_data = dci_traffic;
+	unsigned int buf_size;
+
+	buf_size = (DEBUG_BUF_SIZE < count) ? DEBUG_BUF_SIZE : count;
+
+ if (diag_dbgfs_dci_finished) {
+ diag_dbgfs_dci_finished = 0;
+ return 0;
+ }
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+
+ mutex_lock(&diag_dci_dbgfs_mutex);
+ if (diag_dbgfs_dci_data_index == 0) {
+ bytes_written =
+ scnprintf(buf, buf_size,
+ "number of clients: %d\n"
+ "dci proc active: %d\n"
+ "dci real time vote: %d\n",
+ driver->num_dci_client,
+ (driver->proc_active_mask & DIAG_PROC_DCI) ? 1 : 0,
+ (driver->proc_rt_vote_mask[DIAG_LOCAL_PROC] &
+ DIAG_PROC_DCI) ? 1 : 0);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+#ifdef CONFIG_DIAG_OVER_USB
+ bytes_written = scnprintf(buf+bytes_in_buf, bytes_remaining,
+ "usb_connected: %d\n",
+ driver->usb_connected);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+#endif
+ bytes_written = scnprintf(buf+bytes_in_buf,
+ bytes_remaining,
+ "dci power: active, relax: %lu, %lu\n",
+ driver->diag_dev->power.wakeup->
+ active_count,
+ driver->diag_dev->
+ power.wakeup->relax_count);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+
+ }
+ temp_data += diag_dbgfs_dci_data_index;
+ for (i = diag_dbgfs_dci_data_index; i < DIAG_DCI_DEBUG_CNT; i++) {
+ if (temp_data->iteration != 0) {
+ bytes_written = scnprintf(
+ buf + bytes_in_buf, bytes_remaining,
+ "i %-5ld\t"
+ "s %-5d\t"
+ "p %-5d\t"
+ "r %-5d\t"
+ "c %-5d\t"
+ "t %-15s\n",
+ temp_data->iteration,
+ temp_data->data_size,
+ temp_data->peripheral,
+ temp_data->proc,
+ temp_data->ch_type,
+ temp_data->time_stamp);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+ /* Check if there is room for another entry */
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ temp_data++;
+ }
+ diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+ mutex_unlock(&diag_dci_dbgfs_mutex);
+ bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
+ bytes_in_buf);
+ kfree(buf);
+ diag_dbgfs_dci_finished = 1;
+ return bytes_written;
+}
+
+static ssize_t diag_dbgfs_read_power(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int ret;
+ unsigned int buf_size;
+
+	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ ret = scnprintf(buf, buf_size,
+ "DCI reference count: %d\n"
+ "DCI copy count: %d\n"
+ "DCI Client Count: %d\n\n"
+ "Memory Device reference count: %d\n"
+ "Memory Device copy count: %d\n"
+ "Logging mode: %d\n\n"
+ "Wakeup source active count: %lu\n"
+ "Wakeup source relax count: %lu\n\n",
+ driver->dci_ws.ref_count,
+ driver->dci_ws.copy_count,
+ driver->num_dci_client,
+ driver->md_ws.ref_count,
+ driver->md_ws.copy_count,
+ driver->logging_mode,
+ driver->diag_dev->power.wakeup->active_count,
+ driver->diag_dev->power.wakeup->relax_count);
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_table(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int ret = 0;
+ int i = 0;
+ int is_polling = 0;
+ unsigned int bytes_remaining;
+ unsigned int bytes_in_buffer = 0;
+ unsigned int bytes_written;
+ unsigned int buf_size;
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ if (diag_dbgfs_table_index == driver->cmd_reg_count) {
+ diag_dbgfs_table_index = 0;
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return 0;
+ }
+
+ buf_size = (DEBUG_BUF_SIZE < count) ? DEBUG_BUF_SIZE : count;
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return -ENOMEM;
+ }
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+
+ if (diag_dbgfs_table_index == 0) {
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "Client ids: Modem: %d, LPASS: %d, WCNSS: %d, SLPI: %d, APPS: %d\n",
+ PERIPHERAL_MODEM, PERIPHERAL_LPASS,
+ PERIPHERAL_WCNSS, PERIPHERAL_SENSORS,
+ APPS_DATA);
+ bytes_in_buffer += bytes_written;
+ bytes_remaining -= bytes_written;
+ }
+
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (i < diag_dbgfs_table_index) {
+ i++;
+ continue;
+ }
+
+ is_polling = diag_cmd_chk_polling(&item->entry);
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "i: %3d, cmd_code: %4x, subsys_id: %4x, cmd_code_lo: %4x, cmd_code_hi: %4x, proc: %d, process_id: %5d %s\n",
+ i++,
+ item->entry.cmd_code,
+ item->entry.subsys_id,
+ item->entry.cmd_code_lo,
+ item->entry.cmd_code_hi,
+ item->proc,
+ item->pid,
+ (is_polling == DIAG_CMD_POLLING) ?
+ "<-- Polling Cmd" : "");
+
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_table_index = i;
+ mutex_unlock(&driver->cmd_reg_mutex);
+
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_mempool(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (diag_dbgfs_mempool_index >= NUM_MEMORY_POOLS) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_mempool_index = 0;
+ return 0;
+ }
+
+	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "%-24s\t"
+ "%-10s\t"
+ "%-5s\t"
+ "%-5s\t"
+ "%-5s\n",
+ "POOL", "HANDLE", "COUNT", "SIZE", "ITEMSIZE");
+ bytes_in_buffer += bytes_written;
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ for (i = diag_dbgfs_mempool_index; i < NUM_MEMORY_POOLS; i++) {
+ mempool = &diag_mempools[i];
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "%-24s\t"
+ "%-10p\t"
+ "%-5d\t"
+ "%-5d\t"
+ "%-5d\n",
+ mempool->name,
+ mempool->pool,
+ mempool->count,
+ mempool->poolsize,
+ mempool->itemsize);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_mempool_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_usb_info *usb_info = NULL;
+
+ if (diag_dbgfs_usbinfo_index >= NUM_DIAG_USB_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_usbinfo_index = 0;
+ return 0;
+ }
+
+	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_usbinfo_index; i < NUM_DIAG_USB_DEV; i++) {
+ usb_info = &diag_usb[i];
+ if (!usb_info->enabled)
+ continue;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "hdl: %pK\n"
+ "connected: %d\n"
+ "diag state: %d\n"
+ "enabled: %d\n"
+ "mempool: %s\n"
+ "read pending: %d\n"
+ "read count: %lu\n"
+ "write count: %lu\n"
+ "read work pending: %d\n"
+ "read done work pending: %d\n"
+ "connect work pending: %d\n"
+ "disconnect work pending: %d\n"
+ "max size supported: %d\n\n",
+ usb_info->id,
+ usb_info->name,
+ usb_info->hdl,
+ atomic_read(&usb_info->connected),
+ atomic_read(&usb_info->diag_state),
+ usb_info->enabled,
+ DIAG_MEMPOOL_GET_NAME(usb_info->mempool),
+ atomic_read(&usb_info->read_pending),
+ usb_info->read_cnt,
+ usb_info->write_cnt,
+ work_pending(&usb_info->read_work),
+ work_pending(&usb_info->read_done_work),
+ work_pending(&usb_info->connect_work),
+ work_pending(&usb_info->disconnect_work),
+ usb_info->max_size);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_usbinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
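+/*
+ * Unlike the handlers above, the SMD, socket and G-Link readers walk
+ * the full NUM_TYPES x NUM_PERIPHERALS matrix in a single pass. Each
+ * entry also reports its forwarding context (diagfwd_info); -1 is
+ * printed for fields whose context has not been allocated yet.
+ */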
+static ssize_t diag_dbgfs_read_smdinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_smd_info *smd_info = NULL;
+ struct diagfwd_info *fwd_ctxt = NULL;
+
+ if (diag_dbgfs_smdinfo_index >= NUM_PERIPHERALS) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_smdinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = 0; i < NUM_TYPES; i++) {
+ for (j = 0; j < NUM_PERIPHERALS; j++) {
+ switch (i) {
+ case TYPE_DATA:
+ smd_info = &smd_data[j];
+ break;
+ case TYPE_CNTL:
+ smd_info = &smd_cntl[j];
+ break;
+ case TYPE_DCI:
+ smd_info = &smd_dci[j];
+ break;
+ case TYPE_CMD:
+ smd_info = &smd_cmd[j];
+ break;
+ case TYPE_DCI_CMD:
+ smd_info = &smd_dci_cmd[j];
+ break;
+			default:
+				kfree(buf);
+				return -EINVAL;
+ }
+
+ fwd_ctxt = (struct diagfwd_info *)(smd_info->fwd_ctxt);
+
+ bytes_written = scnprintf(buf+bytes_in_buffer,
+ bytes_remaining,
+ "name\t\t:\t%s\n"
+ "hdl\t\t:\t%pK\n"
+ "inited\t\t:\t%d\n"
+ "opened\t\t:\t%d\n"
+ "diag_state\t:\t%d\n"
+ "fifo size\t:\t%d\n"
+ "open pending\t:\t%d\n"
+ "close pending\t:\t%d\n"
+ "read pending\t:\t%d\n"
+ "buf_1 busy\t:\t%d\n"
+ "buf_2 busy\t:\t%d\n"
+ "bytes read\t:\t%lu\n"
+ "bytes written\t:\t%lu\n"
+ "fwd inited\t:\t%d\n"
+ "fwd opened\t:\t%d\n"
+ "fwd ch_open\t:\t%d\n\n",
+ smd_info->name,
+ smd_info->hdl,
+ smd_info->inited,
+ atomic_read(&smd_info->opened),
+ atomic_read(&smd_info->diag_state),
+ smd_info->fifo_size,
+ work_pending(&smd_info->open_work),
+ work_pending(&smd_info->close_work),
+ work_pending(&smd_info->read_work),
+ (fwd_ctxt && fwd_ctxt->buf_1) ?
+ atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+ (fwd_ctxt && fwd_ctxt->buf_2) ?
+ atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+ (fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->inited : -1,
+ (fwd_ctxt) ?
+ atomic_read(&fwd_ctxt->opened) : -1,
+ (fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ }
+ diag_dbgfs_smdinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_socketinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_socket_info *info = NULL;
+ struct diagfwd_info *fwd_ctxt = NULL;
+
+ if (diag_dbgfs_socketinfo_index >= NUM_PERIPHERALS) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_socketinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = 0; i < NUM_TYPES; i++) {
+ for (j = 0; j < NUM_PERIPHERALS; j++) {
+ switch (i) {
+ case TYPE_DATA:
+ info = &socket_data[j];
+ break;
+ case TYPE_CNTL:
+ info = &socket_cntl[j];
+ break;
+ case TYPE_DCI:
+ info = &socket_dci[j];
+ break;
+ case TYPE_CMD:
+ info = &socket_cmd[j];
+ break;
+ case TYPE_DCI_CMD:
+ info = &socket_dci_cmd[j];
+ break;
+			default:
+				kfree(buf);
+				return -EINVAL;
+ }
+
+ fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+ bytes_written = scnprintf(buf+bytes_in_buffer,
+ bytes_remaining,
+ "name\t\t:\t%s\n"
+ "hdl\t\t:\t%pK\n"
+ "inited\t\t:\t%d\n"
+ "opened\t\t:\t%d\n"
+ "diag_state\t:\t%d\n"
+ "buf_1 busy\t:\t%d\n"
+ "buf_2 busy\t:\t%d\n"
+ "flow ctrl count\t:\t%d\n"
+ "data_ready\t:\t%d\n"
+ "init pending\t:\t%d\n"
+ "read pending\t:\t%d\n"
+ "bytes read\t:\t%lu\n"
+ "bytes written\t:\t%lu\n"
+ "fwd inited\t:\t%d\n"
+ "fwd opened\t:\t%d\n"
+ "fwd ch_open\t:\t%d\n\n",
+ info->name,
+ info->hdl,
+ info->inited,
+ atomic_read(&info->opened),
+ atomic_read(&info->diag_state),
+ (fwd_ctxt && fwd_ctxt->buf_1) ?
+ atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+ (fwd_ctxt && fwd_ctxt->buf_2) ?
+ atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+ atomic_read(&info->flow_cnt),
+ info->data_ready,
+ work_pending(&info->init_work),
+ work_pending(&info->read_work),
+ (fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->inited : -1,
+ (fwd_ctxt) ?
+ atomic_read(&fwd_ctxt->opened) : -1,
+ (fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ }
+ diag_dbgfs_socketinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_glinkinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_glink_info *info = NULL;
+ struct diagfwd_info *fwd_ctxt = NULL;
+
+ if (diag_dbgfs_glinkinfo_index >= NUM_PERIPHERALS) {
+ /* Done. Reset to prepare for future requests */
+		diag_dbgfs_glinkinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = 0; i < NUM_TYPES; i++) {
+ for (j = 0; j < NUM_PERIPHERALS; j++) {
+ switch (i) {
+ case TYPE_DATA:
+ info = &glink_data[j];
+ break;
+ case TYPE_CNTL:
+ info = &glink_cntl[j];
+ break;
+ case TYPE_DCI:
+ info = &glink_dci[j];
+ break;
+ case TYPE_CMD:
+ info = &glink_cmd[j];
+ break;
+ case TYPE_DCI_CMD:
+ info = &glink_dci_cmd[j];
+ break;
+			default:
+				kfree(buf);
+				return -EINVAL;
+ }
+
+ fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+ bytes_written = scnprintf(buf+bytes_in_buffer,
+ bytes_remaining,
+ "name\t\t:\t%s\n"
+ "hdl\t\t:\t%pK\n"
+ "inited\t\t:\t%d\n"
+ "opened\t\t:\t%d\n"
+ "diag_state\t:\t%d\n"
+ "buf_1 busy\t:\t%d\n"
+ "buf_2 busy\t:\t%d\n"
+ "tx_intent_ready\t:\t%d\n"
+ "open pending\t:\t%d\n"
+ "close pending\t:\t%d\n"
+ "read pending\t:\t%d\n"
+ "bytes read\t:\t%lu\n"
+ "bytes written\t:\t%lu\n"
+ "fwd inited\t:\t%d\n"
+ "fwd opened\t:\t%d\n"
+ "fwd ch_open\t:\t%d\n\n",
+ info->name,
+ info->hdl,
+ info->inited,
+ atomic_read(&info->opened),
+ atomic_read(&info->diag_state),
+ (fwd_ctxt && fwd_ctxt->buf_1) ?
+ atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+ (fwd_ctxt && fwd_ctxt->buf_2) ?
+ atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+ atomic_read(&info->tx_intent_ready),
+ work_pending(&info->open_work),
+ work_pending(&info->close_work),
+ work_pending(&info->read_work),
+ (fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->inited : -1,
+ (fwd_ctxt) ?
+ atomic_read(&fwd_ctxt->opened) : -1,
+ (fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ }
+ diag_dbgfs_glinkinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+#ifdef CONFIG_IPC_LOGGING
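+/*
+ * The "debug" file accepts a decimal bitmask of the DIAG_DEBUG_*
+ * classes defined in diag_ipc_logging.h; the value is truncated to
+ * 16 bits. For example, with debugfs mounted at the usual location,
+ *
+ *	echo 3 > /sys/kernel/debug/diag/debug
+ *
+ * enables DIAG_DEBUG_USERSPACE and DIAG_DEBUG_MUX logging.
+ */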
+static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+	char cmd[10];
+	const int size = sizeof(cmd);
+ long value = 0;
+ int len = 0;
+
+ if (count < 1)
+ return -EINVAL;
+
+ len = (count < (size - 1)) ? count : size - 1;
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+
+ cmd[len] = 0;
+ if (cmd[len-1] == '\n') {
+ cmd[len-1] = 0;
+ len--;
+ }
+
+ if (kstrtol(cmd, 10, &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ diag_debug_mask = (uint16_t)value;
+ return count;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+static ssize_t diag_dbgfs_read_hsicinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_hsic_info *hsic_info = NULL;
+
+	if (diag_dbgfs_hsicinfo_index >= NUM_HSIC_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_hsicinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_hsicinfo_index; i < NUM_HSIC_DEV; i++) {
+ hsic_info = &diag_hsic[i];
+ if (!hsic_info->enabled)
+ continue;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "bridge index: %s\n"
+ "opened: %d\n"
+ "enabled: %d\n"
+ "suspended: %d\n"
+ "mempool: %s\n"
+ "read work pending: %d\n"
+ "open work pending: %d\n"
+ "close work pending: %d\n\n",
+ hsic_info->id,
+ hsic_info->name,
+ DIAG_BRIDGE_GET_NAME(hsic_info->dev_id),
+ hsic_info->opened,
+ hsic_info->enabled,
+ hsic_info->suspended,
+ DIAG_MEMPOOL_GET_NAME(hsic_info->mempool),
+ work_pending(&hsic_info->read_work),
+ work_pending(&hsic_info->open_work),
+ work_pending(&hsic_info->close_work));
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_hsicinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+const struct file_operations diag_dbgfs_hsicinfo_ops = {
+ .read = diag_dbgfs_read_hsicinfo,
+};
+#endif
+#ifdef CONFIG_MSM_MHI
+static ssize_t diag_dbgfs_read_mhiinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_mhi_info *mhi_info = NULL;
+
+ if (diag_dbgfs_mhiinfo_index >= NUM_MHI_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_mhiinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_mhiinfo_index; i < NUM_MHI_DEV; i++) {
+ mhi_info = &diag_mhi[i];
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "bridge index: %s\n"
+ "mempool: %s\n"
+ "read ch opened: %d\n"
+ "read ch hdl: %pK\n"
+ "write ch opened: %d\n"
+ "write ch hdl: %pK\n"
+ "read work pending: %d\n"
+ "read done work pending: %d\n"
+ "open work pending: %d\n"
+ "close work pending: %d\n\n",
+ mhi_info->id,
+ mhi_info->name,
+ DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
+ DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
+ atomic_read(&mhi_info->read_ch.opened),
+ mhi_info->read_ch.hdl,
+ atomic_read(&mhi_info->write_ch.opened),
+ mhi_info->write_ch.hdl,
+ work_pending(&mhi_info->read_work),
+ work_pending(&mhi_info->read_done_work),
+ work_pending(&mhi_info->open_work),
+ work_pending(&mhi_info->close_work));
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_mhiinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+const struct file_operations diag_dbgfs_mhiinfo_ops = {
+ .read = diag_dbgfs_read_mhiinfo,
+};
+
+#endif
+
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diagfwd_bridge_info *info = NULL;
+
+	if (diag_dbgfs_bridgeinfo_index >= NUM_REMOTE_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_bridgeinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_bridgeinfo_index; i < NUM_REMOTE_DEV; i++) {
+ info = &bridge_info[i];
+ if (!info->inited)
+ continue;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "type: %d\n"
+ "inited: %d\n"
+ "ctxt: %d\n"
+ "dev_ops: %pK\n"
+ "dci_read_buf: %pK\n"
+ "dci_read_ptr: %pK\n"
+ "dci_read_len: %d\n\n",
+ info->id,
+ info->name,
+ info->type,
+ info->inited,
+ info->ctxt,
+ info->dev_ops,
+ info->dci_read_buf,
+ info->dci_read_ptr,
+ info->dci_read_len);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_bridgeinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+const struct file_operations diag_dbgfs_bridge_ops = {
+ .read = diag_dbgfs_read_bridge,
+};
+
+#endif
+
+const struct file_operations diag_dbgfs_status_ops = {
+ .read = diag_dbgfs_read_status,
+};
+
+const struct file_operations diag_dbgfs_smdinfo_ops = {
+ .read = diag_dbgfs_read_smdinfo,
+};
+
+const struct file_operations diag_dbgfs_socketinfo_ops = {
+ .read = diag_dbgfs_read_socketinfo,
+};
+
+const struct file_operations diag_dbgfs_glinkinfo_ops = {
+ .read = diag_dbgfs_read_glinkinfo,
+};
+
+const struct file_operations diag_dbgfs_table_ops = {
+ .read = diag_dbgfs_read_table,
+};
+
+const struct file_operations diag_dbgfs_mempool_ops = {
+ .read = diag_dbgfs_read_mempool,
+};
+
+const struct file_operations diag_dbgfs_usbinfo_ops = {
+ .read = diag_dbgfs_read_usbinfo,
+};
+
+const struct file_operations diag_dbgfs_dcistats_ops = {
+ .read = diag_dbgfs_read_dcistats,
+};
+
+const struct file_operations diag_dbgfs_power_ops = {
+ .read = diag_dbgfs_read_power,
+};
+
+#ifdef CONFIG_IPC_LOGGING
+const struct file_operations diag_dbgfs_debug_ops = {
+	.write = diag_dbgfs_write_debug,
+};
+#endif
+
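+/*
+ * Create the "diag" debugfs directory (/sys/kernel/debug/diag when
+ * debugfs is mounted at the default location) and populate it with
+ * the files wired to the handlers above. All cursor indices start at
+ * zero so the first read of each file begins at the top of its table.
+ */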
+int diag_debugfs_init(void)
+{
+ struct dentry *entry = NULL;
+
+ diag_dbgfs_dent = debugfs_create_dir("diag", 0);
+ if (IS_ERR(diag_dbgfs_dent))
+ return -ENOMEM;
+
+ entry = debugfs_create_file("status", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_status_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("smdinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_smdinfo_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("socketinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_socketinfo_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("glinkinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_glinkinfo_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("table", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_table_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("mempool", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_mempool_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("usbinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_usbinfo_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("dci_stats", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_dcistats_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("power", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_power_ops);
+ if (!entry)
+ goto err;
+
+#ifdef CONFIG_IPC_LOGGING
+ entry = debugfs_create_file("debug", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_debug_ops);
+ if (!entry)
+ goto err;
+#endif
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ entry = debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_bridge_ops);
+ if (!entry)
+ goto err;
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+ entry = debugfs_create_file("hsicinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_hsicinfo_ops);
+ if (!entry)
+ goto err;
+#endif
+#ifdef CONFIG_MSM_MHI
+ entry = debugfs_create_file("mhiinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_mhiinfo_ops);
+ if (!entry)
+ goto err;
+#endif
+#endif
+ diag_dbgfs_table_index = 0;
+ diag_dbgfs_mempool_index = 0;
+ diag_dbgfs_usbinfo_index = 0;
+ diag_dbgfs_smdinfo_index = 0;
+ diag_dbgfs_socketinfo_index = 0;
+ diag_dbgfs_hsicinfo_index = 0;
+ diag_dbgfs_bridgeinfo_index = 0;
+ diag_dbgfs_mhiinfo_index = 0;
+ diag_dbgfs_finished = 0;
+ diag_dbgfs_dci_data_index = 0;
+ diag_dbgfs_dci_finished = 0;
+
+ /* DCI related structures */
+ dci_traffic = kzalloc(sizeof(struct diag_dci_data_info) *
+ DIAG_DCI_DEBUG_CNT, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(dci_traffic))
+ pr_warn("diag: could not allocate memory for dci debug info\n");
+
+ mutex_init(&dci_stat_mutex);
+ mutex_init(&diag_dci_dbgfs_mutex);
+ return 0;
+err:
+ kfree(dci_traffic);
+ debugfs_remove_recursive(diag_dbgfs_dent);
+ return -ENOMEM;
+}
+
+void diag_debugfs_cleanup(void)
+{
+ if (diag_dbgfs_dent) {
+ debugfs_remove_recursive(diag_dbgfs_dent);
+ diag_dbgfs_dent = NULL;
+ }
+
+ kfree(dci_traffic);
+ mutex_destroy(&dci_stat_mutex);
+ mutex_destroy(&diag_dci_dbgfs_mutex);
+}
+#else
+int diag_debugfs_init(void) { return 0; }
+void diag_debugfs_cleanup(void) { }
+#endif
diff --git a/drivers/char/diag/diag_debugfs.h b/drivers/char/diag/diag_debugfs.h
new file mode 100644
index 000000000000..e8db56e5b19c
--- /dev/null
+++ b/drivers/char/diag/diag_debugfs.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_DEBUGFS_H
+#define DIAG_DEBUGFS_H
+
+int diag_debugfs_init(void);
+void diag_debugfs_cleanup(void);
+
+#endif
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
new file mode 100644
index 000000000000..839c8ca02e7c
--- /dev/null
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGIPCLOG_H
+#define DIAGIPCLOG_H
+
+#include <linux/ipc_logging.h>
+
+#define DIAG_IPC_LOG_PAGES 50
+
+#define DIAG_DEBUG_USERSPACE 0x0001
+#define DIAG_DEBUG_MUX 0x0002
+#define DIAG_DEBUG_DCI 0x0004
+#define DIAG_DEBUG_PERIPHERALS 0x0008
+#define DIAG_DEBUG_MASKS 0x0010
+#define DIAG_DEBUG_POWER 0x0020
+#define DIAG_DEBUG_BRIDGE 0x0040
+#define DIAG_DEBUG_CONTROL 0x0080
+
+#ifdef CONFIG_IPC_LOGGING
+extern uint16_t diag_debug_mask;
+extern void *diag_ipc_log;
+
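+/*
+ * Log to the diag IPC log context only when the class bit log_lvl is
+ * set in the runtime-tunable diag_debug_mask (see the debugfs "debug"
+ * file), e.g.:
+ *
+ *	DIAG_LOG(DIAG_DEBUG_MASKS, "sending ctrl pkt to %d\n", peripheral);
+ */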
+#define DIAG_LOG(log_lvl, msg, ...) \
+ do { \
+ if (diag_ipc_log && (log_lvl & diag_debug_mask)) { \
+ ipc_log_string(diag_ipc_log, \
+ "[%s] " msg, __func__, ##__VA_ARGS__); \
+ } \
+ } while (0)
+#else
+#define DIAG_LOG(log_lvl, msg, ...)
+#endif
+
+#endif
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
new file mode 100644
index 000000000000..ad6805553998
--- /dev/null
+++ b/drivers/char/diag/diag_masks.c
@@ -0,0 +1,2289 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+#define ALL_EQUIP_ID 100
+#define ALL_SSID -1
+
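+/*
+ * Set feature bit 'x' in a feature_bytes[] array that must be declared
+ * in the caller's scope (see diag_send_feature_mask_update()).
+ */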
+#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
+
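+/*
+ * True when a mask update applies to peripheral/PD 'x' for the md
+ * session 'info' in scope at the call site, or unconditionally when
+ * no session is attached (info is NULL).
+ */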
+#define diag_check_update(x) \
+	(!info || (info->peripheral_mask & MD_PERIPHERAL_MASK(x)) \
+	 || (info->peripheral_mask & MD_PERIPHERAL_PD_MASK(x)))
+
+struct diag_mask_info msg_mask;
+struct diag_mask_info msg_bt_mask;
+struct diag_mask_info log_mask;
+struct diag_mask_info event_mask;
+
+static const struct diag_ssid_range_t msg_mask_tbl[] = {
+ { .ssid_first = MSG_SSID_0, .ssid_last = MSG_SSID_0_LAST },
+ { .ssid_first = MSG_SSID_1, .ssid_last = MSG_SSID_1_LAST },
+ { .ssid_first = MSG_SSID_2, .ssid_last = MSG_SSID_2_LAST },
+ { .ssid_first = MSG_SSID_3, .ssid_last = MSG_SSID_3_LAST },
+ { .ssid_first = MSG_SSID_4, .ssid_last = MSG_SSID_4_LAST },
+ { .ssid_first = MSG_SSID_5, .ssid_last = MSG_SSID_5_LAST },
+ { .ssid_first = MSG_SSID_6, .ssid_last = MSG_SSID_6_LAST },
+ { .ssid_first = MSG_SSID_7, .ssid_last = MSG_SSID_7_LAST },
+ { .ssid_first = MSG_SSID_8, .ssid_last = MSG_SSID_8_LAST },
+ { .ssid_first = MSG_SSID_9, .ssid_last = MSG_SSID_9_LAST },
+ { .ssid_first = MSG_SSID_10, .ssid_last = MSG_SSID_10_LAST },
+ { .ssid_first = MSG_SSID_11, .ssid_last = MSG_SSID_11_LAST },
+ { .ssid_first = MSG_SSID_12, .ssid_last = MSG_SSID_12_LAST },
+ { .ssid_first = MSG_SSID_13, .ssid_last = MSG_SSID_13_LAST },
+ { .ssid_first = MSG_SSID_14, .ssid_last = MSG_SSID_14_LAST },
+ { .ssid_first = MSG_SSID_15, .ssid_last = MSG_SSID_15_LAST },
+ { .ssid_first = MSG_SSID_16, .ssid_last = MSG_SSID_16_LAST },
+ { .ssid_first = MSG_SSID_17, .ssid_last = MSG_SSID_17_LAST },
+ { .ssid_first = MSG_SSID_18, .ssid_last = MSG_SSID_18_LAST },
+ { .ssid_first = MSG_SSID_19, .ssid_last = MSG_SSID_19_LAST },
+ { .ssid_first = MSG_SSID_20, .ssid_last = MSG_SSID_20_LAST },
+ { .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST },
+ { .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST },
+ { .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST },
+ { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST },
+ { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST }
+};
+
+static int diag_apps_responds(void)
+{
+ /*
+ * Apps processor should respond to mask commands only if the
+ * Modem channel is up, the feature mask is received from Modem
+ * and if Modem supports Mask Centralization.
+ */
+ if (!chk_apps_only())
+ return 0;
+
+ if (driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+ driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open &&
+ driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+ if (driver->feature[PERIPHERAL_MODEM].mask_centralization)
+ return 1;
+ return 0;
+ }
+ return 1;
+}
+
+static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
+{
+ int i;
+ int err = 0;
+ int send_once = 0;
+ int header_len = sizeof(struct diag_ctrl_log_mask);
+ uint8_t *buf = NULL, upd = 0;
+ uint8_t *temp = NULL;
+ uint32_t mask_size = 0;
+ struct diag_ctrl_log_mask ctrl_pkt;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_log_mask_t *mask = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (driver->md_session_mask != 0) {
+ if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+ if (driver->md_session_map[peripheral])
+ mask_info =
+ driver->md_session_map[peripheral]->log_mask;
+ } else if (driver->md_session_mask &
+ MD_PERIPHERAL_PD_MASK(peripheral)) {
+ upd = diag_mask_to_pd_value(driver->md_session_mask);
+ if (upd && driver->md_session_map[upd])
+ mask_info =
+ driver->md_session_map[upd]->log_mask;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with unknown session mask\n");
+ return;
+ }
+ } else {
+ mask_info = &log_mask;
+ }
+
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
+ return;
+
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr)
+ return;
+ buf = mask_info->update_buf;
+
+ switch (mask_info->status) {
+ case DIAG_CTRL_MASK_ALL_DISABLED:
+ ctrl_pkt.equip_id = 0;
+ ctrl_pkt.num_items = 0;
+ ctrl_pkt.log_mask_size = 0;
+ send_once = 1;
+ break;
+ case DIAG_CTRL_MASK_ALL_ENABLED:
+ ctrl_pkt.equip_id = 0;
+ ctrl_pkt.num_items = 0;
+ ctrl_pkt.log_mask_size = 0;
+ send_once = 1;
+ break;
+ case DIAG_CTRL_MASK_VALID:
+ send_once = 0;
+ break;
+ default:
+ pr_debug("diag: In %s, invalid log_mask status\n", __func__);
+ return;
+ }
+
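+	/*
+	 * The shared update buffer is grown on demand: when the mask for
+	 * an equip_id does not fit, krealloc() replaces update_buf and
+	 * the larger size is remembered for subsequent updates.
+	 */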
+ mutex_lock(&mask_info->lock);
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ if (equip_id != i && equip_id != ALL_EQUIP_ID)
+ continue;
+
+ mutex_lock(&mask->lock);
+ ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.status = mask_info->status;
+ if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+ mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+ ctrl_pkt.equip_id = i;
+ ctrl_pkt.num_items = mask->num_items_tools;
+ ctrl_pkt.log_mask_size = mask_size;
+ }
+ ctrl_pkt.data_len = LOG_MASK_CTRL_HEADER_LEN + mask_size;
+
+ if (header_len + mask_size > mask_info->update_buf_len) {
+ temp = krealloc(buf, header_len + mask_size,
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err_ratelimited("diag: Unable to realloc log update buffer, new size: %d, equip_id: %d\n",
+ header_len + mask_size, equip_id);
+ mutex_unlock(&mask->lock);
+ break;
+ }
+ mask_info->update_buf = temp;
+ mask_info->update_buf_len = header_len + mask_size;
+ buf = temp;
+ }
+
+ memcpy(buf, &ctrl_pkt, header_len);
+ if (mask_size > 0 && mask_size <= LOG_MASK_SIZE)
+ memcpy(buf + header_len, mask->ptr, mask_size);
+ mutex_unlock(&mask->lock);
+
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "sending ctrl pkt to %d, e %d num_items %d size %d\n",
+ peripheral, i, ctrl_pkt.num_items,
+ ctrl_pkt.log_mask_size);
+
+ err = diagfwd_write(peripheral, TYPE_CNTL,
+ buf, header_len + mask_size);
+ if (err && err != -ENODEV)
+ pr_err_ratelimited("diag: Unable to send log masks to peripheral %d, equip_id: %d, err: %d\n",
+ peripheral, i, err);
+ if (send_once || equip_id != ALL_EQUIP_ID)
+ break;
+
+ }
+ mutex_unlock(&mask_info->lock);
+}
+
+static void diag_send_event_mask_update(uint8_t peripheral)
+{
+ uint8_t *buf = NULL, upd = 0;
+ uint8_t *temp = NULL;
+ struct diag_ctrl_event_mask header;
+ struct diag_mask_info *mask_info = NULL;
+ int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+ int write_len = 0;
+ int err = 0;
+ int temp_len = 0;
+
+ if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
+ pr_debug("diag: In %s, invalid event mask length %d\n",
+ __func__, num_bytes);
+ return;
+ }
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (driver->md_session_mask != 0) {
+ if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+ if (driver->md_session_map[peripheral])
+ mask_info =
+ driver->md_session_map[peripheral]->event_mask;
+ } else if (driver->md_session_mask &
+ MD_PERIPHERAL_PD_MASK(peripheral)) {
+ upd = diag_mask_to_pd_value(driver->md_session_mask);
+ if (upd && driver->md_session_map[upd])
+ mask_info =
+ driver->md_session_map[upd]->event_mask;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with unknown session mask\n");
+ return;
+ }
+ } else {
+ mask_info = &event_mask;
+ }
+
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
+ return;
+
+ buf = mask_info->update_buf;
+ mutex_lock(&mask_info->lock);
+ header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+ header.stream_id = 1;
+ header.status = mask_info->status;
+
+ switch (mask_info->status) {
+ case DIAG_CTRL_MASK_ALL_DISABLED:
+ header.event_config = 0;
+ header.event_mask_size = 0;
+ break;
+ case DIAG_CTRL_MASK_ALL_ENABLED:
+ header.event_config = 1;
+ header.event_mask_size = 0;
+ break;
+ case DIAG_CTRL_MASK_VALID:
+ header.event_config = 1;
+ header.event_mask_size = num_bytes;
+ if (num_bytes + sizeof(header) > mask_info->update_buf_len) {
+ temp_len = num_bytes + sizeof(header);
+ temp = krealloc(buf, temp_len, GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: Unable to realloc event mask update buffer\n");
+ goto err;
+ } else {
+ mask_info->update_buf = temp;
+ mask_info->update_buf_len = temp_len;
+ buf = temp;
+ }
+ }
+ if (num_bytes > 0 && num_bytes < mask_info->mask_len)
+ memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
+ else {
+ pr_err("diag: num_bytes(%d) is not satisfying length condition\n",
+ num_bytes);
+ goto err;
+ }
+ write_len += num_bytes;
+ break;
+ default:
+ pr_debug("diag: In %s, invalid status %d\n", __func__,
+ mask_info->status);
+ goto err;
+ }
+ header.data_len = EVENT_MASK_CTRL_HEADER_LEN + header.event_mask_size;
+ memcpy(buf, &header, sizeof(header));
+ write_len += sizeof(header);
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf, write_len);
+ if (err && err != -ENODEV)
+ pr_err_ratelimited("diag: Unable to send event masks to peripheral %d\n",
+ peripheral);
+err:
+ mutex_unlock(&mask_info->lock);
+}
+
+static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
+{
+ int i;
+ int err = 0;
+ int header_len = sizeof(struct diag_ctrl_msg_mask);
+ int temp_len = 0;
+ uint8_t *buf = NULL, upd = 0;
+ uint8_t *temp = NULL;
+ uint32_t mask_size = 0;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_ctrl_msg_mask header;
+ uint8_t msg_mask_tbl_count_local;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (driver->md_session_mask != 0) {
+ if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+ if (driver->md_session_map[peripheral])
+ mask_info =
+ driver->md_session_map[peripheral]->msg_mask;
+ } else if (driver->md_session_mask &
+ MD_PERIPHERAL_PD_MASK(peripheral)) {
+ upd = diag_mask_to_pd_value(driver->md_session_mask);
+ if (upd && driver->md_session_map[upd])
+ mask_info =
+ driver->md_session_map[upd]->msg_mask;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with unknown session mask\n");
+ return;
+ }
+ } else {
+ mask_info = &msg_mask;
+ }
+
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
+ return;
+ mutex_lock(&driver->msg_mask_lock);
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ mutex_unlock(&driver->msg_mask_lock);
+ return;
+ }
+ buf = mask_info->update_buf;
+ msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_lock(&mask_info->lock);
+ switch (mask_info->status) {
+ case DIAG_CTRL_MASK_ALL_DISABLED:
+ mask_size = 0;
+ break;
+ case DIAG_CTRL_MASK_ALL_ENABLED:
+ mask_size = 1;
+ break;
+ case DIAG_CTRL_MASK_VALID:
+ break;
+ default:
+ pr_debug("diag: In %s, invalid status: %d\n", __func__,
+ mask_info->status);
+ goto err;
+ }
+
+ for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+ mutex_lock(&driver->msg_mask_lock);
+ if (((mask->ssid_first > first) ||
+ (mask->ssid_last_tools < last)) && first != ALL_SSID) {
+ mutex_unlock(&driver->msg_mask_lock);
+ continue;
+ }
+
+ mutex_lock(&mask->lock);
+ if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+ mask_size =
+ mask->ssid_last_tools - mask->ssid_first + 1;
+ temp_len = mask_size * sizeof(uint32_t);
+ if (temp_len + header_len <= mask_info->update_buf_len)
+ goto proceed;
+ temp = krealloc(mask_info->update_buf, temp_len,
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, unable to realloc msg_mask update buffer\n",
+ __func__);
+ mask_size = (mask_info->update_buf_len -
+ header_len) / sizeof(uint32_t);
+ } else {
+ mask_info->update_buf = temp;
+ mask_info->update_buf_len = temp_len;
+ buf = temp;
+ pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n",
+ __func__, mask_info->update_buf_len);
+ }
+ } else if (mask_info->status == DIAG_CTRL_MASK_ALL_ENABLED) {
+ mask_size = 1;
+ }
+proceed:
+ header.cmd_type = DIAG_CTRL_MSG_F3_MASK;
+ header.status = mask_info->status;
+ header.stream_id = 1;
+ header.msg_mode = 0;
+ header.ssid_first = mask->ssid_first;
+ header.ssid_last = mask->ssid_last_tools;
+ header.msg_mask_size = mask_size;
+ mask_size *= sizeof(uint32_t);
+ header.data_len = MSG_MASK_CTRL_HEADER_LEN + mask_size;
+ memcpy(buf, &header, header_len);
+ if (mask_size > 0)
+ memcpy(buf + header_len, mask->ptr, mask_size);
+ mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf,
+ header_len + mask_size);
+ if (err && err != -ENODEV)
+ pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n",
+ peripheral, err);
+
+ if (first != ALL_SSID)
+ break;
+ }
+err:
+ mutex_unlock(&mask_info->lock);
+}
+
+static void diag_send_time_sync_update(uint8_t peripheral)
+{
+ struct diag_ctrl_msg_time_sync time_sync_msg;
+ int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, Invalid peripheral, %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+ __func__, peripheral, driver->diagfwd_cntl[peripheral]);
+ return;
+ }
+
+ mutex_lock(&driver->diag_cntl_mutex);
+ time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+ time_sync_msg.ctrl_pkt_data_len = 5;
+ time_sync_msg.version = 1;
+ time_sync_msg.time_api = driver->uses_time_api;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg, msg_size);
+ if (err)
+ pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ mutex_unlock(&driver->diag_cntl_mutex);
+}
+
+static void diag_send_feature_mask_update(uint8_t peripheral)
+{
+ void *buf = driver->buf_feature_mask_update;
+ int header_size = sizeof(struct diag_ctrl_feature_mask);
+ uint8_t feature_bytes[FEATURE_MASK_LEN] = {0, 0};
+ struct diag_ctrl_feature_mask feature_mask;
+ int total_len = 0;
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, Invalid peripheral, %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+ __func__, peripheral, driver->diagfwd_cntl[peripheral]);
+ return;
+ }
+
+ mutex_lock(&driver->diag_cntl_mutex);
+ /* send feature mask update */
+ feature_mask.ctrl_pkt_id = DIAG_CTRL_MSG_FEATURE;
+ feature_mask.ctrl_pkt_data_len = sizeof(uint32_t) + FEATURE_MASK_LEN;
+ feature_mask.feature_mask_len = FEATURE_MASK_LEN;
+ memcpy(buf, &feature_mask, header_size);
+ DIAG_SET_FEATURE_MASK(F_DIAG_FEATURE_MASK_SUPPORT);
+ DIAG_SET_FEATURE_MASK(F_DIAG_LOG_ON_DEMAND_APPS);
+ DIAG_SET_FEATURE_MASK(F_DIAG_STM);
+ DIAG_SET_FEATURE_MASK(F_DIAG_DCI_EXTENDED_HEADER_SUPPORT);
+ if (driver->supports_separate_cmdrsp)
+ DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
+ if (driver->supports_apps_hdlc_encoding)
+ DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
+ if (driver->supports_apps_header_untagging) {
+ if (peripheral == PERIPHERAL_MODEM ||
+ peripheral == PERIPHERAL_LPASS ||
+ peripheral == PERIPHERAL_CDSP) {
+ DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG);
+ driver->peripheral_untag[peripheral] =
+ ENABLE_PKT_HEADER_UNTAGGING;
+ }
+ }
+ DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
+ if (driver->supports_sockets)
+ DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
+
+ memcpy(buf + header_size, &feature_bytes, FEATURE_MASK_LEN);
+ total_len = header_size + FEATURE_MASK_LEN;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf, total_len);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to write feature mask to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ total_len, err);
+ mutex_unlock(&driver->diag_cntl_mutex);
+ return;
+ }
+ driver->feature[peripheral].sent_feature_mask = 1;
+ mutex_unlock(&driver->diag_cntl_mutex);
+}
+
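+/*
+ * The diag_cmd_* handlers below service mask commands from userspace.
+ * Each validates its source and destination buffers, frames a response
+ * header directly into dest_buf and returns the number of bytes
+ * written; a return of 0 means the apps processor defers the response
+ * to the modem (see diag_apps_responds()).
+ */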
+static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int write_len = 0;
+ struct diag_msg_mask_t *mask_ptr = NULL;
+ struct diag_msg_ssid_query_t rsp;
+ struct diag_ssid_range_t ssid_range;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds()) {
+ mutex_unlock(&driver->md_session_lock);
+ return 0;
+ }
+ mutex_lock(&driver->msg_mask_lock);
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
+ rsp.status = MSG_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.count = driver->msg_mask_tbl_count;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+ mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) {
+ if (write_len + sizeof(ssid_range) > dest_len) {
+ pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n",
+ __func__);
+ break;
+ }
+ ssid_range.ssid_first = mask_ptr->ssid_first;
+ ssid_range.ssid_last = mask_ptr->ssid_last_tools;
+ memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
+ write_len += sizeof(ssid_range);
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&driver->md_session_lock);
+ return write_len;
+}
+
+static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i = 0;
+ int write_len = 0;
+ int num_entries = 0;
+ int copy_len = 0;
+ struct diag_msg_mask_t *build_mask = NULL;
+ struct diag_build_mask_req_t *req = NULL;
+ struct diag_msg_build_mask_t rsp;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds())
+ return 0;
+ mutex_lock(&driver->msg_mask_lock);
+ req = (struct diag_build_mask_req_t *)src_buf;
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
+ rsp.ssid_first = req->ssid_first;
+ rsp.ssid_last = req->ssid_last;
+ rsp.status = MSG_STATUS_FAIL;
+ rsp.padding = 0;
+ build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+ if (build_mask->ssid_first != req->ssid_first)
+ continue;
+ num_entries = req->ssid_last - req->ssid_first + 1;
+ if (num_entries > build_mask->range) {
+ pr_warn("diag: In %s, truncating ssid range for ssid_first: %d ssid_last %d\n",
+ __func__, req->ssid_first, req->ssid_last);
+ num_entries = build_mask->range;
+ req->ssid_last = req->ssid_first + build_mask->range;
+ }
+ copy_len = num_entries * sizeof(uint32_t);
+ if (copy_len + sizeof(rsp) > dest_len)
+ copy_len = dest_len - sizeof(rsp);
+ memcpy(dest_buf + sizeof(rsp), build_mask->ptr, copy_len);
+ write_len += copy_len;
+ rsp.ssid_last = build_mask->ssid_last;
+ rsp.status = MSG_STATUS_SUCCESS;
+ break;
+ }
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+ mutex_unlock(&driver->msg_mask_lock);
+ return write_len;
+}
+
+static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int write_len = 0;
+ uint32_t mask_size = 0;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_build_mask_req_t *req = NULL;
+ struct diag_msg_build_mask_t rsp;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!diag_apps_responds()) {
+ mutex_unlock(&driver->md_session_lock);
+ return 0;
+ }
+
+ mutex_lock(&driver->msg_mask_lock);
+ req = (struct diag_build_mask_req_t *)src_buf;
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
+ rsp.ssid_first = req->ssid_first;
+ rsp.ssid_last = req->ssid_last;
+ rsp.status = MSG_STATUS_FAIL;
+ rsp.padding = 0;
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ if ((req->ssid_first < mask->ssid_first) ||
+ (req->ssid_first > mask->ssid_last_tools)) {
+ continue;
+ }
+ mask_size = mask->range * sizeof(uint32_t);
+ /* Copy msg mask only till the end of the rsp buffer */
+ if (mask_size + sizeof(rsp) > dest_len)
+ mask_size = dest_len - sizeof(rsp);
+ memcpy(dest_buf + sizeof(rsp), mask->ptr, mask_size);
+ write_len += mask_size;
+ rsp.status = MSG_STATUS_SUCCESS;
+ break;
+ }
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&driver->md_session_lock);
+ return write_len;
+}
+
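+/*
+ * Set a message mask range. When the requested ssid_last lies beyond
+ * the range currently allocated for the matching table entry, the
+ * per-range mask array is grown with krealloc() and ssid_last_tools /
+ * range_tools are widened so later reads see the extended range.
+ */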
+static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int write_len = 0;
+ int header_len = sizeof(struct diag_msg_build_mask_t);
+ int found = 0;
+ uint32_t mask_size = 0;
+ uint32_t offset = 0;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_msg_build_mask_t *req = NULL;
+ struct diag_msg_build_mask_t rsp;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_msg_mask_t *mask_next = NULL;
+ uint32_t *temp = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+
+ req = (struct diag_msg_build_mask_t *)src_buf;
+ mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ if (i < (driver->msg_mask_tbl_count - 1)) {
+ mask_next = mask;
+ mask_next++;
+ } else
+ mask_next = NULL;
+
+ if ((req->ssid_first < mask->ssid_first) ||
+ (req->ssid_first > mask->ssid_first + MAX_SSID_PER_RANGE) ||
+ (mask_next && (req->ssid_first >= mask_next->ssid_first))) {
+ continue;
+ }
+ mask_next = NULL;
+ found = 1;
+ mutex_lock(&mask->lock);
+ mask_size = req->ssid_last - req->ssid_first + 1;
+ if (mask_size > MAX_SSID_PER_RANGE) {
+ pr_warn("diag: In %s, truncating ssid range, %d-%d to max allowed: %d\n",
+ __func__, mask->ssid_first, mask->ssid_last,
+ MAX_SSID_PER_RANGE);
+ mask_size = MAX_SSID_PER_RANGE;
+ mask->range_tools = MAX_SSID_PER_RANGE;
+ mask->ssid_last_tools =
+ mask->ssid_first + mask->range_tools;
+ }
+ if (req->ssid_last > mask->ssid_last_tools) {
+ pr_debug("diag: Msg SSID range mismatch\n");
+ if (mask_size != MAX_SSID_PER_RANGE)
+ mask->ssid_last_tools = req->ssid_last;
+ mask->range_tools =
+ mask->ssid_last_tools - mask->ssid_first + 1;
+ temp = krealloc(mask->ptr,
+ mask->range_tools * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
+ __func__, mask_size);
+ mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ return -ENOMEM;
+ }
+ mask->ptr = temp;
+ }
+
+ offset = req->ssid_first - mask->ssid_first;
+ if (offset + mask_size > mask->range_tools) {
+ pr_err("diag: In %s, Not in msg mask range, mask_size: %d, offset: %d\n",
+ __func__, mask_size, offset);
+ mutex_unlock(&mask->lock);
+ break;
+ }
+ mask_size = mask_size * sizeof(uint32_t);
+ memcpy(mask->ptr + offset, src_buf + header_len, mask_size);
+ mutex_unlock(&mask->lock);
+ mask_info->status = DIAG_CTRL_MASK_VALID;
+ break;
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_SET_MSG_MASK;
+ rsp.ssid_first = req->ssid_first;
+ rsp.ssid_last = req->ssid_last;
+ rsp.status = found;
+ rsp.padding = 0;
+ memcpy(dest_buf, &rsp, header_len);
+ write_len += header_len;
+ if (!found)
+ goto end;
+ if (mask_size + write_len > dest_len)
+ mask_size = dest_len - write_len;
+ memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+ write_len += mask_size;
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ mutex_lock(&driver->md_session_lock);
+ diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ mutex_unlock(&driver->md_session_lock);
+ }
+end:
+ return write_len;
+}
+
+static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int write_len = 0;
+ int header_len = sizeof(struct diag_msg_config_rsp_t);
+ struct diag_msg_config_rsp_t rsp;
+ struct diag_msg_config_rsp_t *req = NULL;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+
+ req = (struct diag_msg_config_rsp_t *)src_buf;
+
+ mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
+ DIAG_CTRL_MASK_ALL_DISABLED;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ if (mask && mask->ptr) {
+ mutex_lock(&mask->lock);
+ memset(mask->ptr, req->rt_mask,
+ mask->range * sizeof(uint32_t));
+ mutex_unlock(&mask->lock);
+ }
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_SET_ALL_MSG_MASK;
+ rsp.status = MSG_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.rt_mask = req->rt_mask;
+ memcpy(dest_buf, &rsp, header_len);
+ write_len += header_len;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ mutex_lock(&driver->md_session_lock);
+ diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ mutex_unlock(&driver->md_session_lock);
+ }
+
+ return write_len;
+}
+
+static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int write_len = 0;
+ uint32_t mask_size;
+ struct diag_event_mask_config_t rsp;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds())
+ return 0;
+
+ mask_size = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+ if (mask_size + sizeof(rsp) > dest_len) {
+ pr_err("diag: In %s, invalid mask size: %d\n", __func__,
+ mask_size);
+ return -ENOMEM;
+ }
+
+ rsp.cmd_code = DIAG_CMD_GET_EVENT_MASK;
+ rsp.status = EVENT_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.num_bits = driver->last_event_id + 1;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+ memcpy(dest_buf + write_len, event_mask.ptr, mask_size);
+ write_len += mask_size;
+
+ return write_len;
+}
+
+static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int write_len = 0;
+ int mask_len = 0;
+ int header_len = sizeof(struct diag_event_mask_config_t);
+ struct diag_event_mask_config_t rsp;
+ struct diag_event_mask_config_t *req;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ mask_info = (!info) ? &event_mask : info->event_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ req = (struct diag_event_mask_config_t *)src_buf;
+ mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
+ if (mask_len <= 0 || mask_len > event_mask.mask_len) {
+ pr_err("diag: In %s, invalid event mask len: %d\n", __func__,
+ mask_len);
+ mutex_unlock(&driver->md_session_lock);
+ return -EIO;
+ }
+
+ mutex_lock(&mask_info->lock);
+ memcpy(mask_info->ptr, src_buf + header_len, mask_len);
+ mask_info->status = DIAG_CTRL_MASK_VALID;
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ rsp.cmd_code = DIAG_CMD_SET_EVENT_MASK;
+ rsp.status = EVENT_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.num_bits = driver->last_event_id + 1;
+ memcpy(dest_buf, &rsp, header_len);
+ write_len += header_len;
+ memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
+ write_len += mask_len;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ mutex_lock(&driver->md_session_lock);
+ diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
+ }
+
+ return write_len;
+}
+
+static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int write_len = 0;
+ uint8_t toggle = 0;
+ struct diag_event_report_t header;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ mask_info = (!info) ? &event_mask : info->event_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+
+ toggle = *(src_buf + 1);
+ mutex_lock(&mask_info->lock);
+ if (toggle) {
+ mask_info->status = DIAG_CTRL_MASK_ALL_ENABLED;
+ memset(mask_info->ptr, 0xFF, mask_info->mask_len);
+ } else {
+ mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+ memset(mask_info->ptr, 0, mask_info->mask_len);
+ }
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
+ header.padding = 0;
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ mutex_lock(&driver->md_session_lock);
+ diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
+ }
+ memcpy(dest_buf, &header, sizeof(header));
+ write_len += sizeof(header);
+
+ return write_len;
+}
+
+static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int status = LOG_STATUS_INVALID;
+ int write_len = 0;
+ int read_len = 0;
+ int req_header_len = sizeof(struct diag_log_config_req_t);
+ int rsp_header_len = sizeof(struct diag_log_config_rsp_t);
+ uint32_t mask_size = 0;
+ struct diag_log_mask_t *log_item = NULL;
+ struct diag_log_config_req_t *req;
+ struct diag_log_config_rsp_t rsp;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds()) {
+ mutex_unlock(&driver->md_session_lock);
+ return 0;
+ }
+
+ req = (struct diag_log_config_req_t *)src_buf;
+ read_len += req_header_len;
+
+ rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_MASK;
+ /*
+ * Don't copy the response header now. Copy at the end after
+ * calculating the status field value
+ */
+ write_len += rsp_header_len;
+
+ log_item = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!log_item->ptr) {
+ pr_err("diag: Invalid input in %s, mask: %pK\n",
+ __func__, log_item);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
+ if (log_item->equip_id != req->equip_id)
+ continue;
+ mutex_lock(&log_item->lock);
+ mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items_tools);
+ /*
+ * Make sure we have space to fill the response in the buffer.
+		 * Destination buffer should at least be able to hold equip_id
+ * (uint32_t), num_items(uint32_t), mask (mask_size) and the
+ * response header.
+ */
+ if ((mask_size + (2 * sizeof(uint32_t)) + rsp_header_len) >
+ dest_len) {
+ pr_err("diag: In %s, invalid length: %d, max rsp_len: %d\n",
+ __func__, mask_size, dest_len);
+ status = LOG_STATUS_FAIL;
+ mutex_unlock(&log_item->lock);
+ break;
+ }
+ *(uint32_t *)(dest_buf + write_len) = log_item->equip_id;
+ write_len += sizeof(uint32_t);
+ *(uint32_t *)(dest_buf + write_len) = log_item->num_items_tools;
+ write_len += sizeof(uint32_t);
+ if (mask_size > 0) {
+ memcpy(dest_buf + write_len, log_item->ptr, mask_size);
+ write_len += mask_size;
+ }
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "sending log e %d num_items %d size %d\n",
+ log_item->equip_id, log_item->num_items_tools,
+ log_item->range_tools);
+ mutex_unlock(&log_item->lock);
+ status = LOG_STATUS_SUCCESS;
+ break;
+ }
+
+ rsp.status = status;
+ memcpy(dest_buf, &rsp, rsp_header_len);
+
+ mutex_unlock(&driver->md_session_lock);
+ return write_len;
+}
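+
+/*
+ * Illustrative response layout (not driver code): a successful get-log-mask
+ * response is the diag_log_config_rsp_t header (status filled in last, as
+ * noted above), then equip_id (uint32_t), num_items (uint32_t) and
+ * mask_size bytes of log mask for the matching equipment ID.
+ */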
+
+static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ int i;
+ int write_len = 0;
+ struct diag_log_config_rsp_t rsp;
+ struct diag_log_mask_t *mask = (struct diag_log_mask_t *)log_mask.ptr;
+
+ if (!mask)
+ return -EINVAL;
+
+ if (!diag_apps_responds())
+ return 0;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_RANGE;
+ rsp.status = LOG_STATUS_SUCCESS;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+
+ for (i = 0; i < MAX_EQUIP_ID && write_len < dest_len; i++, mask++) {
+ *(uint32_t *)(dest_buf + write_len) = mask->num_items_tools;
+ write_len += sizeof(uint32_t);
+ }
+
+ return write_len;
+}
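+
+/*
+ * Illustrative response layout (not driver code): after the
+ * diag_log_config_rsp_t header, the get-log-range response carries one
+ * uint32_t num_items_tools count per equipment ID, up to MAX_EQUIP_ID
+ * entries, space in dest_buf permitting.
+ */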
+
+static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ int pid)
+{
+ int i;
+ int write_len = 0;
+ int status = LOG_STATUS_SUCCESS;
+ int read_len = 0;
+ int payload_len = 0;
+ int req_header_len = sizeof(struct diag_log_config_req_t);
+ int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
+ uint32_t mask_size = 0;
+ struct diag_log_config_req_t *req;
+ struct diag_log_config_set_rsp_t rsp;
+ struct diag_log_mask_t *mask = NULL;
+ unsigned char *temp_buf = NULL;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+
+ req = (struct diag_log_config_req_t *)src_buf;
+ read_len += req_header_len;
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (req->equip_id >= MAX_EQUIP_ID) {
+ pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
+ __func__, req->equip_id);
+ status = LOG_STATUS_INVALID;
+ }
+
+ if (req->num_items == 0) {
+ pr_err("diag: In %s, Invalid number of items in log mask request, equip_id: %d\n",
+ __func__, req->equip_id);
+ status = LOG_STATUS_INVALID;
+ }
+
+ mutex_lock(&mask_info->lock);
+ for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) {
+ if (!mask || !mask->ptr)
+ continue;
+ if (mask->equip_id != req->equip_id)
+ continue;
+ mutex_lock(&mask->lock);
+
+ DIAG_LOG(DIAG_DEBUG_MASKS, "e: %d current: %d %d new: %d %d",
+ mask->equip_id, mask->num_items_tools,
+ mask->range_tools, req->num_items,
+ LOG_ITEMS_TO_SIZE(req->num_items));
+ /*
+ * If the size of the log mask cannot fit into our
+ * buffer, trim till we have space left in the buffer.
+ * num_items should then reflect the items that we have
+ * in our buffer.
+ */
+ mask->num_items_tools = (req->num_items > MAX_ITEMS_ALLOWED) ?
+ MAX_ITEMS_ALLOWED : req->num_items;
+ mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+ memset(mask->ptr, 0, mask->range_tools);
+ if (mask_size > mask->range_tools) {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "log range mismatch, e: %d old: %d new: %d\n",
+ req->equip_id, mask->range_tools,
+ LOG_ITEMS_TO_SIZE(mask->num_items_tools));
+ /* Change in the mask reported by tools */
+ temp_buf = krealloc(mask->ptr, mask_size, GFP_KERNEL);
+ if (!temp_buf) {
+ mask_info->status = DIAG_CTRL_MASK_INVALID;
+ mutex_unlock(&mask->lock);
+ break;
+ }
+ mask->ptr = temp_buf;
+ memset(mask->ptr, 0, mask_size);
+ mask->range_tools = mask_size;
+ }
+ req->num_items = mask->num_items_tools;
+		if (mask_size > 0 &&
+		    (read_len + mask_size <= src_len))
+			memcpy(mask->ptr, src_buf + read_len, mask_size);
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "copying log mask, e %d num %d range %d size %d\n",
+ req->equip_id, mask->num_items_tools,
+ mask->range_tools, mask_size);
+ mutex_unlock(&mask->lock);
+ mask_info->status = DIAG_CTRL_MASK_VALID;
+ break;
+ }
+ mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ payload_len = LOG_ITEMS_TO_SIZE(req->num_items);
+	if ((payload_len + rsp_header_len > dest_len) ||
+	    (payload_len + read_len > src_len) || (payload_len == 0)) {
+		pr_err("diag: In %s, invalid length, payload_len: %d, header_len: %d, dest_len: %d\n",
+			__func__, payload_len, rsp_header_len, dest_len);
+ status = LOG_STATUS_FAIL;
+ }
+ rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.sub_cmd = DIAG_CMD_OP_SET_LOG_MASK;
+ rsp.status = status;
+ rsp.equip_id = req->equip_id;
+ rsp.num_items = req->num_items;
+ memcpy(dest_buf, &rsp, rsp_header_len);
+ write_len += rsp_header_len;
+ if (status != LOG_STATUS_SUCCESS)
+ goto end;
+ memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
+ write_len += payload_len;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ mutex_lock(&driver->md_session_lock);
+ diag_send_log_mask_update(i, req->equip_id);
+ mutex_unlock(&driver->md_session_lock);
+ }
+end:
+ return write_len;
+}
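+
+/*
+ * Illustrative sizing note (not driver code): requests are clamped to
+ * MAX_ITEMS_ALLOWED items; when the tool reports more items than the
+ * current buffer holds, the mask buffer is grown with krealloc() and
+ * range_tools tracks the new size, so later get-log-mask reads stay
+ * consistent with what the tool configured.
+ */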
+
+static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid)
+{
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_log_mask_t *mask = NULL;
+ struct diag_log_config_rsp_t header;
+ int write_len = 0, i;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ if (mask && mask->ptr) {
+ mutex_lock(&mask->lock);
+ memset(mask->ptr, 0, mask->range);
+ mutex_unlock(&mask->lock);
+ }
+ }
+ mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+ mutex_unlock(&driver->md_session_lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ header.cmd_code = DIAG_CMD_LOG_CONFIG;
+ header.padding[0] = 0;
+ header.padding[1] = 0;
+ header.padding[2] = 0;
+ header.sub_cmd = DIAG_CMD_OP_LOG_DISABLE;
+ header.status = LOG_STATUS_SUCCESS;
+ memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
+ write_len += sizeof(struct diag_log_config_rsp_t);
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ mutex_lock(&driver->md_session_lock);
+ diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ mutex_unlock(&driver->md_session_lock);
+ }
+
+ return write_len;
+}
+
+int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+ struct diag_ssid_range_t *range)
+{
+ if (!msg_mask || !range)
+ return -EIO;
+ if (range->ssid_last < range->ssid_first)
+ return -EINVAL;
+ msg_mask->ssid_first = range->ssid_first;
+ msg_mask->ssid_last = range->ssid_last;
+ msg_mask->ssid_last_tools = range->ssid_last;
+ msg_mask->range = msg_mask->ssid_last - msg_mask->ssid_first + 1;
+ if (msg_mask->range < MAX_SSID_PER_RANGE)
+ msg_mask->range = MAX_SSID_PER_RANGE;
+ msg_mask->range_tools = msg_mask->range;
+ mutex_init(&msg_mask->lock);
+ if (msg_mask->range > 0) {
+ msg_mask->ptr = kzalloc(msg_mask->range * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!msg_mask->ptr)
+ return -ENOMEM;
+ kmemleak_not_leak(msg_mask->ptr);
+ }
+ return 0;
+}
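+
+/*
+ * Illustrative usage (not driver code, SSID numbers made up): building a
+ * single table entry covering a 64-SSID range would look like:
+ *
+ *	struct diag_ssid_range_t range = {
+ *		.ssid_first = 4000,
+ *		.ssid_last = 4063,
+ *	};
+ *
+ *	err = diag_create_msg_mask_table_entry(mask, &range);
+ *
+ * Real ranges come from msg_mask_tbl[], as in diag_create_msg_mask_table()
+ * below.
+ */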
+
+static int diag_create_msg_mask_table(void)
+{
+ int i;
+ int err = 0;
+ struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
+ struct diag_ssid_range_t range;
+
+ mutex_lock(&msg_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
+ driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ range.ssid_first = msg_mask_tbl[i].ssid_first;
+ range.ssid_last = msg_mask_tbl[i].ssid_last;
+ err = diag_create_msg_mask_table_entry(mask, &range);
+ if (err)
+ break;
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&msg_mask.lock);
+ return err;
+}
+
+static int diag_create_build_time_mask(void)
+{
+ int i;
+ int err = 0;
+ const uint32_t *tbl = NULL;
+ uint32_t tbl_size = 0;
+ struct diag_msg_mask_t *build_mask = NULL;
+ struct diag_ssid_range_t range;
+
+ mutex_lock(&msg_bt_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
+ driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+ build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+ range.ssid_first = msg_mask_tbl[i].ssid_first;
+ range.ssid_last = msg_mask_tbl[i].ssid_last;
+ err = diag_create_msg_mask_table_entry(build_mask, &range);
+ if (err)
+ break;
+ switch (build_mask->ssid_first) {
+ case MSG_SSID_0:
+ tbl = msg_bld_masks_0;
+ tbl_size = sizeof(msg_bld_masks_0);
+ break;
+ case MSG_SSID_1:
+ tbl = msg_bld_masks_1;
+ tbl_size = sizeof(msg_bld_masks_1);
+ break;
+ case MSG_SSID_2:
+ tbl = msg_bld_masks_2;
+ tbl_size = sizeof(msg_bld_masks_2);
+ break;
+ case MSG_SSID_3:
+ tbl = msg_bld_masks_3;
+ tbl_size = sizeof(msg_bld_masks_3);
+ break;
+ case MSG_SSID_4:
+ tbl = msg_bld_masks_4;
+ tbl_size = sizeof(msg_bld_masks_4);
+ break;
+ case MSG_SSID_5:
+ tbl = msg_bld_masks_5;
+ tbl_size = sizeof(msg_bld_masks_5);
+ break;
+ case MSG_SSID_6:
+ tbl = msg_bld_masks_6;
+ tbl_size = sizeof(msg_bld_masks_6);
+ break;
+ case MSG_SSID_7:
+ tbl = msg_bld_masks_7;
+ tbl_size = sizeof(msg_bld_masks_7);
+ break;
+ case MSG_SSID_8:
+ tbl = msg_bld_masks_8;
+ tbl_size = sizeof(msg_bld_masks_8);
+ break;
+ case MSG_SSID_9:
+ tbl = msg_bld_masks_9;
+ tbl_size = sizeof(msg_bld_masks_9);
+ break;
+ case MSG_SSID_10:
+ tbl = msg_bld_masks_10;
+ tbl_size = sizeof(msg_bld_masks_10);
+ break;
+ case MSG_SSID_11:
+ tbl = msg_bld_masks_11;
+ tbl_size = sizeof(msg_bld_masks_11);
+ break;
+ case MSG_SSID_12:
+ tbl = msg_bld_masks_12;
+ tbl_size = sizeof(msg_bld_masks_12);
+ break;
+ case MSG_SSID_13:
+ tbl = msg_bld_masks_13;
+ tbl_size = sizeof(msg_bld_masks_13);
+ break;
+ case MSG_SSID_14:
+ tbl = msg_bld_masks_14;
+ tbl_size = sizeof(msg_bld_masks_14);
+ break;
+ case MSG_SSID_15:
+ tbl = msg_bld_masks_15;
+ tbl_size = sizeof(msg_bld_masks_15);
+ break;
+ case MSG_SSID_16:
+ tbl = msg_bld_masks_16;
+ tbl_size = sizeof(msg_bld_masks_16);
+ break;
+ case MSG_SSID_17:
+ tbl = msg_bld_masks_17;
+ tbl_size = sizeof(msg_bld_masks_17);
+ break;
+ case MSG_SSID_18:
+ tbl = msg_bld_masks_18;
+ tbl_size = sizeof(msg_bld_masks_18);
+ break;
+ case MSG_SSID_19:
+ tbl = msg_bld_masks_19;
+ tbl_size = sizeof(msg_bld_masks_19);
+ break;
+ case MSG_SSID_20:
+ tbl = msg_bld_masks_20;
+ tbl_size = sizeof(msg_bld_masks_20);
+ break;
+ case MSG_SSID_21:
+ tbl = msg_bld_masks_21;
+ tbl_size = sizeof(msg_bld_masks_21);
+ break;
+ case MSG_SSID_22:
+ tbl = msg_bld_masks_22;
+ tbl_size = sizeof(msg_bld_masks_22);
+ break;
+ }
+ if (!tbl)
+ continue;
+ if (tbl_size > build_mask->range * sizeof(uint32_t)) {
+ pr_warn("diag: In %s, table %d has more ssid than max, ssid_first: %d, ssid_last: %d\n",
+ __func__, i, build_mask->ssid_first,
+ build_mask->ssid_last);
+ tbl_size = build_mask->range * sizeof(uint32_t);
+ }
+ memcpy(build_mask->ptr, tbl, tbl_size);
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&msg_bt_mask.lock);
+ return err;
+}
+
+static int diag_create_log_mask_table(void)
+{
+ struct diag_log_mask_t *mask = NULL;
+ uint8_t i;
+ int err = 0;
+
+ mutex_lock(&log_mask.lock);
+ mask = (struct diag_log_mask_t *)(log_mask.ptr);
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ mask->equip_id = i;
+ mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
+ mask->num_items_tools = mask->num_items;
+ mutex_init(&mask->lock);
+ if (LOG_ITEMS_TO_SIZE(mask->num_items) > MAX_ITEMS_PER_EQUIP_ID)
+ mask->range = LOG_ITEMS_TO_SIZE(mask->num_items);
+ else
+ mask->range = MAX_ITEMS_PER_EQUIP_ID;
+ mask->range_tools = mask->range;
+ mask->ptr = kzalloc(mask->range, GFP_KERNEL);
+ if (!mask->ptr) {
+ err = -ENOMEM;
+ break;
+ }
+ kmemleak_not_leak(mask->ptr);
+ }
+ mutex_unlock(&log_mask.lock);
+ return err;
+}
+
+static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
+ int update_buf_len)
+{
+ if (!mask_info || mask_len < 0 || update_buf_len < 0)
+ return -EINVAL;
+
+ mask_info->status = DIAG_CTRL_MASK_INVALID;
+ mask_info->mask_len = mask_len;
+ mask_info->update_buf_len = update_buf_len;
+ if (mask_len > 0) {
+ mask_info->ptr = kzalloc(mask_len, GFP_KERNEL);
+ if (!mask_info->ptr)
+ return -ENOMEM;
+ kmemleak_not_leak(mask_info->ptr);
+ }
+ if (update_buf_len > 0) {
+ mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
+ if (!mask_info->update_buf) {
+ kfree(mask_info->ptr);
+ return -ENOMEM;
+ }
+ kmemleak_not_leak(mask_info->update_buf);
+ }
+ mutex_init(&mask_info->lock);
+ return 0;
+}
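+
+/*
+ * Illustrative pairing (not driver code): every successful
+ * __diag_mask_init() is balanced by __diag_mask_exit() below, which frees
+ * both the mask buffer and the update buffer under the mask lock:
+ *
+ *	err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
+ *	...
+ *	__diag_mask_exit(&log_mask);
+ */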
+
+static void __diag_mask_exit(struct diag_mask_info *mask_info)
+{
+ if (!mask_info || !mask_info->ptr)
+ return;
+
+ mutex_lock(&mask_info->lock);
+ kfree(mask_info->ptr);
+ mask_info->ptr = NULL;
+ kfree(mask_info->update_buf);
+ mask_info->update_buf = NULL;
+ mutex_unlock(&mask_info->lock);
+}
+
+int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+ int i;
+ int err = 0;
+ struct diag_log_mask_t *src_mask = NULL;
+ struct diag_log_mask_t *dest_mask = NULL;
+
+ if (!src)
+ return -EINVAL;
+
+ err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+
+ mutex_lock(&dest->lock);
+ src_mask = (struct diag_log_mask_t *)(src->ptr);
+ dest_mask = (struct diag_log_mask_t *)(dest->ptr);
+
+ dest->mask_len = src->mask_len;
+ dest->status = src->status;
+
+ for (i = 0; i < MAX_EQUIP_ID; i++, src_mask++, dest_mask++) {
+ dest_mask->equip_id = src_mask->equip_id;
+ dest_mask->num_items = src_mask->num_items;
+ dest_mask->num_items_tools = src_mask->num_items_tools;
+ mutex_init(&dest_mask->lock);
+ dest_mask->range = src_mask->range;
+ dest_mask->range_tools = src_mask->range_tools;
+ dest_mask->ptr = kzalloc(dest_mask->range_tools, GFP_KERNEL);
+ if (!dest_mask->ptr) {
+ err = -ENOMEM;
+ break;
+ }
+ kmemleak_not_leak(dest_mask->ptr);
+ memcpy(dest_mask->ptr, src_mask->ptr, dest_mask->range_tools);
+ }
+ mutex_unlock(&dest->lock);
+
+ return err;
+}
+
+void diag_log_mask_free(struct diag_mask_info *mask_info)
+{
+ int i;
+ struct diag_log_mask_t *mask = NULL;
+
+ if (!mask_info || !mask_info->ptr)
+ return;
+
+ mutex_lock(&mask_info->lock);
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ kfree(mask->ptr);
+ mask->ptr = NULL;
+ }
+ mutex_unlock(&mask_info->lock);
+
+ __diag_mask_exit(mask_info);
+}
+
+static int diag_msg_mask_init(void)
+{
+ int err = 0;
+ int i;
+
+ err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+ err = diag_create_msg_mask_table();
+ if (err) {
+ pr_err("diag: Unable to create msg masks, err: %d\n", err);
+ return err;
+ }
+ mutex_lock(&driver->msg_mask_lock);
+ driver->msg_mask = &msg_mask;
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->max_ssid_count[i] = 0;
+ mutex_unlock(&driver->msg_mask_lock);
+
+ return 0;
+}
+
+int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+ int i;
+ int err = 0;
+ struct diag_msg_mask_t *src_mask = NULL;
+ struct diag_msg_mask_t *dest_mask = NULL;
+ struct diag_ssid_range_t range;
+
+ if (!src || !dest)
+ return -EINVAL;
+
+ err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+ mutex_lock(&dest->lock);
+ mutex_lock(&driver->msg_mask_lock);
+ src_mask = (struct diag_msg_mask_t *)src->ptr;
+ dest_mask = (struct diag_msg_mask_t *)dest->ptr;
+
+ dest->mask_len = src->mask_len;
+ dest->status = src->status;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++) {
+ range.ssid_first = src_mask->ssid_first;
+ range.ssid_last = src_mask->ssid_last;
+ err = diag_create_msg_mask_table_entry(dest_mask, &range);
+ if (err)
+ break;
+ memcpy(dest_mask->ptr, src_mask->ptr,
+ dest_mask->range * sizeof(uint32_t));
+ src_mask++;
+ dest_mask++;
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&dest->lock);
+ return err;
+}
+
+void diag_msg_mask_free(struct diag_mask_info *mask_info)
+{
+ int i;
+ struct diag_msg_mask_t *mask = NULL;
+
+ if (!mask_info || !mask_info->ptr)
+ return;
+ mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ kfree(mask->ptr);
+ mask->ptr = NULL;
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ __diag_mask_exit(mask_info);
+}
+
+static void diag_msg_mask_exit(void)
+{
+ int i;
+	struct diag_msg_mask_t *mask = NULL;
+
+	mutex_lock(&driver->msg_mask_lock);
+ mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
+ if (mask) {
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+ kfree(mask->ptr);
+ kfree(msg_mask.ptr);
+ msg_mask.ptr = NULL;
+ }
+ kfree(msg_mask.update_buf);
+ msg_mask.update_buf = NULL;
+ mutex_unlock(&driver->msg_mask_lock);
+}
+
+static int diag_build_time_mask_init(void)
+{
+ int err = 0;
+
+	/* There is no need for an update buffer for build time masks */
+ err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0);
+ if (err)
+ return err;
+ err = diag_create_build_time_mask();
+ if (err) {
+ pr_err("diag: Unable to create msg build time masks, err: %d\n",
+ err);
+ return err;
+ }
+ driver->build_time_mask = &msg_bt_mask;
+ return 0;
+}
+
+static void diag_build_time_mask_exit(void)
+{
+ int i;
+	struct diag_msg_mask_t *mask = NULL;
+
+	mutex_lock(&driver->msg_mask_lock);
+ mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
+ if (mask) {
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, mask++)
+ kfree(mask->ptr);
+ kfree(msg_bt_mask.ptr);
+ msg_bt_mask.ptr = NULL;
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+}
+
+static int diag_log_mask_init(void)
+{
+ int err = 0;
+ int i;
+
+ err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+ err = diag_create_log_mask_table();
+ if (err)
+ return err;
+ driver->log_mask = &log_mask;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->num_equip_id[i] = 0;
+
+ return 0;
+}
+
+static void diag_log_mask_exit(void)
+{
+ int i;
+ struct diag_log_mask_t *mask = NULL;
+
+ mask = (struct diag_log_mask_t *)(log_mask.ptr);
+ if (mask) {
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++)
+ kfree(mask->ptr);
+ kfree(log_mask.ptr);
+ }
+
+ kfree(log_mask.update_buf);
+}
+
+static int diag_event_mask_init(void)
+{
+ int err = 0;
+ int i;
+
+ err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+ driver->event_mask_size = EVENT_MASK_SIZE;
+ driver->last_event_id = APPS_EVENT_LAST_ID;
+ driver->event_mask = &event_mask;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->num_event_id[i] = 0;
+
+ return 0;
+}
+
+int diag_event_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src)
+{
+ int err = 0;
+
+ if (!src || !dest)
+ return -EINVAL;
+
+ err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+
+ mutex_lock(&dest->lock);
+ dest->mask_len = src->mask_len;
+ dest->status = src->status;
+ memcpy(dest->ptr, src->ptr, dest->mask_len);
+ mutex_unlock(&dest->lock);
+
+ return err;
+}
+
+void diag_event_mask_free(struct diag_mask_info *mask_info)
+{
+ if (!mask_info)
+ return;
+
+ __diag_mask_exit(mask_info);
+}
+
+static void diag_event_mask_exit(void)
+{
+ kfree(event_mask.ptr);
+ kfree(event_mask.update_buf);
+}
+
+int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int err = 0;
+ int len = 0;
+ int copy_len = 0;
+ int total_len = 0;
+ struct diag_msg_mask_userspace_t header;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_msg_mask_t *mask = NULL;
+ unsigned char *ptr = NULL;
+
+ if (!buf || count == 0)
+ return -EINVAL;
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!mask_info)
+ return -EIO;
+
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
+ mutex_lock(&driver->diag_maskclear_mutex);
+ if (driver->mask_clear) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:%s: count = %zu\n", __func__, count);
+ mutex_unlock(&driver->diag_maskclear_mutex);
+ return -EIO;
+ }
+ mutex_unlock(&driver->diag_maskclear_mutex);
+ mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
+ mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ ptr = mask_info->update_buf;
+ len = 0;
+ mutex_lock(&mask->lock);
+ header.ssid_first = mask->ssid_first;
+ header.ssid_last = mask->ssid_last_tools;
+ header.range = mask->range_tools;
+ memcpy(ptr, &header, sizeof(header));
+ len += sizeof(header);
+ copy_len = (sizeof(uint32_t) * mask->range_tools);
+ if ((len + copy_len) > mask_info->update_buf_len) {
+ pr_err("diag: In %s, no space to update msg mask, first: %d, last: %d\n",
+ __func__, mask->ssid_first,
+ mask->ssid_last_tools);
+ mutex_unlock(&mask->lock);
+ continue;
+ }
+ memcpy(ptr + len, mask->ptr, copy_len);
+ len += copy_len;
+ mutex_unlock(&mask->lock);
+ /* + sizeof(int) to account for data_type already in buf */
+ if (total_len + sizeof(int) + len > count) {
+ pr_err("diag: In %s, unable to send msg masks to user space, total_len: %d, count: %zu\n",
+ __func__, total_len, count);
+ err = -ENOMEM;
+ break;
+ }
+ err = copy_to_user(buf + total_len, (void *)ptr, len);
+ if (err) {
+ pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
+ __func__, err);
+ break;
+ }
+ total_len += len;
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return err ? err : total_len;
+}
+
+int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int err = 0;
+ int len = 0;
+ int copy_len = 0;
+ int total_len = 0;
+ struct diag_log_mask_userspace_t header;
+ struct diag_log_mask_t *mask = NULL;
+ struct diag_mask_info *mask_info = NULL;
+ unsigned char *ptr = NULL;
+
+ if (!buf || count == 0)
+ return -EINVAL;
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!mask_info)
+ return -EIO;
+
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mask_info->lock);
+ mask = (struct diag_log_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ ptr = mask_info->update_buf;
+ len = 0;
+ mutex_lock(&mask->lock);
+ header.equip_id = mask->equip_id;
+ header.num_items = mask->num_items_tools;
+ memcpy(ptr, &header, sizeof(header));
+ len += sizeof(header);
+ copy_len = LOG_ITEMS_TO_SIZE(header.num_items);
+ if ((len + copy_len) > mask_info->update_buf_len) {
+ pr_err("diag: In %s, no space to update log mask, equip_id: %d\n",
+ __func__, mask->equip_id);
+ mutex_unlock(&mask->lock);
+ continue;
+ }
+ memcpy(ptr + len, mask->ptr, copy_len);
+ len += copy_len;
+ mutex_unlock(&mask->lock);
+ /* + sizeof(int) to account for data_type already in buf */
+ if (total_len + sizeof(int) + len > count) {
+ pr_err("diag: In %s, unable to send log masks to user space, total_len: %d, count: %zu\n",
+ __func__, total_len, count);
+ err = -ENOMEM;
+ break;
+ }
+ err = copy_to_user(buf + total_len, (void *)ptr, len);
+ if (err) {
+ pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
+ __func__, err);
+ break;
+ }
+ total_len += len;
+ }
+ mutex_unlock(&mask_info->lock);
+
+ return err ? err : total_len;
+}
+
+void diag_send_updates_peripheral(uint8_t peripheral)
+{
+ diag_send_feature_mask_update(peripheral);
+ if (driver->time_sync_enabled)
+ diag_send_time_sync_update(peripheral);
+ mutex_lock(&driver->md_session_lock);
+ diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
+ diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
+ diag_send_event_mask_update(peripheral);
+ mutex_unlock(&driver->md_session_lock);
+ diag_send_real_time_update(peripheral,
+ driver->real_time_mode[DIAG_LOCAL_PROC]);
+ diag_send_peripheral_buffering_mode(
+ &driver->buffering_mode[peripheral]);
+}
+
+int diag_process_apps_masks(unsigned char *buf, int len, int pid)
+{
+ int size = 0;
+ int sub_cmd = 0;
+ int (*hdlr)(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len, int pid) = NULL;
+
+ if (!buf || len <= 0)
+ return -EINVAL;
+
+ if (*buf == DIAG_CMD_LOG_CONFIG) {
+ sub_cmd = *(int *)(buf + sizeof(int));
+ switch (sub_cmd) {
+ case DIAG_CMD_OP_LOG_DISABLE:
+ hdlr = diag_cmd_disable_log_mask;
+ break;
+ case DIAG_CMD_OP_GET_LOG_RANGE:
+ hdlr = diag_cmd_get_log_range;
+ break;
+ case DIAG_CMD_OP_SET_LOG_MASK:
+ hdlr = diag_cmd_set_log_mask;
+ break;
+ case DIAG_CMD_OP_GET_LOG_MASK:
+ hdlr = diag_cmd_get_log_mask;
+ break;
+ }
+ } else if (*buf == DIAG_CMD_MSG_CONFIG) {
+ sub_cmd = *(uint8_t *)(buf + sizeof(uint8_t));
+ switch (sub_cmd) {
+ case DIAG_CMD_OP_GET_SSID_RANGE:
+ hdlr = diag_cmd_get_ssid_range;
+ break;
+ case DIAG_CMD_OP_GET_BUILD_MASK:
+ hdlr = diag_cmd_get_build_mask;
+ break;
+ case DIAG_CMD_OP_GET_MSG_MASK:
+ hdlr = diag_cmd_get_msg_mask;
+ break;
+ case DIAG_CMD_OP_SET_MSG_MASK:
+ hdlr = diag_cmd_set_msg_mask;
+ break;
+ case DIAG_CMD_OP_SET_ALL_MSG_MASK:
+ hdlr = diag_cmd_set_all_msg_mask;
+ break;
+ }
+ } else if (*buf == DIAG_CMD_GET_EVENT_MASK) {
+ hdlr = diag_cmd_get_event_mask;
+ } else if (*buf == DIAG_CMD_SET_EVENT_MASK) {
+ hdlr = diag_cmd_update_event_mask;
+ } else if (*buf == DIAG_CMD_EVENT_TOGGLE) {
+ hdlr = diag_cmd_toggle_events;
+ }
+
+ if (hdlr)
+ size = hdlr(buf, len, driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE, pid);
+
+ return (size > 0) ? size : 0;
+}
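+
+/*
+ * Illustrative call flow (not driver code): the command path feeds raw
+ * apps-processor mask commands through the dispatcher above, e.g.:
+ *
+ *	write_len = diag_process_apps_masks(buf, len, pid);
+ *	if (write_len > 0)
+ *		... driver->apps_rsp_buf now holds write_len response bytes ...
+ *
+ * Every handler frames its response into driver->apps_rsp_buf, bounded by
+ * DIAG_MAX_RSP_SIZE.
+ */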
+
+int diag_masks_init(void)
+{
+	int err = 0;
+
+	err = diag_msg_mask_init();
+ if (err)
+ goto fail;
+
+ err = diag_build_time_mask_init();
+ if (err)
+ goto fail;
+
+ err = diag_log_mask_init();
+ if (err)
+ goto fail;
+
+ err = diag_event_mask_init();
+ if (err)
+ goto fail;
+
+	if (!driver->buf_feature_mask_update) {
+		driver->buf_feature_mask_update = kzalloc(sizeof(
+			struct diag_ctrl_feature_mask) +
+			FEATURE_MASK_LEN, GFP_KERNEL);
+		if (!driver->buf_feature_mask_update)
+ goto fail;
+ kmemleak_not_leak(driver->buf_feature_mask_update);
+ }
+
+ return 0;
+fail:
+ pr_err("diag: Could not initialize diag mask buffers\n");
+ diag_masks_exit();
+ return -ENOMEM;
+}
+
+void diag_masks_exit(void)
+{
+ diag_msg_mask_exit();
+ diag_build_time_mask_exit();
+ diag_log_mask_exit();
+ diag_event_mask_exit();
+ kfree(driver->buf_feature_mask_update);
+}
diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h
new file mode 100644
index 000000000000..6edeee954d74
--- /dev/null
+++ b/drivers/char/diag/diag_masks.h
@@ -0,0 +1,179 @@
+/* Copyright (c) 2013-2015, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MASKS_H
+#define DIAG_MASKS_H
+
+#include "diagfwd.h"
+
+struct diag_log_mask_t {
+ uint8_t equip_id;
+ uint32_t num_items;
+ uint32_t num_items_tools;
+ uint32_t range;
+ uint32_t range_tools;
+ struct mutex lock;
+ uint8_t *ptr;
+};
+
+struct diag_ssid_range_t {
+ uint16_t ssid_first;
+ uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_mask_t {
+ uint32_t ssid_first;
+ uint32_t ssid_last;
+ uint32_t ssid_last_tools;
+ uint32_t range;
+ uint32_t range_tools;
+ struct mutex lock;
+ uint32_t *ptr;
+};
+
+struct diag_log_config_req_t {
+ uint8_t cmd_code;
+ uint8_t padding[3];
+ uint32_t sub_cmd;
+ uint32_t equip_id;
+ uint32_t num_items;
+} __packed;
+
+struct diag_log_config_rsp_t {
+ uint8_t cmd_code;
+ uint8_t padding[3];
+ uint32_t sub_cmd;
+ uint32_t status;
+} __packed;
+
+struct diag_log_config_set_rsp_t {
+ uint8_t cmd_code;
+ uint8_t padding[3];
+ uint32_t sub_cmd;
+ uint32_t status;
+ uint32_t equip_id;
+ uint32_t num_items;
+} __packed;
+
+struct diag_log_on_demand_rsp_t {
+ uint8_t cmd_code;
+ uint16_t log_code;
+ uint8_t status;
+} __packed;
+
+struct diag_event_report_t {
+ uint8_t cmd_code;
+ uint16_t padding;
+} __packed;
+
+struct diag_event_mask_config_t {
+ uint8_t cmd_code;
+ uint8_t status;
+ uint16_t padding;
+ uint16_t num_bits;
+} __packed;
+
+struct diag_msg_config_rsp_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint8_t status;
+ uint8_t padding;
+ uint32_t rt_mask;
+} __packed;
+
+struct diag_msg_ssid_query_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint8_t status;
+ uint8_t padding;
+ uint32_t count;
+} __packed;
+
+struct diag_build_mask_req_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint16_t ssid_first;
+ uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_build_mask_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint16_t ssid_first;
+ uint16_t ssid_last;
+ uint8_t status;
+ uint8_t padding;
+} __packed;
+
+struct diag_msg_mask_userspace_t {
+ uint32_t ssid_first;
+ uint32_t ssid_last;
+ uint32_t range;
+} __packed;
+
+struct diag_log_mask_userspace_t {
+ uint8_t equip_id;
+ uint32_t num_items;
+} __packed;
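+
+/*
+ * Illustrative note (not driver code): diag_copy_to_user_log_mask() emits
+ * one diag_log_mask_userspace_t header per equipment ID, immediately
+ * followed by LOG_ITEMS_TO_SIZE(num_items) bytes of mask payload;
+ * diag_copy_to_user_msg_mask() does the same with
+ * diag_msg_mask_userspace_t and range * sizeof(uint32_t) mask bytes.
+ */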
+
+#define MAX_EQUIP_ID 16
+#define MSG_MASK_SIZE (MSG_MASK_TBL_CNT * sizeof(struct diag_msg_mask_t))
+#define LOG_MASK_SIZE (MAX_EQUIP_ID * sizeof(struct diag_log_mask_t))
+#define EVENT_MASK_SIZE 513
+#define MAX_ITEMS_PER_EQUIP_ID 512
+#define MAX_ITEMS_ALLOWED 0xFFF
+
+#define LOG_MASK_CTRL_HEADER_LEN 11
+#define MSG_MASK_CTRL_HEADER_LEN 11
+#define EVENT_MASK_CTRL_HEADER_LEN 7
+
+#define LOG_STATUS_SUCCESS 0
+#define LOG_STATUS_INVALID 1
+#define LOG_STATUS_FAIL 2
+
+#define MSG_STATUS_FAIL 0
+#define MSG_STATUS_SUCCESS 1
+
+#define EVENT_STATUS_SUCCESS 0
+#define EVENT_STATUS_FAIL 1
+
+#define DIAG_CTRL_MASK_INVALID 0
+#define DIAG_CTRL_MASK_ALL_DISABLED 1
+#define DIAG_CTRL_MASK_ALL_ENABLED 2
+#define DIAG_CTRL_MASK_VALID 3
+
+extern struct diag_mask_info msg_mask;
+extern struct diag_mask_info msg_bt_mask;
+extern struct diag_mask_info log_mask;
+extern struct diag_mask_info event_mask;
+
+int diag_masks_init(void);
+void diag_masks_exit(void);
+int diag_log_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src);
+int diag_msg_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src);
+int diag_event_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src);
+void diag_log_mask_free(struct diag_mask_info *mask_info);
+void diag_msg_mask_free(struct diag_mask_info *mask_info);
+void diag_event_mask_free(struct diag_mask_info *mask_info);
+int diag_process_apps_masks(unsigned char *buf, int len, int pid);
+void diag_send_updates_peripheral(uint8_t peripheral);
+
+extern int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+ struct diag_ssid_range_t *range);
+extern int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info);
+extern int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info);
+#endif
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
new file mode 100644
index 000000000000..aa45c2e7ec7b
--- /dev/null
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -0,0 +1,483 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diag_memorydevice.h"
+#include "diagfwd_bridge.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
+ {
+ .id = DIAG_MD_LOCAL,
+ .ctx = 0,
+ .mempool = POOL_TYPE_MUX_APPS,
+ .num_tbl_entries = 0,
+ .md_info_inited = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = DIAG_MD_MDM,
+ .ctx = 0,
+ .mempool = POOL_TYPE_MDM_MUX,
+ .num_tbl_entries = 0,
+ .md_info_inited = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ },
+ {
+ .id = DIAG_MD_MDM2,
+ .ctx = 0,
+ .mempool = POOL_TYPE_MDM2_MUX,
+ .num_tbl_entries = 0,
+ .md_info_inited = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ },
+ {
+ .id = DIAG_MD_SMUX,
+ .ctx = 0,
+ .mempool = POOL_TYPE_QSC_MUX,
+ .num_tbl_entries = 0,
+ .md_info_inited = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ }
+#endif
+};
+
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
+{
+ if (id < 0 || id >= NUM_DIAG_MD_DEV || !ops)
+ return -EINVAL;
+
+ diag_md[id].ops = ops;
+ diag_md[id].ctx = ctx;
+ return 0;
+}
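+
+/*
+ * Illustrative usage (not driver code): the mux layer registers per-proc
+ * callbacks at init time, as diag_mux_register() does:
+ *
+ *	err = diag_md_register(DIAG_MD_LOCAL, ctx, ops);
+ *
+ * ops->write_done() is later invoked from diag_md_close_all(),
+ * diag_md_copy_to_user() and diag_md_close_peripheral() to recycle
+ * drained buffers.
+ */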
+
+void diag_md_open_all(void)
+{
+ int i;
+ struct diag_md_info *ch = NULL;
+
+ for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ if (!ch->md_info_inited)
+ continue;
+ if (ch->ops && ch->ops->open)
+ ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+ }
+}
+
+void diag_md_close_all(void)
+{
+ int i, j;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+ struct diag_buf_tbl_t *entry = NULL;
+
+ for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ if (!ch->md_info_inited)
+ continue;
+
+ if (ch->ops && ch->ops->close)
+ ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+
+ /*
+ * When we close the Memory device mode, make sure we flush the
+ * internal buffers in the table so that there are no stale
+ * entries.
+ */
+ spin_lock_irqsave(&ch->lock, flags);
+ for (j = 0; j < ch->num_tbl_entries; j++) {
+ entry = &ch->tbl[j];
+ if (entry->len <= 0)
+ continue;
+ if (ch->ops && ch->ops->write_done)
+ ch->ops->write_done(entry->buf, entry->len,
+ entry->ctx,
+ DIAG_MEMORY_DEVICE_MODE);
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->ctx = 0;
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ diag_ws_reset(DIAG_WS_MUX);
+}
+
+int diag_md_write(int id, unsigned char *buf, int len, int ctx)
+{
+ int i, pid = 0;
+ uint8_t found = 0;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+	int peripheral;
+ struct diag_md_session_t *session_info = NULL;
+
+ if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+ return -EINVAL;
+
+ if (!buf || len < 0)
+ return -EINVAL;
+
+	peripheral = diag_md_get_peripheral(ctx);
+ if (peripheral < 0)
+ return -EINVAL;
+
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_peripheral(peripheral);
+ if (!session_info) {
+ mutex_unlock(&driver->md_session_lock);
+ return -EIO;
+ }
+ pid = session_info->pid;
+ mutex_unlock(&driver->md_session_lock);
+
+ ch = &diag_md[id];
+ if (!ch || !ch->md_info_inited)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+ if (ch->tbl[i].buf != buf)
+ continue;
+ found = 1;
+ pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
+ buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (found)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+ if (ch->tbl[i].len == 0) {
+ ch->tbl[i].buf = buf;
+ ch->tbl[i].len = len;
+ ch->tbl[i].ctx = ctx;
+ found = 1;
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ }
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (!found) {
+ pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
+ id);
+ return -ENOMEM;
+ }
+
+ found = 0;
+ for (i = 0; i < driver->num_clients && !found; i++) {
+ if ((driver->client_map[i].pid != pid) ||
+ (driver->client_map[i].pid == 0))
+ continue;
+
+ found = 1;
+ if (!(driver->data_ready[i] & USER_SPACE_DATA_TYPE)) {
+ driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
+ pr_debug("diag: wake up logging process\n");
+ wake_up_interruptible(&driver->wait_q);
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
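+
+/*
+ * Illustrative buffer lifecycle (not driver code):
+ *
+ *	diag_md_write()         parks buf in ch->tbl[] and wakes the client
+ *	                        on driver->wait_q
+ *	diag_md_copy_to_user()  drains the entry to user space and calls
+ *	                        ops->write_done() to recycle buf
+ *
+ * Writing the same buffer again before it is drained is rejected above
+ * with -ENOMEM.
+ */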
+
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+ struct diag_md_session_t *info)
+{
+ int i, j;
+ int err = 0;
+ int ret = *pret;
+ int num_data = 0;
+ int remote_token;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+ struct diag_buf_tbl_t *entry = NULL;
+ uint8_t drain_again = 0;
+	int peripheral = 0;
+ struct diag_md_session_t *session_info = NULL;
+ struct pid *pid_struct = NULL;
+
+ for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
+ ch = &diag_md[i];
+ if (!ch->md_info_inited)
+ continue;
+ for (j = 0; j < ch->num_tbl_entries && !err; j++) {
+ entry = &ch->tbl[j];
+ if (entry->len <= 0 || entry->buf == NULL)
+ continue;
+
+ peripheral = diag_md_get_peripheral(entry->ctx);
+ if (peripheral < 0)
+ goto drop_data;
+
+ session_info =
+ diag_md_session_get_peripheral(peripheral);
+			if (!session_info)
+				goto drop_data;
+
+			if (info && session_info->pid != info->pid)
+				continue;
+			if (info && (info->peripheral_mask &
+			    MD_PERIPHERAL_MASK(peripheral)) == 0)
+				goto drop_data;
+ pid_struct = find_get_pid(session_info->pid);
+ if (!pid_struct) {
+ err = -ESRCH;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: no md_session_map[%d] with pid %d exists, err: %d\n",
+				peripheral, session_info->pid, err);
+ goto drop_data;
+ }
+			/*
+			 * If the data is from a remote processor, copy the
+			 * remote token first.
+			 */
+ if (i > 0) {
+ if ((ret + (3 * sizeof(int)) + entry->len) >=
+ buf_size) {
+ drain_again = 1;
+ break;
+ }
+ } else {
+ if ((ret + (2 * sizeof(int)) + entry->len) >=
+ buf_size) {
+ drain_again = 1;
+ break;
+ }
+ }
+ if (i > 0) {
+ remote_token = diag_get_remote(i);
+ if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + ret,
+ &remote_token,
+ sizeof(int));
+ if (err)
+ goto drop_data;
+ ret += sizeof(int);
+ }
+ }
+
+ /* Copy the length of data being passed */
+ if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + ret,
+ (void *)&(entry->len),
+ sizeof(int));
+ if (err)
+ goto drop_data;
+ ret += sizeof(int);
+ }
+
+ /* Copy the actual data being passed */
+ if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + ret,
+ (void *)entry->buf,
+ entry->len);
+ if (err)
+ goto drop_data;
+ ret += entry->len;
+ }
+			/*
+			 * The data is now copied to the user space client.
+			 * Notify that the write is complete and delete its
+			 * entry from the table.
+			 */
+ num_data++;
+drop_data:
+ spin_lock_irqsave(&ch->lock, flags);
+ if (ch->ops && ch->ops->write_done)
+ ch->ops->write_done(entry->buf, entry->len,
+ entry->ctx,
+ DIAG_MEMORY_DEVICE_MODE);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->ctx = 0;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+ }
+
+ *pret = ret;
+ if (pid_struct && get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + sizeof(int),
+ (void *)&num_data,
+ sizeof(int));
+ }
+ diag_ws_on_copy_complete(DIAG_WS_MUX);
+ if (drain_again)
+ chk_logging_wakeup();
+
+ return err;
+}
+
+int diag_md_close_peripheral(int id, uint8_t peripheral)
+{
+ int i;
+ uint8_t found = 0;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+ struct diag_buf_tbl_t *entry = NULL;
+
+ if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+ return -EINVAL;
+
+ ch = &diag_md[id];
+ if (!ch || !ch->md_info_inited)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+ entry = &ch->tbl[i];
+
+ if (peripheral > NUM_PERIPHERALS) {
+ if (GET_PD_CTXT(entry->ctx) != peripheral)
+ continue;
+ } else {
+ if (GET_BUF_PERIPHERAL(entry->ctx) !=
+ peripheral)
+ continue;
+ }
+ found = 1;
+ if (ch->ops && ch->ops->write_done) {
+ ch->ops->write_done(entry->buf, entry->len,
+ entry->ctx,
+ DIAG_MEMORY_DEVICE_MODE);
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->ctx = 0;
+ }
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ return 0;
+}
+
+int diag_md_init(void)
+{
+ int i, j;
+ struct diag_md_info *ch = NULL;
+
+ for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
+ ch = &diag_md[i];
+ ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+		ch->tbl = kcalloc(ch->num_tbl_entries,
+				  sizeof(struct diag_buf_tbl_t),
+				  GFP_KERNEL);
+ if (!ch->tbl)
+ goto fail;
+
+ for (j = 0; j < ch->num_tbl_entries; j++) {
+ ch->tbl[j].buf = NULL;
+ ch->tbl[j].len = 0;
+ ch->tbl[j].ctx = 0;
+ }
+ spin_lock_init(&(ch->lock));
+ ch->md_info_inited = 1;
+ }
+
+ return 0;
+
+fail:
+ diag_md_exit();
+ return -ENOMEM;
+}
+
+int diag_md_mdm_init(void)
+{
+ int i, j;
+ struct diag_md_info *ch = NULL;
+
+ for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+ ch->tbl = kcalloc(ch->num_tbl_entries, sizeof(*ch->tbl),
+ GFP_KERNEL);
+ if (!ch->tbl)
+ goto fail;
+
+ for (j = 0; j < ch->num_tbl_entries; j++) {
+ ch->tbl[j].buf = NULL;
+ ch->tbl[j].len = 0;
+ ch->tbl[j].ctx = 0;
+ }
+ spin_lock_init(&(ch->lock));
+ ch->md_info_inited = 1;
+ }
+
+ return 0;
+
+fail:
+ diag_md_mdm_exit();
+ return -ENOMEM;
+}
+
+void diag_md_exit(void)
+{
+ int i;
+ struct diag_md_info *ch = NULL;
+
+ for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
+ ch = &diag_md[i];
+ kfree(ch->tbl);
+ ch->num_tbl_entries = 0;
+ ch->ops = NULL;
+ }
+}
+
+void diag_md_mdm_exit(void)
+{
+ int i;
+ struct diag_md_info *ch = NULL;
+
+ for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ kfree(ch->tbl);
+ ch->num_tbl_entries = 0;
+ ch->ops = NULL;
+ }
+}
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
new file mode 100644
index 000000000000..4d65dedfdb58
--- /dev/null
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MEMORYDEVICE_H
+#define DIAG_MEMORYDEVICE_H
+
+#define DIAG_MD_LOCAL 0
+#define DIAG_MD_LOCAL_LAST 1
+#define DIAG_MD_BRIDGE_BASE DIAG_MD_LOCAL_LAST
+#define DIAG_MD_MDM (DIAG_MD_BRIDGE_BASE)
+#define DIAG_MD_MDM2 (DIAG_MD_BRIDGE_BASE + 1)
+#define DIAG_MD_SMUX (DIAG_MD_BRIDGE_BASE + 2)
+#define DIAG_MD_BRIDGE_LAST (DIAG_MD_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_MD_DEV DIAG_MD_LOCAL_LAST
+#else
+#define NUM_DIAG_MD_DEV DIAG_MD_BRIDGE_LAST
+#endif
+
+struct diag_buf_tbl_t {
+ unsigned char *buf;
+ int len;
+ int ctx;
+};
+
+struct diag_md_info {
+ int id;
+ int ctx;
+ int mempool;
+ int num_tbl_entries;
+ int md_info_inited;
+ spinlock_t lock;
+ struct diag_buf_tbl_t *tbl;
+ struct diag_mux_ops *ops;
+};
+
+extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
+
+int diag_md_init(void);
+int diag_md_mdm_init(void);
+void diag_md_exit(void);
+void diag_md_mdm_exit(void);
+void diag_md_open_all(void);
+void diag_md_close_all(void);
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
+int diag_md_close_peripheral(int id, uint8_t peripheral);
+int diag_md_write(int id, unsigned char *buf, int len, int ctx);
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+ struct diag_md_session_t *info);
+#endif
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
new file mode 100644
index 000000000000..8d766e1ae583
--- /dev/null
+++ b/drivers/char/diag/diag_mux.c
@@ -0,0 +1,290 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diag_mux.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+#include "diag_ipc_logging.h"
+
+struct diag_mux_state_t *diag_mux;
+static struct diag_logger_t usb_logger;
+static struct diag_logger_t md_logger;
+
+static struct diag_logger_ops usb_log_ops = {
+ .open = diag_usb_connect_all,
+ .close = diag_usb_disconnect_all,
+ .queue_read = diag_usb_queue_read,
+ .write = diag_usb_write,
+ .close_peripheral = NULL
+};
+
+static struct diag_logger_ops md_log_ops = {
+ .open = diag_md_open_all,
+ .close = diag_md_close_all,
+ .queue_read = NULL,
+ .write = diag_md_write,
+ .close_peripheral = diag_md_close_peripheral,
+};
+
+int diag_mux_init(void)
+{
+ diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
+ GFP_KERNEL);
+ if (!diag_mux)
+ return -ENOMEM;
+ kmemleak_not_leak(diag_mux);
+
+ usb_logger.mode = DIAG_USB_MODE;
+ usb_logger.log_ops = &usb_log_ops;
+
+ md_logger.mode = DIAG_MEMORY_DEVICE_MODE;
+ md_logger.log_ops = &md_log_ops;
+ diag_md_init();
+
+ /*
+ * Set USB logging as the default logger. This is the mode
+ * Diag should be in when it initializes.
+ */
+ diag_mux->usb_ptr = &usb_logger;
+ diag_mux->md_ptr = &md_logger;
+ diag_mux->logger = &usb_logger;
+ diag_mux->mux_mask = 0;
+ diag_mux->mode = DIAG_USB_MODE;
+ return 0;
+}
+
+void diag_mux_exit(void)
+{
+ kfree(diag_mux);
+}
+
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops)
+{
+	int err = 0;
+
+	if (!ops)
+ return -EINVAL;
+
+ if (proc < 0 || proc >= NUM_MUX_PROC)
+ return 0;
+
+ /* Register with USB logger */
+ usb_logger.ops[proc] = ops;
+ err = diag_usb_register(proc, ctx, ops);
+ if (err) {
+ pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
+ proc, err);
+ return err;
+ }
+
+ md_logger.ops[proc] = ops;
+ err = diag_md_register(proc, ctx, ops);
+ if (err) {
+ pr_err("diag: MUX: unable to register md operations for proc: %d, err: %d\n",
+ proc, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int diag_mux_queue_read(int proc)
+{
+ struct diag_logger_t *logger = NULL;
+
+ if (proc < 0 || proc >= NUM_MUX_PROC)
+ return -EINVAL;
+ if (!diag_mux)
+ return -EIO;
+
+ if (diag_mux->mode == DIAG_MULTI_MODE)
+ logger = diag_mux->usb_ptr;
+ else
+ logger = diag_mux->logger;
+
+ if (logger && logger->log_ops && logger->log_ops->queue_read)
+ return logger->log_ops->queue_read(proc);
+
+ return 0;
+}
+
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
+{
+ struct diag_logger_t *logger = NULL;
+ int peripheral, upd;
+
+ if (proc < 0 || proc >= NUM_MUX_PROC)
+ return -EINVAL;
+ if (!diag_mux)
+ return -EIO;
+
+ upd = GET_PD_CTXT(ctx);
+ if (upd) {
+ switch (upd) {
+ case DIAG_ID_MPSS:
+ upd = PERIPHERAL_MODEM;
+ break;
+ case DIAG_ID_LPASS:
+ upd = PERIPHERAL_LPASS;
+ break;
+ case DIAG_ID_CDSP:
+ upd = PERIPHERAL_CDSP;
+ break;
+ case UPD_WLAN:
+ if (!driver->pd_logging_mode[0])
+ upd = PERIPHERAL_MODEM;
+ break;
+ case UPD_AUDIO:
+ if (!driver->pd_logging_mode[1])
+ upd = PERIPHERAL_LPASS;
+ break;
+ case UPD_SENSORS:
+ if (!driver->pd_logging_mode[2])
+ upd = PERIPHERAL_LPASS;
+ break;
+ default:
+ pr_err("diag: invalid pd ctxt= %d\n", upd);
+ return -EINVAL;
+ }
+ if (((MD_PERIPHERAL_MASK(upd)) &
+ (diag_mux->mux_mask)) &&
+ driver->md_session_map[upd])
+ logger = diag_mux->md_ptr;
+ else
+ logger = diag_mux->usb_ptr;
+ } else {
+ peripheral = GET_BUF_PERIPHERAL(ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+
+ if (MD_PERIPHERAL_MASK(peripheral) &
+ diag_mux->mux_mask)
+ logger = diag_mux->md_ptr;
+ else
+ logger = diag_mux->usb_ptr;
+ }
+
+ if (logger && logger->log_ops && logger->log_ops->write)
+ return logger->log_ops->write(proc, buf, len, ctx);
+ return 0;
+}
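+
+/*
+ * Illustrative routing rule (not driver code): a buffer goes to the
+ * memory-device logger when its peripheral's MD_PERIPHERAL_MASK() bit is
+ * set in diag_mux->mux_mask (and, for user PDs, a session owns it);
+ * otherwise it goes out over USB.
+ */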
+
+int diag_mux_close_peripheral(int proc, uint8_t peripheral)
+{
+	struct diag_logger_t *logger = NULL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+ return -EINVAL;
+
+ /* Peripheral should account for Apps data as well */
+ if (peripheral > NUM_PERIPHERALS) {
+ if (driver->num_pd_session) {
+ if (peripheral > NUM_MD_SESSIONS)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (!diag_mux)
+ return -EIO;
+
+ if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+ logger = diag_mux->md_ptr;
+ else
+ logger = diag_mux->logger;
+
+ if (logger && logger->log_ops && logger->log_ops->close_peripheral)
+ return logger->log_ops->close_peripheral(proc, peripheral);
+ return 0;
+}
+
+int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
+{
+ unsigned int new_mask = 0;
+
+ if (!req_mode)
+ return -EINVAL;
+
+ if (*peripheral_mask <= 0 ||
+ (*peripheral_mask > (DIAG_CON_ALL | DIAG_CON_UPD_ALL))) {
+		pr_err("diag: Invalid mask %d in %s\n", *peripheral_mask, __func__);
+ return -EINVAL;
+ }
+
+ switch (*req_mode) {
+ case DIAG_USB_MODE:
+ new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
+ if (new_mask != DIAG_CON_NONE)
+ *req_mode = DIAG_MULTI_MODE;
+ if (new_mask == DIAG_CON_ALL)
+ *req_mode = DIAG_MEMORY_DEVICE_MODE;
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ new_mask = (*peripheral_mask) | diag_mux->mux_mask;
+ if (new_mask != DIAG_CON_ALL)
+ *req_mode = DIAG_MULTI_MODE;
+ break;
+ default:
+ pr_err("diag: Invalid mode %d in %s\n", *req_mode, __func__);
+ return -EINVAL;
+ }
+
+ switch (diag_mux->mode) {
+ case DIAG_USB_MODE:
+ if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+ diag_mux->usb_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->md_ptr;
+ diag_mux->md_ptr->log_ops->open();
+ } else if (*req_mode == DIAG_MULTI_MODE) {
+ diag_mux->md_ptr->log_ops->open();
+ diag_mux->logger = NULL;
+ }
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ if (*req_mode == DIAG_USB_MODE) {
+ diag_mux->md_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->usb_ptr;
+ diag_mux->usb_ptr->log_ops->open();
+ } else if (*req_mode == DIAG_MULTI_MODE) {
+ diag_mux->usb_ptr->log_ops->open();
+ diag_mux->logger = NULL;
+ }
+ break;
+ case DIAG_MULTI_MODE:
+ if (*req_mode == DIAG_USB_MODE) {
+ diag_mux->md_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->usb_ptr;
+ } else if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+ diag_mux->usb_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->md_ptr;
+ }
+ break;
+ }
+ diag_mux->mode = *req_mode;
+ diag_mux->mux_mask = new_mask;
+ *peripheral_mask = new_mask;
+ return 0;
+}
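To make the mask arithmetic in diag_mux_switch_logging() above concrete, here is a minimal standalone sketch of the DIAG_MEMORY_DEVICE_MODE branch. It is illustrative only, not part of the patch; the DIAG_CON_* values are copied from diagchar.h, which appears later in this commit.

	#include <stdio.h>

	#define DIAG_CON_MPSS	0x0002		/* matches diagchar.h */
	#define DIAG_CON_ALL	0x007F		/* all seven subsystem bits */

	int main(void)
	{
		unsigned int mux_mask = 0x0000;			/* everything on USB */
		unsigned int peripheral_mask = DIAG_CON_MPSS;	/* move MPSS to MD */

		/* DIAG_MEMORY_DEVICE_MODE branch: add the requested bits */
		unsigned int new_mask = peripheral_mask | mux_mask;

		/* Only MPSS moved, so the mux must run in DIAG_MULTI_MODE */
		if (new_mask != DIAG_CON_ALL)
			printf("req_mode -> DIAG_MULTI_MODE, new_mask = 0x%04x\n",
			       new_mask);
		return 0;
	}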
diff --git a/drivers/char/diag/diag_mux.h b/drivers/char/diag/diag_mux.h
new file mode 100644
index 000000000000..e1fcebbe6fd1
--- /dev/null
+++ b/drivers/char/diag/diag_mux.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_MUX_H
+#define DIAG_MUX_H
+#include "diagchar.h"
+
+struct diag_mux_state_t {
+ struct diag_logger_t *logger;
+ struct diag_logger_t *usb_ptr;
+ struct diag_logger_t *md_ptr;
+ unsigned int mux_mask;
+ unsigned int mode;
+};
+
+struct diag_mux_ops {
+ int (*open)(int id, int mode);
+ int (*close)(int id, int mode);
+ int (*read_done)(unsigned char *buf, int len, int id);
+ int (*write_done)(unsigned char *buf, int len, int buf_ctx,
+ int id);
+};
+
+#define DIAG_USB_MODE 0
+#define DIAG_MEMORY_DEVICE_MODE 1
+#define DIAG_NO_LOGGING_MODE 2
+#define DIAG_MULTI_MODE 3
+
+#define DIAG_MUX_LOCAL 0
+#define DIAG_MUX_LOCAL_LAST 1
+#define DIAG_MUX_BRIDGE_BASE DIAG_MUX_LOCAL_LAST
+#define DIAG_MUX_MDM (DIAG_MUX_BRIDGE_BASE)
+#define DIAG_MUX_MDM2 (DIAG_MUX_BRIDGE_BASE + 1)
+#define DIAG_MUX_SMUX (DIAG_MUX_BRIDGE_BASE + 2)
+#define DIAG_MUX_BRIDGE_LAST (DIAG_MUX_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MUX_PROC DIAG_MUX_LOCAL_LAST
+#else
+#define NUM_MUX_PROC DIAG_MUX_BRIDGE_LAST
+#endif
+
+struct diag_logger_ops {
+ void (*open)(void);
+ void (*close)(void);
+ int (*queue_read)(int id);
+ int (*write)(int id, unsigned char *buf, int len, int ctx);
+ int (*close_peripheral)(int id, uint8_t peripheral);
+};
+
+struct diag_logger_t {
+ int mode;
+ struct diag_mux_ops *ops[NUM_MUX_PROC];
+ struct diag_logger_ops *log_ops;
+};
+
+extern struct diag_mux_state_t *diag_mux;
+
+int diag_mux_init(void);
+void diag_mux_exit(void);
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops);
+int diag_mux_queue_read(int proc);
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx);
+int diag_mux_close_peripheral(int proc, uint8_t peripheral);
+int diag_mux_open_all(struct diag_logger_t *logger);
+int diag_mux_close_all(void);
+int diag_mux_switch_logging(int *new_mode, int *peripheral_mask);
+#endif
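For reference, a logger front end hooks into this interface roughly as follows. This is an illustrative sketch against the declarations above, not code from the patch, and the callback bodies are placeholders.

	static int my_open(int id, int mode) { return 0; }
	static int my_close(int id, int mode) { return 0; }
	static int my_read_done(unsigned char *buf, int len, int id) { return 0; }
	static int my_write_done(unsigned char *buf, int len, int buf_ctx, int id)
	{
		return 0;
	}

	static struct diag_mux_ops my_mux_ops = {
		.open = my_open,
		.close = my_close,
		.read_done = my_read_done,
		.write_done = my_write_done,
	};

	/* Registers the same ops with both the USB and memory-device loggers */
	static int my_mux_init(void)
	{
		return diag_mux_register(DIAG_MUX_LOCAL, 0, &my_mux_ops);
	}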
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
new file mode 100644
index 000000000000..87d021f6a956
--- /dev/null
+++ b/drivers/char/diag/diag_usb.c
@@ -0,0 +1,686 @@
+/* Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/list.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diag_usb.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diag_ipc_logging.h"
+
+#define DIAG_USB_STRING_SZ 10
+#define DIAG_USB_MAX_SIZE 16384
+
+struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV] = {
+ {
+ .id = DIAG_USB_LOCAL,
+ .name = DIAG_LEGACY,
+ .enabled = 0,
+ .mempool = POOL_TYPE_MUX_APPS,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = DIAG_USB_MDM,
+ .name = DIAG_MDM,
+ .enabled = 0,
+ .mempool = POOL_TYPE_MDM_MUX,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ },
+ {
+ .id = DIAG_USB_MDM2,
+ .name = DIAG_MDM2,
+ .enabled = 0,
+ .mempool = POOL_TYPE_MDM2_MUX,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ },
+ {
+ .id = DIAG_USB_QSC,
+ .name = DIAG_QSC,
+ .enabled = 0,
+ .mempool = POOL_TYPE_QSC_MUX,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ }
+#endif
+};
+
+static int diag_usb_buf_tbl_add(struct diag_usb_info *usb_info,
+ unsigned char *buf, uint32_t len, int ctxt)
+{
+ struct list_head *start, *temp;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+
+ list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+ entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+ if (entry->buf == buf) {
+ atomic_inc(&entry->ref_count);
+ return 0;
+ }
+ }
+
+ /* New buffer, not found in the list */
+ entry = kzalloc(sizeof(struct diag_usb_buf_tbl_t), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->buf = buf;
+ entry->ctxt = ctxt;
+ entry->len = len;
+ atomic_set(&entry->ref_count, 1);
+ INIT_LIST_HEAD(&entry->track);
+ list_add_tail(&entry->track, &usb_info->buf_tbl);
+
+ return 0;
+}
+
+static void diag_usb_buf_tbl_remove(struct diag_usb_info *usb_info,
+ unsigned char *buf)
+{
+ struct list_head *start, *temp;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+
+ list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+ entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+ if (entry->buf == buf) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+ atomic_dec(&entry->ref_count);
+ /*
+ * Remove reference from the table if it is the
+ * only instance of the buffer
+ */
+ if (atomic_read(&entry->ref_count) == 0)
+ list_del(&entry->track);
+ break;
+ }
+ }
+}
+
+static struct diag_usb_buf_tbl_t *diag_usb_buf_tbl_get(
+ struct diag_usb_info *usb_info,
+ unsigned char *buf)
+{
+ struct list_head *start, *temp;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+
+ list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+ entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+ if (entry->buf == buf) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+ atomic_dec(&entry->ref_count);
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * This function is called asynchronously when USB is connected and
+ * synchronously when Diag wants to connect to USB explicitly.
+ */
+static void usb_connect(struct diag_usb_info *ch)
+{
+ int err = 0;
+ int num_write = 0;
+ int num_read = 1; /* Only one read buffer for any USB channel */
+
+ if (!ch || !atomic_read(&ch->connected))
+ return;
+
+ num_write = diag_mempools[ch->mempool].poolsize;
+ err = usb_diag_alloc_req(ch->hdl, num_write, num_read);
+ if (err) {
+ pr_err("diag: Unable to allocate usb requests for %s, write: %d read: %d, err: %d\n",
+ ch->name, num_write, num_read, err);
+ return;
+ }
+
+ if (ch->ops && ch->ops->open) {
+ if (atomic_read(&ch->diag_state)) {
+ ch->ops->open(ch->ctxt, DIAG_USB_MODE);
+ } else {
+ /*
+ * This case indicates that the USB is connected
+ * but the logging is still happening in MEMORY
+ * DEVICE MODE. Continue the logging without
+ * resetting the buffers.
+ */
+ }
+ }
+ /* As soon as we open the channel, queue a read */
+ queue_work(ch->usb_wq, &(ch->read_work));
+}
+
+static void usb_connect_work_fn(struct work_struct *work)
+{
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ connect_work);
+ usb_connect(ch);
+}
+
+/*
+ * This function is called asynchronously when USB is disconnected
+ * and synchronously when Diag wants to disconnect from USB
+ * explicitly.
+ */
+static void usb_disconnect(struct diag_usb_info *ch)
+{
+ if (!ch)
+ return;
+
+ if (!atomic_read(&ch->connected) &&
+ driver->usb_connected && diag_mask_param())
+ diag_clear_masks(0);
+
+	/* ch was already checked for NULL above */
+	if (ch->ops && ch->ops->close)
+ ch->ops->close(ch->ctxt, DIAG_USB_MODE);
+}
+
+static void usb_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ disconnect_work);
+ usb_disconnect(ch);
+}
+
+static void usb_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ unsigned long flags;
+ struct diag_request *req = NULL;
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ read_work);
+ if (!ch)
+ return;
+
+ if (!atomic_read(&ch->connected) || !ch->enabled ||
+ atomic_read(&ch->read_pending) || !atomic_read(&ch->diag_state)) {
+ pr_debug_ratelimited("diag: Discarding USB read, ch: %s e: %d, c: %d, p: %d, d: %d\n",
+ ch->name, ch->enabled,
+ atomic_read(&ch->connected),
+ atomic_read(&ch->read_pending),
+ atomic_read(&ch->diag_state));
+ return;
+ }
+
+ spin_lock_irqsave(&ch->lock, flags);
+ req = ch->read_ptr;
+ if (req) {
+ atomic_set(&ch->read_pending, 1);
+ req->buf = ch->read_buf;
+ req->length = USB_MAX_OUT_BUF;
+ err = usb_diag_read(ch->hdl, req);
+ if (err) {
+ pr_debug("diag: In %s, error in reading from USB %s, err: %d\n",
+ __func__, ch->name, err);
+ atomic_set(&ch->read_pending, 0);
+ queue_work(ch->usb_wq, &(ch->read_work));
+ }
+ } else {
+ pr_err_ratelimited("diag: In %s invalid read req\n", __func__);
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+static void usb_read_done_work_fn(struct work_struct *work)
+{
+ struct diag_request *req = NULL;
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ read_done_work);
+ if (!ch)
+ return;
+
+ /*
+ * USB is disconnected/Disabled before the previous read completed.
+ * Discard the packet and don't do any further processing.
+ */
+ if (!atomic_read(&ch->connected) || !ch->enabled ||
+ !atomic_read(&ch->diag_state))
+ return;
+
+ req = ch->read_ptr;
+ ch->read_cnt++;
+
+ if (ch->ops && ch->ops->read_done && req->status >= 0)
+ ch->ops->read_done(req->buf, req->actual, ch->ctxt);
+}
+
+static void diag_usb_write_done(struct diag_usb_info *ch,
+ struct diag_request *req)
+{
+ int ctxt = 0;
+ int len = 0;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+ unsigned char *buf = NULL;
+ unsigned long flags;
+
+ if (!ch || !req)
+ return;
+
+ ch->write_cnt++;
+ entry = diag_usb_buf_tbl_get(ch, req->context);
+ if (!entry) {
+ pr_err_ratelimited("diag: In %s, unable to find entry %pK in the table\n",
+ __func__, req->context);
+ return;
+ }
+ if (atomic_read(&entry->ref_count) != 0) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "partial write_done ref %d\n",
+ atomic_read(&entry->ref_count));
+ diag_ws_on_copy_complete(DIAG_WS_MUX);
+ diagmem_free(driver, req, ch->mempool);
+ return;
+ }
+	spin_lock_irqsave(&ch->write_lock, flags);
+	list_del(&entry->track);
+	ctxt = entry->ctxt;
+	buf = entry->buf;
+	len = entry->len;
+	kfree(entry);
+	/* Log ctxt after it is read from the entry, not its initial zero */
+	DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %d\n", ctxt);
+	diag_ws_on_copy_complete(DIAG_WS_MUX);
+
+ if (ch->ops && ch->ops->write_done)
+ ch->ops->write_done(buf, len, ctxt, DIAG_USB_MODE);
+ buf = NULL;
+ len = 0;
+ ctxt = 0;
+ spin_unlock_irqrestore(&ch->write_lock, flags);
+ diagmem_free(driver, req, ch->mempool);
+}
+
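The ref_count handling in diag_usb_write_done() above means that when one source buffer is queued as several USB requests, every completion but the last takes the partial path, and only the final one removes the table entry and invokes write_done. A minimal model of that lifecycle (a sketch with a plain int standing in for the atomic):

	#include <stdio.h>

	int main(void)
	{
		int ref_count = 3;	/* one reference per queued USB request */
		int i;

		for (i = 1; i <= 3; i++) {
			ref_count--;	/* what diag_usb_buf_tbl_get() does */
			if (ref_count != 0)
				printf("completion %d: partial, entry kept\n", i);
			else
				printf("completion %d: full, entry freed\n", i);
		}
		return 0;
	}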
+static void diag_usb_notifier(void *priv, unsigned event,
+ struct diag_request *d_req)
+{
+ int id = 0;
+ unsigned long flags;
+ struct diag_usb_info *usb_info = NULL;
+
+ id = (int)(uintptr_t)priv;
+ if (id < 0 || id >= NUM_DIAG_USB_DEV)
+ return;
+ usb_info = &diag_usb[id];
+
+ switch (event) {
+ case USB_DIAG_CONNECT:
+ usb_info->max_size = usb_diag_request_size(usb_info->hdl);
+ atomic_set(&usb_info->connected, 1);
+ pr_info("diag: USB channel %s connected\n", usb_info->name);
+ queue_work(usb_info->usb_wq,
+ &usb_info->connect_work);
+ break;
+ case USB_DIAG_DISCONNECT:
+ atomic_set(&usb_info->connected, 0);
+ pr_info("diag: USB channel %s disconnected\n", usb_info->name);
+ queue_work(usb_info->usb_wq,
+ &usb_info->disconnect_work);
+ break;
+ case USB_DIAG_READ_DONE:
+ spin_lock_irqsave(&usb_info->lock, flags);
+ usb_info->read_ptr = d_req;
+ spin_unlock_irqrestore(&usb_info->lock, flags);
+ atomic_set(&usb_info->read_pending, 0);
+ queue_work(usb_info->usb_wq,
+ &usb_info->read_done_work);
+ break;
+ case USB_DIAG_WRITE_DONE:
+ diag_usb_write_done(usb_info, d_req);
+ break;
+ default:
+ pr_err_ratelimited("diag: Unknown event from USB diag\n");
+ break;
+ }
+}
+
+int diag_usb_queue_read(int id)
+{
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ queue_work(diag_usb[id].usb_wq, &(diag_usb[id].read_work));
+ return 0;
+}
+
+static int diag_usb_write_ext(struct diag_usb_info *usb_info,
+ unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ int write_len = 0;
+ int bytes_remaining = len;
+ int offset = 0;
+ unsigned long flags;
+ struct diag_request *req = NULL;
+
+ if (!usb_info || !buf || len <= 0) {
+ pr_err_ratelimited("diag: In %s, usb_info: %pK buf: %pK, len: %d\n",
+ __func__, usb_info, buf, len);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&usb_info->write_lock, flags);
+ while (bytes_remaining > 0) {
+ req = diagmem_alloc(driver, sizeof(struct diag_request),
+ usb_info->mempool);
+ if (!req) {
+ /*
+ * This should never happen. It either means that we are
+ * trying to write more buffers than the max supported
+			 * by this particular diag USB channel at any given
+ * instance, or the previous write ptrs are stuck in
+ * the USB layer.
+ */
+ pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+ __func__, usb_info->name);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ write_len = (bytes_remaining > usb_info->max_size) ?
+ usb_info->max_size : (bytes_remaining);
+
+ req->buf = buf + offset;
+ req->length = write_len;
+ req->context = (void *)buf;
+
+ if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+ !atomic_read(&usb_info->diag_state)) {
+ pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+ usb_info->name);
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENODEV;
+ }
+
+ if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ err = usb_diag_write(usb_info->hdl, req);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+ __func__, usb_info->name, err);
+ DIAG_LOG(DIAG_DEBUG_MUX,
+				"ERR! unable to write to usb, err: %d\n", err);
+ diag_ws_on_copy_fail(DIAG_WS_MUX);
+ diag_usb_buf_tbl_remove(usb_info, buf);
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return err;
+ }
+ offset += write_len;
+ bytes_remaining -= write_len;
+ DIAG_LOG(DIAG_DEBUG_MUX,
+ "bytes_remaining: %d write_len: %d, len: %d\n",
+ bytes_remaining, write_len, len);
+ }
+	DIAG_LOG(DIAG_DEBUG_MUX, "done writing!\n");
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+ return 0;
+}
+
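As a concrete illustration of the chunking loop above (a standalone sketch, not driver code): a 40000-byte buffer against the default 16384-byte max_size is queued as three requests, each carrying req->context = buf so that diag_usb_write_done() finds the single reference-counted table entry.

	#include <stdio.h>

	int main(void)
	{
		int len = 40000, max_size = 16384;	/* DIAG_USB_MAX_SIZE */
		int offset = 0, bytes_remaining = len, write_len;

		while (bytes_remaining > 0) {
			write_len = (bytes_remaining > max_size) ?
					max_size : bytes_remaining;
			printf("request: offset=%d len=%d\n", offset, write_len);
			offset += write_len;
			bytes_remaining -= write_len;
		}
		/* Three requests: 16384 + 16384 + 7232 = 40000 bytes */
		return 0;
	}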
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ struct diag_request *req = NULL;
+ struct diag_usb_info *usb_info = NULL;
+ unsigned long flags;
+
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+
+ usb_info = &diag_usb[id];
+
+ if (len > usb_info->max_size) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "len: %d, max_size: %d\n",
+ len, usb_info->max_size);
+ return diag_usb_write_ext(usb_info, buf, len, ctxt);
+ }
+
+ req = diagmem_alloc(driver, sizeof(struct diag_request),
+ usb_info->mempool);
+ if (!req) {
+ /*
+ * This should never happen. It either means that we are
+ * trying to write more buffers than the max supported by
+	 * this particular diag USB channel at any given instance,
+ * or the previous write ptrs are stuck in the USB layer.
+ */
+ pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+ __func__, usb_info->name);
+ return -ENOMEM;
+ }
+
+ req->buf = buf;
+ req->length = len;
+ req->context = (void *)buf;
+
+ if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+ !atomic_read(&usb_info->diag_state)) {
+ pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+ usb_info->name);
+ diagmem_free(driver, req, usb_info->mempool);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&usb_info->write_lock, flags);
+ if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+ DIAG_LOG(DIAG_DEBUG_MUX,
+ "ERR! unable to add buf %pK to table\n",
+ buf);
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ err = usb_diag_write(usb_info->hdl, req);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+ __func__, usb_info->name, err);
+ diag_ws_on_copy_fail(DIAG_WS_MUX);
+ DIAG_LOG(DIAG_DEBUG_MUX,
+			"ERR! unable to write to usb, err: %d\n", err);
+ diag_usb_buf_tbl_remove(usb_info, buf);
+ diagmem_free(driver, req, usb_info->mempool);
+ }
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+ return err;
+}
+
+/*
+ * This function performs USB connect operations wrt Diag synchronously. It
+ * doesn't translate to actual USB connect. This is used when Diag switches
+ * logging to USB mode and wants to mimic USB connection.
+ */
+void diag_usb_connect_all(void)
+{
+ int i = 0;
+ struct diag_usb_info *usb_info = NULL;
+
+ for (i = 0; i < NUM_DIAG_USB_DEV; i++) {
+ usb_info = &diag_usb[i];
+ if (!usb_info->enabled)
+ continue;
+ atomic_set(&usb_info->diag_state, 1);
+ usb_connect(usb_info);
+ }
+}
+
+/*
+ * This function performs USB disconnect operations wrt Diag synchronously.
+ * It doesn't translate to actual USB disconnect. This is used when Diag
+ * switches logging from USB mode and wants to mimic USB disconnect.
+ */
+void diag_usb_disconnect_all(void)
+{
+ int i = 0;
+ struct diag_usb_info *usb_info = NULL;
+
+ for (i = 0; i < NUM_DIAG_USB_DEV; i++) {
+ usb_info = &diag_usb[i];
+ if (!usb_info->enabled)
+ continue;
+ atomic_set(&usb_info->diag_state, 0);
+ usb_disconnect(usb_info);
+ }
+}
+
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+ struct diag_usb_info *ch = NULL;
+ unsigned char wq_name[DIAG_USB_NAME_SZ + DIAG_USB_STRING_SZ];
+
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err("diag: Unable to register with USB, id: %d\n", id);
+ return -EIO;
+ }
+
+ if (!ops) {
+ pr_err("diag: Invalid operations for USB\n");
+ return -EIO;
+ }
+
+ ch = &diag_usb[id];
+ ch->ops = ops;
+ ch->ctxt = ctxt;
+ spin_lock_init(&ch->lock);
+ spin_lock_init(&ch->write_lock);
+ ch->read_buf = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+ if (!ch->read_buf)
+ goto err;
+ ch->read_ptr = kzalloc(sizeof(struct diag_request), GFP_KERNEL);
+ if (!ch->read_ptr)
+ goto err;
+ atomic_set(&ch->connected, 0);
+ atomic_set(&ch->read_pending, 0);
+ /*
+ * This function is called when the mux registers with Diag-USB.
+ * The registration happens during boot up and Diag always starts
+ * in USB mode. Set the state to 1.
+ */
+ atomic_set(&ch->diag_state, 1);
+ INIT_LIST_HEAD(&ch->buf_tbl);
+ diagmem_init(driver, ch->mempool);
+ INIT_WORK(&(ch->read_work), usb_read_work_fn);
+ INIT_WORK(&(ch->read_done_work), usb_read_done_work_fn);
+ INIT_WORK(&(ch->connect_work), usb_connect_work_fn);
+ INIT_WORK(&(ch->disconnect_work), usb_disconnect_work_fn);
+	strlcpy(wq_name, "DIAG_USB_", sizeof(wq_name));
+	/* Bound the concatenation by the destination buffer, not the source */
+	strlcat(wq_name, ch->name, sizeof(wq_name));
+ ch->usb_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->usb_wq)
+ goto err;
+ ch->hdl = usb_diag_open(ch->name, (void *)(uintptr_t)id,
+ diag_usb_notifier);
+ if (IS_ERR(ch->hdl)) {
+ pr_err("diag: Unable to open USB channel %s\n", ch->name);
+ goto err;
+ }
+ ch->enabled = 1;
+ pr_debug("diag: Successfully registered USB %s\n", ch->name);
+ return 0;
+
+err:
+ if (ch->usb_wq)
+ destroy_workqueue(ch->usb_wq);
+ kfree(ch->read_ptr);
+ kfree(ch->read_buf);
+ return -ENOMEM;
+}
+
+void diag_usb_exit(int id)
+{
+ struct diag_usb_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err("diag: In %s, incorrect id %d\n", __func__, id);
+ return;
+ }
+
+ ch = &diag_usb[id];
+ ch->ops = NULL;
+ atomic_set(&ch->connected, 0);
+ atomic_set(&ch->read_pending, 0);
+ atomic_set(&ch->diag_state, 0);
+ ch->enabled = 0;
+ ch->ctxt = 0;
+ ch->read_cnt = 0;
+ ch->write_cnt = 0;
+ diagmem_exit(driver, ch->mempool);
+ ch->mempool = 0;
+ if (ch->hdl) {
+ usb_diag_close(ch->hdl);
+ ch->hdl = NULL;
+ }
+ if (ch->usb_wq)
+ destroy_workqueue(ch->usb_wq);
+ kfree(ch->read_ptr);
+ ch->read_ptr = NULL;
+ kfree(ch->read_buf);
+ ch->read_buf = NULL;
+}
+
diff --git a/drivers/char/diag/diag_usb.h b/drivers/char/diag/diag_usb.h
new file mode 100644
index 000000000000..cf4d4db3e3c3
--- /dev/null
+++ b/drivers/char/diag/diag_usb.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGUSB_H
+#define DIAGUSB_H
+
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diag_mux.h"
+
+#define DIAG_USB_LOCAL 0
+#define DIAG_USB_LOCAL_LAST 1
+#define DIAG_USB_BRIDGE_BASE DIAG_USB_LOCAL_LAST
+#define DIAG_USB_MDM (DIAG_USB_BRIDGE_BASE)
+#define DIAG_USB_MDM2 (DIAG_USB_BRIDGE_BASE + 1)
+#define DIAG_USB_QSC (DIAG_USB_BRIDGE_BASE + 2)
+#define DIAG_USB_BRIDGE_LAST (DIAG_USB_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_USB_DEV DIAG_USB_LOCAL_LAST
+#else
+#define NUM_DIAG_USB_DEV DIAG_USB_BRIDGE_LAST
+#endif
+
+#define DIAG_USB_NAME_SZ 24
+#define DIAG_USB_GET_NAME(x) (diag_usb[x].name)
+
+#define DIAG_USB_MODE 0
+
+struct diag_usb_buf_tbl_t {
+ struct list_head track;
+ unsigned char *buf;
+ uint32_t len;
+ atomic_t ref_count;
+ int ctxt;
+};
+
+struct diag_usb_info {
+ int id;
+ int ctxt;
+ char name[DIAG_USB_NAME_SZ];
+ atomic_t connected;
+ atomic_t diag_state;
+ atomic_t read_pending;
+ int enabled;
+ int mempool;
+ int max_size;
+ struct list_head buf_tbl;
+ unsigned long read_cnt;
+ unsigned long write_cnt;
+ spinlock_t lock;
+ spinlock_t write_lock;
+ struct usb_diag_ch *hdl;
+ struct diag_mux_ops *ops;
+ unsigned char *read_buf;
+ struct diag_request *read_ptr;
+ struct work_struct read_work;
+ struct work_struct read_done_work;
+ struct work_struct connect_work;
+ struct work_struct disconnect_work;
+ struct workqueue_struct *usb_wq;
+};
+
+#ifdef CONFIG_DIAG_OVER_USB
+extern struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV];
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops);
+int diag_usb_queue_read(int id);
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt);
+void diag_usb_connect_all(void);
+void diag_usb_disconnect_all(void);
+void diag_usb_exit(int id);
+#else
+/* Stubs are static inline so multiple includers don't clash at link time */
+static inline int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+	return 0;
+}
+static inline int diag_usb_queue_read(int id)
+{
+	return 0;
+}
+static inline int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	return 0;
+}
+static inline void diag_usb_connect_all(void)
+{
+}
+static inline void diag_usb_disconnect_all(void)
+{
+}
+static inline void diag_usb_exit(int id)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
new file mode 100644
index 000000000000..afea5f40bfee
--- /dev/null
+++ b/drivers/char/diag/diagchar.h
@@ -0,0 +1,690 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_H
+#define DIAGCHAR_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wakelock.h>
+#include <soc/qcom/smd.h>
+#include <asm/atomic.h>
+#include "diagfwd_bridge.h"
+
+#define THRESHOLD_CLIENT_LIMIT 50
+
+/* Size of the USB buffers used for read and write */
+#define USB_MAX_OUT_BUF 4096
+#define APPS_BUF_SIZE 4096
+#define IN_BUF_SIZE 16384
+#define MAX_SYNC_OBJ_NAME_SIZE 32
+
+#define DIAG_MAX_REQ_SIZE (16 * 1024)
+#define DIAG_MAX_RSP_SIZE (16 * 1024)
+#define APF_DIAG_PADDING 0
+/*
+ * In the worst case, the HDLC buffer can be at most twice the size of the
+ * original packet. Add 3 bytes for the 16-bit CRC (2 bytes) and a delimiter
+ * (1 byte), i.e. (16 * 1024 * 2) + 3 = 32771 bytes.
+ */
+#define DIAG_MAX_HDLC_BUF_SIZE ((DIAG_MAX_REQ_SIZE * 2) + 3)
+
+/* The header of callback data type has remote processor token (of type int) */
+#define CALLBACK_HDR_SIZE (sizeof(int))
+#define CALLBACK_BUF_SIZE (DIAG_MAX_REQ_SIZE + CALLBACK_HDR_SIZE)
+
+#define MAX_SSID_PER_RANGE 200
+
+#define ALL_PROC -1
+
+#define REMOTE_DATA 4
+
+#define USER_SPACE_DATA 16384
+
+#define DIAG_CTRL_MSG_LOG_MASK 9
+#define DIAG_CTRL_MSG_EVENT_MASK 10
+#define DIAG_CTRL_MSG_F3_MASK 11
+#define CONTROL_CHAR 0x7E
+
+#define DIAG_CON_APSS (0x0001) /* Bit mask for APSS */
+#define DIAG_CON_MPSS (0x0002) /* Bit mask for MPSS */
+#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
+#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
+#define DIAG_CON_SENSORS (0x0010) /* Bit mask for Sensors */
+#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
+#define DIAG_CON_CDSP (0x0040) /* Bit mask for CDSP */
+
+#define DIAG_CON_UPD_WLAN (0x1000) /*Bit mask for WLAN PD*/
+#define DIAG_CON_UPD_AUDIO (0x2000) /*Bit mask for AUDIO PD*/
+#define DIAG_CON_UPD_SENSORS (0x4000) /*Bit mask for SENSORS PD*/
+
+#define DIAG_CON_NONE (0x0000) /* Bit mask for No SS*/
+#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
+ | DIAG_CON_LPASS | DIAG_CON_WCNSS \
+ | DIAG_CON_SENSORS | DIAG_CON_WDSP \
+ | DIAG_CON_CDSP)
+#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN \
+ | DIAG_CON_UPD_AUDIO \
+ | DIAG_CON_UPD_SENSORS)
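Worked out by hand from the bit definitions above: DIAG_CON_ALL evaluates to 0x007F and DIAG_CON_UPD_ALL to 0x7000, so the widest peripheral mask that diag_mux_switch_logging() accepts is 0x707F.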
+
+#define DIAG_STM_MODEM 0x01
+#define DIAG_STM_LPASS 0x02
+#define DIAG_STM_WCNSS 0x04
+#define DIAG_STM_APPS 0x08
+#define DIAG_STM_SENSORS 0x10
+#define DIAG_STM_WDSP 0x20
+#define DIAG_STM_CDSP 0x40
+
+#define INVALID_PID -1
+#define DIAG_CMD_FOUND 1
+#define DIAG_CMD_NOT_FOUND 0
+#define DIAG_CMD_POLLING 1
+#define DIAG_CMD_NOT_POLLING 0
+#define DIAG_CMD_ADD 1
+#define DIAG_CMD_REMOVE 0
+
+#define DIAG_CMD_VERSION 0
+#define DIAG_CMD_ERROR 0x13
+#define DIAG_CMD_DOWNLOAD 0x3A
+#define DIAG_CMD_DIAG_SUBSYS 0x4B
+#define DIAG_CMD_LOG_CONFIG 0x73
+#define DIAG_CMD_LOG_ON_DMND 0x78
+#define DIAG_CMD_EXT_BUILD 0x7c
+#define DIAG_CMD_MSG_CONFIG 0x7D
+#define DIAG_CMD_GET_EVENT_MASK 0x81
+#define DIAG_CMD_SET_EVENT_MASK 0x82
+#define DIAG_CMD_EVENT_TOGGLE 0x60
+#define DIAG_CMD_NO_SUBSYS 0xFF
+#define DIAG_CMD_STATUS 0x0C
+#define DIAG_SS_WCDMA 0x04
+#define DIAG_CMD_QUERY_CALL 0x0E
+#define DIAG_SS_GSM 0x08
+#define DIAG_CMD_QUERY_TMC 0x02
+#define DIAG_SS_TDSCDMA 0x57
+#define DIAG_CMD_TDSCDMA_STATUS 0x0E
+#define DIAG_CMD_DIAG_SUBSYS_DELAY 0x80
+
+#define DIAG_SS_DIAG 0x12
+#define DIAG_SS_PARAMS 0x32
+#define DIAG_SS_FILE_READ_MODEM 0x0816
+#define DIAG_SS_FILE_READ_ADSP 0x0E10
+#define DIAG_SS_FILE_READ_WCNSS 0x141F
+#define DIAG_SS_FILE_READ_SLPI 0x01A18
+#define DIAG_SS_FILE_READ_APPS 0x020F
+
+#define DIAG_DIAG_MAX_PKT_SZ 0x55
+#define DIAG_DIAG_STM 0x214
+#define DIAG_DIAG_POLL 0x03
+#define DIAG_DEL_RSP_WRAP 0x04
+#define DIAG_DEL_RSP_WRAP_CNT 0x05
+#define DIAG_EXT_MOBILE_ID 0x06
+#define DIAG_GET_TIME_API 0x21B
+#define DIAG_SET_TIME_API 0x21C
+#define DIAG_SWITCH_COMMAND 0x081B
+#define DIAG_BUFFERING_MODE 0x080C
+
+#define DIAG_CMD_OP_LOG_DISABLE 0
+#define DIAG_CMD_OP_GET_LOG_RANGE 1
+#define DIAG_CMD_OP_SET_LOG_MASK 3
+#define DIAG_CMD_OP_GET_LOG_MASK 4
+
+#define DIAG_CMD_OP_GET_SSID_RANGE 1
+#define DIAG_CMD_OP_GET_BUILD_MASK 2
+#define DIAG_CMD_OP_GET_MSG_MASK 3
+#define DIAG_CMD_OP_SET_MSG_MASK 4
+#define DIAG_CMD_OP_SET_ALL_MSG_MASK 5
+
+#define DIAG_CMD_OP_GET_MSG_ALLOC 0x33
+#define DIAG_CMD_OP_GET_MSG_DROP 0x30
+#define DIAG_CMD_OP_RESET_MSG_STATS 0x2F
+#define DIAG_CMD_OP_GET_LOG_ALLOC 0x31
+#define DIAG_CMD_OP_GET_LOG_DROP 0x2C
+#define DIAG_CMD_OP_RESET_LOG_STATS 0x2B
+#define DIAG_CMD_OP_GET_EVENT_ALLOC 0x32
+#define DIAG_CMD_OP_GET_EVENT_DROP 0x2E
+#define DIAG_CMD_OP_RESET_EVENT_STATS 0x2D
+
+#define DIAG_CMD_OP_HDLC_DISABLE 0x218
+
+#define BAD_PARAM_RESPONSE_MESSAGE 20
+
+#define PERSIST_TIME_SUCCESS 0
+#define PERSIST_TIME_FAILURE 1
+#define PERSIST_TIME_NOT_SUPPORTED 2
+
+#define MODE_CMD 41
+#define RESET_ID 2
+
+#define PKT_DROP 0
+#define PKT_ALLOC 1
+#define PKT_RESET 2
+
+#define FEATURE_MASK_LEN 4
+
+#define DIAG_MD_NONE 0
+#define DIAG_MD_PERIPHERAL 1
+
+/*
+ * The status bit masks when received in a signal handler are to be
+ * used in conjunction with the peripheral list bit mask to determine the
+ * status for a peripheral. For instance, 0x00010002 would denote an open
+ * status on the MPSS
+ */
+#define DIAG_STATUS_OPEN (0x00010000) /* DCI channel open status mask */
+#define DIAG_STATUS_CLOSED (0x00020000) /* DCI channel closed status mask */
+
+#define MODE_NONREALTIME 0
+#define MODE_REALTIME 1
+#define MODE_UNKNOWN 2
+
+#define DIAG_BUFFERING_MODE_STREAMING 0
+#define DIAG_BUFFERING_MODE_THRESHOLD 1
+#define DIAG_BUFFERING_MODE_CIRCULAR 2
+
+#define DIAG_MIN_WM_VAL 0
+#define DIAG_MAX_WM_VAL 100
+
+#define DEFAULT_LOW_WM_VAL 15
+#define DEFAULT_HIGH_WM_VAL 85
+
+#define TYPE_DATA 0
+#define TYPE_CNTL 1
+#define TYPE_DCI 2
+#define TYPE_CMD 3
+#define TYPE_DCI_CMD 4
+#define NUM_TYPES 5
+
+#define PERIPHERAL_MODEM 0
+#define PERIPHERAL_LPASS 1
+#define PERIPHERAL_WCNSS 2
+#define PERIPHERAL_SENSORS 3
+#define PERIPHERAL_WDSP 4
+#define PERIPHERAL_CDSP 5
+#define NUM_PERIPHERALS 6
+#define APPS_DATA (NUM_PERIPHERALS)
+
+#define UPD_WLAN 7
+#define UPD_AUDIO 8
+#define UPD_SENSORS 9
+#define NUM_UPD 3
+
+#define DIAG_ID_APPS 1
+#define DIAG_ID_MPSS 2
+#define DIAG_ID_WLAN 3
+#define DIAG_ID_LPASS 4
+#define DIAG_ID_CDSP 5
+#define DIAG_ID_AUDIO 6
+#define DIAG_ID_SENSORS 7
+
+/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
+#define NUM_MD_SESSIONS (NUM_PERIPHERALS \
+ + NUM_UPD + 1)
+
+#define MD_PERIPHERAL_MASK(x) (1 << x)
+
+#define MD_PERIPHERAL_PD_MASK(x) \
+ ((x == PERIPHERAL_MODEM) ? (1 << UPD_WLAN) : \
+	((x == PERIPHERAL_LPASS) ? (1 << UPD_AUDIO | 1 << UPD_SENSORS) : 0))
+
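To illustrate the two mask macros (a standalone sketch; the constants mirror the definitions above):

	#include <stdio.h>

	#define PERIPHERAL_MODEM	0
	#define PERIPHERAL_LPASS	1
	#define UPD_WLAN		7
	#define UPD_AUDIO		8
	#define UPD_SENSORS		9
	#define MD_PERIPHERAL_MASK(x)	(1 << x)
	#define MD_PERIPHERAL_PD_MASK(x) \
		((x == PERIPHERAL_MODEM) ? (1 << UPD_WLAN) : \
		((x == PERIPHERAL_LPASS) ? (1 << UPD_AUDIO | 1 << UPD_SENSORS) : 0))

	int main(void)
	{
		/* LPASS itself is bit 1; its user PDs are bits 8 and 9 */
		printf("peripheral: 0x%x pd: 0x%x\n",
		       MD_PERIPHERAL_MASK(PERIPHERAL_LPASS),
		       MD_PERIPHERAL_PD_MASK(PERIPHERAL_LPASS));
		/* prints "peripheral: 0x2 pd: 0x300" */
		return 0;
	}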
+/*
+ * The number of STM processors includes all the peripherals and the
+ * apps processor. The + 1 below accounts for apps.
+ */
+#define NUM_STM_PROCESSORS (NUM_PERIPHERALS + 1)
+/*
+ * Number of processors that can support DCI: all peripherals plus
+ * the Apps processor. This doesn't mean that every peripheral has
+ * the feature.
+ */
+#define NUM_DCI_PERIPHERALS (NUM_PERIPHERALS + 1)
+
+#define DIAG_PROC_DCI 1
+#define DIAG_PROC_MEMORY_DEVICE 2
+
+/*
+ * Flags to vote the DCI or Memory device process up or down
+ * when it becomes active or inactive
+ */
+#define VOTE_DOWN 0
+#define VOTE_UP 1
+
+#define DIAG_TS_SIZE 50
+
+#define DIAG_MDM_BUF_SIZE 2048
+/* The maximum request size is 2k + DCI header and footer (6 bytes) */
+#define DIAG_MDM_DCI_BUF_SIZE (2048 + 6)
+
+#define DIAG_LOCAL_PROC 0
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Local Processor only */
+#define DIAG_NUM_PROC 1
+#else
+/* Local Processor + Remote Devices */
+#define DIAG_NUM_PROC (1 + NUM_REMOTE_DEV)
+#endif
+
+#define DIAG_WS_DCI 0
+#define DIAG_WS_MUX 1
+
+#define DIAG_DATA_TYPE 1
+#define DIAG_CNTL_TYPE 2
+#define DIAG_DCI_TYPE 3
+
+/* List of remote processor supported */
+enum remote_procs {
+ MDM = 1,
+ MDM2 = 2,
+ QSC = 5,
+};
+
+struct diag_pkt_header_t {
+ uint8_t cmd_code;
+ uint8_t subsys_id;
+ uint16_t subsys_cmd_code;
+} __packed;
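As an illustration of how subsystem commands are addressed with this header (a sketch; the values are the DIAG_CMD_DIAG_SUBSYS, DIAG_SS_PARAMS and DIAG_DIAG_STM defines above):

	#include <stdint.h>
	#include <stdio.h>

	struct diag_pkt_header_t {
		uint8_t cmd_code;
		uint8_t subsys_id;
		uint16_t subsys_cmd_code;
	} __attribute__((packed));

	int main(void)
	{
		struct diag_pkt_header_t hdr = {
			.cmd_code = 0x4B,		/* DIAG_CMD_DIAG_SUBSYS */
			.subsys_id = 0x32,		/* DIAG_SS_PARAMS */
			.subsys_cmd_code = 0x214,	/* DIAG_DIAG_STM */
		};

		printf("header is %zu bytes\n", sizeof(hdr));	/* prints 4 */
		return 0;
	}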
+
+struct diag_cmd_ext_mobile_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t padding[3];
+ uint32_t family;
+ uint32_t chip_id;
+} __packed;
+
+struct diag_cmd_time_sync_query_req_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+};
+
+struct diag_cmd_time_sync_query_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t time_api;
+};
+
+struct diag_cmd_time_sync_switch_req_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t time_api;
+ uint8_t persist_time;
+};
+
+struct diag_cmd_time_sync_switch_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t time_api;
+ uint8_t time_api_status;
+ uint8_t persist_time_status;
+};
+
+struct diag_cmd_reg_entry_t {
+ uint16_t cmd_code;
+ uint16_t subsys_id;
+ uint16_t cmd_code_lo;
+ uint16_t cmd_code_hi;
+} __packed;
+
+struct diag_cmd_reg_t {
+ struct list_head link;
+ struct diag_cmd_reg_entry_t entry;
+ uint8_t proc;
+ int pid;
+};
+
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @entries: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_t {
+ char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+ uint32_t count;
+ struct diag_cmd_reg_entry_t *entries;
+};
+
+struct diag_client_map {
+ char name[20];
+ int pid;
+};
+
+struct real_time_vote_t {
+ int client_id;
+ uint16_t proc;
+ uint8_t real_time_vote;
+} __packed;
+
+struct real_time_query_t {
+ int real_time;
+ int proc;
+} __packed;
+
+struct diag_buffering_mode_t {
+ uint8_t peripheral;
+ uint8_t mode;
+ uint8_t high_wm_val;
+ uint8_t low_wm_val;
+} __packed;
+
+struct diag_callback_reg_t {
+ int proc;
+} __packed;
+
+struct diag_ws_ref_t {
+ int ref_count;
+ int copy_count;
+ spinlock_t lock;
+};
+
+/* This structure is defined in the USB header file */
+#ifndef CONFIG_DIAG_OVER_USB
+struct diag_request {
+ char *buf;
+ int length;
+ int actual;
+ int status;
+ void *context;
+};
+#endif
+
+struct diag_pkt_stats_t {
+ uint32_t alloc_count;
+ uint32_t drop_count;
+};
+
+struct diag_cmd_stats_rsp_t {
+ struct diag_pkt_header_t header;
+ uint32_t payload;
+};
+
+struct diag_cmd_hdlc_disable_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t framing_version;
+ uint8_t result;
+};
+
+struct diag_pkt_frame_t {
+ uint8_t start;
+ uint8_t version;
+ uint16_t length;
+};
+
+struct diag_partial_pkt_t {
+ uint32_t total_len;
+ uint32_t read_len;
+ uint32_t remaining;
+ uint32_t capacity;
+ uint8_t processing;
+ unsigned char *data;
+} __packed;
+
+struct diag_logging_mode_param_t {
+ uint32_t req_mode;
+ uint32_t peripheral_mask;
+ uint32_t pd_mask;
+ uint8_t mode_param;
+} __packed;
+
+struct diag_md_session_t {
+ int pid;
+ int peripheral_mask;
+ uint8_t hdlc_disabled;
+ struct timer_list hdlc_reset_timer;
+ struct diag_mask_info *msg_mask;
+ struct diag_mask_info *log_mask;
+ struct diag_mask_info *event_mask;
+ struct thread_info *md_client_thread_info;
+ struct task_struct *task;
+};
+
+/*
+ * High level structure for storing Diag masks.
+ *
+ * @ptr: Pointer to the buffer that stores the masks
+ * @mask_len: Length of the buffer pointed to by ptr
+ * @update_buf: Buffer for performing mask updates to peripherals
+ * @update_buf_len: Length of the buffer pointed to by update_buf
+ * @status: Status of the mask - all enabled, disabled, or valid
+ * @lock: To protect access to the mask variables
+ */
+struct diag_mask_info {
+ uint8_t *ptr;
+ int mask_len;
+ uint8_t *update_buf;
+ int update_buf_len;
+ uint8_t status;
+ struct mutex lock;
+};
+
+struct diag_md_proc_info {
+ int pid;
+ struct task_struct *socket_process;
+ struct task_struct *callback_process;
+ struct task_struct *mdlog_process;
+};
+
+struct diag_feature_t {
+ uint8_t feature_mask[FEATURE_MASK_LEN];
+ uint8_t rcvd_feature_mask;
+ uint8_t log_on_demand;
+ uint8_t separate_cmd_rsp;
+ uint8_t encode_hdlc;
+ uint8_t untag_header;
+ uint8_t peripheral_buffering;
+ uint8_t pd_buffering;
+ uint8_t mask_centralization;
+ uint8_t stm_support;
+ uint8_t sockets_enabled;
+ uint8_t sent_feature_mask;
+};
+
+struct diagchar_dev {
+
+ /* State for the char driver */
+ unsigned int major;
+ unsigned int minor_start;
+ int num;
+ struct cdev *cdev;
+ char *name;
+ struct class *diagchar_class;
+ struct device *diag_dev;
+ int ref_count;
+ int mask_clear;
+ struct mutex diag_maskclear_mutex;
+ struct mutex diag_notifier_mutex;
+ struct mutex diagchar_mutex;
+ struct mutex diag_file_mutex;
+ wait_queue_head_t wait_q;
+ struct diag_client_map *client_map;
+ int *data_ready;
+ atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
+ int num_clients;
+ int polling_reg_flag;
+ int use_device_tree;
+ int supports_separate_cmdrsp;
+ int supports_apps_hdlc_encoding;
+ int supports_apps_header_untagging;
+ int supports_pd_buffering;
+ int peripheral_untag[NUM_PERIPHERALS];
+ int supports_sockets;
+ /* The state requested in the STM command */
+ int stm_state_requested[NUM_STM_PROCESSORS];
+ /* The current STM state */
+ int stm_state[NUM_STM_PROCESSORS];
+ uint16_t stm_peripheral;
+ struct work_struct stm_update_work;
+ uint16_t mask_update;
+ struct work_struct mask_update_work;
+ uint16_t close_transport;
+ struct work_struct close_transport_work;
+ struct workqueue_struct *cntl_wq;
+ struct mutex cntl_lock;
+ /* Whether or not the peripheral supports STM */
+ /* Delayed response Variables */
+ uint16_t delayed_rsp_id;
+ struct mutex delayed_rsp_mutex;
+ /* DCI related variables */
+ struct list_head dci_req_list;
+ struct list_head dci_client_list;
+ int dci_tag;
+ int dci_client_id;
+ struct mutex dci_mutex;
+ int num_dci_client;
+ unsigned char *apps_dci_buf;
+ int dci_state;
+ struct workqueue_struct *diag_dci_wq;
+ struct list_head cmd_reg_list;
+ struct mutex cmd_reg_mutex;
+ uint32_t cmd_reg_count;
+ struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
+ /* Sizes that reflect memory pool sizes */
+ unsigned int poolsize;
+ unsigned int poolsize_hdlc;
+ unsigned int poolsize_dci;
+ unsigned int poolsize_user;
+ /* Buffers for masks */
+ struct mutex diag_cntl_mutex;
+ /* Members for Sending response */
+ unsigned char *encoded_rsp_buf;
+ int encoded_rsp_len;
+ uint8_t rsp_buf_busy;
+ spinlock_t rsp_buf_busy_lock;
+ int rsp_buf_ctxt;
+ struct diagfwd_info *diagfwd_data[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_cntl[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_dci[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
+ struct diag_feature_t feature[NUM_PERIPHERALS];
+ struct diag_buffering_mode_t buffering_mode[NUM_MD_SESSIONS];
+ uint8_t buffering_flag[NUM_MD_SESSIONS];
+ struct mutex mode_lock;
+ unsigned char *user_space_data_buf;
+ uint8_t user_space_data_busy;
+ struct diag_pkt_stats_t msg_stats;
+ struct diag_pkt_stats_t log_stats;
+ struct diag_pkt_stats_t event_stats;
+ /* buffer for updating mask to peripherals */
+ unsigned char *buf_feature_mask_update;
+ uint8_t hdlc_disabled;
+ uint8_t p_hdlc_disabled[NUM_MD_SESSIONS];
+ struct mutex hdlc_disable_mutex;
+ struct mutex hdlc_recovery_mutex;
+ struct timer_list hdlc_reset_timer;
+ struct mutex diag_hdlc_mutex;
+ unsigned char *hdlc_buf;
+ uint32_t hdlc_buf_len;
+ unsigned char *apps_rsp_buf;
+ struct diag_partial_pkt_t incoming_pkt;
+ int in_busy_pktdata;
+ /* Variables for non real time mode */
+ int real_time_mode[DIAG_NUM_PROC];
+ int real_time_update_busy;
+ uint16_t proc_active_mask;
+ uint16_t proc_rt_vote_mask[DIAG_NUM_PROC];
+ struct mutex real_time_mutex;
+ struct work_struct diag_real_time_work;
+ struct workqueue_struct *diag_real_time_wq;
+#ifdef CONFIG_DIAG_OVER_USB
+ int usb_connected;
+#endif
+ struct workqueue_struct *diag_wq;
+ struct work_struct diag_drain_work;
+ struct work_struct update_user_clients;
+ struct work_struct update_md_clients;
+ struct work_struct diag_hdlc_reset_work;
+ struct workqueue_struct *diag_cntl_wq;
+ uint8_t log_on_demand_support;
+ uint8_t *apps_req_buf;
+ uint32_t apps_req_buf_len;
+ uint8_t *dci_pkt_buf; /* For Apps DCI packets */
+ uint32_t dci_pkt_length;
+ int in_busy_dcipktdata;
+ int logging_mode;
+ int logging_mask;
+ int pd_logging_mode[NUM_UPD];
+ int pd_session_clear[NUM_UPD];
+ int num_pd_session;
+ int mask_check;
+ uint32_t md_session_mask;
+ uint8_t md_session_mode;
+ struct diag_md_session_t *md_session_map[NUM_MD_SESSIONS];
+ struct mutex md_session_lock;
+ /* Power related variables */
+ struct diag_ws_ref_t dci_ws;
+ struct diag_ws_ref_t md_ws;
+ /* Pointers to Diag Masks */
+ struct diag_mask_info *msg_mask;
+ struct diag_mask_info *log_mask;
+ struct diag_mask_info *event_mask;
+ struct diag_mask_info *build_time_mask;
+ uint8_t msg_mask_tbl_count;
+ uint8_t bt_msg_mask_tbl_count;
+ uint16_t event_mask_size;
+ uint16_t last_event_id;
+ struct mutex msg_mask_lock;
+ /* Variables for Mask Centralization */
+ uint16_t num_event_id[NUM_PERIPHERALS];
+ uint32_t num_equip_id[NUM_PERIPHERALS];
+ uint32_t max_ssid_count[NUM_PERIPHERALS];
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ /* For sending command requests in callback mode */
+ unsigned char *hdlc_encode_buf;
+ int hdlc_encode_buf_len;
+#endif
+ int time_sync_enabled;
+ uint8_t uses_time_api;
+ struct platform_device *pdev;
+};
+
+extern struct diagchar_dev *driver;
+
+extern int wrap_enabled;
+extern uint16_t wrap_count;
+
+void diag_get_timestamp(char *time_str);
+void check_drain_timer(void);
+int diag_get_remote(int remote_info);
+
+void diag_ws_init(void);
+void diag_ws_on_notify(void);
+void diag_ws_on_read(int type, int pkt_len);
+void diag_ws_on_copy(int type);
+void diag_ws_on_copy_fail(int type);
+void diag_ws_on_copy_complete(int type);
+void diag_ws_reset(int type);
+void diag_ws_release(void);
+void chk_logging_wakeup(void);
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+ int pid);
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+ struct diag_cmd_reg_entry_t *entry,
+ int proc);
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc);
+void diag_cmd_remove_reg_by_pid(int pid);
+void diag_cmd_remove_reg_by_proc(int proc);
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+int diag_mask_param(void);
+void diag_clear_masks(int pid);
+uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask);
+
+void diag_record_stats(int type, int flag);
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid);
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral);
+int diag_md_session_match_pid_peripheral(int pid, uint8_t peripheral);
+
+#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
new file mode 100644
index 000000000000..2bac98117c03
--- /dev/null
+++ b/drivers/char/diag/diagchar_core.c
@@ -0,0 +1,3963 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/msm_mhi.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <asm/current.h>
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_debugfs.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_peripheral.h"
+
+#include <linux/coresight-stm.h>
+#include <linux/kernel.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+MODULE_DESCRIPTION("Diag Char Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+
+#define MIN_SIZ_ALLOW 4
+#define INIT 1
+#define EXIT -1
+struct diagchar_dev *driver;
+struct diagchar_priv {
+ int pid;
+};
+
+#define USER_SPACE_RAW_DATA 0
+#define USER_SPACE_HDLC_DATA 1
+
+/* Memory pool variables */
+/* Used for copying any incoming packet from user space clients. */
+static unsigned int poolsize = 12;
+module_param(poolsize, uint, 0);
+
+/*
+ * Used for HDLC encoding packets coming from the user
+ * space.
+ */
+static unsigned int poolsize_hdlc = 10;
+module_param(poolsize_hdlc, uint, 0);
+
+/*
+ * This is used for incoming DCI requests from the user space clients.
+ * Don't expose itemsize as it is internal.
+ */
+static unsigned int poolsize_user = 8;
+module_param(poolsize_user, uint, 0);
+
+/*
+ * USB structures allocated for writing Diag data generated on the Apps to USB.
+ * Don't expose itemsize as it is constant.
+ */
+static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
+static unsigned int poolsize_usb_apps = 10;
+module_param(poolsize_usb_apps, uint, 0);
+
+/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
+static unsigned int poolsize_dci = 10;
+module_param(poolsize_dci, uint, 0);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Used for reading data from the remote device. */
+static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm = 18;
+module_param(itemsize_mdm, uint, 0);
+module_param(poolsize_mdm, uint, 0);
+
+/*
+ * Used for reading DCI data from the remote device.
+ * Don't expose poolsize for DCI data. There is only one read buffer.
+ */
+static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm_dci = 1;
+module_param(itemsize_mdm_dci, uint, 0);
+
+/*
+ * Used for USB structues associated with a remote device.
+ * Don't expose the itemsize since it is constant.
+ */
+static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
+static unsigned int poolsize_mdm_usb = 18;
+module_param(poolsize_mdm_usb, uint, 0);
+
+/*
+ * Used for writing DCI data to remote peripherals. Don't
+ * expose poolsize for DCI data; there is only one such
+ * buffer. Add 6 bytes for the DCI header: Start (1),
+ * Version (1), Length (2), Tag (2).
+ */
+static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
+static unsigned int poolsize_mdm_dci_write = 1;
+module_param(itemsize_mdm_dci_write, uint, 0);
+
+/*
+ * Used for USB structures associated with a remote SMUX
+ * device Don't expose the itemsize since it is constant
+ */
+static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
+static unsigned int poolsize_qsc_usb = 8;
+module_param(poolsize_qsc_usb, uint, 0);
+#endif
+
+/* This is the max number of user-space clients supported at initialization */
+static unsigned int max_clients = 15;
+module_param(max_clients, uint, 0);
+
+/* Timer variables */
+static struct timer_list drain_timer;
+static int timer_in_progress;
+
+/*
+ * Diag mask clear variable, used for clearing the masks upon
+ * USB disconnection and when ODL is stopped.
+ */
+static int diag_mask_clear_param = 1;
+module_param(diag_mask_clear_param, int, 0644);
+
+struct diag_apps_data_t {
+ void *buf;
+ uint32_t len;
+ int ctxt;
+};
+
+static struct diag_apps_data_t hdlc_data;
+static struct diag_apps_data_t non_hdlc_data;
+static struct mutex apps_data_mutex;
+
+#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
+
+#ifdef CONFIG_IPC_LOGGING
+uint16_t diag_debug_mask;
+void *diag_ipc_log;
+#endif
+
+static void diag_md_session_close(int pid);
+
+/*
+ * Returns the next delayed rsp id. If wrapping is enabled, the id wraps
+ * back to 1 after DIAGPKT_MAX_DELAYED_RSP; otherwise it saturates there.
+ */
+static uint16_t diag_get_next_delayed_rsp_id(void)
+{
+ uint16_t rsp_id = 0;
+
+ mutex_lock(&driver->delayed_rsp_mutex);
+ rsp_id = driver->delayed_rsp_id;
+ if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
+ rsp_id++;
+ else {
+ if (wrap_enabled) {
+ rsp_id = 1;
+ wrap_count++;
+ } else
+ rsp_id = DIAGPKT_MAX_DELAYED_RSP;
+ }
+ driver->delayed_rsp_id = rsp_id;
+ mutex_unlock(&driver->delayed_rsp_mutex);
+
+ return rsp_id;
+}
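A standalone model of the wrap behaviour above (a sketch with the locking removed for brevity; wrap_enabled, wrap_count and the constant mirror the driver's):

	#include <stdio.h>

	#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF

	static int wrap_enabled = 1;
	static unsigned int wrap_count;
	static unsigned int delayed_rsp_id = DIAGPKT_MAX_DELAYED_RSP;

	static unsigned int next_delayed_rsp_id(void)
	{
		if (delayed_rsp_id < DIAGPKT_MAX_DELAYED_RSP)
			delayed_rsp_id++;
		else if (wrap_enabled) {
			delayed_rsp_id = 1;
			wrap_count++;
		}
		return delayed_rsp_id;
	}

	int main(void)
	{
		/* At the maximum, the next id wraps back to 1 and bumps wrap_count */
		printf("id=%u wrap_count=%u\n", next_delayed_rsp_id(), wrap_count);
		return 0;
	}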
+
+static int diag_switch_logging(struct diag_logging_mode_param_t *param);
+
+#define COPY_USER_SPACE_OR_EXIT(buf, data, length) \
+do { \
+ if ((count < ret+length) || (copy_to_user(buf, \
+ (void *)&data, length))) { \
+ ret = -EFAULT; \
+ goto exit; \
+ } \
+ ret += length; \
+} while (0)
+
+#define COPY_USER_SPACE_OR_ERR(buf, data, length) \
+do { \
+ if ((count < ret+length) || (copy_to_user(buf, \
+ (void *)&data, length))) { \
+ ret = -EFAULT; \
+ break; \
+ } \
+ ret += length; \
+} while (0)
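These helpers are intended for read()-style handlers. A hypothetical fragment follows (buf, count, ret and the exit label are the names the macros expect; this is not a function from the driver):

	static ssize_t example_read(char __user *buf, size_t count)
	{
		int ret = 0;
		int data_type = 1;	/* hypothetical payload */

		/* Copies the type header and advances ret, or jumps to exit */
		COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
		/* ... copy the payload next, writing at buf + ret ... */
	exit:
		return ret;
	}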
+
+static void drain_timer_func(unsigned long data)
+{
+	queue_work(driver->diag_wq, &driver->diag_drain_work);
+}
+
+static void diag_drain_apps_data(struct diag_apps_data_t *data)
+{
+ int err = 0;
+
+ if (!data || !data->buf)
+ return;
+
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err)
+ diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+
+ data->buf = NULL;
+ data->len = 0;
+}
+
+void diag_update_user_client_work_fn(struct work_struct *work)
+{
+ diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
+}
+
+static void diag_update_md_client_work_fn(struct work_struct *work)
+{
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+}
+
+void diag_drain_work_fn(struct work_struct *work)
+{
+ uint8_t hdlc_disabled = 0;
+
+ timer_in_progress = 0;
+ mutex_lock(&apps_data_mutex);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ if (!hdlc_disabled)
+ diag_drain_apps_data(&hdlc_data);
+ else
+ diag_drain_apps_data(&non_hdlc_data);
+ mutex_unlock(&apps_data_mutex);
+}
+
+void check_drain_timer(void)
+{
+ int ret = 0;
+
+ if (!timer_in_progress) {
+ timer_in_progress = 1;
+ ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
+ }
+}
+
+void diag_add_client(int i, struct file *file)
+{
+ struct diagchar_priv *diagpriv_data;
+
+ driver->client_map[i].pid = current->tgid;
+ diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
+ GFP_KERNEL);
+ if (diagpriv_data)
+ diagpriv_data->pid = current->tgid;
+ file->private_data = diagpriv_data;
+ strlcpy(driver->client_map[i].name, current->comm, 20);
+ driver->client_map[i].name[19] = '\0';
+}
+
+static void diag_mempool_init(void)
+{
+ uint32_t itemsize = DIAG_MAX_REQ_SIZE;
+ uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
+ uint32_t itemsize_dci = IN_BUF_SIZE;
+ uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
+
+ itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
+ CALLBACK_HDR_SIZE);
+ diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
+ diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
+ diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
+ diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
+
+ diagmem_init(driver, POOL_TYPE_COPY);
+ diagmem_init(driver, POOL_TYPE_HDLC);
+ diagmem_init(driver, POOL_TYPE_USER);
+ diagmem_init(driver, POOL_TYPE_DCI);
+}
+
+static void diag_mempool_exit(void)
+{
+ diagmem_exit(driver, POOL_TYPE_COPY);
+ diagmem_exit(driver, POOL_TYPE_HDLC);
+ diagmem_exit(driver, POOL_TYPE_USER);
+ diagmem_exit(driver, POOL_TYPE_DCI);
+}
+
+static int diagchar_open(struct inode *inode, struct file *file)
+{
+ int i = 0;
+ void *temp;
+
+ if (driver) {
+ mutex_lock(&driver->diagchar_mutex);
+
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == 0)
+ break;
+
+ if (i < driver->num_clients) {
+ diag_add_client(i, file);
+ } else {
+ if (i < THRESHOLD_CLIENT_LIMIT) {
+ driver->num_clients++;
+				temp = krealloc(driver->client_map,
+					driver->num_clients *
+					sizeof(struct diag_client_map),
+					GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				driver->client_map = temp;
+				temp = krealloc(driver->data_ready,
+					driver->num_clients * sizeof(int),
+					GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				driver->data_ready = temp;
+ diag_add_client(i, file);
+ } else {
+ mutex_unlock(&driver->diagchar_mutex);
+ pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
+				pr_err_ratelimited("diag: Cannot open handle %s %d\n",
+						   current->comm, current->tgid);
+				for (i = 0; i < driver->num_clients; i++)
+					pr_debug("%d) %s PID=%d\n", i,
+						 driver->client_map[i].name,
+						 driver->client_map[i].pid);
+ return -ENOMEM;
+ }
+ }
+ driver->data_ready[i] = 0x0;
+ atomic_set(&driver->data_ready_notif[i], 0);
+ driver->data_ready[i] |= MSG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+ driver->data_ready[i] |= EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+ driver->data_ready[i] |= LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+ driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+ driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+
+ if (driver->ref_count == 0)
+ diag_mempool_init();
+ driver->ref_count++;
+ mutex_unlock(&driver->diagchar_mutex);
+ return 0;
+ }
+ return -ENOMEM;
+
+fail:
+ driver->num_clients--;
+ mutex_unlock(&driver->diagchar_mutex);
+	pr_err_ratelimited("diag: Insufficient memory for new client\n");
+ return -ENOMEM;
+}
+
+static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
+{
+ uint32_t ret = 0;
+
+ if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
+ ret |= DIAG_CON_APSS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
+ ret |= DIAG_CON_MPSS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
+ ret |= DIAG_CON_LPASS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
+ ret |= DIAG_CON_WCNSS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
+ ret |= DIAG_CON_SENSORS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
+ ret |= DIAG_CON_WDSP;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
+ ret |= DIAG_CON_CDSP;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
+ ret |= DIAG_CON_UPD_WLAN;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_AUDIO))
+ ret |= DIAG_CON_UPD_AUDIO;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_SENSORS))
+ ret |= DIAG_CON_UPD_SENSORS;
+ return ret;
+}
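For a worked example of the translation above: a kernel mask of (1 << PERIPHERAL_MODEM) | (1 << UPD_WLAN) = 0x81 maps to DIAG_CON_MPSS | DIAG_CON_UPD_WLAN = 0x1002, using the bit positions defined in diagchar.h.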
+
+uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask)
+{
+ uint8_t upd = 0;
+ uint32_t pd_mask = 0;
+
+ pd_mask = diag_translate_kernel_to_user_mask(peripheral_mask);
+ switch (pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ upd = UPD_WLAN;
+ break;
+ case DIAG_CON_UPD_AUDIO:
+ upd = UPD_AUDIO;
+ break;
+ case DIAG_CON_UPD_SENSORS:
+ upd = UPD_SENSORS;
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with no pd mask set\n");
+ }
+ return upd;
+}
+
+int diag_mask_param(void)
+{
+ return diag_mask_clear_param;
+}
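+
+/*
+ * Disable the masks on behalf of the given process. The byte arrays
+ * below are raw diag commands: 0x73 is the log mask configuration
+ * command, 0x7D/0x05 sets the extended message masks, and 0x60 is the
+ * event report control command.
+ */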
+void diag_clear_masks(int pid)
+{
+ int ret;
+ char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
+ char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
+ char cmd_disable_event_mask[] = { 0x60, 0};
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: masks clear request upon %s\n", __func__,
+ ((pid) ? "ODL exit" : "USB Disconnection"));
+
+ ret = diag_process_apps_masks(cmd_disable_log_mask,
+ sizeof(cmd_disable_log_mask), pid);
+ ret = diag_process_apps_masks(cmd_disable_msg_mask,
+ sizeof(cmd_disable_msg_mask), pid);
+ ret = diag_process_apps_masks(cmd_disable_event_mask,
+ sizeof(cmd_disable_event_mask), pid);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:%s: masks cleared successfully\n", __func__);
+}
+
+static void diag_close_logging_process(const int pid)
+{
+ int i, j;
+ int session_mask;
+ uint32_t p_mask;
+ struct diag_md_session_t *session_info = NULL;
+ struct diag_logging_mode_param_t params;
+
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(pid);
+ if (!session_info) {
+ mutex_unlock(&driver->md_session_lock);
+ return;
+ }
+ session_mask = session_info->peripheral_mask;
+ mutex_unlock(&driver->md_session_lock);
+
+ if (diag_mask_clear_param)
+ diag_clear_masks(pid);
+
+ mutex_lock(&driver->diag_maskclear_mutex);
+ driver->mask_clear = 1;
+ mutex_unlock(&driver->diag_maskclear_mutex);
+
+ mutex_lock(&driver->diagchar_mutex);
+
+ p_mask =
+ diag_translate_kernel_to_user_mask(session_mask);
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++)
+ if (MD_PERIPHERAL_MASK(i) & session_mask)
+ diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
+
+ params.req_mode = USB_MODE;
+ params.mode_param = 0;
+ params.pd_mask = 0;
+ params.peripheral_mask = p_mask;
+
+ if (driver->num_pd_session > 0) {
+ for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
+ if (session_mask & MD_PERIPHERAL_MASK(i)) {
+ j = i - UPD_WLAN;
+ driver->pd_session_clear[j] = 1;
+ driver->pd_logging_mode[j] = 0;
+ driver->num_pd_session -= 1;
+ params.pd_mask = p_mask;
+ }
+ }
+ }
+ mutex_lock(&driver->md_session_lock);
+ diag_md_session_close(pid);
+ mutex_unlock(&driver->md_session_lock);
+ diag_switch_logging(&params);
+
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+static int diag_remove_client_entry(struct file *file)
+{
+ int i = -1;
+ struct diagchar_priv *diagpriv_data = NULL;
+ struct diag_dci_client_tbl *dci_entry = NULL;
+
+ if (!driver)
+ return -ENOMEM;
+
+ mutex_lock(&driver->diag_file_mutex);
+ if (!file) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
+ mutex_unlock(&driver->diag_file_mutex);
+ return -ENOENT;
+ }
+ if (!(file->private_data)) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
+ mutex_unlock(&driver->diag_file_mutex);
+ return -EINVAL;
+ }
+
+ diagpriv_data = file->private_data;
+
+ /*
+ * Clean up any DCI registrations if this is a DCI client. This is
+ * especially important for an ungraceful exit of a DCI client, as
+ * it removes any registrations still pending for that client.
+ */
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = dci_lookup_client_entry_pid(current->tgid);
+ if (dci_entry)
+ diag_dci_deinit_client(dci_entry);
+ mutex_unlock(&driver->dci_mutex);
+
+ diag_close_logging_process(current->tgid);
+
+ /* Delete the pkt response table entry for the exiting process */
+ diag_cmd_remove_reg_by_pid(current->tgid);
+
+ mutex_lock(&driver->diagchar_mutex);
+ driver->ref_count--;
+ if (driver->ref_count == 0)
+ diag_mempool_exit();
+
+ for (i = 0; i < driver->num_clients; i++) {
+ if (NULL != diagpriv_data && diagpriv_data->pid ==
+ driver->client_map[i].pid) {
+ driver->client_map[i].pid = 0;
+ kfree(diagpriv_data);
+ diagpriv_data = NULL;
+ file->private_data = 0;
+ break;
+ }
+ }
+ mutex_unlock(&driver->diagchar_mutex);
+ mutex_unlock(&driver->diag_file_mutex);
+ return 0;
+}
+
+static int diagchar_close(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
+ current->comm);
+ ret = diag_remove_client_entry(file);
+ mutex_lock(&driver->diag_maskclear_mutex);
+ driver->mask_clear = 0;
+ mutex_unlock(&driver->diag_maskclear_mutex);
+ return ret;
+}
+
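+/*
+ * Book-keeping for per-type packet statistics: PKT_ALLOC and PKT_DROP
+ * bump the allocation/drop counters for events, F3 messages, and logs,
+ * while PKT_RESET clears both counters.
+ */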
+void diag_record_stats(int type, int flag)
+{
+ struct diag_pkt_stats_t *pkt_stats = NULL;
+
+ switch (type) {
+ case DATA_TYPE_EVENT:
+ pkt_stats = &driver->event_stats;
+ break;
+ case DATA_TYPE_F3:
+ pkt_stats = &driver->msg_stats;
+ break;
+ case DATA_TYPE_LOG:
+ pkt_stats = &driver->log_stats;
+ break;
+ case DATA_TYPE_RESPONSE:
+ if (flag != PKT_DROP)
+ return;
+ pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
+ __func__);
+ return;
+ case DATA_TYPE_DELAYED_RESPONSE:
+ /* No counters to increase for Delayed responses */
+ return;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ switch (flag) {
+ case PKT_ALLOC:
+ atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
+ break;
+ case PKT_DROP:
+ atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
+ break;
+ case PKT_RESET:
+ atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
+ atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
+ __func__, flag);
+ return;
+ }
+}
+
+void diag_get_timestamp(char *time_str)
+{
+ struct timeval t;
+ struct tm broken_tm;
+
+ if (!time_str)
+ return;
+ do_gettimeofday(&t);
+ time_to_tm(t.tv_sec, 0, &broken_tm);
+ scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
+ broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
+}
+
+int diag_get_remote(int remote_info)
+{
+ int val = (remote_info < 0) ? -remote_info : remote_info;
+ int remote_val;
+
+ switch (val) {
+ case MDM:
+ case MDM2:
+ case QSC:
+ remote_val = -remote_info;
+ break;
+ default:
+ remote_val = 0;
+ break;
+ }
+
+ return remote_val;
+}
+
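+/*
+ * Check whether a registration entry covers one of the known polling
+ * commands (status, query call, query TMC, diag poll, TDSCDMA status).
+ * Returns DIAG_CMD_POLLING or DIAG_CMD_NOT_POLLING.
+ */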
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
+{
+ int polling = DIAG_CMD_NOT_POLLING;
+
+ if (!entry)
+ return -EIO;
+
+ if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
+ if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
+ entry->cmd_code_hi >= DIAG_CMD_STATUS &&
+ entry->cmd_code_lo <= DIAG_CMD_STATUS)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_WCDMA &&
+ entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
+ entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_GSM &&
+ entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
+ entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_PARAMS &&
+ entry->cmd_code_hi >= DIAG_DIAG_POLL &&
+ entry->cmd_code_lo <= DIAG_DIAG_POLL)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
+ entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
+ entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
+ polling = DIAG_CMD_POLLING;
+ }
+
+ return polling;
+}
+
+static void diag_cmd_invalidate_polling(int change_flag)
+{
+ int polling = DIAG_CMD_NOT_POLLING;
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ if (change_flag == DIAG_CMD_ADD) {
+ if (driver->polling_reg_flag)
+ return;
+ }
+
+ driver->polling_reg_flag = 0;
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ polling = diag_cmd_chk_polling(&item->entry);
+ if (polling == DIAG_CMD_POLLING) {
+ driver->polling_reg_flag = 1;
+ break;
+ }
+ }
+}
+
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+ int pid)
+{
+ struct diag_cmd_reg_t *new_item = NULL;
+
+ if (!new_entry) {
+ pr_err("diag: In %s, invalid new entry\n", __func__);
+ return -EINVAL;
+ }
+
+ if (proc > APPS_DATA) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
+ return -EINVAL;
+ }
+
+ if (proc != APPS_DATA)
+ pid = INVALID_PID;
+
+ new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
+ if (!new_item) {
+ pr_err("diag: In %s, unable to create memory for new command registration\n",
+ __func__);
+ return -ENOMEM;
+ }
+ kmemleak_not_leak(new_item);
+
+ new_item->pid = pid;
+ new_item->proc = proc;
+ memcpy(&new_item->entry, new_entry,
+ sizeof(struct diag_cmd_reg_entry_t));
+ INIT_LIST_HEAD(&new_item->link);
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ list_add_tail(&new_item->link, &driver->cmd_reg_list);
+ driver->cmd_reg_count++;
+ diag_cmd_invalidate_polling(DIAG_CMD_ADD);
+ mutex_unlock(&driver->cmd_reg_mutex);
+
+ return 0;
+}
+
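+/*
+ * Look up a command registration matching the given entry for a proc
+ * (or ALL_PROC). For MODE_CMD, reset requests match only apps
+ * registrations and all other mode requests skip apps, so that mode
+ * commands are routed to the intended handler.
+ */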
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+ struct diag_cmd_reg_entry_t *entry, int proc)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+ struct diag_cmd_reg_entry_t *temp_entry = NULL;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid entry\n", __func__);
+ return NULL;
+ }
+
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ temp_entry = &item->entry;
+ if (temp_entry->cmd_code == entry->cmd_code &&
+ temp_entry->subsys_id == entry->subsys_id &&
+ temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+ temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+ (proc == item->proc || proc == ALL_PROC)) {
+ return &item->entry;
+ } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+ entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
+ if (temp_entry->subsys_id == entry->subsys_id &&
+ temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+ temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+ (proc == item->proc || proc == ALL_PROC)) {
+ return &item->entry;
+ }
+ } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+ temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
+ if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
+ (temp_entry->cmd_code_lo <= entry->cmd_code) &&
+ (proc == item->proc || proc == ALL_PROC)) {
+ if (entry->cmd_code == MODE_CMD) {
+ if (entry->subsys_id == RESET_ID &&
+ item->proc != APPS_DATA) {
+ continue;
+ }
+ if (entry->subsys_id != RESET_ID &&
+ item->proc == APPS_DATA) {
+ continue;
+ }
+ }
+ return &item->entry;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
+{
+ struct diag_cmd_reg_t *item = NULL;
+ struct diag_cmd_reg_entry_t *temp_entry;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid entry\n", __func__);
+ return;
+ }
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ temp_entry = diag_cmd_search(entry, proc);
+ if (temp_entry) {
+ item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
+ if (!item) {
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return;
+ }
+ list_del(&item->link);
+ kfree(item);
+ driver->cmd_reg_count--;
+ }
+ diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+ mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+void diag_cmd_remove_reg_by_pid(int pid)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (item->pid == pid) {
+ list_del(&item->link);
+ kfree(item);
+ driver->cmd_reg_count--;
+ }
+ }
+ mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+void diag_cmd_remove_reg_by_proc(int proc)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (item->proc == proc) {
+ list_del(&item->link);
+ kfree(item);
+ driver->cmd_reg_count--;
+ }
+ }
+ diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+ mutex_unlock(&driver->cmd_reg_mutex);
+}
+
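+/*
+ * Drain queued DCI write buffers for one client into the userspace
+ * buffer, starting at offset *pret. The total payload length is written
+ * back at offset 8 of the user buffer. If the user buffer fills before
+ * the list is empty, drain_again is set so dci_drain_data() can deliver
+ * the remainder on a later read.
+ */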
+static int diag_copy_dci(char __user *buf, size_t count,
+ struct diag_dci_client_tbl *entry, int *pret)
+{
+ int total_data_len = 0;
+ int ret = 0;
+ int exit_stat = 1;
+ uint8_t drain_again = 0;
+ struct diag_dci_buffer_t *buf_entry, *temp;
+
+ if (!buf || !entry || !pret)
+ return exit_stat;
+
+ ret = *pret;
+
+ ret += sizeof(int);
+ if (ret >= count) {
+ pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
+ __func__, ret, count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&entry->write_buf_mutex);
+ list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+ buf_track) {
+
+ if ((ret + buf_entry->data_len) > count) {
+ drain_again = 1;
+ break;
+ }
+
+ list_del(&buf_entry->buf_track);
+ mutex_lock(&buf_entry->data_mutex);
+ if ((buf_entry->data_len > 0) &&
+ (buf_entry->in_busy) &&
+ (buf_entry->data)) {
+ if (copy_to_user(buf+ret, (void *)buf_entry->data,
+ buf_entry->data_len))
+ goto drop;
+ ret += buf_entry->data_len;
+ total_data_len += buf_entry->data_len;
+ diag_ws_on_copy(DIAG_WS_DCI);
+drop:
+ buf_entry->in_busy = 0;
+ buf_entry->data_len = 0;
+ buf_entry->in_list = 0;
+ if (buf_entry->buf_type == DCI_BUF_CMD) {
+ mutex_unlock(&buf_entry->data_mutex);
+ continue;
+ } else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+ diagmem_free(driver, buf_entry->data,
+ POOL_TYPE_DCI);
+ buf_entry->data = NULL;
+ mutex_unlock(&buf_entry->data_mutex);
+ kfree(buf_entry);
+ continue;
+ }
+
+ }
+ mutex_unlock(&buf_entry->data_mutex);
+ }
+
+ if (total_data_len > 0) {
+ /*
+ * Copy the total data length to its fixed offset in the user
+ * buffer. COPY_USER_SPACE_OR_EXIT advances ret by 4, which is
+ * undone below since the length field is not part of the
+ * sequential data stream.
+ */
+ COPY_USER_SPACE_OR_EXIT(buf+8, total_data_len, 4);
+ ret -= 4;
+ } else {
+ pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
+ __func__, total_data_len);
+ }
+
+ exit_stat = 0;
+exit:
+ entry->in_service = 0;
+ mutex_unlock(&entry->write_buf_mutex);
+ *pret = ret;
+ if (drain_again)
+ dci_drain_data(0);
+
+ return exit_stat;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
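+/*
+ * Remote (bridged) processor support: size the MDM/QSC memory pools and
+ * allocate the scratch buffer used to HDLC encode apps data before it
+ * is forwarded to a bridged device.
+ */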
+static int diag_remote_init(void)
+{
+ diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
+ diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
+ diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
+ diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
+ poolsize_mdm_dci);
+ diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+ diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+ diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
+ poolsize_mdm_dci_write);
+ diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
+ poolsize_mdm_dci_write);
+ diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
+ poolsize_qsc_usb);
+ diag_md_mdm_init();
+ driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+ if (!driver->hdlc_encode_buf)
+ return -ENOMEM;
+ driver->hdlc_encode_buf_len = 0;
+ return 0;
+}
+
+static void diag_remote_exit(void)
+{
+ kfree(driver->hdlc_encode_buf);
+}
+
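+/*
+ * Forward a raw diag packet to a remote processor. With HDLC disabled,
+ * packets arrive pre-framed as start (1 byte), version (1 byte),
+ * payload length (2 bytes) and end (1 byte) around the payload, so a
+ * packet carrying N payload bytes is N + 5 bytes long. Otherwise the
+ * data is HDLC encoded here before being handed to the bridge.
+ */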
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+ uint8_t hdlc_flag)
+{
+ int err = 0;
+ int max_len = 0;
+ uint8_t retry_count = 0;
+ uint8_t max_retries = 3;
+ uint16_t payload = 0;
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ int bridge_index = proc - 1;
+ uint8_t hdlc_disabled = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (len <= 0) {
+ pr_err("diag: In %s, invalid len: %d", __func__, len);
+ return -EBADMSG;
+ }
+
+ if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+ pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+ bridge_index);
+ return -EINVAL;
+ }
+
+ do {
+ if (driver->hdlc_encode_buf_len == 0)
+ break;
+ usleep_range(10000, 10100);
+ retry_count++;
+ } while (retry_count < max_retries);
+
+ if (driver->hdlc_encode_buf_len != 0)
+ return -EAGAIN;
+ mutex_lock(&driver->hdlc_disable_mutex);
+ hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ if (hdlc_disabled) {
+ if (len < 4) {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
+ payload = *(uint16_t *)(buf + 2);
+ if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: Dropping packet, payload size is %d\n",
+ payload);
+ return -EBADMSG;
+ }
+ driver->hdlc_encode_buf_len = payload;
+ /*
+ * Adding 5 bytes for start (1 byte), version (1 byte),
+ * payload (2 bytes) and end (1 byte)
+ */
+ if (len == (payload + 5)) {
+ /*
+ * Adding 4 bytes for start (1 byte), version (1 byte)
+ * and payload (2 bytes)
+ */
+ memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+ goto send_data;
+ } else {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
+ }
+
+ if (hdlc_flag) {
+ if (DIAG_MAX_HDLC_BUF_SIZE < len) {
+ pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+ len);
+ return -EBADMSG;
+ }
+ driver->hdlc_encode_buf_len = len;
+ memcpy(driver->hdlc_encode_buf, buf, len);
+ goto send_data;
+ }
+
+ /*
+ * The worst case length will be twice the incoming packet length:
+ * add 3 bytes for the CRC (2 bytes) and the trailing delimiter
+ * (1 byte), so e.g. a 100 byte packet can expand to at most
+ * 203 bytes.
+ */
+ max_len = (2 * len) + 3;
+ if (DIAG_MAX_HDLC_BUF_SIZE < max_len) {
+ pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+ max_len);
+ return -EBADMSG;
+ }
+
+ /* Perform HDLC encoding on incoming data */
+ send.state = DIAG_STATE_START;
+ send.pkt = (void *)(buf);
+ send.last = (void *)(buf + len - 1);
+ send.terminate = 1;
+
+ enc.dest = driver->hdlc_encode_buf;
+ enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
+ diag_hdlc_encode(&send, &enc);
+ driver->hdlc_encode_buf_len = (int)(enc.dest -
+ (void *)driver->hdlc_encode_buf);
+
+send_data:
+ err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
+ driver->hdlc_encode_buf_len);
+ if (err) {
+ pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
+ proc, err);
+ driver->hdlc_encode_buf_len = 0;
+ }
+
+ return err;
+}
+
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+ int bridge_index = proc - 1;
+
+ if (!buf || len < 0) {
+ pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+ __func__, buf, len);
+ return -EINVAL;
+ }
+
+ if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+ pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+ bridge_index);
+ return -EINVAL;
+ }
+
+ driver->user_space_data_busy = 1;
+ return diagfwd_bridge_write(bridge_index, buf, len);
+}
+#else
+static int diag_remote_init(void)
+{
+ return 0;
+}
+
+static void diag_remote_exit(void)
+{
+ return;
+}
+
+int diagfwd_bridge_init(bool use_mhi)
+{
+ return 0;
+}
+
+void diagfwd_bridge_exit(void)
+{
+ return;
+}
+
+uint16_t diag_get_remote_device_mask(void)
+{
+ return 0;
+}
+
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+ uint8_t hdlc_flag)
+{
+ return -EINVAL;
+}
+
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+ return 0;
+}
+#endif
+
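+/*
+ * Validate a mask/command request from userspace against a whitelist of
+ * known-safe diag commands. Returns 1 if the packet may be processed,
+ * 0 otherwise.
+ */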
+static int mask_request_validate(unsigned char mask_buf[])
+{
+ uint8_t packet_id;
+ uint8_t subsys_id;
+ uint16_t ss_cmd;
+
+ packet_id = mask_buf[0];
+
+ if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
+ subsys_id = mask_buf[1];
+ ss_cmd = *(uint16_t *)(mask_buf + 2);
+ switch (subsys_id) {
+ case DIAG_SS_DIAG:
+ if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
+ (ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
+ (ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
+ (ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
+ (ss_cmd == DIAG_SS_FILE_READ_APPS))
+ return 1;
+ break;
+ default:
+ return 0;
+ }
+ } else if (packet_id == 0x4B) {
+ subsys_id = mask_buf[1];
+ ss_cmd = *(uint16_t *)(mask_buf + 2);
+ /* Packets with SSID which are allowed */
+ switch (subsys_id) {
+ case 0x04: /* DIAG_SUBSYS_WCDMA */
+ if ((ss_cmd == 0) || (ss_cmd == 0xF))
+ return 1;
+ break;
+ case 0x08: /* DIAG_SUBSYS_GSM */
+ if ((ss_cmd == 0) || (ss_cmd == 0x1))
+ return 1;
+ break;
+ case 0x09: /* DIAG_SUBSYS_UMTS */
+ case 0x0F: /* DIAG_SUBSYS_CM */
+ if (ss_cmd == 0)
+ return 1;
+ break;
+ case 0x0C: /* DIAG_SUBSYS_OS */
+ if ((ss_cmd == 2) || (ss_cmd == 0x100))
+ return 1; /* MPU and APU */
+ break;
+ case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
+ if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
+ return 1;
+ else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
+ return 0;
+ else if (ss_cmd == DIAG_GET_TIME_API)
+ return 1;
+ else if (ss_cmd == DIAG_SET_TIME_API)
+ return 1;
+ else if (ss_cmd == DIAG_SWITCH_COMMAND)
+ return 1;
+ else if (ss_cmd == DIAG_BUFFERING_MODE)
+ return 1;
+ break;
+ case 0x13: /* DIAG_SUBSYS_FS */
+ if ((ss_cmd == 0) || (ss_cmd == 0x1))
+ return 1;
+ break;
+ default:
+ return 0;
+ }
+ } else {
+ switch (packet_id) {
+ case 0x00: /* Version Number */
+ case 0x0C: /* CDMA status packet */
+ case 0x1C: /* Diag Version */
+ case 0x1D: /* Time Stamp */
+ case 0x60: /* Event Report Control */
+ case 0x63: /* Status snapshot */
+ case 0x73: /* Logging Configuration */
+ case 0x7C: /* Extended build ID */
+ case 0x7D: /* Extended Message configuration */
+ case 0x81: /* Event get mask */
+ case 0x82: /* Set the event mask */
+ return 1;
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static void diag_md_session_init(void)
+{
+ int i;
+
+ mutex_init(&driver->md_session_lock);
+ driver->md_session_mask = 0;
+ driver->md_session_mode = DIAG_MD_NONE;
+ for (i = 0; i < NUM_MD_SESSIONS; i++)
+ driver->md_session_map[i] = NULL;
+}
+
+static void diag_md_session_exit(void)
+{
+ int i;
+ struct diag_md_session_t *session_info = NULL;
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i]) {
+ session_info = driver->md_session_map[i];
+ diag_log_mask_free(session_info->log_mask);
+ kfree(session_info->log_mask);
+ session_info->log_mask = NULL;
+ diag_msg_mask_free(session_info->msg_mask);
+ kfree(session_info->msg_mask);
+ session_info->msg_mask = NULL;
+ diag_event_mask_free(session_info->event_mask);
+ kfree(session_info->event_mask);
+ session_info->event_mask = NULL;
+ kfree(session_info);
+ session_info = NULL;
+ driver->md_session_map[i] = NULL;
+ }
+ }
+ mutex_destroy(&driver->md_session_lock);
+ driver->md_session_mask = 0;
+ driver->md_session_mode = DIAG_MD_NONE;
+}
+
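+/*
+ * Create a memory-device logging session for the calling process. The
+ * session receives private deep copies of the global log, event, and
+ * msg masks and claims every peripheral in peripheral_mask; creation
+ * fails with -EEXIST if any requested peripheral is already owned by
+ * another session.
+ */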
+int diag_md_session_create(int mode, int peripheral_mask, int proc)
+{
+ int i;
+ int err = 0;
+ struct diag_md_session_t *new_session = NULL;
+
+ /*
+ * If a session is running with a peripheral mask and a new session
+ * request comes in with same peripheral mask value then return
+ * invalid param
+ */
+ if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
+ (driver->md_session_mask & peripheral_mask) != 0)
+ return -EINVAL;
+
+ mutex_lock(&driver->md_session_lock);
+ new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
+ if (!new_session) {
+ mutex_unlock(&driver->md_session_lock);
+ return -ENOMEM;
+ }
+ new_session->peripheral_mask = 0;
+ new_session->pid = current->tgid;
+ new_session->task = current;
+ new_session->md_client_thread_info = current_thread_info();
+ new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
+ GFP_KERNEL);
+ if (!new_session->log_mask) {
+ err = -ENOMEM;
+ goto fail_peripheral;
+ }
+ new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
+ GFP_KERNEL);
+ if (!new_session->event_mask) {
+ err = -ENOMEM;
+ goto fail_peripheral;
+ }
+ new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
+ GFP_KERNEL);
+ if (!new_session->msg_mask) {
+ err = -ENOMEM;
+ goto fail_peripheral;
+ }
+
+ err = diag_log_mask_copy(new_session->log_mask, &log_mask);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "failed to copy log mask, err %d\n", err);
+ goto fail_peripheral;
+ }
+ err = diag_event_mask_copy(new_session->event_mask, &event_mask);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "failed to copy event mask, err %d\n", err);
+ goto fail_peripheral;
+ }
+ err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "failed to copy msg mask, err %d\n", err);
+ goto fail_peripheral;
+ }
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
+ continue;
+ if (driver->md_session_map[i] != NULL) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "another instance present for %d\n", i);
+ err = -EEXIST;
+ goto fail_peripheral;
+ }
+ new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
+ driver->md_session_map[i] = new_session;
+ driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
+ }
+ setup_timer(&new_session->hdlc_reset_timer,
+ diag_md_hdlc_reset_timer_func,
+ new_session->pid);
+
+ driver->md_session_mode = DIAG_MD_PERIPHERAL;
+ mutex_unlock(&driver->md_session_lock);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "created session in peripheral mode\n");
+ return 0;
+
+fail_peripheral:
+ diag_log_mask_free(new_session->log_mask);
+ kfree(new_session->log_mask);
+ new_session->log_mask = NULL;
+ diag_event_mask_free(new_session->event_mask);
+ kfree(new_session->event_mask);
+ new_session->event_mask = NULL;
+ diag_msg_mask_free(new_session->msg_mask);
+ kfree(new_session->msg_mask);
+ new_session->msg_mask = NULL;
+ kfree(new_session);
+ new_session = NULL;
+ mutex_unlock(&driver->md_session_lock);
+ return err;
+}
+
+static void diag_md_session_close(int pid)
+{
+ int i;
+ uint8_t found = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ session_info = diag_md_session_get_pid(pid);
+ if (!session_info)
+ return;
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] != session_info)
+ continue;
+ driver->md_session_map[i] = NULL;
+ driver->md_session_mask &= ~session_info->peripheral_mask;
+ }
+ diag_log_mask_free(session_info->log_mask);
+ kfree(session_info->log_mask);
+ session_info->log_mask = NULL;
+ diag_msg_mask_free(session_info->msg_mask);
+ kfree(session_info->msg_mask);
+ session_info->msg_mask = NULL;
+ diag_event_mask_free(session_info->event_mask);
+ kfree(session_info->event_mask);
+ session_info->event_mask = NULL;
+ del_timer(&session_info->hdlc_reset_timer);
+
+ for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
+ if (driver->md_session_map[i] != NULL)
+ found = 1;
+ }
+
+ driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
+ kfree(session_info);
+ session_info = NULL;
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
+}
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid)
+{
+ int i;
+
+ if (pid <= 0)
+ return NULL;
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] &&
+ driver->md_session_map[i]->pid == pid)
+ return driver->md_session_map[i];
+ }
+ return NULL;
+}
+
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
+{
+ if (peripheral >= NUM_MD_SESSIONS)
+ return NULL;
+ return driver->md_session_map[peripheral];
+}
+
+/*
+ * diag_md_session_match_pid_peripheral
+ *
+ * 1. Pass a valid PID (with peripheral == 0) to get a mask of all the
+ *    peripherals in the logging session for that PID.
+ * 2. Pass a valid peripheral (with pid == 0) to get the PID logging
+ *    for that peripheral.
+ */
+
+int diag_md_session_match_pid_peripheral(int pid,
+ uint8_t peripheral)
+{
+ int i, flag = 0;
+
+ if (pid <= 0 || peripheral >= NUM_MD_SESSIONS)
+ return -EINVAL;
+
+ if (!peripheral) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] &&
+ driver->md_session_map[i]->pid == pid) {
+ peripheral |= 1 << i;
+ flag = 1;
+ }
+ }
+ if (flag)
+ return peripheral;
+ }
+
+ if (!pid) {
+ if (driver->md_session_map[peripheral])
+ return driver->md_session_map[peripheral]->pid;
+ }
+
+ return -EINVAL;
+}
+
+static int diag_md_peripheral_switch(int pid,
+ int peripheral_mask, int req_mode)
+{
+ int i, bit = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ session_info = diag_md_session_get_pid(pid);
+ if (!session_info)
+ return -EINVAL;
+ if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+ return -EINVAL;
+
+ /*
+ * check that md_session_map for i == session_info,
+ * if not then race condition occurred and bail
+ */
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
+ if (!bit)
+ continue;
+ if (req_mode == DIAG_USB_MODE) {
+ if (driver->md_session_map[i] != session_info)
+ return -EINVAL;
+ driver->md_session_map[i] = NULL;
+ driver->md_session_mask &= ~bit;
+ session_info->peripheral_mask &= ~bit;
+
+ } else {
+ if (driver->md_session_map[i] != NULL)
+ return -EINVAL;
+ driver->md_session_map[i] = session_info;
+ driver->md_session_mask |= bit;
+ session_info->peripheral_mask |= bit;
+
+ }
+ }
+
+ driver->md_session_mode = DIAG_MD_PERIPHERAL;
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
+ peripheral_mask, req_mode);
+ return 0;
+}
+
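+/*
+ * Arbitrate a logging mode switch against existing md sessions. On
+ * success, *change_mode tells the caller whether the mux layer still
+ * needs to switch modes; -EINVAL is returned when a requested
+ * peripheral is owned by another session.
+ */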
+static int diag_md_session_check(int curr_mode, int req_mode,
+ const struct diag_logging_mode_param_t *param,
+ uint8_t *change_mode)
+{
+ int i, bit = 0, err = 0, peripheral_mask = 0;
+ int change_mask = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ if (!param || !change_mode)
+ return -EIO;
+
+ *change_mode = 0;
+
+ switch (curr_mode) {
+ case DIAG_USB_MODE:
+ case DIAG_MEMORY_DEVICE_MODE:
+ case DIAG_MULTI_MODE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+ return -EINVAL;
+
+ if (req_mode == DIAG_USB_MODE) {
+ if (curr_mode == DIAG_USB_MODE)
+ return 0;
+ mutex_lock(&driver->md_session_lock);
+ if (driver->md_session_mode == DIAG_MD_NONE &&
+ driver->md_session_mask == 0 && driver->logging_mask) {
+ *change_mode = 1;
+ mutex_unlock(&driver->md_session_lock);
+ return 0;
+ }
+ /*
+ * curr_mode is either DIAG_MULTI_MODE or
+ * DIAG_MEMORY_DEVICE_MODE. Check if the requested
+ * peripherals are already in USB mode.
+ */
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
+ if (!bit)
+ continue;
+ if (bit & driver->logging_mask)
+ change_mask |= bit;
+ }
+ if (!change_mask) {
+ mutex_unlock(&driver->md_session_lock);
+ return 0;
+ }
+
+ /*
+ * Change is needed. Check if this md_session has set all the
+ * requested peripherals. If another md session set a requested
+ * peripheral then we cannot switch that peripheral to USB.
+ * If this session owns all the requested peripherals, then
+ * call function to switch the modes/masks for the md_session
+ */
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (!session_info) {
+ *change_mode = 1;
+ mutex_unlock(&driver->md_session_lock);
+ return 0;
+ }
+ peripheral_mask = session_info->peripheral_mask;
+ if ((change_mask & peripheral_mask)
+ != change_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Another MD Session owns a requested peripheral\n");
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ *change_mode = 1;
+
+ /*
+ * If only some of this session's peripherals are moving to
+ * USB mode, switch just those; if all of them are, close
+ * the session instead.
+ */
+ if (~change_mask & peripheral_mask) {
+ err = diag_md_peripheral_switch(current->tgid,
+ change_mask, DIAG_USB_MODE);
+ } else
+ diag_md_session_close(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
+ return err;
+
+ } else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
+ /*
+ * Get bit mask that represents what peripherals already have
+ * been set. Check that requested peripherals already set are
+ * owned by this md session
+ */
+ mutex_lock(&driver->md_session_lock);
+ change_mask = driver->md_session_mask & param->peripheral_mask;
+ session_info = diag_md_session_get_pid(current->tgid);
+
+ if (session_info) {
+ if ((session_info->peripheral_mask & change_mask)
+ != change_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Another MD Session owns a requested peripheral\n");
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ err = diag_md_peripheral_switch(current->tgid,
+ change_mask, DIAG_USB_MODE);
+ mutex_unlock(&driver->md_session_lock);
+ } else {
+ mutex_unlock(&driver->md_session_lock);
+ if (change_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Another MD Session owns a requested peripheral\n");
+ return -EINVAL;
+ }
+ err = diag_md_session_create(DIAG_MD_PERIPHERAL,
+ param->peripheral_mask, DIAG_LOCAL_PROC);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if ((param->peripheral_mask > 0) &&
+ (param->peripheral_mask & (1 << i)))
+ driver->p_hdlc_disabled[i] = 0;
+ }
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ }
+ *change_mode = 1;
+ return err;
+ }
+ return -EINVAL;
+}
+
+static uint32_t diag_translate_mask(uint32_t peripheral_mask)
+{
+ uint32_t ret = 0;
+
+ if (peripheral_mask & DIAG_CON_APSS)
+ ret |= (1 << APPS_DATA);
+ if (peripheral_mask & DIAG_CON_MPSS)
+ ret |= (1 << PERIPHERAL_MODEM);
+ if (peripheral_mask & DIAG_CON_LPASS)
+ ret |= (1 << PERIPHERAL_LPASS);
+ if (peripheral_mask & DIAG_CON_WCNSS)
+ ret |= (1 << PERIPHERAL_WCNSS);
+ if (peripheral_mask & DIAG_CON_SENSORS)
+ ret |= (1 << PERIPHERAL_SENSORS);
+ if (peripheral_mask & DIAG_CON_WDSP)
+ ret |= (1 << PERIPHERAL_WDSP);
+ if (peripheral_mask & DIAG_CON_CDSP)
+ ret |= (1 << PERIPHERAL_CDSP);
+ if (peripheral_mask & DIAG_CON_UPD_WLAN)
+ ret |= (1 << UPD_WLAN);
+ if (peripheral_mask & DIAG_CON_UPD_AUDIO)
+ ret |= (1 << UPD_AUDIO);
+ if (peripheral_mask & DIAG_CON_UPD_SENSORS)
+ ret |= (1 << UPD_SENSORS);
+
+ return ret;
+}
+
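+/*
+ * Top-level handler for DIAG_IOCTL_SWITCH_LOGGING. Translates the
+ * userspace peripheral/PD masks into kernel masks, checks the request
+ * against existing md sessions, and then asks the mux layer to move the
+ * affected peripherals to the new mode.
+ */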
+static int diag_switch_logging(struct diag_logging_mode_param_t *param)
+{
+ int new_mode, i = 0;
+ int curr_mode;
+ int err = 0;
+ uint8_t do_switch = 1;
+ uint32_t peripheral_mask = 0;
+ uint8_t peripheral, upd;
+
+ if (!param)
+ return -EINVAL;
+
+ if (!param->peripheral_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "asking for mode switch with no peripheral mask set\n");
+ return -EINVAL;
+ }
+
+ if (param->pd_mask) {
+ switch (param->pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ peripheral = PERIPHERAL_MODEM;
+ upd = UPD_WLAN;
+ break;
+ case DIAG_CON_UPD_AUDIO:
+ peripheral = PERIPHERAL_LPASS;
+ upd = UPD_AUDIO;
+ break;
+ case DIAG_CON_UPD_SENSORS:
+ peripheral = PERIPHERAL_LPASS;
+ upd = UPD_SENSORS;
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "asking for mode switch with no pd mask set\n");
+ return -EINVAL;
+ }
+
+ i = upd - UPD_WLAN;
+
+ if (driver->md_session_map[peripheral] &&
+ (MD_PERIPHERAL_MASK(peripheral) &
+ diag_mux->mux_mask) &&
+ !driver->pd_session_clear[i]) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag_fr: User PD is already part of an active peripheral logging session\n");
+ driver->pd_session_clear[i] = 0;
+ return -EINVAL;
+ }
+ peripheral_mask =
+ diag_translate_mask(param->pd_mask);
+ param->peripheral_mask = peripheral_mask;
+ if (!driver->pd_session_clear[i]) {
+ driver->pd_logging_mode[i] = 1;
+ driver->num_pd_session += 1;
+ }
+ driver->pd_session_clear[i] = 0;
+ } else {
+ peripheral_mask =
+ diag_translate_mask(param->peripheral_mask);
+ param->peripheral_mask = peripheral_mask;
+ }
+
+ switch (param->req_mode) {
+ case CALLBACK_MODE:
+ case UART_MODE:
+ case SOCKET_MODE:
+ case MEMORY_DEVICE_MODE:
+ new_mode = DIAG_MEMORY_DEVICE_MODE;
+ break;
+ case USB_MODE:
+ new_mode = DIAG_USB_MODE;
+ break;
+ default:
+ pr_err("diag: In %s, request to switch to invalid mode: %d\n",
+ __func__, param->req_mode);
+ return -EINVAL;
+ }
+
+ curr_mode = driver->logging_mode;
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "request to switch logging from %d mask:%0x to new_mode %d mask:%0x\n",
+ curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
+
+ err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "err from diag_md_session_check, err: %d\n", err);
+ return err;
+ }
+
+ if (do_switch == 0) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "not switching modes c: %d n: %d\n",
+ curr_mode, new_mode);
+ return 0;
+ }
+
+ diag_ws_reset(DIAG_WS_MUX);
+ err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
+ if (err) {
+ pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
+ __func__, curr_mode, new_mode, err);
+ driver->logging_mode = curr_mode;
+ goto fail;
+ }
+ driver->logging_mode = new_mode;
+ driver->logging_mask = peripheral_mask;
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);
+
+ /* Update to take peripheral_mask */
+ if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
+ diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
+ MODE_REALTIME, ALL_PROC);
+ } else {
+ diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
+ ALL_PROC);
+ }
+
+ if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
+ curr_mode == DIAG_USB_MODE)) {
+ queue_work(driver->diag_real_time_wq,
+ &driver->diag_real_time_work);
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static int diag_ioctl_dci_reg(unsigned long ioarg)
+{
+ int result = -EINVAL;
+ struct diag_dci_reg_tbl_t dci_reg_params;
+
+ if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
+ sizeof(struct diag_dci_reg_tbl_t)))
+ return -EFAULT;
+
+ result = diag_dci_register_client(&dci_reg_params);
+
+ return result;
+}
+
+static int diag_ioctl_dci_health_stats(unsigned long ioarg)
+{
+ int result = -EINVAL;
+ struct diag_dci_health_stats_proc stats;
+
+ if (copy_from_user(&stats, (void __user *)ioarg,
+ sizeof(struct diag_dci_health_stats_proc)))
+ return -EFAULT;
+
+ result = diag_dci_copy_health_stats(&stats);
+ if (result == DIAG_DCI_NO_ERROR) {
+ if (copy_to_user((void __user *)ioarg, &stats,
+ sizeof(struct diag_dci_health_stats_proc)))
+ return -EFAULT;
+ }
+
+ return result;
+}
+
+static int diag_ioctl_dci_log_status(unsigned long ioarg)
+{
+ struct diag_log_event_stats le_stats;
+ struct diag_dci_client_tbl *dci_client = NULL;
+
+ if (copy_from_user(&le_stats, (void __user *)ioarg,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ dci_client = diag_dci_get_client_entry(le_stats.client_id);
+ if (!dci_client)
+ return DIAG_DCI_NOT_SUPPORTED;
+ le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
+ if (copy_to_user((void __user *)ioarg, &le_stats,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ return DIAG_DCI_NO_ERROR;
+}
+
+static int diag_ioctl_dci_event_status(unsigned long ioarg)
+{
+ struct diag_log_event_stats le_stats;
+ struct diag_dci_client_tbl *dci_client = NULL;
+
+ if (copy_from_user(&le_stats, (void __user *)ioarg,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ dci_client = diag_dci_get_client_entry(le_stats.client_id);
+ if (!dci_client)
+ return DIAG_DCI_NOT_SUPPORTED;
+
+ le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
+ if (copy_to_user((void __user *)ioarg, &le_stats,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ return DIAG_DCI_NO_ERROR;
+}
+
+static int diag_ioctl_lsm_deinit(void)
+{
+ int i;
+
+ mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == current->tgid)
+ break;
+
+ if (i == driver->num_clients) {
+ mutex_unlock(&driver->diagchar_mutex);
+ return -EINVAL;
+ }
+ if (!(driver->data_ready[i] & DEINIT_TYPE)) {
+ driver->data_ready[i] |= DEINIT_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
+ mutex_unlock(&driver->diagchar_mutex);
+ wake_up_interruptible(&driver->wait_q);
+
+ return 1;
+}
+
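+/*
+ * Handle DIAG_IOCTL_VOTE_REAL_TIME: record a real-time vs. buffering
+ * vote either for a DCI client or for a whole proc, then queue the
+ * real-time work to apply the aggregate vote.
+ */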
+static int diag_ioctl_vote_real_time(unsigned long ioarg)
+{
+ int real_time = 0;
+ int temp_proc = ALL_PROC;
+ struct real_time_vote_t vote;
+ struct diag_dci_client_tbl *dci_client = NULL;
+
+ if (copy_from_user(&vote, (void __user *)ioarg,
+ sizeof(struct real_time_vote_t)))
+ return -EFAULT;
+
+ if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
+ vote.real_time_vote > MODE_UNKNOWN ||
+ vote.client_id < 0) {
+ pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
+ __func__, vote.proc, vote.real_time_vote,
+ vote.client_id);
+ return -EINVAL;
+ }
+
+ driver->real_time_update_busy++;
+ if (vote.proc == DIAG_PROC_DCI) {
+ dci_client = diag_dci_get_client_entry(vote.client_id);
+ if (!dci_client) {
+ driver->real_time_update_busy--;
+ return DIAG_DCI_NOT_SUPPORTED;
+ }
+ diag_dci_set_real_time(dci_client, vote.real_time_vote);
+ real_time = diag_dci_get_cumulative_real_time(
+ dci_client->client_info.token);
+ diag_update_real_time_vote(vote.proc, real_time,
+ dci_client->client_info.token);
+ } else {
+ real_time = vote.real_time_vote;
+ temp_proc = vote.client_id;
+ diag_update_real_time_vote(vote.proc, real_time,
+ temp_proc);
+ }
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+ return 0;
+}
+
+static int diag_ioctl_get_real_time(unsigned long ioarg)
+{
+ int i;
+ int retry_count = 0;
+ int timer = 0;
+ struct real_time_query_t rt_query;
+
+ if (copy_from_user(&rt_query, (void __user *)ioarg,
+ sizeof(struct real_time_query_t)))
+ return -EFAULT;
+ while (retry_count < 3) {
+ if (driver->real_time_update_busy > 0) {
+ retry_count++;
+ /*
+ * The value 10000 was chosen empirically as an
+ * optimum value that gives the work queued on
+ * diag_real_time_wq time to complete processing.
+ */
+ for (timer = 0; timer < 5; timer++)
+ usleep_range(10000, 10100);
+ } else {
+ break;
+ }
+ }
+
+ if (driver->real_time_update_busy > 0)
+ return -EAGAIN;
+
+ if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
+ pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
+ __func__);
+ return -EINVAL;
+ }
+ rt_query.real_time = driver->real_time_mode[rt_query.proc];
+ /*
+ * For the local processor, if any of the peripherals is in buffering
+ * mode, overwrite the value of real time with UNKNOWN_MODE
+ */
+ if (rt_query.proc == DIAG_LOCAL_PROC) {
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->feature[i].peripheral_buffering)
+ continue;
+ switch (driver->buffering_mode[i].mode) {
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ rt_query.real_time = MODE_UNKNOWN;
+ break;
+ }
+ }
+ }
+
+ if (copy_to_user((void __user *)ioarg, &rt_query,
+ sizeof(struct real_time_query_t)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
+{
+ struct diag_buffering_mode_t params;
+ int peripheral = 0;
+ uint8_t diag_id = 0;
+
+ if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
+ return -EFAULT;
+
+ diag_map_pd_to_diagid(params.peripheral, &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EIO;
+ }
+
+ if (params.peripheral > NUM_PERIPHERALS &&
+ !driver->feature[peripheral].pd_buffering) {
+ pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+ __func__, peripheral);
+ return -EIO;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EIO;
+ }
+
+ mutex_lock(&driver->mode_lock);
+ driver->buffering_flag[params.peripheral] = 1;
+ mutex_unlock(&driver->mode_lock);
+
+ return diag_send_peripheral_buffering_mode(&params);
+}
+
+static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
+{
+ uint8_t pd, diag_id = 0;
+ int peripheral = 0;
+
+ if (copy_from_user(&pd, (void __user *)ioarg, sizeof(uint8_t)))
+ return -EFAULT;
+
+ diag_map_pd_to_diagid(pd, &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (pd > NUM_PERIPHERALS &&
+ !driver->feature[peripheral].pd_buffering) {
+ pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+ __func__, peripheral);
+ return -EIO;
+ }
+
+ return diag_send_peripheral_drain_immediate(pd, diag_id, peripheral);
+}
+
+static int diag_ioctl_dci_support(unsigned long ioarg)
+{
+ struct diag_dci_peripherals_t dci_support;
+ int result = -EINVAL;
+
+ if (copy_from_user(&dci_support, (void __user *)ioarg,
+ sizeof(struct diag_dci_peripherals_t)))
+ return -EFAULT;
+
+ result = diag_dci_get_support_list(&dci_support);
+ if (result == DIAG_DCI_NO_ERROR)
+ if (copy_to_user((void __user *)ioarg, &dci_support,
+ sizeof(struct diag_dci_peripherals_t)))
+ return -EFAULT;
+
+ return result;
+}
+
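+/*
+ * Handle DIAG_IOCTL_HDLC_TOGGLE: record the requested HDLC encoding
+ * state for the caller's md session (or globally when the caller has
+ * none) and propagate the per-peripheral hdlc_disabled flags.
+ */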
+static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
+{
+ uint8_t hdlc_support, i;
+ int peripheral = -EINVAL;
+ struct diag_md_session_t *session_info = NULL;
+
+ if (copy_from_user(&hdlc_support, (void __user *)ioarg,
+ sizeof(uint8_t)))
+ return -EFAULT;
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (session_info)
+ session_info->hdlc_disabled = hdlc_support;
+ else
+ driver->hdlc_disabled = hdlc_support;
+
+ peripheral = diag_md_session_match_pid_peripheral(current->tgid, 0);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral > 0 && session_info) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ session_info->hdlc_disabled;
+ else if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ } else {
+ if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ }
+
+ mutex_unlock(&driver->md_session_lock);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+ return 0;
+}
+
+static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
+{
+ int ret = -EINVAL;
+ int peripheral;
+ char *p_str = NULL;
+
+ if (!param)
+ return -EINVAL;
+
+ if (!param->pd_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "query with no pd mask set, returning error\n");
+ return -EINVAL;
+ }
+
+ switch (param->pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ peripheral = PERIPHERAL_MODEM;
+ p_str = "MODEM";
+ break;
+ case DIAG_CON_UPD_AUDIO:
+ case DIAG_CON_UPD_SENSORS:
+ peripheral = PERIPHERAL_LPASS;
+ p_str = "LPASS";
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Invalid pd mask, returning EINVAL\n");
+ return -EINVAL;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag: %s: Untagging support on APPS is %s\n", __func__,
+ ((driver->supports_apps_header_untagging) ?
+ "present" : "absent"));
+
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag: %s: Tagging support on %s is %s\n",
+ __func__, p_str,
+ (driver->feature[peripheral].untag_header ?
+ "present" : "absent"));
+
+ if (driver->supports_apps_header_untagging &&
+ driver->feature[peripheral].untag_header)
+ ret = 0;
+
+ return ret;
+}
+
+static int diag_ioctl_register_callback(unsigned long ioarg)
+{
+ int err = 0;
+ struct diag_callback_reg_t reg;
+
+ if (copy_from_user(&reg, (void __user *)ioarg,
+ sizeof(struct diag_callback_reg_t))) {
+ return -EFAULT;
+ }
+
+ if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
+ pr_err("diag: In %s, invalid proc %d for callback registration\n",
+ __func__, reg.proc);
+ return -EINVAL;
+ }
+
+ if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
+ return -EIO;
+
+ return err;
+}
+
+static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
+{
+ int i;
+ int err = 0;
+ uint32_t count = 0;
+ struct diag_cmd_reg_entry_t *entries = NULL;
+ const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
+
+ if (!reg_tbl) {
+ pr_err("diag: In %s, invalid registration table\n", __func__);
+ return -EINVAL;
+ }
+
+ count = reg_tbl->count;
+ if ((UINT_MAX / entry_len) < count) {
+ pr_warn("diag: In %s, possbile integer overflow.\n", __func__);
+ return -EFAULT;
+ }
+
+ entries = kzalloc(count * entry_len, GFP_KERNEL);
+ if (!entries) {
+ pr_err("diag: In %s, unable to create memory for registration table entries\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
+ if (err) {
+ pr_err("diag: In %s, error copying data from userspace, err: %d\n",
+ __func__, err);
+ kfree(entries);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
+ if (err) {
+ pr_err("diag: In %s, unable to register command, err: %d\n",
+ __func__, err);
+ break;
+ }
+ }
+
+ kfree(entries);
+ return err;
+}
+
+static int diag_ioctl_cmd_reg(unsigned long ioarg)
+{
+ struct diag_cmd_reg_tbl_t reg_tbl;
+
+ if (copy_from_user(&reg_tbl, (void __user *)ioarg,
+ sizeof(struct diag_cmd_reg_tbl_t))) {
+ return -EFAULT;
+ }
+
+ return diag_cmd_register_tbl(&reg_tbl);
+}
+
+static int diag_ioctl_cmd_dereg(void)
+{
+ diag_cmd_remove_reg_by_pid(current->tgid);
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @params: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_compat_t {
+ char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+ uint32_t count;
+ compat_uptr_t entries;
+};
+
+static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
+{
+ struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
+ struct diag_cmd_reg_tbl_t reg_tbl;
+
+ if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
+ sizeof(struct diag_cmd_reg_tbl_compat_t))) {
+ return -EFAULT;
+ }
+
+ strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
+ MAX_SYNC_OBJ_NAME_SIZE);
+ reg_tbl.count = reg_tbl_compat.count;
+ reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
+ (uintptr_t)reg_tbl_compat.entries;
+
+ return diag_cmd_register_tbl(&reg_tbl);
+}
+
+long diagchar_compat_ioctl(struct file *filp,
+ unsigned int iocmd, unsigned long ioarg)
+{
+ int result = -EINVAL;
+ int client_id = 0;
+ uint16_t delayed_rsp_id = 0;
+ uint16_t remote_dev;
+ struct diag_dci_client_tbl *dci_client = NULL;
+ struct diag_logging_mode_param_t mode_param;
+
+ switch (iocmd) {
+ case DIAG_IOCTL_COMMAND_REG:
+ result = diag_ioctl_cmd_reg_compat(ioarg);
+ break;
+ case DIAG_IOCTL_COMMAND_DEREG:
+ result = diag_ioctl_cmd_dereg();
+ break;
+ case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+ delayed_rsp_id = diag_get_next_delayed_rsp_id();
+ if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 0;
+ break;
+ case DIAG_IOCTL_DCI_REG:
+ result = diag_ioctl_dci_reg(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_DEINIT:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ dci_client = diag_dci_get_client_entry(client_id);
+ if (!dci_client) {
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NOT_SUPPORTED;
+ }
+ result = diag_dci_deinit_client(dci_client);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_SUPPORT:
+ result = diag_ioctl_dci_support(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_HEALTH_STATS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_health_stats(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_LOG_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_log_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_EVENT_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_event_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_LOGS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_log_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user(&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_event_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_LSM_DEINIT:
+ result = diag_ioctl_lsm_deinit();
+ break;
+ case DIAG_IOCTL_SWITCH_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ mutex_lock(&driver->diagchar_mutex);
+ result = diag_switch_logging(&mode_param);
+ mutex_unlock(&driver->diagchar_mutex);
+ break;
+ case DIAG_IOCTL_REMOTE_DEV:
+ remote_dev = diag_get_remote_device_mask();
+ if (copy_to_user((void __user *)ioarg, &remote_dev,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 1;
+ break;
+ case DIAG_IOCTL_VOTE_REAL_TIME:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_vote_real_time(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_GET_REAL_TIME:
+ result = diag_ioctl_get_real_time(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+ result = diag_ioctl_set_buffering_mode(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+ result = diag_ioctl_peripheral_drain_immediate(ioarg);
+ break;
+ case DIAG_IOCTL_REGISTER_CALLBACK:
+ result = diag_ioctl_register_callback(ioarg);
+ break;
+ case DIAG_IOCTL_HDLC_TOGGLE:
+ result = diag_ioctl_hdlc_toggle(ioarg);
+ break;
+ case DIAG_IOCTL_QUERY_PD_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ result = diag_ioctl_query_pd_logging(&mode_param);
+ break;
+ }
+ return result;
+}
+#endif
+
+long diagchar_ioctl(struct file *filp,
+ unsigned int iocmd, unsigned long ioarg)
+{
+ int result = -EINVAL;
+ int client_id = 0;
+ uint16_t delayed_rsp_id;
+ uint16_t remote_dev;
+ struct diag_dci_client_tbl *dci_client = NULL;
+ struct diag_logging_mode_param_t mode_param;
+
+ switch (iocmd) {
+ case DIAG_IOCTL_COMMAND_REG:
+ result = diag_ioctl_cmd_reg(ioarg);
+ break;
+ case DIAG_IOCTL_COMMAND_DEREG:
+ result = diag_ioctl_cmd_dereg();
+ break;
+ case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+ delayed_rsp_id = diag_get_next_delayed_rsp_id();
+ if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 0;
+ break;
+ case DIAG_IOCTL_DCI_REG:
+ result = diag_ioctl_dci_reg(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_DEINIT:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ dci_client = diag_dci_get_client_entry(client_id);
+ if (!dci_client) {
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NOT_SUPPORTED;
+ }
+ result = diag_dci_deinit_client(dci_client);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_SUPPORT:
+ result = diag_ioctl_dci_support(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_HEALTH_STATS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_health_stats(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_LOG_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_log_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_EVENT_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_event_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_LOGS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_log_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user(&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_event_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_LSM_DEINIT:
+ result = diag_ioctl_lsm_deinit();
+ break;
+ case DIAG_IOCTL_SWITCH_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ mutex_lock(&driver->diagchar_mutex);
+ result = diag_switch_logging(&mode_param);
+ mutex_unlock(&driver->diagchar_mutex);
+ break;
+ case DIAG_IOCTL_REMOTE_DEV:
+ remote_dev = diag_get_remote_device_mask();
+ if (copy_to_user((void __user *)ioarg, &remote_dev,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 1;
+ break;
+ case DIAG_IOCTL_VOTE_REAL_TIME:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_vote_real_time(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_GET_REAL_TIME:
+ result = diag_ioctl_get_real_time(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+ result = diag_ioctl_set_buffering_mode(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+ result = diag_ioctl_peripheral_drain_immediate(ioarg);
+ break;
+ case DIAG_IOCTL_REGISTER_CALLBACK:
+ result = diag_ioctl_register_callback(ioarg);
+ break;
+ case DIAG_IOCTL_HDLC_TOGGLE:
+ result = diag_ioctl_hdlc_toggle(ioarg);
+ break;
+ case DIAG_IOCTL_QUERY_PD_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ result = diag_ioctl_query_pd_logging(&mode_param);
+ break;
+ }
+ return result;
+}
+
+static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
+ int pkt_type)
+{
+ int err = 0;
+ int ret = PKT_DROP;
+ struct diag_apps_data_t *data = &hdlc_data;
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	/*
+	 * The maximum encoded size of the buffer can be at most twice the
+	 * length of the packet. Add three bytes for the footer - 16 bit CRC
+	 * (2 bytes) + delimiter (1 byte).
+	 */
+ const uint32_t max_encoded_size = ((2 * len) + 3);
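+	/*
+	 * For illustration: since every payload byte may be a control or
+	 * escape character that expands to two bytes, a 100 byte packet
+	 * encodes to at most 2 * 100 + 3 = 203 bytes, including the two
+	 * CRC bytes and the trailing delimiter.
+	 */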
+
+ if (!buf || len <= 0) {
+ pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ if (DIAG_MAX_HDLC_BUF_SIZE < max_encoded_size) {
+		pr_err_ratelimited("diag: In %s, encoded data length %d is larger than the buffer size %d\n",
+ __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
+ return -EBADMSG;
+ }
+
+ send.state = DIAG_STATE_START;
+ send.pkt = buf;
+ send.last = (void *)(buf + len - 1);
+ send.terminate = 1;
+
+ if (!data->buf)
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+
+ if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+ }
+
+ enc.dest = data->buf + data->len;
+ enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
+ diag_hdlc_encode(&send, &enc);
+
+ /*
+ * This is to check if after HDLC encoding, we are still within
+ * the limits of aggregation buffer. If not, we write out the
+ * current buffer and start aggregation in a newly allocated
+ * buffer.
+ */
+ if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
+ DIAG_MAX_HDLC_BUF_SIZE)) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+
+ enc.dest = data->buf + data->len;
+ enc.dest_last = (void *)(data->buf + data->len +
+ max_encoded_size);
+ diag_hdlc_encode(&send, &enc);
+ }
+
+ data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
+ DIAG_MAX_HDLC_BUF_SIZE) ?
+ ((uintptr_t)enc.dest - (uintptr_t)data->buf) :
+ DIAG_MAX_HDLC_BUF_SIZE;
+
+ if (pkt_type == DATA_TYPE_RESPONSE) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ }
+
+ return PKT_ALLOC;
+
+fail_free_buf:
+ diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+ data->buf = NULL;
+ data->len = 0;
+
+fail_ret:
+ return ret;
+}
+
+static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
+ int pkt_type)
+{
+ int err = 0;
+ int ret = PKT_DROP;
+ struct diag_pkt_frame_t header;
+ struct diag_apps_data_t *data = &non_hdlc_data;
+	/*
+	 * The maximum packet size, when the data is not HDLC encoded, is equal
+	 * to the size of the packet frame header plus the payload length.
+	 * Add 1 for the delimiter 0x7E at the end.
+	 */
+ const uint32_t max_pkt_size = sizeof(header) + len + 1;
+
+ if (!buf || len <= 0) {
+ pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ if (!data->buf) {
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+ }
+
+ if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+ }
+
+ header.start = CONTROL_CHAR;
+ header.version = 1;
+ header.length = len;
+ memcpy(data->buf + data->len, &header, sizeof(header));
+ data->len += sizeof(header);
+ memcpy(data->buf + data->len, buf, len);
+ data->len += len;
+ *(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
+ data->len += sizeof(uint8_t);
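+	/*
+	 * For illustration, assuming diag_pkt_frame_t is the packed
+	 * start/version/16-bit length header (4 bytes) on a little-endian
+	 * target: a 2 byte payload { 0xAA, 0xBB } leaves the buffer as
+	 * 7E 01 02 00 AA BB 7E.
+	 */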
+ if (pkt_type == DATA_TYPE_RESPONSE) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ }
+
+ return PKT_ALLOC;
+
+fail_free_buf:
+ diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+ data->buf = NULL;
+ data->len = 0;
+
+fail_ret:
+ return ret;
+}
+
+static int diag_user_process_dci_data(const char __user *buf, int len)
+{
+ int err = 0;
+ const int mempool = POOL_TYPE_USER;
+ unsigned char *user_space_data = NULL;
+
+ if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data)
+ return -ENOMEM;
+
+ err = copy_from_user(user_space_data, buf, len);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
+ __func__, err);
+ err = DIAG_DCI_SEND_DATA_FAIL;
+ goto fail;
+ }
+
+ err = diag_process_dci_transaction(user_space_data, len);
+fail:
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return err;
+}
+
+static int diag_user_process_dci_apps_data(const char __user *buf, int len,
+ int pkt_type)
+{
+ int err = 0;
+ const int mempool = POOL_TYPE_COPY;
+ unsigned char *user_space_data = NULL;
+
+ if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
+ if (!pkt_type) {
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, pkt_type);
+ return -EBADMSG;
+ }
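+	/*
+	 * Example, assuming the DCI bits are disjoint from the low apps
+	 * data types: pkt_type = (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG)
+	 * survives the mask above, while a plain DATA_TYPE_LOG is masked
+	 * to zero and rejected with -EBADMSG.
+	 */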
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data)
+ return -ENOMEM;
+
+ err = copy_from_user(user_space_data, buf, len);
+ if (err) {
+ pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+ __func__, err);
+		err = -EFAULT;
+		goto fail;
+ }
+
+ diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
+fail:
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return err;
+}
+
+static int diag_user_process_raw_data(const char __user *buf, int len)
+{
+ int err = 0;
+ int ret = 0;
+ int token_offset = 0;
+ int remote_proc = 0;
+ const int mempool = POOL_TYPE_COPY;
+ unsigned char *user_space_data = NULL;
+
+ if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data)
+ return -ENOMEM;
+
+ err = copy_from_user(user_space_data, buf, len);
+ if (err) {
+ pr_err("diag: copy failed for user space data\n");
+		ret = -EFAULT;
+		goto fail;
+ }
+
+ /* Check for proc_type */
+ remote_proc = diag_get_remote(*(int *)user_space_data);
+ if (remote_proc) {
+ token_offset = sizeof(int);
+ if (len <= MIN_SIZ_ALLOW) {
+ pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
+ __func__, len);
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return -EBADMSG;
+ }
+ len -= sizeof(int);
+ }
+ if (driver->mask_check) {
+ if (!mask_request_validate(user_space_data +
+ token_offset)) {
+ pr_alert("diag: mask request Invalid\n");
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return -EFAULT;
+ }
+ }
+ if (remote_proc) {
+ ret = diag_send_raw_data_remote(remote_proc,
+ (void *)(user_space_data + token_offset),
+ len, USER_SPACE_RAW_DATA);
+ if (ret) {
+ pr_err("diag: Error sending data to remote proc %d, err: %d\n",
+ remote_proc, ret);
+ }
+ } else {
+ wait_event_interruptible(driver->wait_q,
+ (driver->in_busy_pktdata == 0));
+ ret = diag_process_apps_pkt(user_space_data, len,
+ current->tgid);
+ if (ret == 1)
+ diag_send_error_rsp((void *)(user_space_data), len,
+ current->tgid);
+ }
+fail:
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return ret;
+}
+
+static int diag_user_process_userspace_data(const char __user *buf, int len)
+{
+ int err = 0;
+ int max_retries = 3;
+ int retry_count = 0;
+ int remote_proc = 0;
+ int token_offset = 0;
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled;
+
+ if (!buf || len <= 0 || len > USER_SPACE_DATA) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ do {
+ if (!driver->user_space_data_busy)
+ break;
+ retry_count++;
+ usleep_range(10000, 10100);
+ } while (retry_count < max_retries);
+
+ if (driver->user_space_data_busy)
+ return -EAGAIN;
+
+ err = copy_from_user(driver->user_space_data_buf, buf, len);
+ if (err) {
+ pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
+ __func__, err);
+ return -EIO;
+ }
+
+ /* Check for proc_type */
+ remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
+ if (remote_proc) {
+ if (len <= MIN_SIZ_ALLOW) {
+			pr_err("diag: Integer underflow in %s, payload size: %d\n",
+ __func__, len);
+ return -EBADMSG;
+ }
+ token_offset = sizeof(int);
+ len -= sizeof(int);
+ }
+
+ /* Check masks for On-Device logging */
+ if (driver->mask_check) {
+ if (!mask_request_validate(driver->user_space_data_buf +
+ token_offset)) {
+ pr_alert("diag: mask request Invalid\n");
+ return -EFAULT;
+ }
+ }
+
+ /* send masks to local processor now */
+ if (!remote_proc) {
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (!session_info) {
+			pr_err("diag: In %s, request came from invalid md session pid: %d\n",
+				__func__, current->tgid);
+			mutex_unlock(&driver->md_session_lock);
+			return -EINVAL;
+		}
+		hdlc_disabled = session_info->hdlc_disabled;
+ mutex_unlock(&driver->md_session_lock);
+ if (!hdlc_disabled)
+ diag_process_hdlc_pkt((void *)
+ (driver->user_space_data_buf),
+ len, current->tgid);
+ else
+ diag_process_non_hdlc_pkt((char *)
+ (driver->user_space_data_buf),
+ len, current->tgid);
+ return 0;
+ }
+
+ err = diag_process_userspace_remote(remote_proc,
+ driver->user_space_data_buf +
+ token_offset, len);
+ if (err) {
+ driver->user_space_data_busy = 0;
+ pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
+ remote_proc, err);
+ }
+
+ return err;
+}
+
+static int diag_user_process_apps_data(const char __user *buf, int len,
+ int pkt_type)
+{
+ int ret = 0;
+ int stm_size = 0;
+ const int mempool = POOL_TYPE_COPY;
+ unsigned char *user_space_data = NULL;
+ uint8_t hdlc_disabled;
+
+ if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ switch (pkt_type) {
+ case DATA_TYPE_EVENT:
+ case DATA_TYPE_F3:
+ case DATA_TYPE_LOG:
+ case DATA_TYPE_RESPONSE:
+ case DATA_TYPE_DELAYED_RESPONSE:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, pkt_type);
+ return -EBADMSG;
+ }
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data) {
+ diag_record_stats(pkt_type, PKT_DROP);
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(user_space_data, buf, len);
+ if (ret) {
+ pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+ __func__, ret);
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ diag_record_stats(pkt_type, PKT_DROP);
+ return -EBADMSG;
+ }
+
+ if (driver->stm_state[APPS_DATA] &&
+ (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
+ stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
+ len);
+ if (stm_size == 0) {
+ pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
+ __func__);
+ }
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+
+ return 0;
+ }
+
+ mutex_lock(&apps_data_mutex);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
+ if (hdlc_disabled)
+ ret = diag_process_apps_data_non_hdlc(user_space_data, len,
+ pkt_type);
+ else
+ ret = diag_process_apps_data_hdlc(user_space_data, len,
+ pkt_type);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ mutex_unlock(&apps_data_mutex);
+
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+
+ check_drain_timer();
+
+ if (ret == PKT_DROP)
+ diag_record_stats(pkt_type, PKT_DROP);
+ else if (ret == PKT_ALLOC)
+ diag_record_stats(pkt_type, PKT_ALLOC);
+ else
+ return ret;
+
+ return 0;
+}
+
+static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct diag_dci_client_tbl *entry;
+ struct list_head *start, *temp;
+ int index = -1, i = 0, ret = 0;
+ int data_type;
+ int copy_dci_data = 0;
+ int exit_stat = 0;
+ int write_len = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == current->tgid)
+ index = i;
+ mutex_unlock(&driver->diagchar_mutex);
+
+ if (index == -1) {
+		pr_err("diag: Client PID not found in table\n");
+ return -EINVAL;
+ }
+ if (!buf) {
+ pr_err("diag: bad address from user side\n");
+ return -EFAULT;
+ }
+ wait_event_interruptible(driver->wait_q,
+ atomic_read(&driver->data_ready_notif[index]) > 0);
+
+ mutex_lock(&driver->diagchar_mutex);
+
+ if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
+ (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+ driver->logging_mode == DIAG_MULTI_MODE)) {
+ pr_debug("diag: process woken up\n");
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+ driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
+		/* placeholder for the number-of-data field */
+ ret += sizeof(int);
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(current->tgid);
+ exit_stat = diag_md_copy_to_user(buf, &ret, count,
+ session_info);
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ } else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
+		/* In case the thread wakes up and the logging mode is no
+		   longer memory device, the condition needs to be cleared */
+ driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ }
+
+ if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
+ data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
+ driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (session_info) {
+ COPY_USER_SPACE_OR_ERR(buf+4,
+ session_info->hdlc_disabled,
+ sizeof(uint8_t));
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ }
+ }
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DEINIT_TYPE) {
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DEINIT_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ driver->data_ready[index] ^= DEINIT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ mutex_unlock(&driver->diagchar_mutex);
+ diag_remove_client_entry(file);
+ return ret;
+ }
+
+ if (driver->data_ready[index] & MSG_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ }
+ write_len = diag_copy_to_user_msg_mask(buf + ret, count,
+ session_info);
+ mutex_unlock(&driver->md_session_lock);
+ if (write_len > 0)
+ ret += write_len;
+ driver->data_ready[index] ^= MSG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ }
+ if (session_info && session_info->event_mask &&
+ session_info->event_mask->ptr) {
+ COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+ *(session_info->event_mask->ptr),
+ session_info->event_mask->mask_len);
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ }
+ } else {
+ COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+ *(event_mask.ptr),
+ event_mask.mask_len);
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ }
+ }
+ mutex_unlock(&driver->md_session_lock);
+ driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & LOG_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
+ goto exit;
+ }
+ write_len = diag_copy_to_user_log_mask(buf + ret, count,
+ session_info);
+ mutex_unlock(&driver->md_session_lock);
+ if (write_len > 0)
+ ret += write_len;
+ driver->data_ready[index] ^= LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & PKT_TYPE) {
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & PKT_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(data_type));
+ COPY_USER_SPACE_OR_EXIT(buf + sizeof(data_type),
+ *(driver->apps_req_buf),
+ driver->apps_req_buf_len);
+ driver->data_ready[index] ^= PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ driver->in_busy_pktdata = 0;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_PKT_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_PKT_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
+ driver->dci_pkt_length);
+ driver->data_ready[index] ^= DCI_PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ driver->in_busy_dcipktdata = 0;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+ COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
+ event_mask_composite), DCI_EVENT_MASK_SIZE);
+ driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
+ log_mask_composite), DCI_LOG_MASK_SIZE);
+ driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ goto exit;
+ }
+
+exit:
+ if (driver->data_ready[index] & DCI_DATA_TYPE) {
+ data_type = driver->data_ready[index] & DCI_DATA_TYPE;
+ mutex_unlock(&driver->diagchar_mutex);
+ /* Copy the type of data being passed */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl,
+ track);
+ if (entry->client->tgid != current->tgid)
+ continue;
+ if (!entry->in_service)
+ continue;
+ if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+ ret += sizeof(int);
+ if (copy_to_user(buf + ret, &entry->client_info.token,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+ ret += sizeof(int);
+ copy_dci_data = 1;
+ exit_stat = diag_copy_dci(buf, count, entry, &ret);
+ mutex_lock(&driver->diagchar_mutex);
+ driver->data_ready[index] ^= DCI_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
+ mutex_unlock(&driver->diagchar_mutex);
+ if (exit_stat == 1) {
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+ mutex_unlock(&driver->diagchar_mutex);
+end:
+ /*
+ * Flush any read that is currently pending on DCI data and
+	 * command channels. This will ensure that the next read is not
+ * missed.
+ */
+ if (copy_dci_data) {
+ diag_ws_on_copy_complete(DIAG_WS_DCI);
+ flush_workqueue(driver->diag_dci_wq);
+ }
+ return ret;
+}
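+
+/*
+ * Illustrative userspace read loop (a sketch, not part of this driver;
+ * fd and handle_md_data() are hypothetical): the first int of every
+ * successful read identifies the payload that follows.
+ *
+ *	char rbuf[100000];
+ *	int n = read(fd, rbuf, sizeof(rbuf));
+ *	if (n >= (int)sizeof(int)) {
+ *		int type = *(int *)rbuf;
+ *		if (type & USER_SPACE_DATA_TYPE)
+ *			handle_md_data(rbuf + sizeof(int), n - sizeof(int));
+ *	}
+ */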
+
+static ssize_t diagchar_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int err = 0;
+ int pkt_type = 0;
+ int payload_len = 0;
+ const char __user *payload_buf = NULL;
+
+ /*
+	 * The data coming from the user space should at least have the
+	 * packet type header.
+ */
+ if (count < sizeof(int)) {
+ pr_err("diag: In %s, client is sending short data, len: %d\n",
+ __func__, (int)count);
+ return -EBADMSG;
+ }
+
+ err = copy_from_user((&pkt_type), buf, sizeof(int));
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
+ __func__, err);
+ return -EIO;
+ }
+
+ if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
+ if (!((pkt_type == DCI_DATA_TYPE) ||
+ (pkt_type == DCI_PKT_TYPE) ||
+ (pkt_type & DATA_TYPE_DCI_LOG) ||
+ (pkt_type & DATA_TYPE_DCI_EVENT))) {
+ pr_debug("diag: In %s, Dropping non DCI packet type\n",
+ __func__);
+ return -EIO;
+ }
+ }
+
+ payload_buf = buf + sizeof(int);
+ payload_len = count - sizeof(int);
+
+ if (pkt_type == DCI_PKT_TYPE)
+ return diag_user_process_dci_apps_data(payload_buf,
+ payload_len,
+ pkt_type);
+ else if (pkt_type == DCI_DATA_TYPE)
+ return diag_user_process_dci_data(payload_buf, payload_len);
+ else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
+ return diag_user_process_raw_data(payload_buf,
+ payload_len);
+ else if (pkt_type == USER_SPACE_DATA_TYPE)
+ return diag_user_process_userspace_data(payload_buf,
+ payload_len);
+ if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
+ err = diag_user_process_dci_apps_data(payload_buf, payload_len,
+ pkt_type);
+ if (pkt_type & DATA_TYPE_DCI_LOG)
+ pkt_type ^= DATA_TYPE_DCI_LOG;
+ if (pkt_type & DATA_TYPE_DCI_EVENT)
+ pkt_type ^= DATA_TYPE_DCI_EVENT;
+ /*
+ * Check if the log or event is selected even on the regular
+ * stream. If USB is not connected and we are not in memory
+ * device mode, we should not process these logs/events.
+ */
+ if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
+ !driver->usb_connected)
+ return err;
+ }
+
+ switch (pkt_type) {
+ case DATA_TYPE_EVENT:
+ case DATA_TYPE_F3:
+ case DATA_TYPE_LOG:
+ case DATA_TYPE_DELAYED_RESPONSE:
+ case DATA_TYPE_RESPONSE:
+ return diag_user_process_apps_data(payload_buf, payload_len,
+ pkt_type);
+ default:
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, pkt_type);
+ return -EINVAL;
+ }
+
+ return err;
+}
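+
+/*
+ * Illustrative userspace write (a sketch, not part of this driver;
+ * fd, payload and payload_len are hypothetical): clients prefix the
+ * payload with the packet type as a host-endian int.
+ *
+ *	char wbuf[sizeof(int) + payload_len];
+ *	int type = DATA_TYPE_LOG;
+ *
+ *	memcpy(wbuf, &type, sizeof(int));
+ *	memcpy(wbuf + sizeof(int), payload, payload_len);
+ *	write(fd, wbuf, sizeof(int) + payload_len);
+ */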
+
+void diag_ws_init(void)
+{
+ driver->dci_ws.ref_count = 0;
+ driver->dci_ws.copy_count = 0;
+ spin_lock_init(&driver->dci_ws.lock);
+
+ driver->md_ws.ref_count = 0;
+ driver->md_ws.copy_count = 0;
+ spin_lock_init(&driver->md_ws.lock);
+}
+
+static void diag_stats_init(void)
+{
+ if (!driver)
+ return;
+
+ driver->msg_stats.alloc_count = 0;
+ driver->msg_stats.drop_count = 0;
+
+ driver->log_stats.alloc_count = 0;
+ driver->log_stats.drop_count = 0;
+
+ driver->event_stats.alloc_count = 0;
+ driver->event_stats.drop_count = 0;
+}
+
+void diag_ws_on_notify(void)
+{
+ /*
+ * Do not deal with reference count here as there can be spurious
+ * interrupts.
+ */
+ pm_stay_awake(driver->diag_dev);
+}
+
+void diag_ws_on_read(int type, int pkt_len)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ if (pkt_len > 0) {
+ ws_ref->ref_count++;
+ } else {
+ if (ws_ref->ref_count < 1) {
+ ws_ref->ref_count = 0;
+ ws_ref->copy_count = 0;
+ }
+ diag_ws_release();
+ }
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+void diag_ws_on_copy(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->copy_count++;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+void diag_ws_on_copy_fail(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->ref_count--;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+ diag_ws_release();
+}
+
+void diag_ws_on_copy_complete(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->ref_count -= ws_ref->copy_count;
+ if (ws_ref->ref_count < 1)
+ ws_ref->ref_count = 0;
+ ws_ref->copy_count = 0;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+ diag_ws_release();
+}
+
+void diag_ws_reset(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->ref_count = 0;
+ ws_ref->copy_count = 0;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+ diag_ws_release();
+}
+
+void diag_ws_release(void)
+{
+ if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
+ pm_relax(driver->diag_dev);
+}
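+
+/*
+ * Summary of the wakeup source accounting above: diag_ws_on_notify()
+ * takes the wakeup source unconditionally, diag_ws_on_read() adds a
+ * reference for every packet read in with a payload, diag_ws_on_copy()
+ * counts packets copied to userspace, and diag_ws_on_copy_complete()
+ * retires those references. Once both the DCI and MUX reference counts
+ * reach zero, diag_ws_release() lets the device sleep via pm_relax().
+ */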
+
+#ifdef CONFIG_IPC_LOGGING
+static void diag_debug_init(void)
+{
+ diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
+ if (!diag_ipc_log)
+ pr_err("diag: Failed to create IPC logging context\n");
+ /*
+ * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
+ * to be logged to IPC
+ */
+ diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
+ DIAG_DEBUG_USERSPACE | DIAG_DEBUG_BRIDGE;
+}
+#else
+static void diag_debug_init(void)
+{
+
+}
+#endif
+
+static int diag_real_time_info_init(void)
+{
+ int i;
+ if (!driver)
+ return -EIO;
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ driver->real_time_mode[i] = 1;
+ driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
+ driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
+ }
+ driver->real_time_update_busy = 0;
+ driver->proc_active_mask = 0;
+ driver->diag_real_time_wq = create_singlethread_workqueue(
+ "diag_real_time_wq");
+ if (!driver->diag_real_time_wq)
+ return -ENOMEM;
+ INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
+ mutex_init(&driver->real_time_mutex);
+ return 0;
+}
+
+static const struct file_operations diagcharfops = {
+ .owner = THIS_MODULE,
+ .read = diagchar_read,
+ .write = diagchar_write,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = diagchar_compat_ioctl,
+#endif
+ .unlocked_ioctl = diagchar_ioctl,
+ .open = diagchar_open,
+ .release = diagchar_close
+};
+
+static int diagchar_setup_cdev(dev_t devno)
+{
+	int err;
+
+ cdev_init(driver->cdev, &diagcharfops);
+
+ driver->cdev->owner = THIS_MODULE;
+ driver->cdev->ops = &diagcharfops;
+
+ err = cdev_add(driver->cdev, devno, 1);
+
+ if (err) {
+		printk(KERN_ERR "diagchar cdev registration failed!\n");
+ return -1;
+ }
+
+ driver->diagchar_class = class_create(THIS_MODULE, "diag");
+
+ if (IS_ERR(driver->diagchar_class)) {
+ printk(KERN_ERR "Error creating diagchar class.\n");
+ return -1;
+ }
+
+ driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
+ (void *)driver, "diag");
+
+ if (!driver->diag_dev)
+ return -EIO;
+
+ driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
+ return 0;
+}
+
+static int diagchar_cleanup(void)
+{
+ if (driver) {
+ if (driver->cdev) {
+ /* TODO - Check if device exists before deleting */
+ device_destroy(driver->diagchar_class,
+ MKDEV(driver->major,
+ driver->minor_start));
+ cdev_del(driver->cdev);
+ }
+ if (!IS_ERR(driver->diagchar_class))
+ class_destroy(driver->diagchar_class);
+ kfree(driver);
+ }
+ return 0;
+}
+
+static int diag_mhi_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
+ return -EPROBE_DEFER;
+ driver->pdev = pdev;
+ ret = diag_remote_init();
+ if (ret) {
+ diag_remote_exit();
+ return ret;
+ }
+ ret = diagfwd_bridge_init(true);
+ if (ret) {
+ diagfwd_bridge_exit();
+ return ret;
+ }
+ pr_debug("diag: mhi device is ready\n");
+ return 0;
+}
+
+static const struct of_device_id diag_mhi_table[] = {
+ {.compatible = "qcom,diag-mhi"},
+ {},
+};
+
+static struct platform_driver diag_mhi_driver = {
+ .probe = diag_mhi_probe,
+ .driver = {
+ .name = "DIAG MHI Platform",
+ .owner = THIS_MODULE,
+ .of_match_table = diag_mhi_table,
+ },
+};
+
+static int diagfwd_usb_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ driver->pdev = pdev;
+ ret = diag_remote_init();
+ if (ret) {
+ diag_remote_exit();
+ return ret;
+ }
+ ret = diagfwd_bridge_init(false);
+ if (ret) {
+ diagfwd_bridge_exit();
+ return ret;
+ }
+ pr_debug("diag: usb device is ready\n");
+ return 0;
+}
+
+static const struct of_device_id diagfwd_usb_table[] = {
+ {.compatible = "qcom,diagfwd-usb"},
+ {},
+};
+
+static struct platform_driver diagfwd_usb_driver = {
+ .probe = diagfwd_usb_probe,
+ .driver = {
+ .name = "DIAGFWD USB Platform",
+ .owner = THIS_MODULE,
+ .of_match_table = diagfwd_usb_table,
+ },
+};
+
+static int __init diagchar_init(void)
+{
+ dev_t dev;
+ int error, ret, i;
+
+ pr_debug("diagfwd initializing ..\n");
+ ret = 0;
+ driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+ if (!driver)
+ return -ENOMEM;
+ kmemleak_not_leak(driver);
+
+ timer_in_progress = 0;
+ driver->delayed_rsp_id = 0;
+ driver->hdlc_disabled = 0;
+ driver->dci_state = DIAG_DCI_NO_ERROR;
+ setup_timer(&drain_timer, drain_timer_func, 1234);
+ driver->supports_sockets = 1;
+ driver->time_sync_enabled = 0;
+ driver->uses_time_api = 0;
+ driver->poolsize = poolsize;
+ driver->poolsize_hdlc = poolsize_hdlc;
+ driver->poolsize_dci = poolsize_dci;
+ driver->poolsize_user = poolsize_user;
+ /*
+ * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
+ * The number of buffers encompasses Diag data generated on
+	 * the Apps processor + 1 for the responses generated exclusively on
+ * the Apps processor + data from data channels (4 channels per
+ * peripheral) + data from command channels (2)
+ */
+ diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
+ poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
+ driver->num_clients = max_clients;
+ driver->logging_mode = DIAG_USB_MODE;
+ for (i = 0; i < NUM_UPD; i++) {
+ driver->pd_logging_mode[i] = 0;
+ driver->pd_session_clear[i] = 0;
+ }
+ driver->num_pd_session = 0;
+ driver->mask_check = 0;
+ driver->in_busy_pktdata = 0;
+ driver->in_busy_dcipktdata = 0;
+ driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
+ hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+ hdlc_data.len = 0;
+ non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+ non_hdlc_data.len = 0;
+ mutex_init(&driver->hdlc_disable_mutex);
+ mutex_init(&driver->diagchar_mutex);
+ mutex_init(&driver->diag_maskclear_mutex);
+ mutex_init(&driver->diag_notifier_mutex);
+ mutex_init(&driver->diag_file_mutex);
+ mutex_init(&driver->delayed_rsp_mutex);
+ mutex_init(&apps_data_mutex);
+ mutex_init(&driver->msg_mask_lock);
+ mutex_init(&driver->hdlc_recovery_mutex);
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ mutex_init(&driver->diagfwd_channel_mutex[i]);
+ init_waitqueue_head(&driver->wait_q);
+ INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
+ INIT_WORK(&(driver->update_user_clients),
+ diag_update_user_client_work_fn);
+ INIT_WORK(&(driver->update_md_clients),
+ diag_update_md_client_work_fn);
+ diag_ws_init();
+ diag_stats_init();
+ diag_debug_init();
+ diag_md_session_init();
+
+ driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
+ driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+ if (!driver->incoming_pkt.data)
+ goto fail;
+ kmemleak_not_leak(driver->incoming_pkt.data);
+ driver->incoming_pkt.processing = 0;
+ driver->incoming_pkt.read_len = 0;
+ driver->incoming_pkt.remaining = 0;
+ driver->incoming_pkt.total_len = 0;
+
+ ret = diag_real_time_info_init();
+ if (ret)
+ goto fail;
+ ret = diag_debugfs_init();
+ if (ret)
+ goto fail;
+ ret = diag_masks_init();
+ if (ret)
+ goto fail;
+ ret = diag_mux_init();
+ if (ret)
+ goto fail;
+ ret = diagfwd_init();
+ if (ret)
+ goto fail;
+ ret = diagfwd_cntl_init();
+ if (ret)
+ goto fail;
+ driver->dci_state = diag_dci_init();
+ ret = diagfwd_peripheral_init();
+ if (ret)
+ goto fail;
+ diagfwd_cntl_channel_init();
+ if (driver->dci_state == DIAG_DCI_NO_ERROR)
+ diag_dci_channel_init();
+ pr_debug("diagchar initializing ..\n");
+ driver->num = 1;
+ driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
+	strlcpy(driver->name, "diag", sizeof("diag"));
+ /* Get major number from kernel and initialize */
+ error = alloc_chrdev_region(&dev, driver->minor_start,
+ driver->num, driver->name);
+ if (!error) {
+ driver->major = MAJOR(dev);
+ driver->minor_start = MINOR(dev);
+ } else {
+ pr_err("diag: Major number not allocated\n");
+ goto fail;
+ }
+ driver->cdev = cdev_alloc();
+ error = diagchar_setup_cdev(dev);
+ if (error)
+ goto fail;
+
+	pr_debug("diagchar initialized now\n");
+ platform_driver_register(&diag_mhi_driver);
+ platform_driver_register(&diagfwd_usb_driver);
+ return 0;
+
+fail:
+ pr_err("diagchar is not initialized, ret: %d\n", ret);
+ diag_debugfs_cleanup();
+ diagchar_cleanup();
+ diag_mux_exit();
+ diagfwd_peripheral_exit();
+ diagfwd_bridge_exit();
+ diagfwd_exit();
+ diagfwd_cntl_exit();
+ diag_dci_exit();
+ diag_masks_exit();
+ return -1;
+}
+
+static void diagchar_exit(void)
+{
+ printk(KERN_INFO "diagchar exiting ..\n");
+ diag_mempool_exit();
+ diag_mux_exit();
+ diagfwd_peripheral_exit();
+ diagfwd_exit();
+ diagfwd_cntl_exit();
+ diag_dci_exit();
+ diag_masks_exit();
+ diag_md_session_exit();
+ diag_remote_exit();
+ diag_debugfs_cleanup();
+ diagchar_cleanup();
+ printk(KERN_INFO "done diagchar exit\n");
+}
+
+module_init(diagchar_init);
+module_exit(diagchar_exit);
diff --git a/drivers/char/diag/diagchar_hdlc.c b/drivers/char/diag/diagchar_hdlc.c
new file mode 100644
index 000000000000..d265746ac4f4
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.c
@@ -0,0 +1,267 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+#include <linux/crc-ccitt.h>
+#include "diagchar_hdlc.h"
+#include "diagchar.h"
+
+MODULE_LICENSE("GPL v2");
+
+#define CRC_16_L_SEED 0xFFFF
+
+#define CRC_16_L_STEP(xx_crc, xx_c) \
+ crc_ccitt_byte(xx_crc, xx_c)
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+ struct diag_hdlc_dest_type *enc)
+{
+ uint8_t *dest;
+ uint8_t *dest_last;
+ const uint8_t *src;
+ const uint8_t *src_last;
+ uint16_t crc;
+ unsigned char src_byte = 0;
+ enum diag_send_state_enum_type state;
+ unsigned int used = 0;
+
+ if (src_desc && enc) {
+
+ /* Copy parts to local variables. */
+ src = src_desc->pkt;
+ src_last = src_desc->last;
+ state = src_desc->state;
+ dest = enc->dest;
+ dest_last = enc->dest_last;
+
+ if (state == DIAG_STATE_START) {
+ crc = CRC_16_L_SEED;
+ state++;
+ } else {
+ /* Get a local copy of the CRC */
+ crc = enc->crc;
+ }
+
+ /* dest or dest_last may be NULL to trigger a
+ state transition only */
+ if (dest && dest_last) {
+ /* This condition needs to include the possibility
+ of 2 dest bytes for an escaped byte */
+ while (src <= src_last && dest <= dest_last) {
+
+ src_byte = *src++;
+
+ if ((src_byte == CONTROL_CHAR) ||
+ (src_byte == ESC_CHAR)) {
+
+ /* If the escape character is not the
+ last byte */
+ if (dest != dest_last) {
+ crc = CRC_16_L_STEP(crc,
+ src_byte);
+
+ *dest++ = ESC_CHAR;
+ used++;
+
+ *dest++ = src_byte
+ ^ ESC_MASK;
+ used++;
+ } else {
+
+ src--;
+ break;
+ }
+
+ } else {
+ crc = CRC_16_L_STEP(crc, src_byte);
+ *dest++ = src_byte;
+ used++;
+ }
+ }
+
+ if (src > src_last) {
+
+ if (state == DIAG_STATE_BUSY) {
+ if (src_desc->terminate) {
+ crc = ~crc;
+ state++;
+ } else {
+ /* Done with fragment */
+ state = DIAG_STATE_COMPLETE;
+ }
+ }
+
+ while (dest <= dest_last &&
+ state >= DIAG_STATE_CRC1 &&
+ state < DIAG_STATE_TERM) {
+ /* Encode a byte of the CRC next */
+ src_byte = crc & 0xFF;
+
+ if ((src_byte == CONTROL_CHAR)
+ || (src_byte == ESC_CHAR)) {
+
+ if (dest != dest_last) {
+
+ *dest++ = ESC_CHAR;
+ used++;
+ *dest++ = src_byte ^
+ ESC_MASK;
+ used++;
+
+ crc >>= 8;
+ } else {
+
+ break;
+ }
+ } else {
+
+ crc >>= 8;
+ *dest++ = src_byte;
+ used++;
+ }
+
+ state++;
+ }
+
+ if (state == DIAG_STATE_TERM) {
+ if (dest_last >= dest) {
+ *dest++ = CONTROL_CHAR;
+ used++;
+ state++; /* Complete */
+ }
+ }
+ }
+ }
+ /* Copy local variables back into the encode structure. */
+
+ enc->dest = dest;
+ enc->dest_last = dest_last;
+ enc->crc = crc;
+ src_desc->pkt = src;
+ src_desc->last = src_last;
+ src_desc->state = state;
+ }
+
+ return;
+}
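+
+/*
+ * Worked example of the escaping rule above: a byte equal to
+ * CONTROL_CHAR (0x7E) or ESC_CHAR (0x7D) is emitted as ESC_CHAR
+ * followed by the byte XORed with ESC_MASK (0x20), so 0x7E becomes
+ * 0x7D 0x5E and 0x7D becomes 0x7D 0x5D. The inverted CRC bytes are
+ * escaped the same way before the closing 0x7E delimiter is written.
+ */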
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc)
+{
+ uint8_t *src_ptr = NULL, *dest_ptr = NULL;
+ unsigned int src_length = 0, dest_length = 0;
+
+ unsigned int len = 0;
+ unsigned int i;
+ uint8_t src_byte;
+
+ int pkt_bnd = HDLC_INCOMPLETE;
+ int msg_start;
+
+ if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
+ (hdlc->src_size > hdlc->src_idx) &&
+ (hdlc->dest_size > hdlc->dest_idx)) {
+
+ msg_start = (hdlc->src_idx == 0) ? 1 : 0;
+
+ src_ptr = hdlc->src_ptr;
+ src_ptr = &src_ptr[hdlc->src_idx];
+ src_length = hdlc->src_size - hdlc->src_idx;
+
+ dest_ptr = hdlc->dest_ptr;
+ dest_ptr = &dest_ptr[hdlc->dest_idx];
+ dest_length = hdlc->dest_size - hdlc->dest_idx;
+
+ for (i = 0; i < src_length; i++) {
+
+ src_byte = src_ptr[i];
+
+ if (hdlc->escaping) {
+ dest_ptr[len++] = src_byte ^ ESC_MASK;
+ hdlc->escaping = 0;
+ } else if (src_byte == ESC_CHAR) {
+ if (i == (src_length - 1)) {
+ hdlc->escaping = 1;
+ i++;
+ break;
+ } else {
+ dest_ptr[len++] = src_ptr[++i]
+ ^ ESC_MASK;
+ }
+ } else if (src_byte == CONTROL_CHAR) {
+ if (msg_start && i == 0 && src_length > 1)
+ continue;
+ /* Byte 0x7E will be considered
+ as end of packet */
+ dest_ptr[len++] = src_byte;
+ i++;
+ pkt_bnd = HDLC_COMPLETE;
+ break;
+ } else {
+ dest_ptr[len++] = src_byte;
+ }
+
+ if (len >= dest_length) {
+ i++;
+ break;
+ }
+ }
+
+ hdlc->src_idx += i;
+ hdlc->dest_idx += len;
+ }
+
+ return pkt_bnd;
+}
+
+int crc_check(uint8_t *buf, uint16_t len)
+{
+ uint16_t crc = CRC_16_L_SEED;
+ uint8_t sent_crc[2] = {0, 0};
+
+	/*
+	 * The minimum length of a valid incoming packet is 4: 1 byte of
+	 * data and 3 footer bytes (2 CRC bytes plus the trailing control
+	 * character)
+	 */
+ if (!buf || len < 4) {
+		pr_err_ratelimited("diag: In %s, invalid packet or length, buf: %pK, len: %d\n",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ /*
+	 * Run the CRC check over the original input, skipping the last
+	 * 3 footer bytes
+ */
+ crc = crc_ccitt(crc, buf, len-3);
+ crc ^= CRC_16_L_SEED;
+
+ /* Check the computed CRC against the original CRC bytes. */
+ sent_crc[0] = buf[len-3];
+ sent_crc[1] = buf[len-2];
+ if (crc != *((uint16_t *)sent_crc)) {
+ pr_debug("diag: In %s, crc mismatch. expected: %x, sent %x.\n",
+ __func__, crc, *((uint16_t *)sent_crc));
+ return -EIO;
+ }
+
+ return 0;
+}
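+
+/*
+ * Frame layout assumed by crc_check(), for illustration (little-endian
+ * target):
+ *
+ *	[ payload (len - 3 bytes) ][ CRC low ][ CRC high ][ 0x7E ]
+ *
+ * crc_ccitt() seeded with 0xFFFF is run over the payload, inverted,
+ * and must match the CRC stored in the two bytes before the trailing
+ * control character.
+ */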
diff --git a/drivers/char/diag/diagchar_hdlc.h b/drivers/char/diag/diagchar_hdlc.h
new file mode 100644
index 000000000000..1d88da2f07ae
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_HDLC
+#define DIAGCHAR_HDLC
+
+enum diag_send_state_enum_type {
+ DIAG_STATE_START,
+ DIAG_STATE_BUSY,
+ DIAG_STATE_CRC1,
+ DIAG_STATE_CRC2,
+ DIAG_STATE_TERM,
+ DIAG_STATE_COMPLETE
+};
+
+struct diag_send_desc_type {
+ const void *pkt;
+ const void *last; /* Address of last byte to send. */
+ enum diag_send_state_enum_type state;
+ unsigned char terminate; /* True if this fragment
+ terminates the packet */
+};
+
+struct diag_hdlc_dest_type {
+ void *dest;
+ void *dest_last;
+ /* Below: internal use only */
+ uint16_t crc;
+};
+
+struct diag_hdlc_decode_type {
+ uint8_t *src_ptr;
+ unsigned int src_idx;
+ unsigned int src_size;
+ uint8_t *dest_ptr;
+ unsigned int dest_idx;
+ unsigned int dest_size;
+ int escaping;
+};
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+ struct diag_hdlc_dest_type *enc);
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc);
+
+int crc_check(uint8_t *buf, uint16_t len);
+
+#define ESC_CHAR 0x7D
+#define ESC_MASK 0x20
+
+#define HDLC_INCOMPLETE 0
+#define HDLC_COMPLETE 1
+
+#define HDLC_FOOTER_LEN 3
+#endif
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
new file mode 100644
index 000000000000..ff024c2200c0
--- /dev/null
+++ b/drivers/char/diag/diagfwd.c
@@ -0,0 +1,1932 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/restart.h>
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diagchar_hdlc.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diag_usb.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+
+#define STM_CMD_VERSION_OFFSET 4
+#define STM_CMD_MASK_OFFSET 5
+#define STM_CMD_DATA_OFFSET 6
+#define STM_CMD_NUM_BYTES 7
+
+#define STM_RSP_SUPPORTED_INDEX 7
+#define STM_RSP_STATUS_INDEX 8
+#define STM_RSP_NUM_BYTES 9
+#define RETRY_MAX_COUNT 1000
+
+struct diag_md_hdlc_reset_work {
+ int pid;
+ struct work_struct work;
+};
+
+static int timestamp_switch;
+module_param(timestamp_switch, int, 0644);
+
+int wrap_enabled;
+uint16_t wrap_count;
+static struct diag_hdlc_decode_type *hdlc_decode;
+
+#define DIAG_NUM_COMMON_CMD 1
+static uint8_t common_cmds[DIAG_NUM_COMMON_CMD] = {
+ DIAG_CMD_LOG_ON_DMND
+};
+
+static uint8_t hdlc_timer_in_progress;
+
+/* Determine if this device uses a device tree */
+#ifdef CONFIG_OF
+static int has_device_tree(void)
+{
+ struct device_node *node;
+
+ node = of_find_node_by_path("/");
+ if (node) {
+ of_node_put(node);
+ return 1;
+ }
+ return 0;
+}
+#else
+static int has_device_tree(void)
+{
+ return 0;
+}
+#endif
+
+int chk_config_get_id(void)
+{
+ switch (socinfo_get_msm_cpu()) {
+ case MSM_CPU_8X60:
+ return APQ8060_TOOLS_ID;
+ case MSM_CPU_8960:
+ case MSM_CPU_8960AB:
+ return AO8960_TOOLS_ID;
+ case MSM_CPU_8064:
+ case MSM_CPU_8064AB:
+ case MSM_CPU_8064AA:
+ return APQ8064_TOOLS_ID;
+ case MSM_CPU_8930:
+ case MSM_CPU_8930AA:
+ case MSM_CPU_8930AB:
+ return MSM8930_TOOLS_ID;
+ case MSM_CPU_8974:
+ return MSM8974_TOOLS_ID;
+ case MSM_CPU_8625:
+ return MSM8625_TOOLS_ID;
+ case MSM_CPU_8084:
+ return APQ8084_TOOLS_ID;
+ case MSM_CPU_8916:
+ return MSM8916_TOOLS_ID;
+ case MSM_CPU_8939:
+ return MSM8939_TOOLS_ID;
+ case MSM_CPU_8994:
+ return MSM8994_TOOLS_ID;
+ case MSM_CPU_8226:
+ return APQ8026_TOOLS_ID;
+ case MSM_CPU_8909:
+ return MSM8909_TOOLS_ID;
+ case MSM_CPU_8992:
+ return MSM8992_TOOLS_ID;
+ case MSM_CPU_8996:
+ return MSM_8996_TOOLS_ID;
+ default:
+ if (driver->use_device_tree) {
+ if (machine_is_msm8974())
+ return MSM8974_TOOLS_ID;
+ else if (machine_is_apq8074())
+ return APQ8074_TOOLS_ID;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/*
+ * This will return TRUE for targets which support apps only mode and hence SSR.
+ * This applies to 8960 and newer targets.
+ */
+int chk_apps_only(void)
+{
+ if (driver->use_device_tree)
+ return 1;
+
+ switch (socinfo_get_msm_cpu()) {
+ case MSM_CPU_8960:
+ case MSM_CPU_8960AB:
+ case MSM_CPU_8064:
+ case MSM_CPU_8064AB:
+ case MSM_CPU_8064AA:
+ case MSM_CPU_8930:
+ case MSM_CPU_8930AA:
+ case MSM_CPU_8930AB:
+ case MSM_CPU_8627:
+ case MSM_CPU_9615:
+ case MSM_CPU_8974:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * This will return TRUE for targets which support apps as master.
+ * Thus, SW DLOAD and Mode Reset are supported on apps processor.
+ * This applies to 8960 and newer targets.
+ */
+int chk_apps_master(void)
+{
+ if (driver->use_device_tree)
+ return 1;
+ else
+ return 0;
+}
+
+int chk_polling_response(void)
+{
+ if (!(driver->polling_reg_flag) && chk_apps_master())
+ /*
+ * If the apps processor is master and no other processor
+ * has registered to respond for polling
+ */
+ return 1;
+ else if (!(driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+ driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+ (driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask))
+ /*
+ * If the apps processor is not the master and the modem
+ * is not up or we did not receive the feature masks from Modem
+ */
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * This function should be called whenever the logging process may need to
+ * be woken up. For instance, if the logging mode is MEMORY_DEVICE mode and
+ * there are no buffers available to read data from a data channel into,
+ * call this function to determine whether the logging process needs to be
+ * woken up.
+ */
+void chk_logging_wakeup(void)
+{
+ int i;
+ int j;
+ int pid = 0;
+
+ for (j = 0; j < NUM_MD_SESSIONS; j++) {
+ if (!driver->md_session_map[j])
+ continue;
+ pid = driver->md_session_map[j]->pid;
+
+ /* Find the index of the logging process */
+ for (i = 0; i < driver->num_clients; i++) {
+ if (driver->client_map[i].pid != pid)
+ continue;
+ if (driver->data_ready[i] & USER_SPACE_DATA_TYPE)
+ continue;
+ /*
+ * At very high logging rates a race condition can
+ * occur where the buffers containing the data from
+ * a channel are all in use, but the data_ready flag
+ * is cleared. In this case, the buffers never have
+ * their data read/logged. Detect and remedy this
+ * situation.
+ */
+ driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
+ pr_debug("diag: Force wakeup of logging process\n");
+ wake_up_interruptible(&driver->wait_q);
+ break;
+ }
+ /*
+		 * Diag Memory Device is in normal mode. Check only the first
+ * index as all the indices point to the same session
+ * structure.
+ */
+ if ((driver->md_session_mask == DIAG_CON_ALL) && (j == 0))
+ break;
+ }
+}
+
+static void pack_rsp_and_send(unsigned char *buf, int len,
+ int pid)
+{
+ int err;
+ int retry_count = 0, i, rsp_ctxt;
+ uint32_t write_len = 0;
+ unsigned long flags;
+ unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+ struct diag_pkt_frame_t header;
+ struct diag_md_session_t *session_info = NULL, *info = NULL;
+
+ if (!rsp_ptr || !buf)
+ return;
+
+ if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+ pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+ __func__, len, DIAG_MAX_RSP_SIZE);
+ return;
+ }
+
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(pid);
+ info = (session_info) ? session_info :
+ diag_md_session_get_peripheral(APPS_DATA);
+
+ if (info && info->peripheral_mask) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+ mutex_unlock(&driver->md_session_lock);
+
+ /*
+ * Keep trying till we get the buffer back. It should probably
+	 * take one or two iterations. If this loops up to RETRY_MAX_COUNT, it
+ * means we did not get a write complete for the previous
+ * response.
+ */
+ while (retry_count < RETRY_MAX_COUNT) {
+ if (!driver->rsp_buf_busy)
+ break;
+ /*
+		 * Wait for some time and try again. The value 10000 was chosen
+ * empirically as an optimum value for USB to complete a write
+ */
+ usleep_range(10000, 10100);
+ retry_count++;
+
+ /*
+		 * There can be a race condition that clears the data ready flag
+ * for responses. Make sure we don't miss previous wakeups for
+ * draining responses when we are in Memory Device Mode.
+ */
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+ driver->logging_mode == DIAG_MULTI_MODE) {
+ mutex_lock(&driver->md_session_lock);
+ chk_logging_wakeup();
+ mutex_unlock(&driver->md_session_lock);
+ }
+ }
+ if (driver->rsp_buf_busy) {
+ pr_err("diag: unable to get hold of response buffer\n");
+ return;
+ }
+
+ driver->rsp_buf_busy = 1;
+ header.start = CONTROL_CHAR;
+ header.version = 1;
+ header.length = len;
+ memcpy(rsp_ptr, &header, sizeof(header));
+ write_len += sizeof(header);
+ memcpy(rsp_ptr + write_len, buf, len);
+ write_len += len;
+ *(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
+ write_len += sizeof(uint8_t);
+
+ err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len, rsp_ctxt);
+ if (err) {
+ pr_err("diag: In %s, unable to write to mux, err: %d\n",
+ __func__, err);
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ }
+}
+
+static void encode_rsp_and_send(unsigned char *buf, int len,
+ int pid)
+{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+ int err, i, rsp_ctxt, retry_count = 0;
+ unsigned long flags;
+ struct diag_md_session_t *session_info = NULL, *info = NULL;
+
+ if (!rsp_ptr || !buf)
+ return;
+
+ if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+ pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+ __func__, len, DIAG_MAX_RSP_SIZE);
+ return;
+ }
+
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(pid);
+ info = (session_info) ? session_info :
+ diag_md_session_get_peripheral(APPS_DATA);
+
+ if (info && info->peripheral_mask) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+ mutex_unlock(&driver->md_session_lock);
+ /*
+ * Keep trying till we get the buffer back. It should probably
+	 * take one or two iterations. If this loops up to RETRY_MAX_COUNT, it
+ * means we did not get a write complete for the previous
+ * response.
+ */
+ while (retry_count < RETRY_MAX_COUNT) {
+ if (!driver->rsp_buf_busy)
+ break;
+ /*
+		 * Wait for some time and try again. The value 10000 was chosen
+ * empirically as an optimum value for USB to complete a write
+ */
+ usleep_range(10000, 10100);
+ retry_count++;
+
+ /*
+		 * There can be a race condition that clears the data ready flag
+ * for responses. Make sure we don't miss previous wakeups for
+ * draining responses when we are in Memory Device Mode.
+ */
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+ driver->logging_mode == DIAG_MULTI_MODE) {
+ mutex_lock(&driver->md_session_lock);
+ chk_logging_wakeup();
+ mutex_unlock(&driver->md_session_lock);
+ }
+ }
+
+ if (driver->rsp_buf_busy) {
+ pr_err("diag: unable to get hold of response buffer\n");
+ return;
+ }
+
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 1;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ send.state = DIAG_STATE_START;
+ send.pkt = buf;
+ send.last = (void *)(buf + len - 1);
+ send.terminate = 1;
+ enc.dest = rsp_ptr;
+ enc.dest_last = (void *)(rsp_ptr + DIAG_MAX_HDLC_BUF_SIZE - 1);
+ diag_hdlc_encode(&send, &enc);
+ driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
+ err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
+ rsp_ctxt);
+ if (err) {
+ pr_err("diag: In %s, Unable to write to device, err: %d\n",
+ __func__, err);
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ }
+ memset(buf, '\0', DIAG_MAX_RSP_SIZE);
+}
+
+static void diag_send_rsp(unsigned char *buf, int len, int pid)
+{
+ struct diag_md_session_t *session_info = NULL, *info = NULL;
+ uint8_t hdlc_disabled;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ session_info = (info) ? info :
+ diag_md_session_get_peripheral(APPS_DATA);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+ mutex_unlock(&driver->md_session_lock);
+
+ if (hdlc_disabled)
+ pack_rsp_and_send(buf, len, pid);
+ else
+ encode_rsp_and_send(buf, len, pid);
+}
+
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
+{
+ unsigned char *ptr = NULL;
+ unsigned char *temp = buf;
+ int *in_busy = NULL;
+ uint32_t *length = NULL;
+ uint32_t max_len = 0;
+
+ if (!buf || len == 0) {
+ pr_err("diag: In %s, Invalid ptr %pK and length %d\n",
+ __func__, buf, len);
+ return;
+ }
+
+ switch (type) {
+ case PKT_TYPE:
+ ptr = driver->apps_req_buf;
+ length = &driver->apps_req_buf_len;
+ max_len = DIAG_MAX_REQ_SIZE;
+ in_busy = &driver->in_busy_pktdata;
+ break;
+ case DCI_PKT_TYPE:
+ ptr = driver->dci_pkt_buf;
+ length = &driver->dci_pkt_length;
+ max_len = DCI_BUF_SIZE;
+ in_busy = &driver->in_busy_dcipktdata;
+ break;
+ default:
+ pr_err("diag: Invalid type %d in %s\n", type, __func__);
+ return;
+ }
+
+ mutex_lock(&driver->diagchar_mutex);
+ if (CHK_OVERFLOW(ptr, ptr, ptr + max_len, len)) {
+		memcpy(ptr, temp, len);
+ *length = len;
+ *in_busy = 1;
+ } else {
+ pr_alert("diag: In %s, no space for response packet, len: %d, type: %d\n",
+ __func__, len, type);
+ }
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_userspace_clients(unsigned int type)
+{
+ int i;
+
+ mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid != 0 &&
+ !(driver->data_ready[i] & type)) {
+ driver->data_ready[i] |= type;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
+ wake_up_interruptible(&driver->wait_q);
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_md_clients(unsigned int type)
+{
+ int i, j;
+
+ mutex_lock(&driver->diagchar_mutex);
+ mutex_lock(&driver->md_session_lock);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] != NULL)
+ for (j = 0; j < driver->num_clients; j++) {
+ if (driver->client_map[j].pid != 0 &&
+ driver->client_map[j].pid ==
+ driver->md_session_map[i]->pid) {
+ if (!(driver->data_ready[j] & type)) {
+ driver->data_ready[j] |= type;
+ atomic_inc(
+ &driver->data_ready_notif[j]);
+ }
+ break;
+ }
+ }
+ }
+ mutex_unlock(&driver->md_session_lock);
+ wake_up_interruptible(&driver->wait_q);
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_sleeping_process(int process_id, int data_type)
+{
+ int i;
+
+ mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == process_id) {
+ if (!(driver->data_ready[i] & data_type)) {
+ driver->data_ready[i] |= data_type;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
+ break;
+ }
+ wake_up_interruptible(&driver->wait_q);
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
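+/*
+ * Route a command packet to its registered owner: commands registered for
+ * the apps processor (APPS_DATA) are copied into the apps request buffer
+ * and the registered process is woken to service them, while commands
+ * registered by a peripheral are forwarded over that peripheral's TYPE_CMD
+ * channel.
+ */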
+static int diag_send_data(struct diag_cmd_reg_t *entry, unsigned char *buf,
+ int len)
+{
+ if (!entry)
+ return -EIO;
+
+ if (entry->proc == APPS_DATA) {
+ diag_update_pkt_buffer(buf, len, PKT_TYPE);
+ diag_update_sleeping_process(entry->pid, PKT_TYPE);
+ return 0;
+ }
+
+ return diagfwd_write(entry->proc, TYPE_CMD, buf, len);
+}
+
+void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type)
+{
+ int status = 0;
+
+	if (data_type >= PERIPHERAL_MODEM && data_type <= PERIPHERAL_SENSORS) {
+ if (driver->feature[data_type].stm_support) {
+ status = diag_send_stm_state(data_type, cmd);
+ if (status == 0)
+ driver->stm_state[data_type] = cmd;
+ }
+ driver->stm_state_requested[data_type] = cmd;
+ } else if (data_type == APPS_DATA) {
+ driver->stm_state[data_type] = cmd;
+ driver->stm_state_requested[data_type] = cmd;
+ }
+}
+
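+/*
+ * Process an STM command. The request layout assumed here follows the
+ * STM_CMD_*_OFFSET constants used below: a version byte (which must be 2),
+ * a processor mask (validated as non-zero with a clear upper nibble for
+ * set requests, and ignored for STATUS_STM) and a command byte. The
+ * response echoes the request bytes and appends the supported and current
+ * state masks at STM_RSP_SUPPORTED_INDEX and STM_RSP_STATUS_INDEX.
+ */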
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
+{
+ uint8_t version, mask, cmd;
+ uint8_t rsp_supported = 0;
+ uint8_t rsp_status = 0;
+ int i;
+
+ if (!buf || !dest_buf) {
+ pr_err("diag: Invalid pointers buf: %pK, dest_buf %pK in %s\n",
+ buf, dest_buf, __func__);
+ return -EIO;
+ }
+
+ version = *(buf + STM_CMD_VERSION_OFFSET);
+ mask = *(buf + STM_CMD_MASK_OFFSET);
+ cmd = *(buf + STM_CMD_DATA_OFFSET);
+
+ /*
+ * Check if command is valid. If the command is asking for
+ * status, then the processor mask field is to be ignored.
+ */
+ if ((version != 2) || (cmd > STATUS_STM) ||
+ ((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+ /* Command is invalid. Send bad param message response */
+ dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+ for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+ dest_buf[i+1] = *(buf + i);
+ return STM_CMD_NUM_BYTES+1;
+ } else if (cmd != STATUS_STM) {
+ if (mask & DIAG_STM_MODEM)
+ diag_process_stm_mask(cmd, DIAG_STM_MODEM,
+ PERIPHERAL_MODEM);
+
+ if (mask & DIAG_STM_LPASS)
+ diag_process_stm_mask(cmd, DIAG_STM_LPASS,
+ PERIPHERAL_LPASS);
+
+ if (mask & DIAG_STM_WCNSS)
+ diag_process_stm_mask(cmd, DIAG_STM_WCNSS,
+ PERIPHERAL_WCNSS);
+
+ if (mask & DIAG_STM_SENSORS)
+ diag_process_stm_mask(cmd, DIAG_STM_SENSORS,
+ PERIPHERAL_SENSORS);
+ if (mask & DIAG_STM_WDSP)
+ diag_process_stm_mask(cmd, DIAG_STM_WDSP,
+ PERIPHERAL_WDSP);
+
+ if (mask & DIAG_STM_APPS)
+ diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
+ }
+
+ for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+ dest_buf[i] = *(buf + i);
+
+ /* Set mask denoting which peripherals support STM */
+ if (driver->feature[PERIPHERAL_MODEM].stm_support)
+ rsp_supported |= DIAG_STM_MODEM;
+
+ if (driver->feature[PERIPHERAL_LPASS].stm_support)
+ rsp_supported |= DIAG_STM_LPASS;
+
+ if (driver->feature[PERIPHERAL_WCNSS].stm_support)
+ rsp_supported |= DIAG_STM_WCNSS;
+
+ if (driver->feature[PERIPHERAL_SENSORS].stm_support)
+ rsp_supported |= DIAG_STM_SENSORS;
+
+ if (driver->feature[PERIPHERAL_WDSP].stm_support)
+ rsp_supported |= DIAG_STM_WDSP;
+
+ rsp_supported |= DIAG_STM_APPS;
+
+ /* Set mask denoting STM state/status for each peripheral/APSS */
+ if (driver->stm_state[PERIPHERAL_MODEM])
+ rsp_status |= DIAG_STM_MODEM;
+
+ if (driver->stm_state[PERIPHERAL_LPASS])
+ rsp_status |= DIAG_STM_LPASS;
+
+ if (driver->stm_state[PERIPHERAL_WCNSS])
+ rsp_status |= DIAG_STM_WCNSS;
+
+ if (driver->stm_state[PERIPHERAL_SENSORS])
+ rsp_status |= DIAG_STM_SENSORS;
+
+ if (driver->stm_state[PERIPHERAL_WDSP])
+ rsp_status |= DIAG_STM_WDSP;
+
+ if (driver->stm_state[APPS_DATA])
+ rsp_status |= DIAG_STM_APPS;
+
+ dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
+ dest_buf[STM_RSP_STATUS_INDEX] = rsp_status;
+
+ return STM_RSP_NUM_BYTES;
+}
+
+int diag_process_time_sync_query_cmd(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int write_len = 0;
+ struct diag_cmd_time_sync_query_req_t *req = NULL;
+ struct diag_cmd_time_sync_query_rsp_t rsp;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ req = (struct diag_cmd_time_sync_query_req_t *)src_buf;
+ rsp.header.cmd_code = req->header.cmd_code;
+ rsp.header.subsys_id = req->header.subsys_id;
+ rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+ rsp.version = req->version;
+ rsp.time_api = driver->uses_time_api;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len = sizeof(rsp);
+ return write_len;
+}
+
+int diag_process_time_sync_switch_cmd(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ uint8_t peripheral, status = 0;
+ struct diag_cmd_time_sync_switch_req_t *req = NULL;
+ struct diag_cmd_time_sync_switch_rsp_t rsp;
+ struct diag_ctrl_msg_time_sync time_sync_msg;
+ int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+ int err = 0, write_len = 0;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ req = (struct diag_cmd_time_sync_switch_req_t *)src_buf;
+ rsp.header.cmd_code = req->header.cmd_code;
+ rsp.header.subsys_id = req->header.subsys_id;
+ rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+ rsp.version = req->version;
+ rsp.time_api = req->time_api;
+ if ((req->version > 1) || (req->time_api > 1) ||
+ (req->persist_time > 0)) {
+ dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+ rsp.time_api_status = 0;
+ rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+ memcpy(dest_buf + 1, &rsp, sizeof(rsp));
+ write_len = sizeof(rsp) + 1;
+ timestamp_switch = 0;
+ return write_len;
+ }
+
+ time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+ time_sync_msg.ctrl_pkt_data_len = 5;
+ time_sync_msg.version = 1;
+ time_sync_msg.time_api = req->time_api;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg,
+ msg_size);
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ status |= (1 << peripheral);
+ }
+ }
+
+ driver->time_sync_enabled = 1;
+ driver->uses_time_api = req->time_api;
+
+ switch (req->time_api) {
+ case 0:
+ timestamp_switch = 0;
+ break;
+ case 1:
+ timestamp_switch = 1;
+ break;
+ default:
+ timestamp_switch = 0;
+ break;
+ }
+
+ rsp.time_api_status = status;
+ rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len = sizeof(rsp);
+ return write_len;
+}
+
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int write_len = 0;
+ struct diag_log_on_demand_rsp_t header;
+
+ if (!driver->diagfwd_cntl[PERIPHERAL_MODEM] ||
+ !driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open ||
+ !driver->log_on_demand_support)
+ return 0;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ header.cmd_code = DIAG_CMD_LOG_ON_DMND;
+ header.log_code = *(uint16_t *)(src_buf + 1);
+ header.status = 1;
+ memcpy(dest_buf, &header, sizeof(struct diag_log_on_demand_rsp_t));
+ write_len += sizeof(struct diag_log_on_demand_rsp_t);
+
+ return write_len;
+}
+
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int write_len = 0;
+ struct diag_pkt_header_t *header = NULL;
+ struct diag_cmd_ext_mobile_rsp_t rsp;
+
+ if (!src_buf || src_len != sizeof(*header) || !dest_buf ||
+ dest_len < sizeof(rsp))
+ return -EIO;
+
+ header = (struct diag_pkt_header_t *)src_buf;
+ rsp.header.cmd_code = header->cmd_code;
+ rsp.header.subsys_id = header->subsys_id;
+ rsp.header.subsys_cmd_code = header->subsys_cmd_code;
+ rsp.version = 2;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.family = 0;
+ rsp.chip_id = (uint32_t)socinfo_get_id();
+
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+
+ return write_len;
+}
+
+int diag_check_common_cmd(struct diag_pkt_header_t *header)
+{
+ int i;
+
+ if (!header)
+ return -EIO;
+
+ for (i = 0; i < DIAG_NUM_COMMON_CMD; i++) {
+ if (header->cmd_code == common_cmds[i])
+ return 1;
+ }
+
+ return 0;
+}
+
+static int diag_cmd_chk_stats(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int payload = 0;
+ int write_len = 0;
+ struct diag_pkt_header_t *header = NULL;
+ struct diag_cmd_stats_rsp_t rsp;
+
+ if (!src_buf || src_len < sizeof(struct diag_pkt_header_t) ||
+ !dest_buf || dest_len < sizeof(rsp))
+ return -EINVAL;
+
+ header = (struct diag_pkt_header_t *)src_buf;
+
+ if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+ header->subsys_id != DIAG_SS_DIAG)
+ return -EINVAL;
+
+ switch (header->subsys_cmd_code) {
+ case DIAG_CMD_OP_GET_MSG_ALLOC:
+ payload = driver->msg_stats.alloc_count;
+ break;
+ case DIAG_CMD_OP_GET_MSG_DROP:
+ payload = driver->msg_stats.drop_count;
+ break;
+ case DIAG_CMD_OP_RESET_MSG_STATS:
+ diag_record_stats(DATA_TYPE_F3, PKT_RESET);
+ break;
+ case DIAG_CMD_OP_GET_LOG_ALLOC:
+ payload = driver->log_stats.alloc_count;
+ break;
+ case DIAG_CMD_OP_GET_LOG_DROP:
+ payload = driver->log_stats.drop_count;
+ break;
+ case DIAG_CMD_OP_RESET_LOG_STATS:
+ diag_record_stats(DATA_TYPE_LOG, PKT_RESET);
+ break;
+ case DIAG_CMD_OP_GET_EVENT_ALLOC:
+ payload = driver->event_stats.alloc_count;
+ break;
+ case DIAG_CMD_OP_GET_EVENT_DROP:
+ payload = driver->event_stats.drop_count;
+ break;
+ case DIAG_CMD_OP_RESET_EVENT_STATS:
+ diag_record_stats(DATA_TYPE_EVENT, PKT_RESET);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+ rsp.payload = payload;
+ write_len = sizeof(rsp);
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+
+ return write_len;
+}
+
+static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ struct diag_pkt_header_t *header = NULL;
+ struct diag_cmd_hdlc_disable_rsp_t rsp;
+ int write_len = 0;
+
+ if (!src_buf || src_len < sizeof(*header) ||
+ !dest_buf || dest_len < sizeof(rsp)) {
+ return -EIO;
+ }
+
+ header = (struct diag_pkt_header_t *)src_buf;
+ if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+ header->subsys_id != DIAG_SS_DIAG ||
+ header->subsys_cmd_code != DIAG_CMD_OP_HDLC_DISABLE) {
+ return -EINVAL;
+ }
+
+ memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+ rsp.framing_version = 1;
+ rsp.result = 0;
+ write_len = sizeof(rsp);
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+
+ return write_len;
+}
+
+void diag_send_error_rsp(unsigned char *buf, int len,
+ int pid)
+{
+	/* -1 to accommodate the first byte 0x13 */
+ if (len > (DIAG_MAX_RSP_SIZE - 1)) {
+ pr_err("diag: cannot send err rsp, huge length: %d\n", len);
+ return;
+ }
+
+ *(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
+ memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
+ diag_send_rsp(driver->apps_rsp_buf, len + 1, pid);
+}
+
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
+{
+ int i, p_mask = 0;
+ int mask_ret, peripheral = -EINVAL;
+ int write_len = 0;
+ unsigned char *temp = NULL;
+ struct diag_cmd_reg_entry_t entry;
+ struct diag_cmd_reg_entry_t *temp_entry = NULL;
+ struct diag_cmd_reg_t *reg_item = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ if (!buf)
+ return -EIO;
+
+ /* Check if the command is a supported mask command */
+ mask_ret = diag_process_apps_masks(buf, len, pid);
+ if (mask_ret > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, mask_ret, pid);
+ return 0;
+ }
+
+ temp = buf;
+ entry.cmd_code = (uint16_t)(*(uint8_t *)temp);
+ temp += sizeof(uint8_t);
+ entry.subsys_id = (uint16_t)(*(uint8_t *)temp);
+ temp += sizeof(uint8_t);
+ entry.cmd_code_hi = (uint16_t)(*(uint16_t *)temp);
+ entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
+ temp += sizeof(uint16_t);
+
+ pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
+ __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
+
+ if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
+ driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+ write_len = diag_cmd_log_on_demand(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0)
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+ return 0;
+ }
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ temp_entry = diag_cmd_search(&entry, ALL_PROC);
+ if (temp_entry) {
+ reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+ entry);
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ if (info) {
+ p_mask = info->peripheral_mask;
+ mutex_unlock(&driver->md_session_lock);
+ if (MD_PERIPHERAL_MASK(reg_item->proc) & p_mask)
+ write_len = diag_send_data(reg_item, buf, len);
+ } else {
+ mutex_unlock(&driver->md_session_lock);
+ if (MD_PERIPHERAL_MASK(reg_item->proc) &
+ driver->logging_mask) {
+ mutex_unlock(&driver->cmd_reg_mutex);
+ diag_send_error_rsp(buf, len, pid);
+ return write_len;
+			} else {
+				write_len = diag_send_data(reg_item, buf, len);
+			}
+ }
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return write_len;
+ }
+ mutex_unlock(&driver->cmd_reg_mutex);
+
+#if defined(CONFIG_DIAG_OVER_USB)
+ /* Check for the command/respond msg for the maximum packet length */
+ if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+ (*(uint16_t *)(buf+2) == 0x0055)) {
+ for (i = 0; i < 4; i++)
+ *(driver->apps_rsp_buf+i) = *(buf+i);
+ *(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
+ diag_send_rsp(driver->apps_rsp_buf, 8, pid);
+ return 0;
+ } else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+ (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
+ len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
+ if (len > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, len, pid);
+ return 0;
+ }
+ return len;
+ }
+ /* Check for time sync query command */
+ else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+ (*(buf+1) == DIAG_SS_DIAG) &&
+ (*(uint16_t *)(buf+2) == DIAG_GET_TIME_API)) {
+ write_len = diag_process_time_sync_query_cmd(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0)
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+ return 0;
+ }
+ /* Check for time sync switch command */
+ else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+ (*(buf+1) == DIAG_SS_DIAG) &&
+ (*(uint16_t *)(buf+2) == DIAG_SET_TIME_API)) {
+ write_len = diag_process_time_sync_switch_cmd(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0)
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+ return 0;
+ }
+ /* Check for download command */
+ else if ((chk_apps_master()) && (*buf == 0x3A)) {
+ /* send response back */
+ driver->apps_rsp_buf[0] = *buf;
+ diag_send_rsp(driver->apps_rsp_buf, 1, pid);
+ msleep(5000);
+ /* call download API */
+ msm_set_restart_mode(RESTART_DLOAD);
+ printk(KERN_CRIT "diag: download mode set, Rebooting SoC..\n");
+ kernel_restart(NULL);
+		/* Not required; indicates the command is not sent to the modem */
+ return 0;
+ }
+ /* Check for polling for Apps only DIAG */
+ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+ (*(buf+2) == 0x03)) {
+ /* If no one has registered for polling */
+ if (chk_polling_response()) {
+ /* Respond to polling for Apps only DIAG */
+ for (i = 0; i < 3; i++)
+ driver->apps_rsp_buf[i] = *(buf+i);
+ for (i = 0; i < 13; i++)
+ driver->apps_rsp_buf[i+3] = 0;
+
+ diag_send_rsp(driver->apps_rsp_buf, 16, pid);
+ return 0;
+ }
+ }
+ /* Return the Delayed Response Wrap Status */
+ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+ (*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
+ memcpy(driver->apps_rsp_buf, buf, 4);
+ driver->apps_rsp_buf[4] = wrap_enabled;
+ diag_send_rsp(driver->apps_rsp_buf, 5, pid);
+ return 0;
+ }
+ /* Wrap the Delayed Rsp ID */
+ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+ (*(buf+2) == 0x05) && (*(buf+3) == 0x0)) {
+ wrap_enabled = true;
+ memcpy(driver->apps_rsp_buf, buf, 4);
+ driver->apps_rsp_buf[4] = wrap_count;
+ diag_send_rsp(driver->apps_rsp_buf, 6, pid);
+ return 0;
+ }
+ /* Mobile ID Rsp */
+ else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+ (*(buf+1) == DIAG_SS_PARAMS) &&
+ (*(buf+2) == DIAG_EXT_MOBILE_ID) && (*(buf+3) == 0x0)) {
+ write_len = diag_cmd_get_mobile_id(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+ return 0;
+ }
+ }
+	/*
+	 * If the apps processor is the master, no other processor has
+	 * registered for the polling command, and the modem is not up
+	 * (no feature mask update has been received from it), then the
+	 * apps processor should respond to the 0x7C command itself.
+	 */
+ else if (chk_apps_master() &&
+ !(driver->polling_reg_flag) &&
+ !(driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+ !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+ /* respond to 0x0 command */
+ if (*buf == 0x00) {
+ for (i = 0; i < 55; i++)
+ driver->apps_rsp_buf[i] = 0;
+
+ diag_send_rsp(driver->apps_rsp_buf, 55, pid);
+ return 0;
+ }
+ /* respond to 0x7c command */
+ else if (*buf == 0x7c) {
+ driver->apps_rsp_buf[0] = 0x7c;
+ for (i = 1; i < 8; i++)
+ driver->apps_rsp_buf[i] = 0;
+ /* Tools ID for APQ 8060 */
+ *(int *)(driver->apps_rsp_buf + 8) =
+ chk_config_get_id();
+ *(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
+ *(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
+ diag_send_rsp(driver->apps_rsp_buf, 14, pid);
+ return 0;
+ }
+ }
+ write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+ return 0;
+ }
+ write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0) {
+ /*
+ * This mutex lock is necessary since we need to drain all the
+ * pending buffers from peripherals which may be HDLC encoded
+ * before disabling HDLC encoding on Apps processor.
+ */
+ mutex_lock(&driver->hdlc_disable_mutex);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+		/*
+		 * Set the value of hdlc_disabled only after sending the
+		 * response to the tool, since the tool expects an HDLC
+		 * encoded response to this request.
+		 */
+ pr_debug("diag: In %s, disabling HDLC encoding\n",
+ __func__);
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ if (info)
+ info->hdlc_disabled = 1;
+ else
+ driver->hdlc_disabled = 1;
+ peripheral =
+ diag_md_session_match_pid_peripheral(pid, 0);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral > 0 && info) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ info->hdlc_disabled;
+ else if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ } else {
+ if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ }
+ mutex_unlock(&driver->md_session_lock);
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ return 0;
+ }
+#endif
+
+	/* Command not handled above; respond with an error on apps-only targets */
+ if (chk_apps_only())
+ diag_send_error_rsp(buf, len, pid);
+
+ return 0;
+}
+
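+/*
+ * Accumulate and decode one chunk of an HDLC-encoded stream. Decoded bytes
+ * are appended to driver->hdlc_buf; only once diag_hdlc_decode() reports
+ * HDLC_COMPLETE and the CRC checks out is the decoded request (minus the
+ * HDLC footer) handed to diag_process_apps_pkt(). Framing or CRC failures
+ * send an error response so the tool can start its recovery algorithm.
+ */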
+void diag_process_hdlc_pkt(void *data, unsigned int len, int pid)
+{
+ int err = 0;
+ int ret = 0;
+
+ if (len > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: In %s, invalid length: %d\n", __func__, len);
+ return;
+ }
+
+ mutex_lock(&driver->diag_hdlc_mutex);
+ pr_debug("diag: In %s, received packet of length: %d, req_buf_len: %d\n",
+ __func__, len, driver->hdlc_buf_len);
+
+ if (driver->hdlc_buf_len >= DIAG_MAX_REQ_SIZE) {
+		pr_err("diag: In %s, request length is more than the supported length. Dropping packet.\n",
+ __func__);
+ goto fail;
+ }
+
+ hdlc_decode->dest_ptr = driver->hdlc_buf + driver->hdlc_buf_len;
+ hdlc_decode->dest_size = DIAG_MAX_HDLC_BUF_SIZE - driver->hdlc_buf_len;
+ hdlc_decode->src_ptr = data;
+ hdlc_decode->src_size = len;
+ hdlc_decode->src_idx = 0;
+ hdlc_decode->dest_idx = 0;
+
+ ret = diag_hdlc_decode(hdlc_decode);
+ /*
+ * driver->hdlc_buf is of size DIAG_MAX_HDLC_BUF_SIZE. But the decoded
+ * packet should be within DIAG_MAX_REQ_SIZE.
+ */
+ if (driver->hdlc_buf_len + hdlc_decode->dest_idx <= DIAG_MAX_REQ_SIZE) {
+ driver->hdlc_buf_len += hdlc_decode->dest_idx;
+ } else {
+ pr_err_ratelimited("diag: In %s, Dropping packet. pkt_size: %d, max: %d\n",
+ __func__,
+ driver->hdlc_buf_len + hdlc_decode->dest_idx,
+ DIAG_MAX_REQ_SIZE);
+ goto fail;
+ }
+
+ if (ret == HDLC_COMPLETE) {
+ err = crc_check(driver->hdlc_buf, driver->hdlc_buf_len);
+ if (err) {
+ /* CRC check failed. */
+ pr_err_ratelimited("diag: In %s, bad CRC. Dropping packet\n",
+ __func__);
+ goto fail;
+ }
+ driver->hdlc_buf_len -= HDLC_FOOTER_LEN;
+
+ if (driver->hdlc_buf_len < 1) {
+ pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
+ __func__, driver->hdlc_buf_len,
+ hdlc_decode->dest_idx);
+ goto fail;
+ }
+
+ err = diag_process_apps_pkt(driver->hdlc_buf,
+ driver->hdlc_buf_len, pid);
+ if (err < 0)
+ goto fail;
+ } else {
+ goto end;
+ }
+
+ driver->hdlc_buf_len = 0;
+ mutex_unlock(&driver->diag_hdlc_mutex);
+ return;
+
+fail:
+	/*
+	 * The tool needs a response in order to start its recovery
+	 * algorithm. Send an error response if the packet is not in
+	 * the expected format.
+	 */
+ diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, pid);
+ driver->hdlc_buf_len = 0;
+end:
+ mutex_unlock(&driver->diag_hdlc_mutex);
+}
+
+static int diagfwd_mux_open(int id, int mode)
+{
+ uint8_t i;
+ unsigned long flags;
+
+ switch (mode) {
+ case DIAG_USB_MODE:
+ driver->usb_connected = 1;
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (driver->rsp_buf_busy) {
+ /*
+ * When a client switches from callback mode to USB mode
+ * explicitly, there can be a situation when the last response
+ * is not drained to the user space application. Reset the
+ * in_busy flag in this case.
+ */
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ }
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ diagfwd_open(i, TYPE_DATA);
+ diagfwd_open(i, TYPE_CMD);
+ }
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+ return 0;
+}
+
+static int diagfwd_mux_close(int id, int mode)
+{
+ uint8_t i;
+
+ switch (mode) {
+ case DIAG_USB_MODE:
+ driver->usb_connected = 0;
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((driver->logging_mode == DIAG_MULTI_MODE &&
+ driver->md_session_mode == DIAG_MD_NONE) ||
+ (driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
+		/*
+		 * This case indicates that USB was removed but a client
+		 * is still running in the background in memory device
+		 * mode; keep the channels open.
+		 */
+ } else {
+		/*
+		 * When the sysfs parameter to clear masks is set,
+		 * peripheral masks are cleared on ODL exit and on USB
+		 * disconnection, and buffers are not marked busy. This
+		 * allows stale packets to be read and dropped.
+		 *
+		 * When the sysfs parameter to clear masks is not set,
+		 * masks are left intact and buffers are marked busy so
+		 * that traffic generated by the peripherals is not read.
+		 */
+ if (!(diag_mask_param())) {
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ diagfwd_close(i, TYPE_DATA);
+ diagfwd_close(i, TYPE_CMD);
+ }
+ }
+ /* Re enable HDLC encoding */
+ pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ if (driver->md_session_mode == DIAG_MD_NONE) {
+ driver->hdlc_disabled = 0;
+			/*
+			 * HDLC encoding is re-enabled on a logical or
+			 * physical disconnection of diag from USB.
+			 */
+ for (i = 0; i < NUM_MD_SESSIONS; i++)
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ queue_work(driver->diag_wq,
+ &(driver->update_user_clients));
+ }
+ queue_work(driver->diag_real_time_wq,
+ &driver->diag_real_time_work);
+ return 0;
+}
+
+static uint8_t hdlc_reset;
+
+static void hdlc_reset_timer_start(int pid)
+{
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ if (!hdlc_timer_in_progress) {
+ hdlc_timer_in_progress = 1;
+ if (info)
+ mod_timer(&info->hdlc_reset_timer,
+ jiffies + msecs_to_jiffies(200));
+ else
+ mod_timer(&driver->hdlc_reset_timer,
+ jiffies + msecs_to_jiffies(200));
+ }
+ mutex_unlock(&driver->md_session_lock);
+}
+
+/*
+ * diag_timer_work_fn
+ * Queued on a workqueue to protect the md_session_info structure.
+ *
+ * Re-enables HDLC encoding (hdlc_disabled = 0) for each peripheral
+ * that is not part of any md session.
+ */
+static void diag_timer_work_fn(struct work_struct *work)
+{
+ int i = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ driver->hdlc_disabled = 0;
+ mutex_lock(&driver->md_session_lock);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ session_info = diag_md_session_get_peripheral(i);
+ if (!session_info)
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ mutex_unlock(&driver->md_session_lock);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+}
+
+/*
+ * diag_md_timer_work_fn
+ * Queued on a workqueue to protect the md_session_info structure.
+ *
+ * Re-enables HDLC encoding (hdlc_disabled = 0) for each peripheral
+ * that belongs to the md session of the requesting pid.
+ */
+static void diag_md_timer_work_fn(struct work_struct *work)
+{
+ int peripheral = -EINVAL, i = 0;
+ struct diag_md_session_t *session_info = NULL;
+ struct diag_md_hdlc_reset_work *hdlc_work = container_of(work,
+ struct diag_md_hdlc_reset_work, work);
+
+ if (!hdlc_work)
+ return;
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(hdlc_work->pid);
+ if (session_info)
+ session_info->hdlc_disabled = 0;
+ peripheral =
+ diag_md_session_match_pid_peripheral(hdlc_work->pid, 0);
+ if (peripheral > 0 && session_info) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ session_info->hdlc_disabled;
+ }
+ }
+ kfree(hdlc_work);
+ mutex_unlock(&driver->md_session_lock);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+}
+
+static void hdlc_reset_timer_func(unsigned long data)
+{
+ pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+
+ if (hdlc_reset) {
+ queue_work(driver->diag_wq, &(driver->diag_hdlc_reset_work));
+ queue_work(driver->diag_wq, &(driver->update_user_clients));
+ }
+ hdlc_timer_in_progress = 0;
+}
+
+void diag_md_hdlc_reset_timer_func(unsigned long pid)
+{
+ struct diag_md_hdlc_reset_work *hdlc_reset_work = NULL;
+
+ pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+ hdlc_reset_work = kmalloc(sizeof(*hdlc_reset_work), GFP_ATOMIC);
+ if (!hdlc_reset_work) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Could not allocate hdlc_reset_work\n");
+ hdlc_timer_in_progress = 0;
+ return;
+ }
+ if (hdlc_reset) {
+ hdlc_reset_work->pid = pid;
+ INIT_WORK(&hdlc_reset_work->work, diag_md_timer_work_fn);
+ queue_work(driver->diag_wq, &(hdlc_reset_work->work));
+ queue_work(driver->diag_wq, &(driver->update_md_clients));
+ }
+ hdlc_timer_in_progress = 0;
+}
+
+static void diag_hdlc_start_recovery(unsigned char *buf, int len,
+ int pid)
+{
+ int i, peripheral = -EINVAL;
+ static uint32_t bad_byte_counter;
+ unsigned char *start_ptr = NULL;
+ struct diag_pkt_frame_t *actual_pkt = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ hdlc_reset = 1;
+ hdlc_reset_timer_start(pid);
+
+ actual_pkt = (struct diag_pkt_frame_t *)buf;
+ for (i = 0; i < len; i++) {
+ if (actual_pkt->start == CONTROL_CHAR &&
+ actual_pkt->version == 1 &&
+ actual_pkt->length < len &&
+ (*(uint8_t *)(buf + sizeof(struct diag_pkt_frame_t) +
+ actual_pkt->length) == CONTROL_CHAR)) {
+ start_ptr = &buf[i];
+ break;
+ }
+ bad_byte_counter++;
+ if (bad_byte_counter > (DIAG_MAX_REQ_SIZE +
+ sizeof(struct diag_pkt_frame_t) + 1)) {
+ bad_byte_counter = 0;
+ pr_err("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
+ if (info)
+ info->hdlc_disabled = 0;
+ else
+ driver->hdlc_disabled = 0;
+
+ peripheral =
+ diag_md_session_match_pid_peripheral(pid, 0);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral > 0 && info) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ info->hdlc_disabled;
+ else if (
+ !diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ } else {
+ if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ }
+ mutex_unlock(&driver->md_session_lock);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+ return;
+ }
+ }
+
+ if (start_ptr) {
+ /* Discard any partial packet reads */
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ driver->incoming_pkt.processing = 0;
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ diag_process_non_hdlc_pkt(start_ptr, len - i, pid);
+ }
+}
+
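+/*
+ * Parse a buffer containing one or more non-HDLC ("framed") packets. As
+ * implied by struct diag_pkt_frame_t and the checks below, each packet is
+ * laid out as:
+ *
+ *   CONTROL_CHAR | version | length | payload (length bytes) | CONTROL_CHAR
+ *
+ * A packet that spans two reads is staged in driver->incoming_pkt until the
+ * remaining bytes arrive; a missing trailing CONTROL_CHAR triggers HDLC
+ * recovery via diag_hdlc_start_recovery().
+ */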
+void diag_process_non_hdlc_pkt(unsigned char *buf, int len, int pid)
+{
+ int err = 0;
+ uint16_t pkt_len = 0;
+ uint32_t read_bytes = 0;
+ const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
+ struct diag_pkt_frame_t *actual_pkt = NULL;
+ unsigned char *data_ptr = NULL;
+ struct diag_partial_pkt_t *partial_pkt = NULL;
+
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ if (!buf || len <= 0) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ return;
+ }
+ partial_pkt = &driver->incoming_pkt;
+ if (!partial_pkt->processing) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ goto start;
+ }
+
+ if (partial_pkt->remaining > len) {
+ if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
+ pr_err("diag: Invalid length %d, %d received in %s\n",
+ partial_pkt->read_len, len, __func__);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ goto end;
+ }
+ memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
+ read_bytes += len;
+ buf += read_bytes;
+ partial_pkt->read_len += len;
+ partial_pkt->remaining -= len;
+ } else {
+ if ((partial_pkt->read_len + partial_pkt->remaining) >
+ partial_pkt->capacity) {
+ pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+ partial_pkt->read_len,
+ partial_pkt->remaining, __func__);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ goto end;
+ }
+ memcpy(partial_pkt->data + partial_pkt->read_len, buf,
+ partial_pkt->remaining);
+ read_bytes += partial_pkt->remaining;
+ buf += read_bytes;
+ partial_pkt->read_len += partial_pkt->remaining;
+ partial_pkt->remaining = 0;
+ }
+
+ if (partial_pkt->remaining == 0) {
+ actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
+ data_ptr = partial_pkt->data + header_len;
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+ CONTROL_CHAR) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ diag_hdlc_start_recovery(buf, len, pid);
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ }
+ err = diag_process_apps_pkt(data_ptr,
+ actual_pkt->length, pid);
+ if (err) {
+ pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
+ __func__, err);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ goto end;
+ }
+ partial_pkt->read_len = 0;
+ partial_pkt->total_len = 0;
+ partial_pkt->processing = 0;
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ goto start;
+ }
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ goto end;
+
+start:
+ while (read_bytes < len) {
+ actual_pkt = (struct diag_pkt_frame_t *)buf;
+ pkt_len = actual_pkt->length;
+
+ if (actual_pkt->start != CONTROL_CHAR) {
+ diag_hdlc_start_recovery(buf, len, pid);
+ diag_send_error_rsp(buf, len, pid);
+ goto end;
+ }
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ if (pkt_len + header_len > partial_pkt->capacity) {
+ pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
+ __func__, pkt_len);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ diag_hdlc_start_recovery(buf, len, pid);
+ break;
+ }
+ if ((pkt_len + header_len) > (len - read_bytes)) {
+ partial_pkt->read_len = len - read_bytes;
+ partial_pkt->total_len = pkt_len + header_len;
+ partial_pkt->remaining = partial_pkt->total_len -
+ partial_pkt->read_len;
+ partial_pkt->processing = 1;
+ memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ break;
+ }
+ data_ptr = buf + header_len;
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+ CONTROL_CHAR) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ diag_hdlc_start_recovery(buf, len, pid);
+ mutex_lock(&driver->hdlc_recovery_mutex);
+		} else {
+			hdlc_reset = 0;
+		}
+ err = diag_process_apps_pkt(data_ptr,
+ actual_pkt->length, pid);
+ if (err) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ break;
+ }
+ read_bytes += header_len + pkt_len + 1;
+ buf += header_len + pkt_len + 1; /* advance to next pkt */
+ mutex_unlock(&driver->hdlc_recovery_mutex);
+ }
+end:
+ return;
+}
+
+static int diagfwd_mux_read_done(unsigned char *buf, int len, int ctxt)
+{
+ if (!buf || len <= 0)
+ return -EINVAL;
+
+ if (!driver->hdlc_disabled)
+ diag_process_hdlc_pkt(buf, len, 0);
+ else
+ diag_process_non_hdlc_pkt(buf, len, 0);
+
+ diag_mux_queue_read(ctxt);
+ return 0;
+}
+
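+/*
+ * Write-complete callback from the mux layer. The buf_ctxt argument is the
+ * value packed earlier with SET_BUF_CTXT(); for example, a response written
+ * with context SET_BUF_CTXT(peripheral, TYPE_CMD, TYPE_CMD) is recognized
+ * below by num == TYPE_CMD and marks the apps response buffer as free.
+ */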
+static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
+ int ctxt)
+{
+ unsigned long flags;
+ int peripheral = -1;
+ int type = -1;
+ int num = -1;
+
+ if (!buf || len < 0)
+ return -EINVAL;
+
+ peripheral = GET_BUF_PERIPHERAL(buf_ctxt);
+ type = GET_BUF_TYPE(buf_ctxt);
+ num = GET_BUF_NUM(buf_ctxt);
+
+ switch (type) {
+ case TYPE_DATA:
+ if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+ peripheral, type, num);
+ diagfwd_write_done(peripheral, type, num);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ } else if (peripheral == APPS_DATA) {
+ diagmem_free(driver, (unsigned char *)buf,
+ POOL_TYPE_HDLC);
+ buf = NULL;
+ } else {
+ pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+ peripheral, __func__, type);
+ }
+ break;
+ case TYPE_CMD:
+ if (peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+ num != TYPE_CMD) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+ peripheral, type, num);
+ diagfwd_write_done(peripheral, type, num);
+ } else if (peripheral == APPS_DATA ||
+ (peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+ num == TYPE_CMD)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking APPS response buffer free after write done for p: %d, t: %d, buf_num: %d\n",
+ peripheral, type, num);
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ driver->encoded_rsp_len = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
+ flags);
+ }
+ break;
+ default:
+ pr_err_ratelimited("diag: Incorrect data type %d, buf_ctxt: %d in %s\n",
+ type, buf_ctxt, __func__);
+ break;
+ }
+
+ return 0;
+}
+
+static struct diag_mux_ops diagfwd_mux_ops = {
+ .open = diagfwd_mux_open,
+ .close = diagfwd_mux_close,
+ .read_done = diagfwd_mux_read_done,
+ .write_done = diagfwd_mux_write_done
+};
+
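+/*
+ * One-time initialization of the apps-side forwarding layer: allocates the
+ * response, HDLC and request buffers, seeds the per-peripheral feature,
+ * buffering and STM state, creates the diag workqueue and registers the
+ * mux ops for the local processor. On any failure everything allocated so
+ * far is freed and -ENOMEM is returned.
+ */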
+int diagfwd_init(void)
+{
+ int ret;
+ int i;
+
+ wrap_enabled = 0;
+ wrap_count = 0;
+ driver->use_device_tree = has_device_tree();
+ for (i = 0; i < DIAG_NUM_PROC; i++)
+ driver->real_time_mode[i] = 1;
+ driver->supports_separate_cmdrsp = 1;
+ driver->supports_apps_hdlc_encoding = 1;
+ driver->supports_apps_header_untagging = 1;
+ driver->supports_pd_buffering = 1;
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->peripheral_untag[i] = 0;
+ mutex_init(&driver->diag_hdlc_mutex);
+ mutex_init(&driver->diag_cntl_mutex);
+ mutex_init(&driver->mode_lock);
+ driver->encoded_rsp_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING, GFP_KERNEL);
+ if (!driver->encoded_rsp_buf)
+ goto err;
+ kmemleak_not_leak(driver->encoded_rsp_buf);
+ hdlc_decode = kzalloc(sizeof(struct diag_hdlc_decode_type),
+ GFP_KERNEL);
+ if (!hdlc_decode)
+ goto err;
+ setup_timer(&driver->hdlc_reset_timer, hdlc_reset_timer_func, 0);
+ kmemleak_not_leak(hdlc_decode);
+ driver->encoded_rsp_len = 0;
+ driver->rsp_buf_busy = 0;
+ spin_lock_init(&driver->rsp_buf_busy_lock);
+ driver->user_space_data_busy = 0;
+ driver->hdlc_buf_len = 0;
+ INIT_LIST_HEAD(&driver->cmd_reg_list);
+ driver->cmd_reg_count = 0;
+ mutex_init(&driver->cmd_reg_mutex);
+ INIT_WORK(&(driver->diag_hdlc_reset_work),
+ diag_timer_work_fn);
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ driver->feature[i].separate_cmd_rsp = 0;
+ driver->feature[i].stm_support = DISABLE_STM;
+ driver->feature[i].rcvd_feature_mask = 0;
+ driver->feature[i].peripheral_buffering = 0;
+ driver->feature[i].pd_buffering = 0;
+ driver->feature[i].encode_hdlc = 0;
+ driver->feature[i].untag_header =
+ DISABLE_PKT_HEADER_UNTAGGING;
+ driver->feature[i].mask_centralization = 0;
+ driver->feature[i].log_on_demand = 0;
+ driver->feature[i].sent_feature_mask = 0;
+ }
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ driver->buffering_mode[i].peripheral = i;
+ driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
+ driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
+ driver->buffering_mode[i].low_wm_val = DEFAULT_LOW_WM_VAL;
+ }
+
+ for (i = 0; i < NUM_STM_PROCESSORS; i++) {
+ driver->stm_state_requested[i] = DISABLE_STM;
+ driver->stm_state[i] = DISABLE_STM;
+ }
+
+ if (driver->hdlc_buf == NULL) {
+ driver->hdlc_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+ if (!driver->hdlc_buf)
+ goto err;
+ kmemleak_not_leak(driver->hdlc_buf);
+ }
+ if (driver->user_space_data_buf == NULL)
+ driver->user_space_data_buf = kzalloc(USER_SPACE_DATA,
+ GFP_KERNEL);
+ if (driver->user_space_data_buf == NULL)
+ goto err;
+ kmemleak_not_leak(driver->user_space_data_buf);
+ if (driver->client_map == NULL &&
+ (driver->client_map = kzalloc
+ ((driver->num_clients) * sizeof(struct diag_client_map),
+ GFP_KERNEL)) == NULL)
+ goto err;
+ kmemleak_not_leak(driver->client_map);
+ if (driver->data_ready == NULL &&
+ (driver->data_ready = kzalloc(driver->num_clients * sizeof(int)
+ , GFP_KERNEL)) == NULL)
+ goto err;
+ kmemleak_not_leak(driver->data_ready);
+ for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+ atomic_set(&driver->data_ready_notif[i], 0);
+ if (driver->apps_req_buf == NULL) {
+ driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+ if (!driver->apps_req_buf)
+ goto err;
+ kmemleak_not_leak(driver->apps_req_buf);
+ }
+ if (driver->dci_pkt_buf == NULL) {
+ driver->dci_pkt_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+ if (!driver->dci_pkt_buf)
+ goto err;
+ kmemleak_not_leak(driver->dci_pkt_buf);
+ }
+ if (driver->apps_rsp_buf == NULL) {
+ driver->apps_rsp_buf = kzalloc(DIAG_MAX_RSP_SIZE, GFP_KERNEL);
+ if (driver->apps_rsp_buf == NULL)
+ goto err;
+ kmemleak_not_leak(driver->apps_rsp_buf);
+ }
+ driver->diag_wq = create_singlethread_workqueue("diag_wq");
+ if (!driver->diag_wq)
+ goto err;
+ ret = diag_mux_register(DIAG_LOCAL_PROC, DIAG_LOCAL_PROC,
+ &diagfwd_mux_ops);
+ if (ret) {
+ pr_err("diag: Unable to register with USB, err: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ pr_err("diag: In %s, couldn't initialize diag\n", __func__);
+
+ diag_usb_exit(DIAG_USB_LOCAL);
+ kfree(driver->encoded_rsp_buf);
+ kfree(driver->hdlc_buf);
+ kfree(driver->client_map);
+ kfree(driver->data_ready);
+ kfree(driver->apps_req_buf);
+ kfree(driver->dci_pkt_buf);
+ kfree(driver->apps_rsp_buf);
+ kfree(hdlc_decode);
+ kfree(driver->user_space_data_buf);
+ if (driver->diag_wq)
+ destroy_workqueue(driver->diag_wq);
+ return -ENOMEM;
+}
+
+void diagfwd_exit(void)
+{
+ kfree(driver->encoded_rsp_buf);
+ kfree(driver->hdlc_buf);
+ kfree(hdlc_decode);
+ kfree(driver->client_map);
+ kfree(driver->data_ready);
+ kfree(driver->apps_req_buf);
+ kfree(driver->dci_pkt_buf);
+ kfree(driver->apps_rsp_buf);
+ kfree(driver->user_space_data_buf);
+ destroy_workqueue(driver->diag_wq);
+}
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
new file mode 100644
index 000000000000..8b097cfc4527
--- /dev/null
+++ b/drivers/char/diag/diagfwd.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_H
+#define DIAGFWD_H
+
+/*
+ * The context applies to Diag SMD data buffers. It is used to identify a
+ * buffer once it has been written to USB.
+ */
+#define SET_BUF_CTXT(p, d, n) \
+ (((p & 0xFF) << 16) | ((d & 0xFF) << 8) | (n & 0xFF))
+#define SET_PD_CTXT(u) ((u & 0xFF) << 24)
+#define GET_BUF_PERIPHERAL(p) ((p & 0xFF0000) >> 16)
+#define GET_BUF_TYPE(d) ((d & 0x00FF00) >> 8)
+#define GET_BUF_NUM(n) ((n & 0x0000FF))
+#define GET_PD_CTXT(u) ((u & 0xFF000000) >> 24)
+
+#define CHK_OVERFLOW(bufStart, start, end, length) \
+ ((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
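+
+/*
+ * Worked example (illustrative values): SET_BUF_CTXT(p, d, n) with p = 2,
+ * d = 3, n = 1 packs to 0x020301, from which GET_BUF_PERIPHERAL(),
+ * GET_BUF_TYPE() and GET_BUF_NUM() recover 2, 3 and 1 respectively.
+ * CHK_OVERFLOW(buf, pos, buf + size, len) evaluates to 1 only when pos is
+ * within the buffer and at least len bytes remain before buf + size.
+ */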
+
+int diagfwd_init(void);
+void diagfwd_exit(void);
+void diag_process_hdlc_pkt(void *data, unsigned int len, int pid);
+void diag_process_non_hdlc_pkt(unsigned char *data, int len, int pid);
+int chk_config_get_id(void);
+int chk_apps_only(void);
+int chk_apps_master(void);
+int chk_polling_response(void);
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len);
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len);
+int diag_check_common_cmd(struct diag_pkt_header_t *header);
+void diag_update_userspace_clients(unsigned int type);
+void diag_update_sleeping_process(int process_id, int data_type);
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid);
+void diag_send_error_rsp(unsigned char *buf, int len, int pid);
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
+void diag_md_hdlc_reset_timer_func(unsigned long pid);
+void diag_update_md_clients(unsigned int type);
+#endif
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
new file mode 100644
index 000000000000..ad6203fe5684
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
+#include <linux/platform_device.h>
+#include "diag_mux.h"
+#include "diagfwd_bridge.h"
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#endif
+#include "diagfwd_mhi.h"
+#include "diag_dci.h"
+
+#ifndef CONFIG_USB_QCOM_DIAG_BRIDGE
+static int diag_hsic_init(void)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifndef CONFIG_MSM_MHI
+static int diag_mhi_init(void)
+{
+ return -EINVAL;
+}
+#endif
+
+#define BRIDGE_TO_MUX(x) (x + DIAG_MUX_BRIDGE_BASE)
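+
+/*
+ * For example, BRIDGE_TO_MUX(DIAGFWD_MDM) maps bridge id 0 into the mux id
+ * space starting at DIAG_MUX_BRIDGE_BASE, keeping local and remote mux ids
+ * from colliding.
+ */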
+
+struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV] = {
+ {
+ .id = DIAGFWD_MDM,
+ .type = DIAG_DATA_TYPE,
+ .name = "MDM",
+ .inited = 0,
+ .ctxt = 0,
+ .dev_ops = NULL,
+ .dci_read_ptr = NULL,
+ .dci_read_buf = NULL,
+ .dci_read_len = 0,
+ .dci_wq = NULL,
+ },
+ {
+ .id = DIAGFWD_SMUX,
+ .type = DIAG_DATA_TYPE,
+ .name = "SMUX",
+ .inited = 0,
+ .ctxt = 0,
+ .dci_read_ptr = NULL,
+ .dev_ops = NULL,
+ .dci_read_buf = NULL,
+ .dci_read_len = 0,
+ .dci_wq = NULL,
+ },
+ {
+ .id = DIAGFWD_MDM_DCI,
+ .type = DIAG_DCI_TYPE,
+ .name = "MDM_DCI",
+ .inited = 0,
+ .ctxt = 0,
+ .dci_read_ptr = NULL,
+ .dev_ops = NULL,
+ .dci_read_buf = NULL,
+ .dci_read_len = 0,
+ .dci_wq = NULL,
+ },
+};
+
+static int diagfwd_bridge_mux_connect(int id, int mode)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->open)
+ bridge_info[id].dev_ops->open(bridge_info[id].ctxt);
+ return 0;
+}
+
+static int diagfwd_bridge_mux_disconnect(int id, int mode)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+
+ if ((mode == DIAG_USB_MODE &&
+ driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) ||
+ (mode == DIAG_MEMORY_DEVICE_MODE &&
+ driver->logging_mode == DIAG_USB_MODE)) {
+		/*
+		 * Don't close the MHI channels when USB is disconnected
+		 * and a process is running in memory device mode.
+		 */
+ return 0;
+ }
+
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->close)
+ bridge_info[id].dev_ops->close(bridge_info[id].ctxt);
+ return 0;
+}
+
+static int diagfwd_bridge_mux_read_done(unsigned char *buf, int len, int id)
+{
+ return diagfwd_bridge_write(id, buf, len);
+}
+
+static int diagfwd_bridge_mux_write_done(unsigned char *buf, int len,
+ int buf_ctx, int id)
+{
+ struct diagfwd_bridge_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ ch = &bridge_info[buf_ctx];
+ if (ch->dev_ops && ch->dev_ops->fwd_complete)
+ ch->dev_ops->fwd_complete(ch->ctxt, buf, len, 0);
+ return 0;
+}
+
+static struct diag_mux_ops diagfwd_bridge_mux_ops = {
+ .open = diagfwd_bridge_mux_connect,
+ .close = diagfwd_bridge_mux_disconnect,
+ .read_done = diagfwd_bridge_mux_read_done,
+ .write_done = diagfwd_bridge_mux_write_done
+};
+
+static void bridge_dci_read_work_fn(struct work_struct *work)
+{
+ struct diagfwd_bridge_info *ch = container_of(work,
+ struct diagfwd_bridge_info,
+ dci_read_work);
+ if (!ch)
+ return;
+ diag_process_remote_dci_read_data(ch->id, ch->dci_read_buf,
+ ch->dci_read_len);
+ if (ch->dev_ops && ch->dev_ops->fwd_complete) {
+ ch->dev_ops->fwd_complete(ch->ctxt, ch->dci_read_ptr,
+ ch->dci_read_len, 0);
+ }
+}
+
+int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops)
+{
+ int err = 0;
+ struct diagfwd_bridge_info *ch = NULL;
+ char wq_name[DIAG_BRIDGE_NAME_SZ + 10];
+
+ if (!ops) {
+ pr_err("diag: Invalid pointers ops: %pK ctxt: %d\n", ops, ctxt);
+ return -EINVAL;
+ }
+
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+
+ ch = &bridge_info[id];
+ ch->ctxt = ctxt;
+ ch->dev_ops = ops;
+ switch (ch->type) {
+ case DIAG_DATA_TYPE:
+ err = diag_mux_register(BRIDGE_TO_MUX(id), id,
+ &diagfwd_bridge_mux_ops);
+ if (err)
+ return err;
+ break;
+ case DIAG_DCI_TYPE:
+ ch->dci_read_buf = kzalloc(DIAG_MDM_BUF_SIZE, GFP_KERNEL);
+ if (!ch->dci_read_buf)
+ return -ENOMEM;
+ ch->dci_read_len = 0;
+		strlcpy(wq_name, "diag_dci_", sizeof(wq_name));
+		strlcat(wq_name, ch->name, sizeof(wq_name));
+ INIT_WORK(&(ch->dci_read_work), bridge_dci_read_work_fn);
+ ch->dci_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->dci_wq) {
+ kfree(ch->dci_read_buf);
+ return -ENOMEM;
+ }
+ break;
+ default:
+ pr_err("diag: Invalid channel type %d in %s\n", ch->type,
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int diag_remote_dev_open(int id)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ bridge_info[id].inited = 1;
+ if (bridge_info[id].type == DIAG_DATA_TYPE)
+ return diag_mux_queue_read(BRIDGE_TO_MUX(id));
+ else if (bridge_info[id].type == DIAG_DCI_TYPE)
+ return diag_dci_send_handshake_pkt(bridge_info[id].id);
+
+ return 0;
+}
+
+void diag_remote_dev_close(int id)
+{
+}
+
+int diag_remote_dev_read_done(int id, unsigned char *buf, int len)
+{
+ int err = 0;
+ struct diagfwd_bridge_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ ch = &bridge_info[id];
+ if (ch->type == DIAG_DATA_TYPE) {
+ err = diag_mux_write(BRIDGE_TO_MUX(id), buf, len, id);
+ if (ch->dev_ops && ch->dev_ops->queue_read)
+ ch->dev_ops->queue_read(ch->ctxt);
+ return err;
+ }
+	/*
+	 * For DCI channels, copy into the internal buffer. Don't queue any
+	 * further reads; a read is queued once we are done processing the
+	 * current packet.
+	 */
+ if (len <= 0 || len > DIAG_MDM_BUF_SIZE) {
+ pr_err_ratelimited("diag: Invalid len %d in %s, ch: %s\n",
+ len, __func__, ch->name);
+ return -EINVAL;
+ }
+ ch->dci_read_ptr = buf;
+ memcpy(ch->dci_read_buf, buf, len);
+ ch->dci_read_len = len;
+ queue_work(ch->dci_wq, &ch->dci_read_work);
+ return 0;
+}
+
+int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+
+ if (bridge_info[id].type == DIAG_DATA_TYPE) {
+ if (buf == driver->hdlc_encode_buf)
+ driver->hdlc_encode_buf_len = 0;
+ /*
+ * For remote processor, the token offset is stripped from the
+ * buffer. Account for the token offset while checking against
+ * the original buffer
+ */
+ if (buf == (driver->user_space_data_buf + sizeof(int)))
+ driver->user_space_data_busy = 0;
+ err = diag_mux_queue_read(BRIDGE_TO_MUX(id));
+ } else {
+ err = diag_dci_write_done_bridge(id, buf, len);
+ }
+ return err;
+}
+
+int diagfwd_bridge_init(bool use_mhi)
+{
+ int err = 0;
+
+ if (use_mhi)
+ err = diag_mhi_init();
+ else
+ err = diag_hsic_init();
+ if (err)
+ goto fail;
+ return 0;
+
+fail:
+	pr_err("diag: Unable to initialize diagfwd bridge, err: %d\n", err);
+ return err;
+}
+
+void diagfwd_bridge_exit(void)
+{
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+	diag_hsic_exit();
+#endif
+}
+
+int diagfwd_bridge_close(int id)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->close)
+ return bridge_info[id].dev_ops->close(bridge_info[id].ctxt);
+ return 0;
+}
+
+int diagfwd_bridge_write(int id, unsigned char *buf, int len)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->write) {
+ return bridge_info[id].dev_ops->write(bridge_info[id].ctxt,
+ buf, len, 0);
+ }
+ return 0;
+}
+
+uint16_t diag_get_remote_device_mask(void)
+{
+ int i;
+ uint16_t remote_dev = 0;
+
+ for (i = 0; i < NUM_REMOTE_DEV; i++) {
+ if (bridge_info[i].inited &&
+ bridge_info[i].type == DIAG_DATA_TYPE) {
+ remote_dev |= 1 << i;
+ }
+ }
+
+ return remote_dev;
+}
+
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
new file mode 100644
index 000000000000..250ef07b0b04
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_BRIDGE_H
+#define DIAGFWD_BRIDGE_H
+
+/*
+ * Add Data channels at the top half and the DCI channels at the
+ * bottom half of this list.
+ */
+#define DIAGFWD_MDM 0
+#define DIAGFWD_SMUX 1
+#define NUM_REMOTE_DATA_DEV 2
+#define DIAGFWD_MDM_DCI NUM_REMOTE_DATA_DEV
+#define NUM_REMOTE_DCI_DEV (DIAGFWD_MDM_DCI - NUM_REMOTE_DATA_DEV + 1)
+#define NUM_REMOTE_DEV (NUM_REMOTE_DATA_DEV + NUM_REMOTE_DCI_DEV)
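+
+/*
+ * With the list above this works out to: DIAGFWD_MDM = 0, DIAGFWD_SMUX = 1,
+ * DIAGFWD_MDM_DCI = 2, NUM_REMOTE_DATA_DEV = 2, NUM_REMOTE_DCI_DEV = 1,
+ * and NUM_REMOTE_DEV = 3, the size of the bridge_info[] table.
+ */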
+
+#define DIAG_BRIDGE_NAME_SZ 24
+#define DIAG_BRIDGE_GET_NAME(x) (bridge_info[x].name)
+
+struct diag_remote_dev_ops {
+ int (*open)(int id);
+ int (*close)(int id);
+ int (*queue_read)(int id);
+ int (*write)(int id, unsigned char *buf, int len, int ctxt);
+ int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt);
+};
+
+struct diagfwd_bridge_info {
+ int id;
+ int type;
+ int inited;
+ int ctxt;
+ char name[DIAG_BRIDGE_NAME_SZ];
+ struct diag_remote_dev_ops *dev_ops;
+ /* DCI related variables. These would be NULL for data channels */
+ void *dci_read_ptr;
+ unsigned char *dci_read_buf;
+ int dci_read_len;
+ struct workqueue_struct *dci_wq;
+ struct work_struct dci_read_work;
+};
+
+extern struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV];
+int diagfwd_bridge_init(bool use_mhi);
+void diagfwd_bridge_exit(void);
+int diagfwd_bridge_close(int id);
+int diagfwd_bridge_write(int id, unsigned char *buf, int len);
+uint16_t diag_get_remote_device_mask(void);
+
+/* The following functions must be called by Diag remote devices only. */
+int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops);
+int diag_remote_dev_open(int id);
+void diag_remote_dev_close(int id);
+int diag_remote_dev_read_done(int id, unsigned char *buf, int len);
+int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
new file mode 100644
index 000000000000..907bf6ee8511
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -0,0 +1,1696 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_bridge.h"
+#include "diag_dci.h"
+#include "diagmem.h"
+#include "diag_masks.h"
+#include "diag_ipc_logging.h"
+#include "diag_mux.h"
+
+#define FEATURE_SUPPORTED(x) ((feature_mask << (i * 8)) & (1 << x))
+
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
+static void diag_notify_md_client(uint8_t peripheral, int data);
+
+static void diag_mask_update_work_fn(struct work_struct *work)
+{
+ uint8_t peripheral;
+
+ for (peripheral = 0; peripheral <= NUM_PERIPHERALS; peripheral++) {
+ if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
+ continue;
+ mutex_lock(&driver->cntl_lock);
+ driver->mask_update ^= PERIPHERAL_MASK(peripheral);
+ mutex_unlock(&driver->cntl_lock);
+ diag_send_updates_peripheral(peripheral);
+ }
+}
+
+void diag_cntl_channel_open(struct diagfwd_info *p_info)
+{
+ if (!p_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid fwd_info structure\n");
+ return;
+ }
+ driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
+ queue_work(driver->cntl_wq, &driver->mask_update_work);
+ diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
+}
+
+void diag_cntl_channel_close(struct diagfwd_info *p_info)
+{
+ uint8_t peripheral;
+
+ if (!p_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid fwd_info structure\n");
+ return;
+ }
+
+ peripheral = p_info->peripheral;
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
+ return;
+ }
+
+ driver->feature[peripheral].sent_feature_mask = 0;
+ driver->feature[peripheral].rcvd_feature_mask = 0;
+ reg_dirty |= PERIPHERAL_MASK(peripheral);
+ diag_cmd_remove_reg_by_proc(peripheral);
+ driver->feature[peripheral].stm_support = DISABLE_STM;
+ driver->feature[peripheral].log_on_demand = 0;
+ driver->stm_state[peripheral] = DISABLE_STM;
+ driver->stm_state_requested[peripheral] = DISABLE_STM;
+ reg_dirty ^= PERIPHERAL_MASK(peripheral);
+ diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
+}
+
+static void diag_stm_update_work_fn(struct work_struct *work)
+{
+ uint8_t i;
+ uint16_t peripheral_mask = 0;
+ int err = 0;
+
+ mutex_lock(&driver->cntl_lock);
+ peripheral_mask = driver->stm_peripheral;
+ driver->stm_peripheral = 0;
+ mutex_unlock(&driver->cntl_lock);
+
+ if (peripheral_mask == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Empty Peripheral mask\n");
+ return;
+ }
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->feature[i].stm_support)
+ continue;
+ if (peripheral_mask & PERIPHERAL_MASK(i)) {
+ err = diag_send_stm_state(i,
+ (uint8_t)(driver->stm_state_requested[i]));
+ if (!err) {
+ driver->stm_state[i] =
+ driver->stm_state_requested[i];
+ }
+ }
+ }
+}
+
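+/*
+ * Notify the memory-device client that owns this peripheral's session:
+ * the peripheral mask and the open/closed status are packed into
+ * si_int and delivered via SIGCONT. Only meaningful while logging in
+ * DIAG_MEMORY_DEVICE_MODE.
+ */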
+void diag_notify_md_client(uint8_t peripheral, int data)
+{
+ int stat = 0;
+ struct siginfo info;
+ struct pid *pid_struct;
+ struct task_struct *result;
+
+ if (peripheral > NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
+ return;
+ }
+
+ if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid logging_mode (%d)\n",
+ driver->logging_mode);
+ return;
+ }
+
+ mutex_lock(&driver->md_session_lock);
+ memset(&info, 0, sizeof(struct siginfo));
+ info.si_code = SI_QUEUE;
+ info.si_int = (PERIPHERAL_MASK(peripheral) | data);
+ info.si_signo = SIGCONT;
+
+ if (!driver->md_session_map[peripheral] ||
+ driver->md_session_map[peripheral]->pid <= 0) {
+ pr_err("diag: md_session_map[%d] is invalid\n", peripheral);
+ mutex_unlock(&driver->md_session_lock);
+ return;
+ }
+
+ pid_struct = find_get_pid(
+ driver->md_session_map[peripheral]->pid);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "md_session_map[%d] pid = %d task = %pK\n",
+ peripheral,
+ driver->md_session_map[peripheral]->pid,
+ driver->md_session_map[peripheral]->task);
+
+ if (pid_struct) {
+ result = get_pid_task(pid_struct, PIDTYPE_PID);
+
+ if (!result) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: md_session_map[%d] with pid = %d has exited\n",
+ peripheral,
+ driver->md_session_map[peripheral]->pid);
+ mutex_unlock(&driver->md_session_lock);
+ return;
+ }
+
+ if (driver->md_session_map[peripheral] &&
+ driver->md_session_map[peripheral]->task == result) {
+ stat = send_sig_info(info.si_signo,
+ &info, result);
+ if (stat)
+ pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
+ info.si_int, stat);
+ } else
+ pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x, stat: %d\n",
+ peripheral, info.si_int, stat);
+ }
+ mutex_unlock(&driver->md_session_lock);
+}
+
+static void process_pd_status(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ struct diag_ctrl_msg_pd_status *pd_msg = NULL;
+ uint32_t pd;
+ int status = DIAG_STATUS_CLOSED;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pd_msg_len = %d\n",
+ !buf, peripheral, len, (int)sizeof(*pd_msg));
+ return;
+ }
+
+ pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
+ pd = pd_msg->pd_id;
+ status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
+ diag_notify_md_client(peripheral, status);
+}
+
+static void enable_stm_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
+ return;
+ }
+
+ mutex_lock(&driver->cntl_lock);
+ driver->feature[peripheral].stm_support = ENABLE_STM;
+ driver->stm_peripheral |= PERIPHERAL_MASK(peripheral);
+ mutex_unlock(&driver->cntl_lock);
+
+ queue_work(driver->cntl_wq, &(driver->stm_update_work));
+}
+
+static void enable_socket_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
+ return;
+ }
+
+ if (driver->supports_sockets)
+ driver->feature[peripheral].sockets_enabled = 1;
+ else
+ driver->feature[peripheral].sockets_enabled = 0;
+}
+
+static void process_hdlc_encoding_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
+ return;
+ }
+
+ if (driver->supports_apps_hdlc_encoding) {
+ driver->feature[peripheral].encode_hdlc =
+ ENABLE_APPS_HDLC_ENCODING;
+ } else {
+ driver->feature[peripheral].encode_hdlc =
+ DISABLE_APPS_HDLC_ENCODING;
+ }
+}
+
+static void process_upd_header_untagging_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
+ return;
+ }
+
+ if (driver->supports_apps_header_untagging) {
+ driver->feature[peripheral].untag_header =
+ ENABLE_PKT_HEADER_UNTAGGING;
+ } else {
+ driver->feature[peripheral].untag_header =
+ DISABLE_PKT_HEADER_UNTAGGING;
+ }
+}
+
+static void process_command_deregistration(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ uint8_t *ptr = buf;
+ int i;
+ int header_len = sizeof(struct diag_ctrl_cmd_dereg);
+ int read_len = 0;
+ struct diag_ctrl_cmd_dereg *dereg = NULL;
+ struct cmd_code_range *range = NULL;
+ struct diag_cmd_reg_entry_t del_entry;
+
+ /*
+	 * Perform basic sanity checks. The len field is the size of the
+	 * data payload; it does not include the header size.
+ */
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
+ return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: peripheral(%d) command deregistration packet processing started\n",
+ peripheral);
+
+ dereg = (struct diag_ctrl_cmd_dereg *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ if (dereg->count_entries == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: received dereg tbl with no entries\n");
+ return;
+ }
+
+ for (i = 0; i < dereg->count_entries && read_len < len; i++) {
+ range = (struct cmd_code_range *)ptr;
+ ptr += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+ read_len += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+ del_entry.cmd_code = dereg->cmd_code;
+ del_entry.subsys_id = dereg->subsysid;
+ del_entry.cmd_code_hi = range->cmd_code_hi;
+ del_entry.cmd_code_lo = range->cmd_code_lo;
+ diag_cmd_remove_reg(&del_entry, peripheral);
+ }
+
+ if (i != dereg->count_entries) {
+ pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+ __func__, read_len, len, dereg->count_entries);
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: peripheral(%d) command deregistration packet processing complete\n",
+ peripheral);
+}
+
+static void process_command_registration(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ uint8_t *ptr = buf;
+ int i;
+ int header_len = sizeof(struct diag_ctrl_cmd_reg);
+ int read_len = 0;
+ struct diag_ctrl_cmd_reg *reg = NULL;
+ struct cmd_code_range *range = NULL;
+ struct diag_cmd_reg_entry_t new_entry;
+
+ /*
+	 * Perform basic sanity checks. The len field is the size of the
+	 * data payload; it does not include the header size.
+ */
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
+ return;
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: peripheral(%d) command registration packet processing started\n",
+ peripheral);
+
+ reg = (struct diag_ctrl_cmd_reg *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ if (reg->count_entries == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: In %s, received reg tbl with no entries\n",
+ __func__);
+ return;
+ }
+
+ for (i = 0; i < reg->count_entries && read_len < len; i++) {
+ range = (struct cmd_code_range *)ptr;
+ ptr += sizeof(struct cmd_code_range);
+ read_len += sizeof(struct cmd_code_range);
+ new_entry.cmd_code = reg->cmd_code;
+ new_entry.subsys_id = reg->subsysid;
+ new_entry.cmd_code_hi = range->cmd_code_hi;
+ new_entry.cmd_code_lo = range->cmd_code_lo;
+ diag_cmd_add_reg(&new_entry, peripheral, INVALID_PID);
+ }
+
+ if (i != reg->count_entries) {
+ pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+ __func__, read_len, len, reg->count_entries);
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: peripheral(%d) command registration packet processing complete\n",
+ peripheral);
+}
+
+static void diag_close_transport_work_fn(struct work_struct *work)
+{
+ uint8_t transport;
+ uint8_t peripheral;
+
+ mutex_lock(&driver->cntl_lock);
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (!(driver->close_transport & PERIPHERAL_MASK(peripheral)))
+ continue;
+ driver->close_transport ^= PERIPHERAL_MASK(peripheral);
+ transport = driver->feature[peripheral].sockets_enabled ?
+ TRANSPORT_SMD : TRANSPORT_SOCKET;
+ diagfwd_close_transport(transport, peripheral);
+ }
+ mutex_unlock(&driver->cntl_lock);
+}
+
+static void process_socket_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
+ return;
+ }
+
+ mutex_lock(&driver->cntl_lock);
+ driver->close_transport |= PERIPHERAL_MASK(peripheral);
+ queue_work(driver->cntl_wq, &driver->close_transport_work);
+ mutex_unlock(&driver->cntl_lock);
+}
+
+static void process_log_on_demand_feature(uint8_t peripheral)
+{
+ /* Log On Demand command is registered only on Modem */
+ if (peripheral != PERIPHERAL_MODEM)
+ return;
+
+ if (driver->feature[PERIPHERAL_MODEM].log_on_demand)
+ driver->log_on_demand_support = 1;
+ else
+ driver->log_on_demand_support = 0;
+}
+
+static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int header_len = sizeof(struct diag_ctrl_feature_mask);
+ int read_len = 0;
+ struct diag_ctrl_feature_mask *header = NULL;
+ uint32_t feature_mask_len = 0;
+ uint32_t feature_mask = 0;
+ uint8_t *ptr = buf;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
+ return;
+ }
+
+ header = (struct diag_ctrl_feature_mask *)ptr;
+ ptr += header_len;
+ feature_mask_len = header->feature_mask_len;
+
+ if (feature_mask_len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: In %s, received invalid feature mask from peripheral %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (feature_mask_len > FEATURE_MASK_LEN) {
+		pr_alert("diag: Receiving feature mask length more than Apps supports\n");
+ feature_mask_len = FEATURE_MASK_LEN;
+ }
+
+ diag_cmd_remove_reg_by_proc(peripheral);
+
+ driver->feature[peripheral].rcvd_feature_mask = 1;
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: Received feature mask for peripheral %d\n", peripheral);
+
+ for (i = 0; i < feature_mask_len && read_len < len; i++) {
+ feature_mask = *(uint8_t *)ptr;
+ driver->feature[peripheral].feature_mask[i] = feature_mask;
+ ptr += sizeof(uint8_t);
+ read_len += sizeof(uint8_t);
+
+ if (FEATURE_SUPPORTED(F_DIAG_LOG_ON_DEMAND_APPS))
+ driver->feature[peripheral].log_on_demand = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_REQ_RSP_SUPPORT))
+ driver->feature[peripheral].separate_cmd_rsp = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
+ process_hdlc_encoding_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_PKT_HEADER_UNTAG))
+ process_upd_header_untagging_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_STM))
+ enable_stm_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
+ driver->feature[peripheral].mask_centralization = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_PERIPHERAL_BUFFERING))
+ driver->feature[peripheral].peripheral_buffering = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_PD_BUFFERING))
+ driver->feature[peripheral].pd_buffering = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
+ enable_socket_feature(peripheral);
+ }
+
+ process_socket_feature(peripheral);
+ process_log_on_demand_feature(peripheral);
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: Peripheral(%d) feature mask is processed\n",
+ peripheral);
+}
+
+static void process_last_event_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ struct diag_ctrl_last_event_report *header = NULL;
+ uint8_t *ptr = buf;
+ uint8_t *temp = NULL;
+ uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
+ uint16_t event_size = 0;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pkt_len = %d\n",
+ !buf, peripheral, len, pkt_len);
+ return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: started processing last event report for peripheral (%d)\n",
+ peripheral);
+
+ mutex_lock(&event_mask.lock);
+ header = (struct diag_ctrl_last_event_report *)ptr;
+ event_size = ((header->event_last_id / 8) + 1);
+ if (event_size >= driver->event_mask_size) {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+			"diag: receiving event mask size more than Apps can handle\n");
+ temp = krealloc(driver->event_mask->ptr, event_size,
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, unable to reallocate event mask to support events from %d\n",
+ __func__, peripheral);
+ goto err;
+ }
+ driver->event_mask->ptr = temp;
+ driver->event_mask_size = event_size;
+ }
+
+ driver->num_event_id[peripheral] = header->event_last_id;
+ if (header->event_last_id > driver->last_event_id)
+ driver->last_event_id = header->event_last_id;
+err:
+ mutex_unlock(&event_mask.lock);
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: last event report processed for peripheral (%d)\n",
+ peripheral);
+}
+
+static void process_log_range_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int read_len = 0;
+ int header_len = sizeof(struct diag_ctrl_log_range_report);
+ uint8_t *ptr = buf;
+ struct diag_ctrl_log_range_report *header = NULL;
+ struct diag_ctrl_log_range *log_range = NULL;
+ struct diag_log_mask_t *mask_ptr = NULL;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
+ return;
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: started processing log range report for peripheral (%d)\n",
+ peripheral);
+
+ header = (struct diag_ctrl_log_range_report *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ driver->num_equip_id[peripheral] = header->num_ranges;
+ for (i = 0; i < header->num_ranges && read_len < len; i++) {
+ log_range = (struct diag_ctrl_log_range *)ptr;
+ ptr += sizeof(struct diag_ctrl_log_range);
+ read_len += sizeof(struct diag_ctrl_log_range);
+
+ if (log_range->equip_id >= MAX_EQUIP_ID) {
+ pr_err("diag: receiving log equip id %d more than supported equip id: %d from peripheral: %d\n",
+ log_range->equip_id, MAX_EQUIP_ID, peripheral);
+ continue;
+ }
+ mask_ptr = (struct diag_log_mask_t *)log_mask.ptr;
+ mask_ptr = &mask_ptr[log_range->equip_id];
+
+ mutex_lock(&(mask_ptr->lock));
+ mask_ptr->num_items = log_range->num_items;
+ mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
+ mutex_unlock(&(mask_ptr->lock));
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: log range report processed for peripheral (%d)\n",
+ peripheral);
+}
+
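+/*
+ * Grow an existing msg mask table entry so that it covers the SSID
+ * range reported by the peripheral; the entry's first SSID is kept
+ * and only the last SSID (and therefore the range) is extended.
+ */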
+static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
+ struct diag_ssid_range_t *range)
+{
+ uint32_t temp_range;
+
+ if (!mask || !range) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid %s\n",
+ (!mask ? "mask" : (!range ? "range" : " ")));
+ return -EIO;
+ }
+ if (range->ssid_last < range->ssid_first) {
+ pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+ __func__, range->ssid_first, range->ssid_last);
+ return -EINVAL;
+ }
+ if (range->ssid_last >= mask->ssid_last) {
+ temp_range = range->ssid_last - mask->ssid_first + 1;
+ mask->ssid_last = range->ssid_last;
+ mask->range = temp_range;
+ }
+
+ return 0;
+}
+
+static void process_ssid_range_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int j;
+ int read_len = 0;
+ int found = 0;
+ int new_size = 0;
+ int err = 0;
+ struct diag_ctrl_ssid_range_report *header = NULL;
+ struct diag_ssid_range_t *ssid_range = NULL;
+ int header_len = sizeof(struct diag_ctrl_ssid_range_report);
+ struct diag_msg_mask_t *mask_ptr = NULL;
+ uint8_t *ptr = buf;
+ uint8_t *temp = NULL;
+ uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, min_len = %d\n",
+ !buf, peripheral, len, min_len);
+ return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: started processing ssid range for peripheral (%d)\n",
+ peripheral);
+
+ header = (struct diag_ctrl_ssid_range_report *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ mutex_lock(&driver->msg_mask_lock);
+ driver->max_ssid_count[peripheral] = header->count;
+ for (i = 0; i < header->count && read_len < len; i++) {
+ ssid_range = (struct diag_ssid_range_t *)ptr;
+ ptr += sizeof(struct diag_ssid_range_t);
+ read_len += sizeof(struct diag_ssid_range_t);
+ mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+ found = 0;
+ for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
+ if (!mask_ptr || !ssid_range) {
+ found = 1;
+ break;
+ }
+ if (mask_ptr->ssid_first != ssid_range->ssid_first)
+ continue;
+ mutex_lock(&mask_ptr->lock);
+ err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
+ mutex_unlock(&mask_ptr->lock);
+ if (err == -ENOMEM) {
+ pr_err("diag: In %s, unable to increase the msg mask table range\n",
+ __func__);
+ }
+ found = 1;
+ break;
+ }
+
+ if (found)
+ continue;
+
+ new_size = (driver->msg_mask_tbl_count + 1) *
+ sizeof(struct diag_msg_mask_t);
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+			"diag: receiving msg mask size more than Apps can handle\n");
+ temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
+ __func__, ssid_range->ssid_first,
+ ssid_range->ssid_last);
+ continue;
+ }
+ msg_mask.ptr = temp;
+ mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+ err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
+ if (err) {
+ pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
+ __func__, ssid_range->ssid_first,
+ ssid_range->ssid_last, err);
+ continue;
+ }
+ driver->msg_mask_tbl_count += 1;
+ }
+ mutex_unlock(&driver->msg_mask_lock);
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: processed ssid range for peripheral(%d)\n",
+ peripheral);
+}
+
+static void diag_build_time_mask_update(uint8_t *buf,
+ struct diag_ssid_range_t *range)
+{
+ int i;
+ int j;
+ int num_items = 0;
+ int err = 0;
+ int found = 0;
+ int new_size = 0;
+ uint8_t *temp = NULL;
+ uint32_t *mask_ptr = (uint32_t *)buf;
+ uint32_t *dest_ptr = NULL;
+ struct diag_msg_mask_t *build_mask = NULL;
+
+ if (!range || !buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid %s\n",
+ (!range ? "range" : (!buf ? "buf" : " ")));
+ return;
+ }
+
+ if (range->ssid_last < range->ssid_first) {
+ pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+ __func__, range->ssid_first, range->ssid_last);
+ return;
+ }
+ mutex_lock(&driver->msg_mask_lock);
+ build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
+ num_items = range->ssid_last - range->ssid_first + 1;
+
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+ if (!build_mask) {
+ found = 1;
+ break;
+ }
+ if (build_mask->ssid_first != range->ssid_first)
+ continue;
+ found = 1;
+ mutex_lock(&build_mask->lock);
+ err = update_msg_mask_tbl_entry(build_mask, range);
+ if (err == -ENOMEM) {
+ pr_err("diag: In %s, unable to increase the msg build mask table range\n",
+ __func__);
+ }
+ dest_ptr = build_mask->ptr;
+ for (j = 0; (j < build_mask->range) && mask_ptr && dest_ptr;
+ j++, mask_ptr++, dest_ptr++)
+			*dest_ptr |= *mask_ptr;
+ mutex_unlock(&build_mask->lock);
+ break;
+ }
+
+ if (found)
+ goto end;
+
+ new_size = (driver->bt_msg_mask_tbl_count + 1) *
+ sizeof(struct diag_msg_mask_t);
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+		"diag: receiving build time mask size more than Apps can handle\n");
+
+ temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, unable to create a new entry for build time mask\n",
+ __func__);
+ goto end;
+ }
+ driver->build_time_mask->ptr = temp;
+ build_mask = (struct diag_msg_mask_t *)driver->build_time_mask->ptr;
+ err = diag_create_msg_mask_table_entry(build_mask, range);
+ if (err) {
+ pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
+ __func__, err);
+ goto end;
+ }
+ driver->bt_msg_mask_tbl_count += 1;
+end:
+ mutex_unlock(&driver->msg_mask_lock);
+}
+
+static void process_build_mask_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int read_len = 0;
+ int num_items = 0;
+ int header_len = sizeof(struct diag_ctrl_build_mask_report);
+ uint8_t *ptr = buf;
+ struct diag_ctrl_build_mask_report *header = NULL;
+ struct diag_ssid_range_t *range = NULL;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, header_len = %d\n",
+ !buf, peripheral, len, header_len);
+ return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: started processing build mask for peripheral(%d)\n",
+ peripheral);
+
+ header = (struct diag_ctrl_build_mask_report *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ for (i = 0; i < header->count && read_len < len; i++) {
+ range = (struct diag_ssid_range_t *)ptr;
+ ptr += sizeof(struct diag_ssid_range_t);
+ read_len += sizeof(struct diag_ssid_range_t);
+ num_items = range->ssid_last - range->ssid_first + 1;
+ diag_build_time_mask_update(ptr, range);
+ ptr += num_items * sizeof(uint32_t);
+ read_len += num_items * sizeof(uint32_t);
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: processing build mask complete (%d)\n", peripheral);
+}
+
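+/*
+ * Walk a control-channel read buffer: every packet begins with a
+ * diag_ctrl_pkt_header_t (pkt_id, len), so the loop consumes
+ * header_len + ctrl_pkt->len bytes per iteration and dispatches on
+ * pkt_id.
+ */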
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+ int len)
+{
+ uint32_t read_len = 0;
+ uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
+ uint8_t *ptr = buf;
+ struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
+
+ if (!buf || len <= 0 || !p_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid parameters\n");
+ return;
+ }
+
+ if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
+ pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
+ p_info->peripheral);
+ return;
+ }
+
+ while (read_len + header_len < len) {
+ ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+			"diag: peripheral: %d, pkt_id: %d\n",
+ p_info->peripheral, ctrl_pkt->pkt_id);
+ switch (ctrl_pkt->pkt_id) {
+ case DIAG_CTRL_MSG_REG:
+ process_command_registration(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_DEREG:
+ process_command_deregistration(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_FEATURE:
+ process_incoming_feature_mask(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_LAST_EVENT_REPORT:
+ process_last_event_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_LOG_RANGE_REPORT:
+ process_log_range_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_SSID_RANGE_REPORT:
+ process_ssid_range_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_BUILD_MASK_REPORT:
+ process_build_mask_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_PD_STATUS:
+ process_pd_status(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: Control packet %d not supported\n",
+ ctrl_pkt->pkt_id);
+ }
+ ptr += header_len + ctrl_pkt->len;
+ read_len += header_len + ctrl_pkt->len;
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: control packet processing complete\n");
+}
+
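+/*
+ * Resolve the effective logging mode from the process votes. A
+ * condensed view of the branches below (no additional policy):
+ *   no active DCI/MD process           -> MODE_REALTIME
+ *   an active process votes real time  -> MODE_REALTIME
+ *   USB connected                      -> follow the memory-device vote
+ *   otherwise                          -> MODE_NONREALTIME
+ */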
+static int diag_compute_real_time(int idx)
+{
+	int real_time = MODE_REALTIME;
+
+	if (driver->proc_active_mask == 0) {
+ /*
+ * There are no DCI or Memory Device processes. Diag should
+ * be in Real Time mode irrespective of USB connection
+ */
+ real_time = MODE_REALTIME;
+ } else if (driver->proc_rt_vote_mask[idx] & driver->proc_active_mask) {
+ /*
+		 * At least one process is alive and is voting for Real Time
+ * data - Diag should be in real time mode irrespective of USB
+ * connection.
+ */
+ real_time = MODE_REALTIME;
+ } else if (driver->usb_connected) {
+ /*
+ * If USB is connected, check individual process. If Memory
+ * Device Mode is active, set the mode requested by Memory
+ * Device process. Set to realtime mode otherwise.
+ */
+ if ((driver->proc_rt_vote_mask[idx] &
+ DIAG_PROC_MEMORY_DEVICE) == 0)
+ real_time = MODE_NONREALTIME;
+ else
+ real_time = MODE_REALTIME;
+ } else {
+ /*
+ * We come here if USB is not connected and the active
+ * processes are voting for Non realtime mode.
+ */
+ real_time = MODE_NONREALTIME;
+ }
+ return real_time;
+}
+
+static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
+ uint8_t diag_id, int real_time)
+{
+ struct diag_ctrl_msg_diagmode diagmode;
+ struct diag_ctrl_msg_diagmode_v2 diagmode_v2;
+ int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ int msg_size_2 = sizeof(struct diag_ctrl_msg_diagmode_v2);
+
+ if (!dest_buf)
+ return;
+
+ if (diag_id) {
+ diagmode_v2.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+ diagmode_v2.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN_V2;
+ diagmode_v2.version = 2;
+ diagmode_v2.sleep_vote = real_time ? 1 : 0;
+ /*
+ * 0 - Disables real-time logging (to prevent
+ * frequent APPS wake-ups, etc.).
+ * 1 - Enable real-time logging
+ */
+ diagmode_v2.real_time = real_time;
+ diagmode_v2.use_nrt_values = 0;
+ diagmode_v2.commit_threshold = 0;
+ diagmode_v2.sleep_threshold = 0;
+ diagmode_v2.sleep_time = 0;
+ diagmode_v2.drain_timer_val = 0;
+ diagmode_v2.event_stale_timer_val = 0;
+ diagmode_v2.diag_id = diag_id;
+ memcpy(dest_buf, &diagmode_v2, msg_size_2);
+ } else {
+ diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+ diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
+ diagmode.version = 1;
+ diagmode.sleep_vote = real_time ? 1 : 0;
+ /*
+ * 0 - Disables real-time logging (to prevent
+ * frequent APPS wake-ups, etc.).
+ * 1 - Enable real-time logging
+ */
+ diagmode.real_time = real_time;
+ diagmode.use_nrt_values = 0;
+ diagmode.commit_threshold = 0;
+ diagmode.sleep_threshold = 0;
+ diagmode.sleep_time = 0;
+ diagmode.drain_timer_val = 0;
+ diagmode.event_stale_timer_val = 0;
+ memcpy(dest_buf, &diagmode, msg_size);
+ }
+}
+
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
+{
+ int i;
+
+ mutex_lock(&driver->real_time_mutex);
+ if (vote)
+ driver->proc_active_mask |= proc;
+ else {
+ driver->proc_active_mask &= ~proc;
+ if (index == ALL_PROC) {
+ for (i = 0; i < DIAG_NUM_PROC; i++)
+ driver->proc_rt_vote_mask[i] |= proc;
+ } else {
+ driver->proc_rt_vote_mask[index] |= proc;
+ }
+ }
+ mutex_unlock(&driver->real_time_mutex);
+}
+
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index)
+{
+ int i;
+
+ if (index >= DIAG_NUM_PROC) {
+ pr_err("diag: In %s, invalid index %d\n", __func__, index);
+ return;
+ }
+
+ mutex_lock(&driver->real_time_mutex);
+ if (index == ALL_PROC) {
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ if (real_time)
+ driver->proc_rt_vote_mask[i] |= proc;
+ else
+ driver->proc_rt_vote_mask[i] &= ~proc;
+ }
+ } else {
+ if (real_time)
+ driver->proc_rt_vote_mask[index] |= proc;
+ else
+ driver->proc_rt_vote_mask[index] &= ~proc;
+ }
+ mutex_unlock(&driver->real_time_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+ unsigned char *buf = NULL;
+ int err = 0;
+ struct diag_dci_header_t dci_header;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ uint32_t write_len = 0;
+
+ if (token < 0 || token >= NUM_DCI_PROC) {
+ pr_err("diag: Invalid remote device channel in %s, token: %d\n",
+ __func__, token);
+ return;
+ }
+
+ if (real_time != MODE_REALTIME && real_time != MODE_NONREALTIME) {
+ pr_err("diag: Invalid real time value in %s, type: %d\n",
+ __func__, real_time);
+ return;
+ }
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ return;
+ }
+ /* Frame the DCI header */
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ dci_header.length = msg_size + 1;
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ diag_create_diag_mode_ctrl_pkt(buf + write_len, 0, real_time);
+ write_len += msg_size;
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+ err = diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, write_len);
+ if (err != write_len) {
+ pr_err("diag: cannot send nrt mode ctrl pkt, err: %d\n", err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ } else {
+ driver->real_time_mode[token + 1] = real_time;
+ }
+}
+#else
+static inline void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+}
+#endif
+
+#ifdef CONFIG_DIAG_OVER_USB
+void diag_real_time_work_fn(struct work_struct *work)
+{
+ int temp_real_time = MODE_REALTIME, i, j;
+ uint8_t send_update = 1;
+
+ /*
+ * If any peripheral in the local processor is in either threshold or
+ * circular buffering mode, don't send the real time mode control
+ * packet.
+ */
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->feature[i].peripheral_buffering)
+ continue;
+ switch (driver->buffering_mode[i].mode) {
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ send_update = 0;
+ break;
+ }
+ }
+
+ mutex_lock(&driver->mode_lock);
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ temp_real_time = diag_compute_real_time(i);
+ if (temp_real_time == driver->real_time_mode[i]) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: did not update real time mode on proc %d, already in the req mode %d\n",
+ i, temp_real_time);
+ continue;
+ }
+
+ if (i == DIAG_LOCAL_PROC) {
+ if (!send_update) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+					"diag: cannot send real time mode pkt since one of the peripherals is in buffering mode\n");
+ break;
+ }
+ for (j = 0; j < NUM_PERIPHERALS; j++)
+ diag_send_real_time_update(j,
+ temp_real_time);
+ } else {
+ diag_send_diag_mode_update_remote(i - 1,
+ temp_real_time);
+ }
+ }
+ mutex_unlock(&driver->mode_lock);
+
+ if (driver->real_time_update_busy > 0)
+ driver->real_time_update_busy--;
+}
+#else
+void diag_real_time_work_fn(struct work_struct *work)
+{
+ int temp_real_time = MODE_REALTIME, i, j;
+
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ if (driver->proc_active_mask == 0) {
+ /*
+ * There are no DCI or Memory Device processes.
+ * Diag should be in Real Time mode.
+ */
+ temp_real_time = MODE_REALTIME;
+ } else if (!(driver->proc_rt_vote_mask[i] &
+ driver->proc_active_mask)) {
+ /* No active process is voting for real time mode */
+ temp_real_time = MODE_NONREALTIME;
+ }
+ if (temp_real_time == driver->real_time_mode[i]) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: did not update real time mode on proc %d, already in the req mode %d\n",
+ i, temp_real_time);
+ continue;
+ }
+
+ if (i == DIAG_LOCAL_PROC) {
+ for (j = 0; j < NUM_PERIPHERALS; j++)
+ diag_send_real_time_update(
+ j, temp_real_time);
+ } else {
+ diag_send_diag_mode_update_remote(i - 1,
+ temp_real_time);
+ }
+ }
+
+ if (driver->real_time_update_busy > 0)
+ driver->real_time_update_busy--;
+}
+#endif
+
+static int __diag_send_real_time_update(uint8_t peripheral, int real_time,
+ uint8_t diag_id)
+{
+ char buf[sizeof(struct diag_ctrl_msg_diagmode_v2)];
+ int msg_size = 0;
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n", peripheral);
+ return err;
+ }
+
+ if (real_time != MODE_NONREALTIME && real_time != MODE_REALTIME) {
+ pr_err("diag: In %s, invalid real time mode %d, peripheral: %d\n",
+ __func__, real_time, peripheral);
+ return -EINVAL;
+ }
+
+ msg_size = (diag_id ? sizeof(struct diag_ctrl_msg_diagmode_v2) :
+ sizeof(struct diag_ctrl_msg_diagmode));
+
+ diag_create_diag_mode_ctrl_pkt(buf, diag_id, real_time);
+
+ mutex_lock(&driver->diag_cntl_mutex);
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
+
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write, peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ } else {
+ driver->real_time_mode[DIAG_LOCAL_PROC] = real_time;
+ }
+
+ mutex_unlock(&driver->diag_cntl_mutex);
+
+ return err;
+}
+
+int diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+ int i;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->buffering_flag[i])
+ continue;
+ /*
+ * One of the peripherals is in buffering mode. Don't set
+ * the RT value.
+ */
+ return -EINVAL;
+ }
+
+ return __diag_send_real_time_update(peripheral, real_time, 0);
+}
+
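+/*
+ * Map a user PD or peripheral identifier to the (diag_id, peripheral)
+ * pair used in v2 control packets. For example, UPD_WLAN resolves to
+ * DIAG_ID_WLAN on PERIPHERAL_MODEM; the diag_id is cleared again when
+ * the owning peripheral has not advertised F_DIAG_PD_BUFFERING, so
+ * callers fall back to the v1 (non-diag_id) packet formats.
+ */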
+void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral)
+{
+ switch (pd) {
+ case UPD_WLAN:
+ *diag_id = DIAG_ID_WLAN;
+ *peripheral = PERIPHERAL_MODEM;
+ break;
+ case UPD_AUDIO:
+ *diag_id = DIAG_ID_AUDIO;
+ *peripheral = PERIPHERAL_LPASS;
+ break;
+ case UPD_SENSORS:
+ *diag_id = DIAG_ID_SENSORS;
+ *peripheral = PERIPHERAL_LPASS;
+ break;
+ case PERIPHERAL_MODEM:
+ *diag_id = DIAG_ID_MPSS;
+ *peripheral = PERIPHERAL_MODEM;
+ break;
+ case PERIPHERAL_LPASS:
+ *diag_id = DIAG_ID_LPASS;
+ *peripheral = PERIPHERAL_LPASS;
+ break;
+ case PERIPHERAL_WCNSS:
+ *diag_id = 0;
+ *peripheral = PERIPHERAL_WCNSS;
+ break;
+ case PERIPHERAL_SENSORS:
+ *diag_id = 0;
+ *peripheral = PERIPHERAL_SENSORS;
+ break;
+ case PERIPHERAL_WDSP:
+ *diag_id = 0;
+ *peripheral = PERIPHERAL_WDSP;
+ break;
+ case PERIPHERAL_CDSP:
+ *diag_id = DIAG_ID_CDSP;
+ *peripheral = PERIPHERAL_CDSP;
+ break;
+ default:
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ pd);
+ *peripheral = -EINVAL;
+ break;
+ }
+
+	if (*peripheral >= 0 &&
+	    !driver->feature[*peripheral].pd_buffering)
+		*diag_id = 0;
+}
+
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
+{
+ int err = 0;
+ int mode = MODE_REALTIME;
+ int peripheral = 0;
+ uint8_t diag_id = 0;
+
+ if (!params)
+ return -EIO;
+
+ diag_map_pd_to_diagid(params->peripheral,
+ &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->buffering_flag[params->peripheral]) {
+ pr_err("diag: In %s, buffering flag not set for %d\n", __func__,
+ params->peripheral);
+ return -EINVAL;
+ }
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+			__func__, peripheral);
+		driver->buffering_flag[params->peripheral] = 0;
+		return -EIO;
+	}
+
+ switch (params->mode) {
+ case DIAG_BUFFERING_MODE_STREAMING:
+ mode = MODE_REALTIME;
+ break;
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ mode = MODE_NONREALTIME;
+ break;
+ default:
+ pr_err("diag: In %s, invalid tx mode %d\n", __func__,
+ params->mode);
+ return -EINVAL;
+ }
+
+ /*
+ * Perform sanity on watermark values. These values must be
+ * checked irrespective of the buffering mode.
+ */
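+	/*
+	 * For example, equal low and high values are rejected unless
+	 * both are DIAG_MIN_WM_VAL (presumably the watermark-disabled
+	 * case).
+	 */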
+ if (((params->high_wm_val > DIAG_MAX_WM_VAL) ||
+ (params->low_wm_val > DIAG_MAX_WM_VAL)) ||
+ (params->low_wm_val > params->high_wm_val) ||
+ ((params->low_wm_val == params->high_wm_val) &&
+ (params->low_wm_val != DIAG_MIN_WM_VAL))) {
+ pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
+ __func__, params->high_wm_val, params->low_wm_val,
+ params->peripheral);
+ return -EINVAL;
+ }
+
+ mutex_lock(&driver->mode_lock);
+ err = diag_send_buffering_tx_mode_pkt(peripheral, diag_id, params);
+ if (err) {
+ pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
+ __func__, peripheral, err);
+ goto fail;
+ }
+ err = diag_send_buffering_wm_values(peripheral, diag_id, params);
+ if (err) {
+ pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
+ __func__, peripheral, err);
+ goto fail;
+ }
+ err = __diag_send_real_time_update(peripheral, mode, diag_id);
+ if (err) {
+ pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
+ __func__, peripheral, mode, err);
+ goto fail;
+ }
+ driver->buffering_mode[params->peripheral].peripheral =
+ params->peripheral;
+ driver->buffering_mode[params->peripheral].mode =
+ params->mode;
+ driver->buffering_mode[params->peripheral].low_wm_val =
+ params->low_wm_val;
+ driver->buffering_mode[params->peripheral].high_wm_val =
+ params->high_wm_val;
+ if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
+ driver->buffering_flag[params->peripheral] = 0;
+fail:
+ mutex_unlock(&driver->mode_lock);
+ return err;
+}
+
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
+{
+ struct diag_ctrl_msg_stm stm_msg;
+ int msg_size = sizeof(struct diag_ctrl_msg_stm);
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return -EIO;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n",
+ peripheral);
+ return -ENODEV;
+ }
+
+ if (driver->feature[peripheral].stm_support == DISABLE_STM)
+ return -EINVAL;
+
+	stm_msg.ctrl_pkt_id = 21;	/* STM control packet */
+	stm_msg.ctrl_pkt_data_len = 5;	/* version + control_data */
+ stm_msg.version = 1;
+ stm_msg.control_data = stm_control_data;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &stm_msg, msg_size);
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write to smd, peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ }
+
+ return err;
+}
+
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+ uint8_t diag_id, int peripheral)
+{
+ int err = 0;
+ struct diag_ctrl_drain_immediate ctrl_pkt;
+ struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral %d doesn't support buffering\n",
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n",
+ peripheral);
+ return -ENODEV;
+ }
+
+ if (diag_id && driver->feature[peripheral].pd_buffering) {
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+ /*
+ * The length of the ctrl pkt is size of version,
+ * diag_id and stream id
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+ /*
+ * The length of the ctrl pkt is
+ * size of version and stream id
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ }
+
+ return err;
+}
+
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+ uint8_t diag_id, struct diag_buffering_mode_t *params)
+{
+ int err = 0;
+ struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
+ struct diag_ctrl_peripheral_tx_mode_v2 ctrl_pkt_v2;
+
+ if (!params)
+ return -EIO;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral %d doesn't support buffering\n",
+ peripheral);
+ return -EINVAL;
+ }
+
+ switch (params->mode) {
+ case DIAG_BUFFERING_MODE_STREAMING:
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ break;
+ default:
+ pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+ params->mode);
+ return -EINVAL;
+ }
+
+ if (diag_id &&
+ driver->feature[peripheral].pd_buffering) {
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+ /*
+ * Control packet length is size of version, diag_id,
+ * stream_id and tx_mode
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ ctrl_pkt_v2.tx_mode = params->mode;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ goto fail;
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+ /*
+ * Control packet length is size of version,
+ * stream_id and tx_mode
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.tx_mode = params->mode;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ goto fail;
+ }
+ }
+ driver->buffering_mode[params->peripheral].mode = params->mode;
+
+fail:
+ return err;
+}
+
+int diag_send_buffering_wm_values(uint8_t peripheral,
+ uint8_t diag_id, struct diag_buffering_mode_t *params)
+{
+ int err = 0;
+ struct diag_ctrl_set_wq_val ctrl_pkt;
+ struct diag_ctrl_set_wq_val_v2 ctrl_pkt_v2;
+
+ if (!params)
+ return -EIO;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral %d doesn't support buffering\n",
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n",
+ peripheral);
+ return -ENODEV;
+ }
+
+ switch (params->mode) {
+ case DIAG_BUFFERING_MODE_STREAMING:
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ break;
+ default:
+ pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+ params->mode);
+ return -EINVAL;
+ }
+
+ if (diag_id &&
+ driver->feature[peripheral].pd_buffering) {
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+ /*
+ * Control packet length is size of version, diag_id,
+ * stream_id and wmq values
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (4 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ ctrl_pkt_v2.high_wm_val = params->high_wm_val;
+ ctrl_pkt_v2.low_wm_val = params->low_wm_val;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+ /*
+ * Control packet length is size of version,
+ * stream_id and wmq values
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.high_wm_val = params->high_wm_val;
+ ctrl_pkt.low_wm_val = params->low_wm_val;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ }
+ return err;
+}
+
+int diagfwd_cntl_init(void)
+{
+ uint8_t peripheral = 0;
+
+ reg_dirty = 0;
+ driver->polling_reg_flag = 0;
+ driver->log_on_demand_support = 1;
+ driver->stm_peripheral = 0;
+ driver->close_transport = 0;
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+ driver->buffering_flag[peripheral] = 0;
+
+ mutex_init(&driver->cntl_lock);
+ INIT_WORK(&(driver->stm_update_work), diag_stm_update_work_fn);
+ INIT_WORK(&(driver->mask_update_work), diag_mask_update_work_fn);
+ INIT_WORK(&(driver->close_transport_work),
+ diag_close_transport_work_fn);
+
+ driver->cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
+ if (!driver->cntl_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void diagfwd_cntl_channel_init(void)
+{
+ uint8_t peripheral;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ diagfwd_early_open(peripheral);
+ diagfwd_open(peripheral, TYPE_CNTL);
+ }
+}
+
+void diagfwd_cntl_exit(void)
+{
+ if (driver->cntl_wq)
+ destroy_workqueue(driver->cntl_wq);
+}
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
new file mode 100644
index 000000000000..86442d838471
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -0,0 +1,332 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_CNTL_H
+#define DIAGFWD_CNTL_H
+
+/* Message registration commands */
+#define DIAG_CTRL_MSG_REG 1
+/* Message passing for DTR events */
+#define DIAG_CTRL_MSG_DTR 2
+/* Control Diag sleep vote, buffering etc */
+#define DIAG_CTRL_MSG_DIAGMODE 3
+/* Diag data based on "light" diag mask */
+#define DIAG_CTRL_MSG_DIAGDATA 4
+/* Send diag internal feature mask 'diag_int_feature_mask' */
+#define DIAG_CTRL_MSG_FEATURE 8
+/* Send Diag log mask for a particular equip id */
+#define DIAG_CTRL_MSG_EQUIP_LOG_MASK 9
+/* Send Diag event mask */
+#define DIAG_CTRL_MSG_EVENT_MASK_V2 10
+/* Send Diag F3 mask */
+#define DIAG_CTRL_MSG_F3_MASK_V2 11
+#define DIAG_CTRL_MSG_NUM_PRESETS 12
+#define DIAG_CTRL_MSG_SET_PRESET_ID 13
+#define DIAG_CTRL_MSG_LOG_MASK_WITH_PRESET_ID 14
+#define DIAG_CTRL_MSG_EVENT_MASK_WITH_PRESET_ID 15
+#define DIAG_CTRL_MSG_F3_MASK_WITH_PRESET_ID 16
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE 17
+#define DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM 18
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL 19
+#define DIAG_CTRL_MSG_DCI_CONNECTION_STATUS 20
+#define DIAG_CTRL_MSG_LAST_EVENT_REPORT 22
+#define DIAG_CTRL_MSG_LOG_RANGE_REPORT 23
+#define DIAG_CTRL_MSG_SSID_RANGE_REPORT 24
+#define DIAG_CTRL_MSG_BUILD_MASK_REPORT 25
+#define DIAG_CTRL_MSG_DEREG 27
+#define DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT 29
+#define DIAG_CTRL_MSG_PD_STATUS 30
+#define DIAG_CTRL_MSG_TIME_SYNC_PKT 31
+
+/*
+ * Feature Mask Definitions: the feature mask is used to specify the
+ * Diag features supported by the Apps processor
+ *
+ * F_DIAG_FEATURE_MASK_SUPPORT - Denotes support for sending and receiving
+ * feature masks
+ * F_DIAG_LOG_ON_DEMAND_APPS - Apps responds to Log on Demand requests
+ * F_DIAG_REQ_RSP_SUPPORT - Apps supports a dedicated request/response channel
+ * F_DIAG_APPS_HDLC_ENCODE - HDLC encoding is done on the forward channel
+ * F_DIAG_STM - Denotes Apps supports Diag over STM
+ */
+#define F_DIAG_FEATURE_MASK_SUPPORT 0
+#define F_DIAG_LOG_ON_DEMAND_APPS 2
+#define F_DIAG_REQ_RSP_SUPPORT 4
+#define F_DIAG_APPS_HDLC_ENCODE 6
+#define F_DIAG_STM 9
+#define F_DIAG_PERIPHERAL_BUFFERING 10
+#define F_DIAG_MASK_CENTRALIZATION 11
+#define F_DIAG_SOCKETS_ENABLED 13
+#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT 14
+#define F_DIAG_PKT_HEADER_UNTAG 16
+#define F_DIAG_PD_BUFFERING 17
+
+#define ENABLE_SEPARATE_CMDRSP 1
+#define DISABLE_SEPARATE_CMDRSP 0
+
+#define DISABLE_STM 0
+#define ENABLE_STM 1
+#define STATUS_STM 2
+
+#define UPDATE_PERIPHERAL_STM_STATE 1
+#define CLEAR_PERIPHERAL_STM_STATE 2
+
+#define ENABLE_APPS_HDLC_ENCODING 1
+#define DISABLE_APPS_HDLC_ENCODING 0
+
+#define ENABLE_PKT_HEADER_UNTAGGING 1
+#define DISABLE_PKT_HEADER_UNTAGGING 0
+
+#define DIAG_MODE_PKT_LEN 36
+#define DIAG_MODE_PKT_LEN_V2 37
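+/*
+ * These lengths are the diagmode payload sizes after the pkt_id/len
+ * header: nine uint32_t fields (36 bytes) for v1, plus the one-byte
+ * diag_id (37 bytes) for v2.
+ */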
+
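+/*
+ * Common header carried by every control packet; len counts only the
+ * payload that follows this header (see diag_cntl_process_read_data()).
+ */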
+struct diag_ctrl_pkt_header_t {
+ uint32_t pkt_id;
+ uint32_t len;
+};
+
+struct cmd_code_range {
+ uint16_t cmd_code_lo;
+ uint16_t cmd_code_hi;
+ uint32_t data;
+};
+
+struct diag_ctrl_cmd_reg {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint16_t cmd_code;
+ uint16_t subsysid;
+ uint16_t count_entries;
+ uint16_t port;
+};
+
+struct diag_ctrl_cmd_dereg {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint16_t cmd_code;
+ uint16_t subsysid;
+ uint16_t count_entries;
+} __packed;
+
+struct diag_ctrl_event_mask {
+ uint32_t cmd_type;
+ uint32_t data_len;
+ uint8_t stream_id;
+ uint8_t status;
+ uint8_t event_config;
+ uint32_t event_mask_size;
+ /* Copy event mask here */
+} __packed;
+
+struct diag_ctrl_log_mask {
+ uint32_t cmd_type;
+ uint32_t data_len;
+ uint8_t stream_id;
+ uint8_t status;
+ uint8_t equip_id;
+ uint32_t num_items; /* Last log code for this equip_id */
+ uint32_t log_mask_size; /* Size of log mask stored in log_mask[] */
+ /* Copy log mask here */
+} __packed;
+
+struct diag_ctrl_msg_mask {
+ uint32_t cmd_type;
+ uint32_t data_len;
+ uint8_t stream_id;
+ uint8_t status;
+ uint8_t msg_mode;
+ uint16_t ssid_first; /* Start of range of supported SSIDs */
+ uint16_t ssid_last; /* Last SSID in range */
+ uint32_t msg_mask_size; /* ssid_last - ssid_first + 1 */
+ /* Copy msg mask here */
+} __packed;
+
+struct diag_ctrl_feature_mask {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t feature_mask_len;
+ /* Copy feature mask here */
+} __packed;
+
+struct diag_ctrl_msg_diagmode {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t sleep_vote;
+ uint32_t real_time;
+ uint32_t use_nrt_values;
+ uint32_t commit_threshold;
+ uint32_t sleep_threshold;
+ uint32_t sleep_time;
+ uint32_t drain_timer_val;
+ uint32_t event_stale_timer_val;
+} __packed;
+
+struct diag_ctrl_msg_diagmode_v2 {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t sleep_vote;
+ uint32_t real_time;
+ uint32_t use_nrt_values;
+ uint32_t commit_threshold;
+ uint32_t sleep_threshold;
+ uint32_t sleep_time;
+ uint32_t drain_timer_val;
+ uint32_t event_stale_timer_val;
+ uint8_t diag_id;
+} __packed;
+
+struct diag_ctrl_msg_stm {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint8_t control_data;
+} __packed;
+
+struct diag_ctrl_msg_time_sync {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint8_t time_api;
+} __packed;
+
+struct diag_ctrl_dci_status {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint8_t count;
+} __packed;
+
+struct diag_ctrl_dci_handshake_pkt {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t magic;
+} __packed;
+
+struct diag_ctrl_msg_pd_status {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t pd_id;
+ uint8_t status;
+} __packed;
+
+struct diag_ctrl_last_event_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint16_t event_last_id;
+} __packed;
+
+struct diag_ctrl_log_range_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint32_t last_equip_id;
+ uint32_t num_ranges;
+} __packed;
+
+struct diag_ctrl_log_range {
+ uint32_t equip_id;
+ uint32_t num_items;
+} __packed;
+
+struct diag_ctrl_ssid_range_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint32_t count;
+} __packed;
+
+struct diag_ctrl_build_mask_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint32_t count;
+} __packed;
+
+struct diag_ctrl_peripheral_tx_mode {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t stream_id;
+ uint8_t tx_mode;
+} __packed;
+
+struct diag_ctrl_peripheral_tx_mode_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+ uint8_t tx_mode;
+} __packed;
+
+struct diag_ctrl_drain_immediate {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t stream_id;
+} __packed;
+
+struct diag_ctrl_drain_immediate_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+} __packed;
+
+struct diag_ctrl_set_wq_val {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t stream_id;
+ uint8_t high_wm_val;
+ uint8_t low_wm_val;
+} __packed;
+
+struct diag_ctrl_set_wq_val_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+ uint8_t high_wm_val;
+ uint8_t low_wm_val;
+} __packed;
+
+int diagfwd_cntl_init(void);
+void diagfwd_cntl_channel_init(void);
+void diagfwd_cntl_exit(void);
+void diag_cntl_channel_open(struct diagfwd_info *p_info);
+void diag_cntl_channel_close(struct diagfwd_info *p_info);
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+ int len);
+int diag_send_real_time_update(uint8_t peripheral, int real_time);
+void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral);
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params);
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
+void diag_real_time_work_fn(struct work_struct *work);
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data);
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+ uint8_t diag_id, int peripheral);
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+ uint8_t diag_id, struct diag_buffering_mode_t *params);
+int diag_send_buffering_wm_values(uint8_t peripheral,
+ uint8_t diag_id, struct diag_buffering_mode_t *params);
+#endif
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
new file mode 100644
index 000000000000..f1f8f0b2b34b
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -0,0 +1,830 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <soc/qcom/glink.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_glink.h"
+#include "diag_ipc_logging.h"
+
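+/*
+ * One table entry per peripheral for each logical channel type. Note
+ * that diag_glink_init() currently brings up only the PERIPHERAL_WDSP
+ * entries; the remaining rows are unused until then.
+ */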
+struct diag_glink_info glink_data[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DATA,
+ .edge = "mpss",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DATA,
+ .edge = "lpass",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DATA,
+ .edge = "wcnss",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DATA,
+ .edge = "dsps",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .edge = "wdsp",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DATA,
+ .edge = "cdsp",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_cntl[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CNTL,
+ .edge = "mpss",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CNTL,
+ .edge = "lpass",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CNTL,
+ .edge = "wcnss",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CNTL,
+ .edge = "dsps",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .edge = "wdsp",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CNTL,
+ .edge = "cdsp",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_dci[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI,
+ .edge = "mpss",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI,
+ .edge = "lpass",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI,
+ .edge = "wcnss",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI,
+ .edge = "dsps",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .edge = "wdsp",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI,
+ .edge = "cdsp",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CMD,
+ .edge = "mpss",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CMD,
+ .edge = "lpass",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CMD,
+ .edge = "wcnss",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CMD,
+ .edge = "dsps",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .edge = "wdsp",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CMD,
+ .edge = "cdsp",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI_CMD,
+ .edge = "mpss",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI_CMD,
+ .edge = "lpass",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI_CMD,
+ .edge = "wcnss",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI_CMD,
+ .edge = "dsps",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .edge = "wdsp",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI_CMD,
+ .edge = "cdsp",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ }
+};
+
+static void diag_state_open_glink(void *ctxt);
+static void diag_state_close_glink(void *ctxt);
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len);
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_glink_queue_read(void *ctxt);
+
+static struct diag_peripheral_ops glink_ops = {
+ .open = diag_state_open_glink,
+ .close = diag_state_close_glink,
+ .write = diag_glink_write,
+ .read = diag_glink_read,
+ .queue_read = diag_glink_queue_read
+};
+
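+/*
+ * .open/.close callbacks from the diag core: diag_state gates whether
+ * incoming data should still be processed. On close, waiters are woken
+ * and the workqueue is flushed so no stale reads remain in flight.
+ */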
+static void diag_state_open_glink(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)(ctxt);
+ atomic_set(&glink_info->diag_state, 1);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 1", glink_info->name);
+}
+
+static void diag_glink_queue_read(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (glink_info->hdl && glink_info->wq &&
+ atomic_read(&glink_info->opened))
+ queue_work(glink_info->wq, &(glink_info->read_work));
+}
+
+static void diag_state_close_glink(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)(ctxt);
+ atomic_set(&glink_info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 0", glink_info->name);
+ wake_up_interruptible(&glink_info->read_wait_q);
+ flush_workqueue(glink_info->wq);
+}
+
+int diag_glink_check_state(void *ctxt)
+{
+ struct diag_glink_info *info = NULL;
+
+ if (!ctxt)
+ return 0;
+
+ info = (struct diag_glink_info *)ctxt;
+ return (int)(atomic_read(&info->diag_state));
+}
+
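+/*
+ * Reads over G-Link are asynchronous: this only queues an rx intent of
+ * buf_len bytes, advertising a free buffer to the remote side. The
+ * payload is delivered later through diag_glink_notify_rx().
+ */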
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+ struct diag_glink_info *glink_info = NULL;
+ int ret_val = 0;
+
+ if (!ctxt || !buf || buf_len <= 0)
+ return -EIO;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (!glink_info || !atomic_read(&glink_info->opened) ||
+ !glink_info->hdl || !glink_info->inited) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:Glink channel not opened");
+ return -EIO;
+ }
+
+ ret_val = glink_queue_rx_intent(glink_info->hdl, buf, buf_len);
+ if (ret_val == 0)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: queued an rx intent ch:%s perip:%d buf:%pK of len:%d\n",
+ glink_info->name, glink_info->peripheral, buf, buf_len);
+
+ return ret_val;
+}
+
+static void diag_glink_read_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ read_work);
+
+ if (!glink_info || !atomic_read(&glink_info->opened))
+ return;
+
+ if (!glink_info->inited) {
+ diag_ws_release();
+ return;
+ }
+
+ diagfwd_channel_read(glink_info->fwd_ctxt);
+}
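+
+/*
+ * Carries one rx notification from atomic context to the workqueue;
+ * filled in by diag_glink_notify_rx() and freed by
+ * diag_glink_notify_rx_work_fn() once the buffer has been consumed.
+ */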
+struct diag_glink_read_work {
+ struct diag_glink_info *glink_info;
+ const void *ptr_read_done;
+ const void *ptr_rx_done;
+ size_t ptr_read_size;
+ struct work_struct work;
+};
+
+static void diag_glink_notify_rx_work_fn(struct work_struct *work)
+{
+ struct diag_glink_read_work *read_work = container_of(work,
+ struct diag_glink_read_work, work);
+ struct diag_glink_info *glink_info = read_work->glink_info;
+
+ if (!glink_info || !glink_info->hdl) {
+ kfree(read_work);
+ return;
+ }
+
+ diagfwd_channel_read_done(glink_info->fwd_ctxt,
+ (unsigned char *)(read_work->ptr_read_done),
+ read_work->ptr_read_size);
+
+ glink_rx_done(glink_info->hdl, read_work->ptr_rx_done, false);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
+ read_work->ptr_rx_done, (int)read_work->ptr_read_size,
+ glink_info->peripheral, glink_info->type);
+ kfree(read_work);
+}
+
+static void diag_glink_notify_rx(void *hdl, const void *priv,
+ const void *pkt_priv, const void *ptr,
+ size_t size)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+ struct diag_glink_read_work *read_work;
+
+ if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
+ return;
+
+ if (size <= 0)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
+ ptr, (int)size, glink_info->peripheral, glink_info->type);
+
+ read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC);
+ if (!read_work) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Could not allocate read_work\n");
+ glink_rx_done(glink_info->hdl, ptr, true);
+ return;
+ }
+
+ memcpy((void *)pkt_priv, ptr, size);
+
+ read_work->glink_info = glink_info;
+ read_work->ptr_read_done = pkt_priv;
+ read_work->ptr_rx_done = ptr;
+ read_work->ptr_read_size = size;
+ INIT_WORK(&read_work->work, diag_glink_notify_rx_work_fn);
+ queue_work(glink_info->wq, &read_work->work);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx queued for packet %pK of len: %d periph: %d ch: %d\n",
+ ptr, (int)size, glink_info->peripheral, glink_info->type);
+}
+
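+/*
+ * The remote side queued an rx intent: bump tx_intent_ready so that a
+ * pending diag_glink_write() knows a transmit slot is available.
+ */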
+static void diag_glink_notify_remote_rx_intent(void *hdl, const void *priv,
+ size_t size)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+ if (!glink_info)
+ return;
+
+ atomic_inc(&glink_info->tx_intent_ready);
+ wake_up_interruptible(&glink_info->wait_q);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:received remote rx intent for %d type %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_notify_tx_done(void *hdl, const void *priv,
+ const void *pkt_priv,
+ const void *ptr)
+{
+ struct diag_glink_info *glink_info = NULL;
+ struct diagfwd_info *fwd_info = NULL;
+ int found = 0;
+
+ glink_info = (struct diag_glink_info *)priv;
+ if (!glink_info)
+ return;
+
+ fwd_info = glink_info->fwd_ctxt;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Received glink tx done notify for ptr%pK pkt_priv %pK\n",
+ ptr, pkt_priv);
+ found = diagfwd_write_buffer_done(fwd_info, ptr);
+ if (!found)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Received Tx done on invalid buffer ptr %pK\n", ptr);
+}
+
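+/*
+ * Writes are gated on the intent counter: the buffer is handed to
+ * glink_tx() only if tx_intent_ready shows an outstanding remote rx
+ * intent; otherwise -ENOMEM tells the caller to retry later.
+ */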
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
+{
+ struct diag_glink_info *glink_info = NULL;
+ int err = 0;
+ uint32_t tx_flags = GLINK_TX_REQ_INTENT;
+
+ if (!ctxt || !buf)
+ return -EIO;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (!glink_info || len <= 0) {
+ pr_err_ratelimited("diag: In %s, invalid params, glink_info: %pK, buf: %pK, len: %d\n",
+ __func__, glink_info, buf, len);
+ return -EINVAL;
+ }
+
+ if (!glink_info->inited || !glink_info->hdl ||
+ !atomic_read(&glink_info->opened)) {
+ pr_err_ratelimited("diag: In %s, glink not inited, glink_info: %pK, buf: %pK, len: %d\n",
+ __func__, glink_info, buf, len);
+ return -ENODEV;
+ }
+
+ if (atomic_read(&glink_info->tx_intent_ready)) {
+ atomic_dec(&glink_info->tx_intent_ready);
+ err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags);
+ if (!err) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s wrote to glink, len: %d\n",
+ glink_info->name, len);
+ }
+ } else {
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ connect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink channel open: p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ remote_disconnect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
+static void diag_glink_late_init_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ late_init_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink late init p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_transport_notify_state(void *handle, const void *priv,
+ unsigned int event)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+ if (!glink_info)
+ return;
+
+ switch (event) {
+ case GLINK_CONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel connect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->connect_work);
+ break;
+ case GLINK_LOCAL_DISCONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel disconnect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+
+ break;
+ case GLINK_REMOTE_DISCONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel remote disconnect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received invalid notification\n",
+ glink_info->name);
+ break;
+ }
+}
+
+static void diag_glink_open_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ open_work);
+ struct glink_open_config open_cfg;
+ void *handle = NULL;
+
+ if (!glink_info || glink_info->hdl)
+ return;
+
+ memset(&open_cfg, 0, sizeof(struct glink_open_config));
+ open_cfg.priv = glink_info;
+ open_cfg.edge = glink_info->edge;
+ open_cfg.name = glink_info->name;
+ open_cfg.notify_rx = diag_glink_notify_rx;
+ open_cfg.notify_tx_done = diag_glink_notify_tx_done;
+ open_cfg.notify_state = diag_glink_transport_notify_state;
+ open_cfg.notify_remote_rx_intent = diag_glink_notify_remote_rx_intent;
+ handle = glink_open(&open_cfg);
+ if (IS_ERR_OR_NULL(handle)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "error opening channel %s",
+ glink_info->name);
+ } else {
+ glink_info->hdl = handle;
+ }
+}
+
+static void diag_glink_close_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ close_work);
+ if (!glink_info || !glink_info->inited || !glink_info->hdl)
+ return;
+
+ glink_close(glink_info->hdl);
+ atomic_set(&glink_info->opened, 0);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+ glink_info->hdl = NULL;
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_notify_cb(struct glink_link_state_cb_info *cb_info,
+ void *priv)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ glink_info = (struct diag_glink_info *)priv;
+ if (!glink_info)
+ return;
+ if (!cb_info)
+ return;
+
+ switch (cb_info->link_state) {
+ case GLINK_LINK_STATE_UP:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s channel opened for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->open_work);
+ break;
+ case GLINK_LINK_STATE_DOWN:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s channel closed for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->close_work);
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Invalid link state notification for ch:%s\n",
+ glink_info->name);
+ break;
+ }
+}
+
+static void glink_late_init(struct diag_glink_info *glink_info)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (!glink_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ glink_info->name);
+
+ diagfwd_register(TRANSPORT_GLINK, glink_info->peripheral,
+ glink_info->type, (void *)glink_info,
+ &glink_ops, &glink_info->fwd_ctxt);
+ fwd_info = glink_info->fwd_ctxt;
+ if (!fwd_info)
+ return;
+
+ glink_info->inited = 1;
+
+ if (atomic_read(&glink_info->opened))
+ queue_work(glink_info->wq, &(glink_info->late_init_work));
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ glink_info->name);
+}
+
+int diag_glink_init_peripheral(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n",
+ __func__, peripheral);
+ return -EINVAL;
+ }
+
+ glink_late_init(&glink_data[peripheral]);
+ glink_late_init(&glink_dci[peripheral]);
+ glink_late_init(&glink_cmd[peripheral]);
+ glink_late_init(&glink_dci_cmd[peripheral]);
+
+ return 0;
+}
+
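+/*
+ * One-time per-channel setup: create the channel workqueue and register
+ * for link state callbacks. The channel itself is opened later, from
+ * open_work, once GLINK_LINK_STATE_UP is reported.
+ */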
+static void __diag_glink_init(struct diag_glink_info *glink_info)
+{
+ char wq_name[DIAG_GLINK_NAME_SZ + 12];
+ struct glink_link_info link_info;
+ void *link_state_handle = NULL;
+
+ if (!glink_info)
+ return;
+
+ init_waitqueue_head(&glink_info->wait_q);
+ init_waitqueue_head(&glink_info->read_wait_q);
+ mutex_init(&glink_info->lock);
+ strlcpy(wq_name, "DIAG_GLINK_", 12);
+ strlcat(wq_name, glink_info->name, sizeof(glink_info->name));
+ glink_info->wq = create_singlethread_workqueue(wq_name);
+ if (!glink_info->wq) {
+ pr_err("diag: In %s, unable to create workqueue for glink ch:%s\n",
+ __func__, glink_info->name);
+ return;
+ }
+ INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
+ INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
+ INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+ INIT_WORK(&(glink_info->remote_disconnect_work),
+ diag_glink_remote_disconnect_work_fn);
+ INIT_WORK(&(glink_info->late_init_work), diag_glink_late_init_work_fn);
+ link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
+ link_info.transport = NULL;
+ link_info.edge = glink_info->edge;
+ glink_info->link_state_handle = NULL;
+ link_state_handle = glink_register_link_state_cb(&link_info,
+ (void *)glink_info);
+ if (IS_ERR_OR_NULL(link_state_handle)) {
+ pr_err("diag: In %s, unable to register for glink channel %s\n",
+ __func__, glink_info->name);
+ destroy_workqueue(glink_info->wq);
+ return;
+ }
+ glink_info->link_state_handle = link_state_handle;
+ glink_info->fwd_ctxt = NULL;
+ atomic_set(&glink_info->tx_intent_ready, 0);
+ atomic_set(&glink_info->opened, 0);
+ atomic_set(&glink_info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s initialized fwd_ctxt: %pK hdl: %pK\n",
+ glink_info->name, glink_info->fwd_ctxt,
+ glink_info->link_state_handle);
+}
+
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+ struct diag_glink_info *info = NULL;
+
+ if (!ctxt || !fwd_ctxt)
+ return;
+
+ info = (struct diag_glink_info *)ctxt;
+ info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_glink_init(void)
+{
+ uint8_t peripheral;
+ struct diag_glink_info *glink_info = NULL;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
+ glink_info = &glink_cntl[peripheral];
+ __diag_glink_init(glink_info);
+ diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
+ (void *)glink_info, &glink_ops,
+ &(glink_info->fwd_ctxt));
+ glink_info->inited = 1;
+ __diag_glink_init(&glink_data[peripheral]);
+ __diag_glink_init(&glink_cmd[peripheral]);
+ __diag_glink_init(&glink_dci[peripheral]);
+ __diag_glink_init(&glink_dci_cmd[peripheral]);
+ }
+ return 0;
+}
+
+static void __diag_glink_exit(struct diag_glink_info *glink_info)
+{
+ if (!glink_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ glink_info->name);
+
+ diagfwd_deregister(glink_info->peripheral, glink_info->type,
+ (void *)glink_info);
+ glink_info->fwd_ctxt = NULL;
+ glink_info->hdl = NULL;
+ if (glink_info->wq)
+ destroy_workqueue(glink_info->wq);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ glink_info->name);
+}
+
+void diag_glink_early_exit(void)
+{
+ int peripheral = 0;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
+ __diag_glink_exit(&glink_cntl[peripheral]);
+ glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
+ }
+}
+
+void diag_glink_exit(void)
+{
+ int peripheral = 0;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
+ __diag_glink_exit(&glink_data[peripheral]);
+ __diag_glink_exit(&glink_cmd[peripheral]);
+ __diag_glink_exit(&glink_dci[peripheral]);
+ __diag_glink_exit(&glink_dci_cmd[peripheral]);
+ glink_unregister_link_state_cb(&glink_data[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_cmd[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_dci[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_dci_cmd[peripheral].hdl);
+ }
+}
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
new file mode 100644
index 000000000000..6cad44522ab6
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_GLINK_H
+#define DIAGFWD_GLINK_H
+
+#define DIAG_GLINK_NAME_SZ 24
+#define GLINK_DRAIN_BUF_SIZE 4096
+
+struct diag_glink_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t inited;
+ atomic_t opened;
+ atomic_t diag_state;
+ uint32_t fifo_size;
+ atomic_t tx_intent_ready;
+ void *hdl;
+ void *link_state_handle;
+ char edge[DIAG_GLINK_NAME_SZ];
+ char name[DIAG_GLINK_NAME_SZ];
+ struct mutex lock;
+ wait_queue_head_t read_wait_q;
+ wait_queue_head_t wait_q;
+ struct workqueue_struct *wq;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct work_struct read_work;
+ struct work_struct connect_work;
+ struct work_struct remote_disconnect_work;
+ struct work_struct late_init_work;
+ struct diagfwd_info *fwd_ctxt;
+};
+
+extern struct diag_glink_info glink_data[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cntl[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci[NUM_PERIPHERALS];
+
+int diag_glink_init_peripheral(uint8_t peripheral);
+void diag_glink_exit(void);
+int diag_glink_init(void);
+void diag_glink_early_exit(void);
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_glink_check_state(void *ctxt);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
new file mode 100644
index 000000000000..5fed1f88382d
--- /dev/null
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -0,0 +1,453 @@
+/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <asm/current.h>
+#include "diagmem.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_hsic.h"
+
+#define DIAG_HSIC_STRING_SZ 11
+
+struct diag_hsic_info diag_hsic[NUM_HSIC_DEV] = {
+ {
+ .id = HSIC_1,
+ .dev_id = DIAGFWD_MDM,
+ .name = "MDM",
+ .mempool = POOL_TYPE_MDM,
+ .opened = 0,
+ .enabled = 0,
+ .suspended = 0,
+ .hsic_wq = NULL
+ },
+ {
+ .id = HSIC_2,
+ .dev_id = DIAGFWD_MDM_DCI,
+ .name = "MDM_DCI",
+ .mempool = POOL_TYPE_MDM_DCI,
+ .opened = 0,
+ .enabled = 0,
+ .suspended = 0,
+ .hsic_wq = NULL
+ }
+};
+
+static void diag_hsic_read_complete(void *ctxt, char *buf, int len,
+ int actual_size)
+{
+ int err = 0;
+ int index = (int)(uintptr_t)ctxt;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return;
+ }
+ ch = &diag_hsic[index];
+
+ /*
+ * Don't pass on the buffer if the channel is closed when a pending read
+ * completes. Also, actual size can be negative error codes - do not
+ * pass on the buffer.
+ */
+ if (!ch->opened || actual_size <= 0)
+ goto fail;
+ err = diag_remote_dev_read_done(ch->dev_id, buf, actual_size);
+ if (err)
+ goto fail;
+ return;
+
+fail:
+ diagmem_free(driver, buf, ch->mempool);
+ queue_work(ch->hsic_wq, &ch->read_work);
+}
+
+static void diag_hsic_write_complete(void *ctxt, char *buf, int len,
+ int actual_size)
+{
+ int index = (int)(uintptr_t)ctxt;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return;
+ }
+
+ ch = &diag_hsic[index];
+ diag_remote_dev_write_done(ch->dev_id, buf, actual_size, ch->id);
+}
+
+static int diag_hsic_suspend(void *ctxt)
+{
+ int index = (int)(uintptr_t)ctxt;
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[index];
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->suspended = 1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ return 0;
+}
+
+static void diag_hsic_resume(void *ctxt)
+{
+ int index = (int)(uintptr_t)ctxt;
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return;
+ }
+ ch = &diag_hsic[index];
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->suspended = 0;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ queue_work(ch->hsic_wq, &(ch->read_work));
+}
+
+static struct diag_bridge_ops diag_hsic_ops[NUM_HSIC_DEV] = {
+ {
+ .ctxt = (void *)HSIC_1,
+ .read_complete_cb = diag_hsic_read_complete,
+ .write_complete_cb = diag_hsic_write_complete,
+ .suspend = diag_hsic_suspend,
+ .resume = diag_hsic_resume,
+ },
+ {
+ .ctxt = (void *)HSIC_2,
+ .read_complete_cb = diag_hsic_read_complete,
+ .write_complete_cb = diag_hsic_write_complete,
+ .suspend = diag_hsic_suspend,
+ .resume = diag_hsic_resume,
+ }
+};
+
+static int hsic_open(int id)
+{
+ int err = 0;
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err("diag: Invalid index %d in %s\n", id, __func__);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[id];
+ if (!ch->enabled)
+ return -ENODEV;
+
+ if (ch->opened) {
+ pr_debug("diag: HSIC channel %d is already opened\n", ch->id);
+ return -ENODEV;
+ }
+
+ err = diag_bridge_open(ch->id, &diag_hsic_ops[ch->id]);
+ if (err) {
+ pr_err("diag: Unable to open HSIC channel %d, err: %d",
+ ch->id, err);
+ return err;
+ }
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->opened = 1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ diagmem_init(driver, ch->mempool);
+ /* Notify the bridge that the channel is open */
+ diag_remote_dev_open(ch->dev_id);
+ queue_work(ch->hsic_wq, &(ch->read_work));
+ return 0;
+}
+
+static void hsic_open_work_fn(struct work_struct *work)
+{
+ struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+ open_work);
+ if (ch)
+ hsic_open(ch->id);
+}
+
+static int hsic_close(int id)
+{
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err("diag: Invalid index %d in %s\n", id, __func__);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[id];
+ if (!ch->enabled)
+ return -ENODEV;
+
+ if (!ch->opened) {
+ pr_debug("diag: HSIC channel %d is already closed\n", ch->id);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->opened = 0;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ diag_bridge_close(ch->id);
+ diagmem_exit(driver, ch->mempool);
+ diag_remote_dev_close(ch->dev_id);
+ return 0;
+}
+
+static void hsic_close_work_fn(struct work_struct *work)
+{
+ struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+ close_work);
+ if (ch)
+ hsic_close(ch->id);
+}
+
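+/*
+ * Keep the HSIC channel supplied with read buffers: allocate from the
+ * bridge mempool and submit reads until the pool is exhausted or the
+ * bridge read fails; completions arrive via diag_hsic_read_complete().
+ */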
+static void hsic_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ unsigned char *buf = NULL;
+ struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+ read_work);
+ if (!ch || !ch->enabled || !ch->opened)
+ return;
+
+ do {
+ buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE, ch->mempool);
+ if (!buf) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = diag_bridge_read(ch->id, buf, DIAG_MDM_BUF_SIZE);
+ if (err) {
+ diagmem_free(driver, buf, ch->mempool);
+ pr_err_ratelimited("diag: Unable to read from HSIC channel %d, err: %d\n",
+ ch->id, err);
+ break;
+ }
+ } while (buf);
+
+ /* Read from the HSIC channel continuously if the channel is present */
+ if (!err)
+ queue_work(ch->hsic_wq, &ch->read_work);
+}
+
+static int diag_hsic_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (!pdev)
+ return -EIO;
+
+ pr_debug("diag: hsic probe pdev: %d\n", pdev->id);
+ if (pdev->id < 0 || pdev->id >= NUM_HSIC_DEV) {
+ pr_err("diag: No support for HSIC device %d\n", pdev->id);
+ return -EIO;
+ }
+
+ ch = &diag_hsic[pdev->id];
+ if (!ch->enabled) {
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->enabled = 1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+ queue_work(ch->hsic_wq, &(ch->open_work));
+ return 0;
+}
+
+static int diag_hsic_remove(struct platform_device *pdev)
+{
+ struct diag_hsic_info *ch = NULL;
+
+ if (!pdev)
+ return -EIO;
+
+ pr_debug("diag: hsic close pdev: %d\n", pdev->id);
+ if (pdev->id < 0 || pdev->id >= NUM_HSIC_DEV) {
+ pr_err("diag: No support for HSIC device %d\n", pdev->id);
+ return -EIO;
+ }
+
+ ch = &diag_hsic[pdev->id];
+ queue_work(ch->hsic_wq, &(ch->close_work));
+ return 0;
+}
+
+static int diagfwd_hsic_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int diagfwd_hsic_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static const struct dev_pm_ops diagfwd_hsic_dev_pm_ops = {
+ .runtime_suspend = diagfwd_hsic_runtime_suspend,
+ .runtime_resume = diagfwd_hsic_runtime_resume,
+};
+
+static struct platform_driver msm_hsic_ch_driver = {
+ .probe = diag_hsic_probe,
+ .remove = diag_hsic_remove,
+ .driver = {
+ .name = "diag_bridge",
+ .owner = THIS_MODULE,
+ .pm = &diagfwd_hsic_dev_pm_ops,
+ },
+};
+
+static int hsic_queue_read(int id)
+{
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ queue_work(diag_hsic[id].hsic_wq, &(diag_hsic[id].read_work));
+ return 0;
+}
+
+static int hsic_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ struct diag_hsic_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ if (!buf || len <= 0) {
+ pr_err_ratelimited("diag: In %s, ch %d, invalid buf %pK len %d\n",
+ __func__, id, buf, len);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[id];
+ if (!ch->opened || !ch->enabled) {
+ pr_debug_ratelimited("diag: In %s, ch %d is disabled. opened %d enabled: %d\n",
+ __func__, id, ch->opened, ch->enabled);
+ return -EIO;
+ }
+
+ err = diag_bridge_write(ch->id, buf, len);
+ if (err) {
+ pr_err_ratelimited("diag: cannot write to HSIC ch %d, err: %d\n",
+ ch->id, err);
+ }
+ return err;
+}
+
+static int hsic_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ if (!buf)
+ return -EIO;
+ diagmem_free(driver, buf, diag_hsic[id].mempool);
+ queue_work(diag_hsic[id].hsic_wq, &(diag_hsic[id].read_work));
+ return 0;
+}
+
+static struct diag_remote_dev_ops diag_hsic_fwd_ops = {
+ .open = hsic_open,
+ .close = hsic_close,
+ .queue_read = hsic_queue_read,
+ .write = hsic_write,
+ .fwd_complete = hsic_fwd_complete,
+};
+
+int diag_hsic_init(void)
+{
+ int i;
+ int err = 0;
+ struct diag_hsic_info *ch = NULL;
+ char wq_name[DIAG_HSIC_NAME_SZ + DIAG_HSIC_STRING_SZ];
+
+ for (i = 0; i < NUM_HSIC_DEV; i++) {
+ ch = &diag_hsic[i];
+ spin_lock_init(&ch->lock);
+ INIT_WORK(&(ch->read_work), hsic_read_work_fn);
+ INIT_WORK(&(ch->open_work), hsic_open_work_fn);
+ INIT_WORK(&(ch->close_work), hsic_close_work_fn);
+ strlcpy(wq_name, "DIAG_HSIC_", DIAG_HSIC_STRING_SZ);
+ strlcat(wq_name, ch->name, sizeof(ch->name));
+ ch->hsic_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->hsic_wq)
+ goto fail;
+ err = diagfwd_bridge_register(ch->dev_id, ch->id,
+ &diag_hsic_fwd_ops);
+ if (err) {
+ pr_err("diag: Unable to register HSIC channel %d with bridge, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ }
+
+ err = platform_driver_register(&msm_hsic_ch_driver);
+ if (err) {
+ pr_err("diag: could not register HSIC device, err: %d\n", err);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ diag_hsic_exit();
+ return -ENOMEM;
+}
+
+void diag_hsic_exit(void)
+{
+ int i;
+ struct diag_hsic_info *ch = NULL;
+
+ for (i = 0; i < NUM_HSIC_DEV; i++) {
+ ch = &diag_hsic[i];
+ ch->enabled = 0;
+ ch->opened = 0;
+ ch->suspended = 0;
+ if (ch->hsic_wq)
+ destroy_workqueue(ch->hsic_wq);
+ }
+ platform_driver_unregister(&msm_hsic_ch_driver);
+}
diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h
new file mode 100644
index 000000000000..c4d87a223105
--- /dev/null
+++ b/drivers/char/diag/diagfwd_hsic.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_HSIC_H
+#define DIAGFWD_HSIC_H
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <linux/usb/diag_bridge.h>
+
+#define HSIC_1 0
+#define HSIC_2 1
+#define NUM_HSIC_DEV 2
+
+#define DIAG_HSIC_NAME_SZ 24
+
+struct diag_hsic_info {
+ int id;
+ int dev_id;
+ int mempool;
+ uint8_t opened;
+ uint8_t enabled;
+ uint8_t suspended;
+ char name[DIAG_HSIC_NAME_SZ];
+ struct work_struct read_work;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct workqueue_struct *hsic_wq;
+ spinlock_t lock;
+};
+
+extern struct diag_hsic_info diag_hsic[NUM_HSIC_DEV];
+
+int diag_hsic_init(void);
+void diag_hsic_exit(void);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
new file mode 100644
index 000000000000..edfba6bb09c9
--- /dev/null
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -0,0 +1,759 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/msm_mhi.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/kmemleak.h>
+#include <asm/current.h>
+#include <linux/atomic.h>
+#include "diagmem.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_mhi.h"
+#include "diag_ipc_logging.h"
+
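+/*
+ * The MHI callback context packs the device index into the upper byte
+ * and the channel type into the lower byte, e.g.
+ * SET_CH_CTXT(MHI_DCI_1, TYPE_MHI_WRITE_CH) yields 0x0101.
+ */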
+#define SET_CH_CTXT(index, type) ((((index) & 0xFF) << 8) | ((type) & 0xFF))
+#define GET_INFO_INDEX(val) (((val) & 0xFF00) >> 8)
+#define GET_CH_TYPE(val) ((val) & 0x00FF)
+
+#define CHANNELS_OPENED 0
+#define OPEN_CHANNELS 1
+#define CHANNELS_CLOSED 0
+#define CLOSE_CHANNELS 1
+
+#define DIAG_MHI_STRING_SZ 11
+
+struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
+ {
+ .id = MHI_1,
+ .dev_id = DIAGFWD_MDM,
+ .name = "MDM",
+ .enabled = 0,
+ .num_read = 0,
+ .mempool = POOL_TYPE_MDM,
+ .mempool_init = 0,
+ .mhi_wq = NULL,
+ .read_ch = {
+ .chan = MHI_CLIENT_DIAG_IN,
+ .type = TYPE_MHI_READ_CH,
+ .hdl = NULL,
+ },
+ .write_ch = {
+ .chan = MHI_CLIENT_DIAG_OUT,
+ .type = TYPE_MHI_WRITE_CH,
+ .hdl = NULL,
+ }
+ },
+ {
+ .id = MHI_DCI_1,
+ .dev_id = DIAGFWD_MDM_DCI,
+ .name = "MDM_DCI",
+ .enabled = 0,
+ .num_read = 0,
+ .mempool = POOL_TYPE_MDM_DCI,
+ .mempool_init = 0,
+ .mhi_wq = NULL,
+ .read_ch = {
+ .chan = MHI_CLIENT_DCI_IN,
+ .type = TYPE_MHI_READ_CH,
+ .hdl = NULL,
+ },
+ .write_ch = {
+ .chan = MHI_CLIENT_DCI_OUT,
+ .type = TYPE_MHI_WRITE_CH,
+ .hdl = NULL,
+ }
+ }
+};
+
+static int mhi_ch_open(struct diag_mhi_ch_t *ch)
+{
+ int err = 0;
+
+ if (!ch)
+ return -EINVAL;
+
+ if (atomic_read(&ch->opened)) {
+ pr_debug("diag: In %s, channel is already opened, id: %d\n",
+ __func__, ch->type);
+ return 0;
+ }
+ err = mhi_open_channel(ch->hdl);
+ if (err) {
+ pr_err("diag: In %s, unable to open ch, type: %d, err: %d\n",
+ __func__, ch->type, err);
+ return err;
+ }
+
+ atomic_set(&ch->opened, 1);
+ INIT_LIST_HEAD(&ch->buf_tbl);
+ return 0;
+}
+
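+/*
+ * Every buffer handed to the MHI core is tracked in the per-channel
+ * buf_tbl so it can be reclaimed on transfer completion or flushed by
+ * mhi_buf_tbl_clear() when the channel goes down.
+ */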
+static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
+ void *buf, int len)
+{
+ unsigned long flags;
+ struct diag_mhi_buf_tbl_t *item;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (!mhi_info || !buf || len < 0)
+ return -EINVAL;
+
+ switch (type) {
+ case TYPE_MHI_READ_CH:
+ ch = &mhi_info->read_ch;
+ break;
+ case TYPE_MHI_WRITE_CH:
+ ch = &mhi_info->write_ch;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return -EINVAL;
+ }
+
+ item = kzalloc(sizeof(struct diag_mhi_buf_tbl_t), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ kmemleak_not_leak(item);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ item->buf = buf;
+ item->len = len;
+ list_add_tail(&item->link, &ch->buf_tbl);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ return 0;
+}
+
+static void mhi_buf_tbl_remove(struct diag_mhi_info *mhi_info, int type,
+ void *buf, int len)
+{
+ int found = 0;
+ unsigned long flags;
+ struct list_head *start, *temp;
+ struct diag_mhi_buf_tbl_t *item = NULL;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (!mhi_info || !buf || len < 0)
+ return;
+
+ switch (type) {
+ case TYPE_MHI_READ_CH:
+ ch = &mhi_info->read_ch;
+ break;
+ case TYPE_MHI_WRITE_CH:
+ ch = &mhi_info->write_ch;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ch->lock, flags);
+ list_for_each_safe(start, temp, &ch->buf_tbl) {
+ item = list_entry(start, struct diag_mhi_buf_tbl_t, link);
+ if (item->buf != buf)
+ continue;
+ list_del(&item->link);
+ if (type == TYPE_MHI_READ_CH)
+ diagmem_free(driver, item->buf, mhi_info->mempool);
+ kfree(item);
+ found = 1;
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (!found) {
+ pr_err_ratelimited("diag: In %s, unable to find buffer, ch: %pK, type: %d, buf: %pK\n",
+ __func__, ch, ch->type, buf);
+ }
+}
+
+static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
+{
+ unsigned long flags;
+ struct list_head *start, *temp;
+ struct diag_mhi_buf_tbl_t *item = NULL;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (!mhi_info)
+ return;
+
+ /* Clear all the pending reads */
+ ch = &mhi_info->read_ch;
+ /* At this point, the channel should already be closed */
+ if (!(atomic_read(&ch->opened))) {
+ spin_lock_irqsave(&ch->lock, flags);
+ list_for_each_safe(start, temp, &ch->buf_tbl) {
+ item = list_entry(start, struct diag_mhi_buf_tbl_t,
+ link);
+ list_del(&item->link);
+ diagmem_free(driver, item->buf, mhi_info->mempool);
+ kfree(item);
+
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ /* Clear all the pending writes */
+ ch = &mhi_info->write_ch;
+ /* At this point, the channel should already be closed */
+ if (!(atomic_read(&ch->opened))) {
+ spin_lock_irqsave(&ch->lock, flags);
+ list_for_each_safe(start, temp, &ch->buf_tbl) {
+ item = list_entry(start, struct diag_mhi_buf_tbl_t,
+ link);
+ list_del(&item->link);
+ diag_remote_dev_write_done(mhi_info->dev_id, item->buf,
+ item->len, mhi_info->id);
+ kfree(item);
+
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
+static int __mhi_close(struct diag_mhi_info *mhi_info, int close_flag)
+{
+ if (!mhi_info)
+ return -EIO;
+
+ if (!mhi_info->enabled)
+ return -ENODEV;
+
+ if (close_flag == CLOSE_CHANNELS) {
+ atomic_set(&(mhi_info->read_ch.opened), 0);
+ atomic_set(&(mhi_info->write_ch.opened), 0);
+ }
+
+ if (!(atomic_read(&(mhi_info->read_ch.opened)))) {
+ flush_workqueue(mhi_info->mhi_wq);
+ mhi_close_channel(mhi_info->read_ch.hdl);
+ }
+
+ if (!(atomic_read(&(mhi_info->write_ch.opened)))) {
+ flush_workqueue(mhi_info->mhi_wq);
+ mhi_close_channel(mhi_info->write_ch.hdl);
+ }
+
+ mhi_buf_tbl_clear(mhi_info);
+ diag_remote_dev_close(mhi_info->dev_id);
+ return 0;
+}
+
+static int mhi_close(int id)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err("diag: In %s, invalid index %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (!diag_mhi[id].enabled)
+ return -ENODEV;
+ /*
+ * This function is called whenever the channel needs to be closed
+ * explicitly by Diag. Close both the read and write channels (denoted
+ * by CLOSE_CHANNELS flag)
+ */
+ return __mhi_close(&diag_mhi[id], CLOSE_CHANNELS);
+}
+
+static void mhi_close_work_fn(struct work_struct *work)
+{
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ close_work);
+ /*
+ * This is a part of work function which is queued after the channels
+ * are explicitly closed. Do not close channels again (denoted by
+ * CHANNELS_CLOSED flag)
+ */
+ if (mhi_info)
+ __mhi_close(mhi_info, CHANNELS_CLOSED);
+}
+
+static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
+{
+ int err = 0;
+ unsigned long flags;
+
+ if (!mhi_info)
+ return -EIO;
+
+ if (open_flag == OPEN_CHANNELS) {
+ if (!atomic_read(&mhi_info->read_ch.opened)) {
+ err = mhi_ch_open(&mhi_info->read_ch);
+ if (err)
+ goto fail;
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "opened mhi read channel, port: %d\n",
+ mhi_info->id);
+ }
+ if (!atomic_read(&mhi_info->write_ch.opened)) {
+ err = mhi_ch_open(&mhi_info->write_ch);
+ if (err)
+ goto fail;
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "opened mhi write channel, port: %d\n",
+ mhi_info->id);
+ }
+ } else if (open_flag == CHANNELS_OPENED) {
+ if (!atomic_read(&(mhi_info->read_ch.opened)) ||
+ !atomic_read(&(mhi_info->write_ch.opened))) {
+ return -ENODEV;
+ }
+ }
+
+ spin_lock_irqsave(&mhi_info->lock, flags);
+ mhi_info->enabled = 1;
+ spin_unlock_irqrestore(&mhi_info->lock, flags);
+ diag_remote_dev_open(mhi_info->dev_id);
+ queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
+ return 0;
+
+fail:
+ pr_err("diag: Failed to open mhi channlels, err: %d\n", err);
+ mhi_close(mhi_info->id);
+ return err;
+}
+
+static int mhi_open(int id)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err("diag: In %s, invalid index %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (!diag_mhi[id].enabled)
+ return -ENODEV;
+ /*
+ * This function is called whenever the channel needs to be opened
+ * explicitly by Diag. Open both the read and write channels (denoted by
+ * OPEN_CHANNELS flag). On success __mhi_open() already notifies the
+ * bridge and queues the first read, so its result is returned as is.
+ */
+ return __mhi_open(&diag_mhi[id], OPEN_CHANNELS);
+}
+
+static void mhi_open_work_fn(struct work_struct *work)
+{
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ open_work);
+ /*
+ * This is a part of work function which is queued after the channels
+ * are explicitly opened. Do not open channels again (denoted by
+ * CHANNELS_OPENED flag)
+ */
+ if (mhi_info) {
+ diag_remote_dev_open(mhi_info->dev_id);
+ queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
+ }
+}
+
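+/*
+ * Drain completed inbound transfers: poll the read channel and forward
+ * each buffer to the bridge. Buffers that complete after the channel
+ * was closed are simply returned to the mempool.
+ */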
+static void mhi_read_done_work_fn(struct work_struct *work)
+{
+ unsigned char *buf = NULL;
+ struct mhi_result result;
+ int err = 0;
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ read_done_work);
+ if (!mhi_info)
+ return;
+
+ do {
+ if (!(atomic_read(&(mhi_info->read_ch.opened))))
+ break;
+ err = mhi_poll_inbound(mhi_info->read_ch.hdl, &result);
+ if (err) {
+ pr_debug("diag: In %s, err %d\n", __func__, err);
+ break;
+ }
+ buf = result.buf_addr;
+ if (!buf)
+ break;
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "read from mhi port %d buf %pK\n",
+ mhi_info->id, buf);
+ /*
+ * The read buffers can come after the MHI channels are closed.
+ * If the channels are closed at the time of read, discard the
+ * buffers here and do not forward them to the mux layer.
+ */
+ if ((atomic_read(&(mhi_info->read_ch.opened)))) {
+ err = diag_remote_dev_read_done(mhi_info->dev_id, buf,
+ result.bytes_xferd);
+ if (err)
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH,
+ buf, result.bytes_xferd);
+ } else {
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf,
+ result.bytes_xferd);
+ }
+ } while (buf);
+}
+
+static void mhi_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ unsigned char *buf = NULL;
+ enum MHI_FLAGS mhi_flags = MHI_EOT;
+ struct diag_mhi_ch_t *read_ch = NULL;
+ unsigned long flags;
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ read_work);
+ if (!mhi_info)
+ return;
+
+ read_ch = &mhi_info->read_ch;
+ do {
+ if (!(atomic_read(&(read_ch->opened))))
+ break;
+
+ buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+ mhi_info->mempool);
+ if (!buf)
+ break;
+
+ err = mhi_buf_tbl_add(mhi_info, TYPE_MHI_READ_CH, buf,
+ DIAG_MDM_BUF_SIZE);
+ if (err)
+ goto fail;
+
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "queueing a read buf %pK, ch: %s\n",
+ buf, mhi_info->name);
+ spin_lock_irqsave(&read_ch->lock, flags);
+ err = mhi_queue_xfer(read_ch->hdl, buf, DIAG_MDM_BUF_SIZE,
+ mhi_flags);
+ spin_unlock_irqrestore(&read_ch->lock, flags);
+ if (err) {
+ pr_err_ratelimited("diag: Unable to read from MHI channel %s, err: %d\n",
+ mhi_info->name, err);
+ goto fail;
+ }
+ } while (buf);
+
+ return;
+fail:
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf, DIAG_MDM_BUF_SIZE);
+ queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
+}
+
+static int mhi_queue_read(int id)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+ id);
+ return -EINVAL;
+ }
+ queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+ return 0;
+}
+
+static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ enum MHI_FLAGS mhi_flags = MHI_EOT;
+ unsigned long flags;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+ id);
+ return -EINVAL;
+ }
+
+ if (!buf || len <= 0) {
+ pr_err("diag: In %s, ch %d, invalid buf %pK len %d\n",
+ __func__, id, buf, len);
+ return -EINVAL;
+ }
+
+ if (!diag_mhi[id].enabled) {
+ pr_err_ratelimited("diag: In %s, MHI channel %s is not enabled\n",
+ __func__, diag_mhi[id].name);
+ return -EIO;
+ }
+
+ ch = &diag_mhi[id].write_ch;
+ if (!(atomic_read(&(ch->opened)))) {
+ pr_err_ratelimited("diag: In %s, MHI write channel %s is not open\n",
+ __func__, diag_mhi[id].name);
+ return -EIO;
+ }
+
+ err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf,
+ len);
+ if (err)
+ goto fail;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ err = mhi_queue_xfer(ch->hdl, buf, len, mhi_flags);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, cannot write to MHI channel %pK, len %d, err: %d\n",
+ __func__, diag_mhi[id].name, len, err);
+ mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+ id);
+ return -EINVAL;
+ }
+
+ if (!buf)
+ return -EINVAL;
+
+ mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_READ_CH, buf, len);
+ queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+ return 0;
+}
+
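+/*
+ * Single callback for all MHI events on both channels of both devices;
+ * the owning device and channel are recovered from user_data via
+ * GET_INFO_INDEX()/GET_CH_TYPE().
+ */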
+static void mhi_notifier(struct mhi_cb_info *cb_info)
+{
+ int index;
+ int type;
+ int err = 0;
+ struct mhi_result *result = NULL;
+ struct diag_mhi_ch_t *ch = NULL;
+ void *buf = NULL;
+ struct diag_mhi_info *mhi_info = NULL;
+ unsigned long flags;
+
+ if (!cb_info)
+ return;
+
+ result = cb_info->result;
+ if (!result) {
+ pr_err_ratelimited("diag: failed to obtain mhi result from callback\n");
+ return;
+ }
+
+ index = GET_INFO_INDEX((uintptr_t)cb_info->result->user_data);
+ if (index < 0 || index >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid MHI index %d\n",
+ __func__, index);
+ return;
+ }
+
+ type = GET_CH_TYPE((uintptr_t)cb_info->result->user_data);
+ switch (type) {
+ case TYPE_MHI_READ_CH:
+ ch = &diag_mhi[index].read_ch;
+ break;
+ case TYPE_MHI_WRITE_CH:
+ ch = &diag_mhi[index].write_ch;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid channel type %d\n",
+ __func__, type);
+ return;
+ }
+
+ switch (cb_info->cb_reason) {
+ case MHI_CB_MHI_ENABLED:
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "received mhi enabled notifiation port: %d ch: %d\n",
+ index, ch->type);
+ err = mhi_ch_open(ch);
+ if (err)
+ break;
+ if (ch->type == TYPE_MHI_READ_CH) {
+ diag_mhi[index].num_read = mhi_get_free_desc(ch->hdl);
+ if (diag_mhi[index].num_read <= 0) {
+ pr_err("diag: In %s, invalid number of descriptors %d\n",
+ __func__, diag_mhi[index].num_read);
+ break;
+ }
+ }
+ __mhi_open(&diag_mhi[index], CHANNELS_OPENED);
+ queue_work(diag_mhi[index].mhi_wq,
+ &(diag_mhi[index].open_work));
+ break;
+ case MHI_CB_XFER:
+ /*
+ * If the channel is a read channel, this is a read
+ * complete notification - write complete if the channel is
+ * a write channel.
+ */
+ if (type == TYPE_MHI_READ_CH) {
+ if (!atomic_read(&(diag_mhi[index].read_ch.opened)))
+ break;
+
+ queue_work(diag_mhi[index].mhi_wq,
+ &(diag_mhi[index].read_done_work));
+ break;
+ }
+ buf = result->buf_addr;
+ if (!buf) {
+ pr_err_ratelimited("diag: In %s, unable to de-serialize the data\n",
+ __func__);
+ break;
+ }
+ mhi_buf_tbl_remove(&diag_mhi[index], TYPE_MHI_WRITE_CH, buf,
+ result->bytes_xferd);
+ diag_remote_dev_write_done(diag_mhi[index].dev_id, buf,
+ result->bytes_xferd,
+ diag_mhi[index].id);
+ break;
+ case MHI_CB_MHI_DISABLED:
+ case MHI_CB_SYS_ERROR:
+ case MHI_CB_MHI_SHUTDOWN:
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "received mhi link down cb: %d port: %d ch: %d\n",
+ cb_info->cb_reason, index, ch->type);
+ mhi_info = &diag_mhi[index];
+ if (!mhi_info->enabled)
+ return;
+ spin_lock_irqsave(&mhi_info->lock, flags);
+ mhi_info->enabled = 0;
+ spin_unlock_irqrestore(&mhi_info->lock, flags);
+ atomic_set(&(mhi_info->read_ch.opened), 0);
+ atomic_set(&(mhi_info->write_ch.opened), 0);
+ flush_workqueue(mhi_info->mhi_wq);
+ mhi_buf_tbl_clear(mhi_info);
+ diag_remote_dev_close(mhi_info->dev_id);
+ break;
+ default:
+ pr_err("diag: In %s, invalid cb reason 0x%x\n", __func__,
+ cb_info->cb_reason);
+ break;
+ }
+}
+
+static struct diag_remote_dev_ops diag_mhi_fwd_ops = {
+ .open = mhi_open,
+ .close = mhi_close,
+ .queue_read = mhi_queue_read,
+ .write = mhi_write,
+ .fwd_complete = mhi_fwd_complete,
+};
+
+static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch)
+{
+ int ctxt = 0;
+
+ if (!ch)
+ return -EIO;
+ if (id < 0 || id >= NUM_MHI_DEV)
+ return -EINVAL;
+ spin_lock_init(&ch->lock);
+ atomic_set(&(ch->opened), 0);
+ ctxt = SET_CH_CTXT(id, ch->type);
+ ch->client_info.mhi_client_cb = mhi_notifier;
+ ch->client_info.chan = ch->chan;
+ ch->client_info.dev = &driver->pdev->dev;
+ ch->client_info.node_name = "qcom,mhi";
+ ch->client_info.user_data = (void *)(uintptr_t)ctxt;
+ return mhi_register_channel(&ch->hdl, &ch->client_info);
+}
+
+static void diag_mhi_dev_exit(int dev)
+{
+ struct diag_mhi_info *mhi_info = NULL;
+
+ if (dev < 0 || dev >= NUM_MHI_DEV)
+ return;
+
+ mhi_info = &diag_mhi[dev];
+ if (mhi_info->mhi_wq)
+ destroy_workqueue(mhi_info->mhi_wq);
+ mhi_close(mhi_info->id);
+ if (mhi_info->mempool_init)
+ diagmem_exit(driver, mhi_info->mempool);
+}
+
+int diag_mhi_init(void)
+{
+ int i;
+ int err = 0;
+ struct diag_mhi_info *mhi_info = NULL;
+ char wq_name[DIAG_MHI_NAME_SZ + DIAG_MHI_STRING_SZ];
+
+ for (i = 0; i < NUM_MHI_DEV; i++) {
+ mhi_info = &diag_mhi[i];
+ spin_lock_init(&mhi_info->lock);
+ INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
+ INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn);
+ INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
+ INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
+ strlcpy(wq_name, "diag_mhi_", DIAG_MHI_STRING_SZ);
+ strlcat(wq_name, mhi_info->name, sizeof(mhi_info->name));
+ diagmem_init(driver, mhi_info->mempool);
+ mhi_info->mempool_init = 1;
+ mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
+ if (!mhi_info->mhi_wq)
+ goto fail;
+ err = diagfwd_bridge_register(mhi_info->dev_id, mhi_info->id,
+ &diag_mhi_fwd_ops);
+ if (err) {
+ pr_err("diag: Unable to register MHI channel %d with bridge, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ err = diag_mhi_register_ch(mhi_info->id, &mhi_info->read_ch);
+ if (err) {
+ pr_err("diag: Unable to register MHI read channel for %d, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ err = diag_mhi_register_ch(mhi_info->id, &mhi_info->write_ch);
+ if (err) {
+ pr_err("diag: Unable to register MHI write channel for %d, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ DIAG_LOG(DIAG_DEBUG_BRIDGE, "mhi port %d is initailzed\n", i);
+ }
+
+ return 0;
+fail:
+ diag_mhi_dev_exit(i);
+ return -ENOMEM;
+}
+
+void diag_mhi_exit(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_MHI_DEV; i++)
+ diag_mhi_dev_exit(i);
+}
diff --git a/drivers/char/diag/diagfwd_mhi.h b/drivers/char/diag/diagfwd_mhi.h
new file mode 100644
index 000000000000..a4466977ca97
--- /dev/null
+++ b/drivers/char/diag/diagfwd_mhi.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_MHI_H
+#define DIAGFWD_MHI_H
+
+#include "diagchar.h"
+#include <linux/msm_mhi.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/ipc_logging.h>
+
+#define MHI_1 0
+#define MHI_DCI_1 1
+#define NUM_MHI_DEV 2
+
+#define TYPE_MHI_READ_CH 0
+#define TYPE_MHI_WRITE_CH 1
+
+#define DIAG_MHI_NAME_SZ 24
+
+struct diag_mhi_buf_tbl_t {
+ struct list_head link;
+ unsigned char *buf;
+ int len;
+};
+
+struct diag_mhi_ch_t {
+ uint8_t type;
+ u32 channel;
+ enum MHI_CLIENT_CHANNEL chan;
+ atomic_t opened;
+ spinlock_t lock;
+ struct mhi_client_info_t client_info;
+ struct mhi_client_handle *hdl;
+ struct list_head buf_tbl;
+};
+
+struct diag_mhi_info {
+ int id;
+ int dev_id;
+ int mempool;
+ int mempool_init;
+ int num_read;
+ uint8_t enabled;
+ char name[DIAG_MHI_NAME_SZ];
+ struct work_struct read_work;
+ struct work_struct read_done_work;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct workqueue_struct *mhi_wq;
+ wait_queue_head_t mhi_wait_q;
+ struct diag_mhi_ch_t read_ch;
+ struct diag_mhi_ch_t write_ch;
+ spinlock_t lock;
+};
+
+extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV];
+
+int diag_mhi_init(void);
+void diag_mhi_exit(void);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
new file mode 100644
index 000000000000..bfdce051d405
--- /dev/null
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -0,0 +1,2070 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "diagchar.h"
+#include "diagchar_hdlc.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diag_dci.h"
+#include "diagfwd.h"
+#include "diagfwd_smd.h"
+#include "diagfwd_socket.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_glink.h"
+#include "diag_memorydevice.h"
+
+struct data_header {
+ uint8_t control_char;
+ uint8_t version;
+ uint16_t length;
+};
+
+static struct diagfwd_info *early_init_info[NUM_TRANSPORT];
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info);
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info);
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info);
+struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+static struct diag_channel_ops data_ch_ops = {
+ .open = NULL,
+ .close = NULL,
+ .read_done = diagfwd_data_read_untag_done
+};
+
+static struct diag_channel_ops cntl_ch_ops = {
+ .open = diagfwd_cntl_open,
+ .close = diagfwd_cntl_close,
+ .read_done = diagfwd_cntl_read_done
+};
+
+static struct diag_channel_ops dci_ch_ops = {
+ .open = diagfwd_dci_open,
+ .close = diagfwd_dci_close,
+ .read_done = diagfwd_dci_read_done
+};
+
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+ diag_cntl_channel_open(fwd_info);
+}
+
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+ diag_cntl_channel_close(fwd_info);
+}
+
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+ DIAG_STATUS_OPEN, DCI_LOCAL_PROC);
+}
+
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+ DIAG_STATUS_CLOSED, DCI_LOCAL_PROC);
+}
+
+static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
+ unsigned char *buf, int len)
+{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ struct data_header *header;
+ int header_size = sizeof(struct data_header);
+ uint8_t *end_control_char = NULL;
+ uint8_t *payload = NULL;
+ uint8_t *temp_buf = NULL;
+ uint8_t *temp_encode_buf = NULL;
+ int src_pkt_len;
+ int encoded_pkt_length;
+ int max_size;
+ int total_processed = 0;
+ int bytes_remaining;
+ int err = 0;
+ uint8_t loop_count = 0;
+
+ if (!dest_buf || !dest_len || !buf)
+ return -EIO;
+
+ temp_buf = buf;
+ temp_encode_buf = dest_buf;
+ bytes_remaining = *dest_len;
+
+ while (total_processed < len) {
+ loop_count++;
+ header = (struct data_header *)temp_buf;
+ /* Perform initial error checking */
+ if (header->control_char != CONTROL_CHAR ||
+ header->version != 1) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (header->length >= bytes_remaining)
+ break;
+
+ payload = temp_buf + header_size;
+ end_control_char = payload + header->length;
+ if (*end_control_char != CONTROL_CHAR) {
+ err = -EINVAL;
+ break;
+ }
+
+ max_size = 2 * header->length + 3;
+ if (bytes_remaining < max_size) {
+ err = -EINVAL;
+ break;
+ }
+
+ /* Prepare for encoding the data */
+ send.state = DIAG_STATE_START;
+ send.pkt = payload;
+ send.last = (void *)(payload + header->length - 1);
+ send.terminate = 1;
+
+ enc.dest = temp_encode_buf;
+ enc.dest_last = (void *)(temp_encode_buf + max_size);
+ enc.crc = 0;
+ diag_hdlc_encode(&send, &enc);
+
+ /* Prepare for next packet */
+ src_pkt_len = (header_size + header->length + 1);
+ total_processed += src_pkt_len;
+ temp_buf += src_pkt_len;
+
+ encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+ bytes_remaining -= encoded_pkt_length;
+ temp_encode_buf = enc.dest;
+ }
+
+ *dest_len = (int)(temp_encode_buf - dest_buf);
+
+ return err;
+}
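+
+/*
+ * Illustrative layout of the stream consumed by diag_add_hdlc_encoding()
+ * above, per packet (sizes follow struct data_header and the checks in
+ * the loop):
+ *
+ *	| control_char | version | length      | payload      | control_char |
+ *	| 1 byte       | 1 byte  | 2 bytes, LE | length bytes | 1 byte       |
+ *
+ * Each payload is HDLC-encoded into at most (2 * length) + 3 destination
+ * bytes, which is the worst case the max_size check reserves.
+ */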
+
+static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
+{
+ int i, ctx = 0;
+ uint32_t max_size = 0;
+ unsigned char *temp_buf = NULL;
+ struct diag_md_info *ch = NULL;
+
+ if (!buf || len == 0)
+ return -EINVAL;
+
+ max_size = (2 * len) + 3;
+ if (max_size > PERIPHERAL_BUF_SZ) {
+ if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) {
+ pr_err("diag: In %s, max_size is going beyond limit %d\n",
+ __func__, max_size);
+ max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
+ }
+
+ if (buf->len < max_size) {
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) {
+ ch = &diag_md[DIAG_LOCAL_PROC];
+ for (i = 0; ch != NULL &&
+ i < ch->num_tbl_entries; i++) {
+ if (ch->tbl[i].buf == buf->data) {
+ ctx = ch->tbl[i].ctx;
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Flushed mdlog table entries before reallocating data buffer, p:%d, t:%d\n",
+ GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx));
+ break;
+ }
+ }
+ }
+ temp_buf = krealloc(buf->data, max_size +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (!temp_buf)
+ return -ENOMEM;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Reallocated data buffer: %pK with size: %d\n",
+ temp_buf, max_size);
+ buf->data = temp_buf;
+ buf->len = max_size;
+ }
+ }
+
+ return buf->len;
+}
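+
+/*
+ * Illustrative note on the helper above: krealloc() may move the
+ * allocation, so in memory-device mode any diag_md table entry still
+ * pointing at the old buf->data is flushed first; otherwise the table
+ * would be left holding a dangling pointer after reallocation.
+ */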
+
+int diag_md_get_peripheral(int ctxt)
+{
+ int peripheral;
+
+ if (driver->num_pd_session) {
+ peripheral = GET_PD_CTXT(ctxt);
+ switch (peripheral) {
+ case UPD_WLAN:
+ if (!driver->pd_logging_mode[0])
+ peripheral = PERIPHERAL_MODEM;
+ break;
+ case UPD_AUDIO:
+ if (!driver->pd_logging_mode[1])
+ peripheral = PERIPHERAL_LPASS;
+ break;
+ case UPD_SENSORS:
+ if (!driver->pd_logging_mode[2])
+ peripheral = PERIPHERAL_LPASS;
+ break;
+ case DIAG_ID_MPSS:
+ case DIAG_ID_LPASS:
+ case DIAG_ID_CDSP:
+ default:
+ peripheral =
+ GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ break;
+ }
+ } else {
+ /* Account for Apps data as well */
+ peripheral = GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ }
+
+ return peripheral;
+}
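+
+/*
+ * Illustrative sketch of the ctxt word decoded above. The exact packing
+ * is defined by the SET_/GET_ macros in diagchar.h; the field values
+ * below are only an assumed example:
+ *
+ *	ctxt = SET_BUF_CTXT(PERIPHERAL_MODEM, TYPE_DATA, 1);
+ *	ctxt |= SET_PD_CTXT(UPD_WLAN);
+ *	...
+ *	peripheral = GET_BUF_PERIPHERAL(ctxt);	(PERIPHERAL_MODEM)
+ *	pd = GET_PD_CTXT(ctxt);			(UPD_WLAN)
+ */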
+
+static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
+ struct diagfwd_buf_t *buf, int len)
+{
+ int err = 0;
+ int write_len = 0, peripheral = 0;
+ unsigned char *write_buf = NULL;
+ uint8_t hdlc_disabled = 0;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&fwd_info->data_mutex);
+
+ peripheral = diag_md_get_peripheral(buf->ctxt);
+ if (peripheral < 0) {
+ pr_err("diag:%s:%d invalid peripheral = %d\n",
+ __func__, __LINE__, peripheral);
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_ws_release();
+ return;
+ }
+
+ hdlc_disabled = driver->p_hdlc_disabled[peripheral];
+
+ if (hdlc_disabled) {
+		/* The data is raw and HDLC is disabled on the APPS side */
+ if (!buf) {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ if (len > PERIPHERAL_BUF_SZ) {
+ pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+ __func__, len, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = len;
+ if (write_len <= 0)
+ goto end;
+ write_buf = buf->data_raw;
+ } else {
+ if (!buf) {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+
+ write_len = check_bufsize_for_encoding(buf, len);
+ if (write_len <= 0) {
+ pr_err("diag: error in checking buf for encoding\n");
+ goto end;
+ }
+ write_buf = buf->data;
+ err = diag_add_hdlc_encoding(write_buf, &write_len,
+ buf->data_raw, len);
+ if (err) {
+ pr_err("diag: error in adding hdlc encoding\n");
+ goto end;
+ }
+ }
+
+ if (write_len > 0) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+ buf->ctxt);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+ __func__, err);
+ goto end;
+ }
+ }
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diagfwd_queue_read(fwd_info);
+ return;
+
+end:
+ diag_ws_release();
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ if (buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(buf->ctxt));
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(buf->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ int len_cpd = 0;
+ int len_upd_1 = 0, len_upd_2 = 0;
+ int ctxt_cpd = 0;
+ int ctxt_upd_1 = 0, ctxt_upd_2 = 0;
+ int buf_len = 0, processed = 0;
+ unsigned char *temp_buf_main = NULL;
+ unsigned char *temp_buf_cpd = NULL;
+ unsigned char *temp_buf_upd_1 = NULL;
+ unsigned char *temp_buf_upd_2 = NULL;
+ struct diagfwd_buf_t *temp_ptr_upd = NULL;
+ struct diagfwd_buf_t *temp_ptr_cpd = NULL;
+ int flag_buf_1 = 0, flag_buf_2 = 0;
+ uint8_t peripheral;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+ peripheral = fwd_info->peripheral;
+
+ if (driver->feature[peripheral].encode_hdlc &&
+ driver->feature[peripheral].untag_header &&
+ driver->peripheral_untag[peripheral]) {
+ temp_buf_cpd = buf;
+ temp_buf_main = buf;
+ if (fwd_info->buf_1 &&
+ fwd_info->buf_1->data_raw == buf) {
+ flag_buf_1 = 1;
+ temp_ptr_cpd = fwd_info->buf_1;
+ if (fwd_info->type == TYPE_DATA) {
+ temp_buf_upd_1 =
+ fwd_info->buf_upd_1_a->data_raw;
+ if (peripheral ==
+ PERIPHERAL_LPASS)
+ temp_buf_upd_2 =
+ fwd_info->buf_upd_2_a->data_raw;
+ }
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			flag_buf_2 = 1;
+			temp_ptr_cpd = fwd_info->buf_2;
+			if (fwd_info->type == TYPE_DATA) {
+				temp_buf_upd_1 =
+					fwd_info->buf_upd_1_b->data_raw;
+				if (peripheral == PERIPHERAL_LPASS)
+					temp_buf_upd_2 =
+					fwd_info->buf_upd_2_b->data_raw;
+			}
+ } else {
+ pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ while (processed < len) {
+ buf_len =
+ *(uint16_t *) (temp_buf_main + 2);
+ switch ((*temp_buf_main)) {
+ case DIAG_ID_MPSS:
+ ctxt_cpd = DIAG_ID_MPSS;
+ len_cpd += buf_len;
+ if (temp_buf_cpd) {
+ memcpy(temp_buf_cpd,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_cpd += buf_len;
+ }
+ break;
+ case DIAG_ID_WLAN:
+ ctxt_upd_1 = UPD_WLAN;
+ len_upd_1 += buf_len;
+ if (temp_buf_upd_1) {
+ memcpy(temp_buf_upd_1,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_upd_1 += buf_len;
+ }
+ break;
+ case DIAG_ID_LPASS:
+ ctxt_cpd = DIAG_ID_LPASS;
+ len_cpd += buf_len;
+ if (temp_buf_cpd) {
+ memcpy(temp_buf_cpd,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_cpd += buf_len;
+ }
+ break;
+ case DIAG_ID_AUDIO:
+ ctxt_upd_1 = UPD_AUDIO;
+ len_upd_1 += buf_len;
+ if (temp_buf_upd_1) {
+ memcpy(temp_buf_upd_1,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_upd_1 += buf_len;
+ }
+ break;
+ case DIAG_ID_SENSORS:
+ ctxt_upd_2 = UPD_SENSORS;
+ len_upd_2 += buf_len;
+ if (temp_buf_upd_2) {
+ memcpy(temp_buf_upd_2,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_upd_2 += buf_len;
+ }
+ break;
+ case DIAG_ID_CDSP:
+ ctxt_cpd = DIAG_ID_CDSP;
+ len_cpd += buf_len;
+ if (temp_buf_cpd) {
+ memcpy(temp_buf_cpd,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_cpd += buf_len;
+ }
+ break;
+ default:
+ goto end;
+ }
+ len = len - 4;
+ temp_buf_main += (buf_len + 4);
+ processed += buf_len;
+ }
+
+ if (flag_buf_1) {
+ fwd_info->cpd_len_1 = len_cpd;
+ if (fwd_info->type == TYPE_DATA)
+ fwd_info->upd_len_1_a = len_upd_1;
+ if (peripheral == PERIPHERAL_LPASS &&
+ fwd_info->type == TYPE_DATA)
+ fwd_info->upd_len_2_a = len_upd_2;
+ } else if (flag_buf_2) {
+ fwd_info->cpd_len_2 = len_cpd;
+ if (fwd_info->type == TYPE_DATA)
+ fwd_info->upd_len_1_b = len_upd_1;
+ if (peripheral == PERIPHERAL_LPASS &&
+ fwd_info->type == TYPE_DATA)
+ fwd_info->upd_len_2_b = len_upd_2;
+ }
+
+ if (peripheral == PERIPHERAL_LPASS &&
+ fwd_info->type == TYPE_DATA && len_upd_2) {
+ if (flag_buf_1)
+ temp_ptr_upd = fwd_info->buf_upd_2_a;
+ else
+ temp_ptr_upd = fwd_info->buf_upd_2_b;
+ temp_ptr_upd->ctxt &= 0x00FFFFFF;
+ temp_ptr_upd->ctxt |=
+ (SET_PD_CTXT(ctxt_upd_2));
+ atomic_set(&temp_ptr_upd->in_busy, 1);
+ diagfwd_data_process_done(fwd_info,
+ temp_ptr_upd, len_upd_2);
+ } else {
+ if (flag_buf_1)
+ fwd_info->upd_len_2_a = 0;
+ if (flag_buf_2)
+ fwd_info->upd_len_2_b = 0;
+ }
+ if (fwd_info->type == TYPE_DATA && len_upd_1) {
+ if (flag_buf_1)
+ temp_ptr_upd = fwd_info->buf_upd_1_a;
+ else
+ temp_ptr_upd = fwd_info->buf_upd_1_b;
+ temp_ptr_upd->ctxt &= 0x00FFFFFF;
+ temp_ptr_upd->ctxt |=
+ (SET_PD_CTXT(ctxt_upd_1));
+ atomic_set(&temp_ptr_upd->in_busy, 1);
+ diagfwd_data_process_done(fwd_info,
+ temp_ptr_upd, len_upd_1);
+ } else {
+ if (flag_buf_1)
+ fwd_info->upd_len_1_a = 0;
+ if (flag_buf_2)
+ fwd_info->upd_len_1_b = 0;
+ }
+ if (len_cpd) {
+ temp_ptr_cpd->ctxt &= 0x00FFFFFF;
+ temp_ptr_cpd->ctxt |=
+ (SET_PD_CTXT(ctxt_cpd));
+ diagfwd_data_process_done(fwd_info,
+ temp_ptr_cpd, len_cpd);
+ } else {
+ if (flag_buf_1)
+ fwd_info->cpd_len_1 = 0;
+ if (flag_buf_2)
+ fwd_info->cpd_len_2 = 0;
+ }
+ return;
+ } else {
+ diagfwd_data_read_done(fwd_info, buf, len);
+ return;
+ }
+end:
+ diag_ws_release();
+ if (temp_ptr_cpd) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_ptr_cpd->ctxt));
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_ptr_cpd->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
+}
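+
+/*
+ * Simplified sketch of the untagged stream parsed above (hedged; the
+ * real loop also tracks per-PD destination buffers): each chunk starts
+ * with a 4-byte header whose byte 0 is the diag_id (DIAG_ID_MPSS,
+ * DIAG_ID_WLAN, ...) and bytes 2-3 are the little-endian payload
+ * length, with the payload at offset 4. route() is a hypothetical sink.
+ *
+ *	while (processed < len) {
+ *		uint16_t plen = *(uint16_t *)(chunk + 2);
+ *
+ *		route(chunk[0], chunk + 4, plen);
+ *		chunk += plen + 4;
+ *		processed += plen;
+ *	}
+ */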
+
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ int err = 0;
+ int write_len = 0;
+ unsigned char *write_buf = NULL;
+ struct diagfwd_buf_t *temp_buf = NULL;
+ uint8_t hdlc_disabled = 0;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&fwd_info->data_mutex);
+
+ hdlc_disabled = driver->p_hdlc_disabled[fwd_info->peripheral];
+
+ if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
+ temp_buf = fwd_info->buf_1;
+ write_buf = fwd_info->buf_1->data;
+ } else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) {
+ temp_buf = fwd_info->buf_2;
+ write_buf = fwd_info->buf_2->data;
+ } else {
+ pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = len;
+ } else if (hdlc_disabled) {
+		/* The data is raw and HDLC is disabled on the APPS side */
+ if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+ temp_buf = fwd_info->buf_1;
+ } else if (fwd_info->buf_2 &&
+ fwd_info->buf_2->data_raw == buf) {
+ temp_buf = fwd_info->buf_2;
+ } else {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ if (len > PERIPHERAL_BUF_SZ) {
+ pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+ __func__, len, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = len;
+ write_buf = buf;
+ } else {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+ temp_buf = fwd_info->buf_1;
+ } else if (fwd_info->buf_2 &&
+ fwd_info->buf_2->data_raw == buf) {
+ temp_buf = fwd_info->buf_2;
+ } else {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = check_bufsize_for_encoding(temp_buf, len);
+ if (write_len <= 0) {
+ pr_err("diag: error in checking buf for encoding\n");
+ goto end;
+ }
+ write_buf = temp_buf->data;
+ err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len);
+ if (err) {
+ pr_err("diag: error in adding hdlc encoding\n");
+ goto end;
+ }
+ }
+
+ if (write_len > 0) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+ temp_buf->ctxt);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+ __func__, err);
+ goto end;
+ }
+ }
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diagfwd_queue_read(fwd_info);
+ return;
+
+end:
+ diag_ws_release();
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ if (temp_buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_buf->ctxt));
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_buf->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
+ return;
+}
+
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ if (!fwd_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
+ diag_ws_release();
+ return;
+ }
+
+ if (fwd_info->type != TYPE_CNTL) {
+ pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type, fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ diag_cntl_process_read_data(fwd_info, buf, len);
+ /*
+ * Control packets are not consumed by the clients. Mimic
+ * consumption by setting and clearing the wakeup source copy_count
+ * explicitly.
+ */
+ diag_ws_on_copy_fail(DIAG_WS_MUX);
+ /* Reset the buffer in_busy value after processing the data */
+ if (fwd_info->buf_1) {
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+
+ diagfwd_queue_read(fwd_info);
+ diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
+ diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]);
+}
+
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ if (!fwd_info)
+ return;
+
+ switch (fwd_info->type) {
+ case TYPE_DCI:
+ case TYPE_DCI_CMD:
+ break;
+ default:
+ pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type, fwd_info->peripheral);
+ return;
+ }
+
+ diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
+ /* Reset the buffer in_busy value after processing the data */
+ if (fwd_info->buf_1) {
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+
+ diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
+ unsigned char *buf)
+{
+ if (!fwd_info || !buf)
+ return;
+
+ if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data == buf)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ } else {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ }
+ if (fwd_info->buf_1 && !atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && !atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+}
+
+int diagfwd_peripheral_init(void)
+{
+ uint8_t peripheral;
+ uint8_t transport;
+ uint8_t type;
+ struct diagfwd_info *fwd_info = NULL;
+
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ early_init_info[transport] = kzalloc(
+ sizeof(struct diagfwd_info) * NUM_PERIPHERALS,
+ GFP_KERNEL);
+ if (!early_init_info[transport])
+ return -ENOMEM;
+ kmemleak_not_leak(early_init_info[transport]);
+ }
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ fwd_info = &early_init_info[transport][peripheral];
+ fwd_info->peripheral = peripheral;
+ fwd_info->type = TYPE_CNTL;
+ fwd_info->transport = transport;
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ fwd_info->inited = 1;
+ fwd_info->read_bytes = 0;
+ fwd_info->write_bytes = 0;
+ fwd_info->cpd_len_1 = 0;
+ fwd_info->cpd_len_2 = 0;
+ fwd_info->upd_len_1_a = 0;
+ fwd_info->upd_len_1_b = 0;
+ fwd_info->upd_len_2_a = 0;
+			fwd_info->upd_len_2_b = 0;
+ mutex_init(&fwd_info->buf_mutex);
+ mutex_init(&fwd_info->data_mutex);
+ spin_lock_init(&fwd_info->write_buf_lock);
+ }
+ }
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ for (type = 0; type < NUM_TYPES; type++) {
+ fwd_info = &peripheral_info[type][peripheral];
+ fwd_info->peripheral = peripheral;
+ fwd_info->type = type;
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ fwd_info->read_bytes = 0;
+ fwd_info->write_bytes = 0;
+ fwd_info->cpd_len_1 = 0;
+ fwd_info->cpd_len_2 = 0;
+ fwd_info->upd_len_1_a = 0;
+ fwd_info->upd_len_1_b = 0;
+ fwd_info->upd_len_2_a = 0;
+			fwd_info->upd_len_2_b = 0;
+ spin_lock_init(&fwd_info->write_buf_lock);
+ mutex_init(&fwd_info->buf_mutex);
+ mutex_init(&fwd_info->data_mutex);
+ /*
+ * This state shouldn't be set for Control channels
+ * during initialization. This is set when the feature
+ * mask is received for the first time.
+ */
+ if (type != TYPE_CNTL)
+ fwd_info->inited = 1;
+ }
+ driver->diagfwd_data[peripheral] =
+ &peripheral_info[TYPE_DATA][peripheral];
+ driver->diagfwd_cntl[peripheral] =
+ &peripheral_info[TYPE_CNTL][peripheral];
+ driver->diagfwd_dci[peripheral] =
+ &peripheral_info[TYPE_DCI][peripheral];
+ driver->diagfwd_cmd[peripheral] =
+ &peripheral_info[TYPE_CMD][peripheral];
+ driver->diagfwd_dci_cmd[peripheral] =
+ &peripheral_info[TYPE_DCI_CMD][peripheral];
+ }
+
+ diag_smd_init();
+ if (driver->supports_sockets)
+ diag_socket_init();
+ diag_glink_init();
+
+ return 0;
+}
+
+void diagfwd_peripheral_exit(void)
+{
+ uint8_t peripheral;
+ uint8_t type;
+ struct diagfwd_info *fwd_info = NULL;
+ int transport = 0;
+
+ diag_smd_exit();
+ diag_socket_exit();
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ for (type = 0; type < NUM_TYPES; type++) {
+ fwd_info = &peripheral_info[type][peripheral];
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ diagfwd_buffers_exit(fwd_info);
+ }
+ }
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ driver->diagfwd_data[peripheral] = NULL;
+ driver->diagfwd_cntl[peripheral] = NULL;
+ driver->diagfwd_dci[peripheral] = NULL;
+ driver->diagfwd_cmd[peripheral] = NULL;
+ driver->diagfwd_dci_cmd[peripheral] = NULL;
+ }
+
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ kfree(early_init_info[transport]);
+ early_init_info[transport] = NULL;
+ }
+}
+
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+ struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (!ctxt || !ops)
+ return -EIO;
+
+ if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ fwd_info = &early_init_info[transport][peripheral];
+ *fwd_ctxt = &early_init_info[transport][peripheral];
+ fwd_info->ctxt = ctxt;
+ fwd_info->p_ops = ops;
+ fwd_info->c_ops = &cntl_ch_ops;
+
+ return 0;
+}
+
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+ void *ctxt, struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES ||
+ !ctxt || !ops || transport >= NUM_TRANSPORT) {
+ pr_err("diag: In %s, returning error\n", __func__);
+ return -EIO;
+ }
+
+ fwd_info = &peripheral_info[type][peripheral];
+ *fwd_ctxt = &peripheral_info[type][peripheral];
+ fwd_info->ctxt = ctxt;
+ fwd_info->p_ops = ops;
+ fwd_info->transport = transport;
+ fwd_info->ch_open = 0;
+
+ switch (type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ fwd_info->c_ops = &data_ch_ops;
+ break;
+ case TYPE_DCI:
+ case TYPE_DCI_CMD:
+ fwd_info->c_ops = &dci_ch_ops;
+ break;
+ default:
+ pr_err("diag: In %s, invalid type: %d\n", __func__, type);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&fwd_info->opened) &&
+ fwd_info->p_ops && fwd_info->p_ops->open) {
+ /*
+ * The registration can happen late, like in the case of
+		 * sockets. fwd_info->opened reflects diag_state. Propagate
+		 * the state to the peripherals.
+ */
+ fwd_info->p_ops->open(fwd_info->ctxt);
+ }
+
+ return 0;
+}
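+
+/*
+ * Illustrative registration from a transport layer; the my_* names are
+ * hypothetical (the real callers live in diagfwd_smd.c,
+ * diagfwd_socket.c and diagfwd_glink.c):
+ *
+ *	static struct diag_peripheral_ops my_ops = {
+ *		.open = my_open,
+ *		.close = my_close,
+ *		.write = my_write,
+ *		.read = my_read,
+ *		.queue_read = my_queue_read,
+ *	};
+ *	struct diagfwd_info *fwd_ctxt = NULL;
+ *
+ *	err = diagfwd_register(TRANSPORT_SOCKET, PERIPHERAL_MODEM,
+ *			       TYPE_DATA, my_ctxt, &my_ops, &fwd_ctxt);
+ */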
+
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ if (fwd_info->ctxt != ctxt) {
+ pr_err("diag: In %s, unable to find a match for p: %d t: %d\n",
+ __func__, peripheral, type);
+ return;
+ }
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ diagfwd_buffers_exit(fwd_info);
+
+ switch (type) {
+ case TYPE_DATA:
+ driver->diagfwd_data[peripheral] = NULL;
+ break;
+ case TYPE_CNTL:
+ driver->diagfwd_cntl[peripheral] = NULL;
+ break;
+ case TYPE_DCI:
+ driver->diagfwd_dci[peripheral] = NULL;
+ break;
+ case TYPE_CMD:
+ driver->diagfwd_cmd[peripheral] = NULL;
+ break;
+ case TYPE_DCI_CMD:
+ driver->diagfwd_dci_cmd[peripheral] = NULL;
+ break;
+ }
+}
+
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
+{
+ struct diagfwd_info *fwd_info = NULL;
+ struct diagfwd_info *dest_info = NULL;
+ int (*init_fn)(uint8_t) = NULL;
+ void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
+ int (*check_channel_state)(void *) = NULL;
+ uint8_t transport_open = 0;
+ int i = 0;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ switch (transport) {
+ case TRANSPORT_SMD:
+ transport_open = TRANSPORT_SOCKET;
+ init_fn = diag_socket_init_peripheral;
+ invalidate_fn = diag_socket_invalidate;
+ check_channel_state = diag_socket_check_state;
+ break;
+ case TRANSPORT_SOCKET:
+ if (peripheral == PERIPHERAL_WDSP) {
+ transport_open = TRANSPORT_GLINK;
+ init_fn = diag_glink_init_peripheral;
+ invalidate_fn = diag_glink_invalidate;
+ check_channel_state = diag_glink_check_state;
+ } else {
+ transport_open = TRANSPORT_SMD;
+ init_fn = diag_smd_init_peripheral;
+ invalidate_fn = diag_smd_invalidate;
+ check_channel_state = diag_smd_check_state;
+ }
+ break;
+ default:
+ return;
+ }
+
+ mutex_lock(&driver->diagfwd_channel_mutex[peripheral]);
+ fwd_info = &early_init_info[transport][peripheral];
+ if (fwd_info->p_ops && fwd_info->p_ops->close)
+ fwd_info->p_ops->close(fwd_info->ctxt);
+ fwd_info = &early_init_info[transport_open][peripheral];
+ dest_info = &peripheral_info[TYPE_CNTL][peripheral];
+ dest_info->inited = 1;
+ dest_info->ctxt = fwd_info->ctxt;
+ dest_info->p_ops = fwd_info->p_ops;
+ dest_info->c_ops = fwd_info->c_ops;
+ dest_info->ch_open = fwd_info->ch_open;
+ dest_info->read_bytes = fwd_info->read_bytes;
+ dest_info->write_bytes = fwd_info->write_bytes;
+ dest_info->inited = fwd_info->inited;
+ dest_info->buf_1 = fwd_info->buf_1;
+ dest_info->buf_2 = fwd_info->buf_2;
+ dest_info->transport = fwd_info->transport;
+ invalidate_fn(dest_info->ctxt, dest_info);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++)
+ dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
+ if (!check_channel_state(dest_info->ctxt))
+ diagfwd_late_open(dest_info);
+ diagfwd_cntl_open(dest_info);
+ init_fn(peripheral);
+ mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
+ diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
+ diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
+}
+
+void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
+{
+ void *buf = NULL;
+ int index;
+ unsigned long flags;
+
+ if (!fwd_info)
+ return NULL;
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (index = 0; (index < NUM_WRITE_BUFFERS) && fwd_info->buf_ptr[index];
+ index++) {
+ if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
+ atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
+ buf = fwd_info->buf_ptr[index]->data;
+			if (!buf) {
+				/*
+				 * Slot has no data buffer: release the
+				 * claim and fall through so the spinlock
+				 * is dropped before returning NULL.
+				 */
+				atomic_set(
+				&(fwd_info->buf_ptr[index]->in_busy), 0);
+			}
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ return buf;
+}
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
+{
+ struct diagfwd_info *fwd_info = NULL;
+ int err = 0;
+ uint8_t retry_count = 0;
+ uint8_t max_retries = 3;
+	void *buf_ptr = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return -EINVAL;
+
+ if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
+ if (!driver->feature[peripheral].rcvd_feature_mask ||
+ !driver->feature[peripheral].sent_feature_mask) {
+ pr_debug_ratelimited("diag: In %s, feature mask for peripheral: %d not received or sent yet\n",
+ __func__, peripheral);
+ return 0;
+ }
+ if (!driver->feature[peripheral].separate_cmd_rsp)
+ type = (type == TYPE_CMD) ? TYPE_DATA : TYPE_DCI;
+ }
+
+ fwd_info = &peripheral_info[type][peripheral];
+ if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
+ return -ENODEV;
+
+ if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
+ return -EIO;
+
+ if (fwd_info->transport == TRANSPORT_GLINK) {
+		buf_ptr = diagfwd_request_write_buf(fwd_info);
+		if (buf_ptr) {
+			memcpy(buf_ptr, buf, len);
+		} else {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				 "diag: buffer not found for writing\n");
+			return -EIO;
+		}
+	} else {
+		buf_ptr = buf;
+	}
+
+	while (retry_count < max_retries) {
+		err = fwd_info->p_ops->write(fwd_info->ctxt, buf_ptr, len);
+ if (err && err != -ENODEV) {
+ usleep_range(100000, 101000);
+ retry_count++;
+ continue;
+ }
+ break;
+ }
+
+ if (!err)
+ fwd_info->write_bytes += len;
+	else if (fwd_info->transport == TRANSPORT_GLINK)
+		diagfwd_write_buffer_done(fwd_info, buf_ptr);
+
+ return err;
+}
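+
+/*
+ * Illustrative caller sketch (hypothetical): diagfwd_write() already
+ * retries transient errors internally (up to 3 attempts, roughly 100 ms
+ * apart), so a caller only handles the final return code.
+ *
+ *	err = diagfwd_write(PERIPHERAL_MODEM, TYPE_CMD, pkt, pkt_len);
+ *	if (err && err != -ENODEV)
+ *		pr_debug("diag: unable to forward cmd, err: %d\n", err);
+ */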
+
+static void __diag_fwd_open(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ atomic_set(&fwd_info->opened, 1);
+ if (!fwd_info->inited)
+ return;
+
+ /*
+ * Logging mode here is reflecting previous mode
+ * status and will be updated to new mode later.
+ *
+ * Keeping the buffers busy for Memory Device Mode.
+ */
+
+ if ((driver->logging_mode != DIAG_USB_MODE) ||
+ driver->usb_connected) {
+ if (fwd_info->buf_1) {
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2) {
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ }
+
+ if (fwd_info->p_ops && fwd_info->p_ops->open)
+ fwd_info->p_ops->open(fwd_info->ctxt);
+
+ diagfwd_queue_read(fwd_info);
+}
+
+void diagfwd_early_open(uint8_t peripheral)
+{
+ uint8_t transport = 0;
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ fwd_info = &early_init_info[transport][peripheral];
+ __diag_fwd_open(fwd_info);
+ }
+}
+
+void diagfwd_open(uint8_t peripheral, uint8_t type)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ __diag_fwd_open(fwd_info);
+}
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info)
+{
+ __diag_fwd_open(fwd_info);
+}
+
+void diagfwd_close(uint8_t peripheral, uint8_t type)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ atomic_set(&fwd_info->opened, 0);
+ if (!fwd_info->inited)
+ return;
+
+ if (fwd_info->p_ops && fwd_info->p_ops->close)
+ fwd_info->p_ops->close(fwd_info->ctxt);
+
+ if (fwd_info->buf_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 1);
+ /*
+ * Only Data channels have two buffers. Set both the buffers
+ * to busy on close.
+ */
+ if (fwd_info->buf_2)
+ atomic_set(&fwd_info->buf_2->in_busy, 1);
+}
+
+int diagfwd_channel_open(struct diagfwd_info *fwd_info)
+{
+	int i;
+
+	if (!fwd_info)
+ return -EIO;
+
+ if (!fwd_info->inited) {
+ pr_debug("diag: In %s, channel is not inited, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return -EINVAL;
+ }
+
+ if (fwd_info->ch_open) {
+ pr_debug("diag: In %s, channel is already open, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return 0;
+ }
+ mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+ fwd_info->ch_open = 1;
+ diagfwd_buffers_init(fwd_info);
+
+ /*
+ * Initialize buffers for glink supported
+ * peripherals only.
+ */
+ if (fwd_info->transport == TRANSPORT_GLINK)
+ diagfwd_write_buffers_init(fwd_info);
+
+	if (fwd_info->c_ops && fwd_info->c_ops->open)
+ fwd_info->c_ops->open(fwd_info);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i])
+ atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
+ }
+ diagfwd_queue_read(fwd_info);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
+ fwd_info->peripheral, fwd_info->type);
+
+ if (atomic_read(&fwd_info->opened)) {
+ if (fwd_info->p_ops && fwd_info->p_ops->open)
+ fwd_info->p_ops->open(fwd_info->ctxt);
+ }
+ mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+ return 0;
+}
+
+int diagfwd_channel_close(struct diagfwd_info *fwd_info)
+{
+	int i;
+
+	if (!fwd_info)
+ return -EIO;
+
+ if (fwd_info->type == TYPE_CNTL)
+ flush_workqueue(driver->cntl_wq);
+
+ mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+ fwd_info->ch_open = 0;
+	if (fwd_info->c_ops && fwd_info->c_ops->close)
+ fwd_info->c_ops->close(fwd_info);
+
+ if (fwd_info->buf_1 && fwd_info->buf_1->data) {
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && fwd_info->buf_2->data) {
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i])
+ atomic_set(&fwd_info->buf_ptr[i]->in_busy, 1);
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
+ fwd_info->peripheral, fwd_info->type);
+ mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+ return 0;
+}
+
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, uint32_t len)
+{
+ if (!fwd_info) {
+ diag_ws_release();
+ return -EIO;
+ }
+
+ /*
+ * Diag peripheral layers should send len as 0 if there is any error
+ * in reading data from the transport. Use this information to reset the
+ * in_busy flags. No need to queue read in this case.
+ */
+ if (len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Read Length is 0, resetting the diag buffers p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ diagfwd_reset_buffers(fwd_info, buf);
+ diag_ws_release();
+ return 0;
+ }
+
+	if (fwd_info->c_ops && fwd_info->c_ops->read_done)
+ fwd_info->c_ops->read_done(fwd_info, buf, len);
+ fwd_info->read_bytes += len;
+
+ return 0;
+}
+
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ if (!fwd_info)
+ return;
+
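+	/*
+	 * buf_num mapping, derived from the SET_BUF_CTXT() calls in
+	 * diagfwd_buffers_init(): 1/2 are the core PD double buffers
+	 * (buf_1/buf_2), 3/4 are user PD 1 (buf_upd_1_a/_b), and 5/6
+	 * are user PD 2 (buf_upd_2_a/_b, LPASS only).
+	 */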
+ if (buf_num == 1 && fwd_info->buf_1) {
+ /* Buffer 1 for core PD is freed */
+ fwd_info->cpd_len_1 = 0;
+
+ if (peripheral == PERIPHERAL_LPASS) {
+ if (!fwd_info->upd_len_1_a &&
+ !fwd_info->upd_len_2_a)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ } else if (peripheral == PERIPHERAL_MODEM) {
+ if (!fwd_info->upd_len_1_a)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ } else {
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ }
+ if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num == 2 && fwd_info->buf_2) {
+ /* Buffer 2 for core PD is freed */
+ fwd_info->cpd_len_2 = 0;
+
+ if (peripheral == PERIPHERAL_LPASS) {
+ if (!fwd_info->upd_len_1_b &&
+ !fwd_info->upd_len_2_b)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ } else if (peripheral == PERIPHERAL_MODEM) {
+ if (!fwd_info->upd_len_1_b)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ } else {
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ }
+ if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num == 3 && fwd_info->buf_upd_1_a && fwd_info->buf_1) {
+ /* Buffer 1 for user pd 1 is freed */
+ atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0);
+
+ if (peripheral == PERIPHERAL_LPASS) {
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!fwd_info->cpd_len_1 &&
+ !fwd_info->upd_len_2_a)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ } else {
+ /* if not data in cpd
+ * free the core pd buffer for MPSS
+ */
+ if (!fwd_info->cpd_len_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ }
+ if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ fwd_info->upd_len_1_a = 0;
+
+ } else if (buf_num == 4 && fwd_info->buf_upd_1_b && fwd_info->buf_2) {
+ /* Buffer 2 for user pd 1 is freed */
+ atomic_set(&fwd_info->buf_upd_1_b->in_busy, 0);
+ if (peripheral == PERIPHERAL_LPASS) {
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!fwd_info->cpd_len_2 &&
+ !fwd_info->upd_len_2_b)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ } else {
+ /* if not data in cpd
+ * free the core pd buffer for MPSS
+ */
+ if (!fwd_info->cpd_len_2)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ }
+ if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ fwd_info->upd_len_1_b = 0;
+
+ } else if (buf_num == 5 && fwd_info->buf_upd_2_a && fwd_info->buf_1) {
+ /* Buffer 1 for user pd 2 is freed */
+ atomic_set(&fwd_info->buf_upd_2_a->in_busy, 0);
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!fwd_info->cpd_len_1 &&
+ !fwd_info->upd_len_1_a) {
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+
+ fwd_info->upd_len_2_a = 0;
+
+ } else if (buf_num == 6 && fwd_info->buf_upd_2_b && fwd_info->buf_2) {
+ /* Buffer 2 for user pd 2 is freed */
+ atomic_set(&fwd_info->buf_upd_2_b->in_busy, 0);
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!fwd_info->cpd_len_2 &&
+ !fwd_info->upd_len_1_b) {
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ fwd_info->upd_len_2_b = 0;
+
+ } else
+ pr_err("diag: In %s, invalid buf_num: %d\n", __func__, buf_num);
+
+ diagfwd_queue_read(fwd_info);
+}
+
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
+{
+	int found = 0;
+ int index = 0;
+ unsigned long flags;
+
+ if (!fwd_info || !ptr)
+ return found;
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (index = 0; (index < NUM_WRITE_BUFFERS) && fwd_info->buf_ptr[index];
+ index++) {
+ if (fwd_info->buf_ptr[index]->data == ptr) {
+ atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ return found;
+}
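+
+/*
+ * Illustrative pairing (hypothetical caller): a buffer claimed with
+ * diagfwd_request_write_buf() must be handed back through
+ * diagfwd_write_buffer_done() once the transport consumes it, or on a
+ * failed write, so its in_busy flag is released.
+ *
+ *	buf = diagfwd_request_write_buf(fwd_info);
+ *	if (buf) {
+ *		memcpy(buf, payload, len);
+ *		if (fwd_info->p_ops->write(fwd_info->ctxt, buf, len))
+ *			diagfwd_write_buffer_done(fwd_info, buf);
+ *	}
+ */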
+
+void diagfwd_channel_read(struct diagfwd_info *fwd_info)
+{
+ int err = 0;
+ uint32_t read_len = 0;
+ unsigned char *read_buf = NULL;
+ struct diagfwd_buf_t *temp_buf = NULL;
+
+ if (!fwd_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
+ diag_ws_release();
+ return;
+ }
+
+ if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: p: %d, t: %d, inited: %d, opened: %d, ch_open: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ fwd_info->inited, atomic_read(&fwd_info->opened),
+ fwd_info->ch_open);
+ diag_ws_release();
+ return;
+ }
+
+ if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
+ if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+ (fwd_info->type == TYPE_DATA ||
+ fwd_info->type == TYPE_CMD)) {
+ read_buf = fwd_info->buf_1->data_raw;
+ read_len = fwd_info->buf_1->len_raw;
+ } else {
+ read_buf = fwd_info->buf_1->data;
+ read_len = fwd_info->buf_1->len;
+ }
+ if (read_buf) {
+ temp_buf = fwd_info->buf_1;
+ atomic_set(&temp_buf->in_busy, 1);
+ }
+ } else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
+ if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+ (fwd_info->type == TYPE_DATA ||
+ fwd_info->type == TYPE_CMD)) {
+ read_buf = fwd_info->buf_2->data_raw;
+ read_len = fwd_info->buf_2->len_raw;
+ } else {
+ read_buf = fwd_info->buf_2->data;
+ read_len = fwd_info->buf_2->len;
+ }
+ if (read_buf) {
+ temp_buf = fwd_info->buf_2;
+ atomic_set(&temp_buf->in_busy, 1);
+ }
+ } else {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: both buffers are busy for p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+
+ if (!read_buf) {
+ diag_ws_release();
+ return;
+ }
+
+ if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt))
+ goto fail_return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %pK\n",
+ fwd_info->peripheral, fwd_info->type, read_buf);
+ err = fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len);
+ if (err)
+ goto fail_return;
+
+ return;
+
+fail_return:
+ diag_ws_release();
+ atomic_set(&temp_buf->in_busy, 0);
+ return;
+}
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+ pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type,
+ fwd_info->inited, atomic_read(&fwd_info->opened),
+ fwd_info->ch_open);
+ return;
+ }
+
+ /*
+ * Don't queue a read on the data and command channels before receiving
+ * the feature mask from the peripheral. We won't know which buffer to
+ * use - HDLC or non HDLC buffer for reading.
+ */
+ if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) &&
+ (fwd_info->type != TYPE_CNTL)) {
+ return;
+ }
+
+ if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt)
+ fwd_info->p_ops->queue_read(fwd_info->ctxt);
+}
+
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
+{
+ struct diagfwd_buf_t *temp_fwd_buf;
+ unsigned char *temp_char_buf;
+
+ if (!fwd_info)
+ return;
+
+ if (!fwd_info->inited) {
+ pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return;
+ }
+
+ mutex_lock(&fwd_info->buf_mutex);
+
+ if (!fwd_info->buf_1) {
+ fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_1))
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_1);
+ }
+
+ if (!fwd_info->buf_1->data) {
+ fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_1->data))
+ goto err;
+ fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_1->data);
+ fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral,
+ fwd_info->type, 1);
+ }
+
+ if (fwd_info->type == TYPE_DATA) {
+ if (!fwd_info->buf_2) {
+ fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_2))
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_2);
+ }
+
+ if (!fwd_info->buf_2->data) {
+ fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_2->data))
+ goto err;
+ fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_2->data);
+ fwd_info->buf_2->ctxt = SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 2);
+ }
+
+ if (driver->feature[fwd_info->peripheral].untag_header) {
+ if (!fwd_info->buf_upd_1_a) {
+ fwd_info->buf_upd_1_a =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_upd_1_a))
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_upd_1_a);
+ }
+
+ if (fwd_info->buf_upd_1_a &&
+ !fwd_info->buf_upd_1_a->data) {
+ fwd_info->buf_upd_1_a->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf = fwd_info->buf_upd_1_a->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_1_a->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ fwd_info->buf_upd_1_a->ctxt = SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 3);
+ }
+
+ if (!fwd_info->buf_upd_1_b) {
+ fwd_info->buf_upd_1_b =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_upd_1_b))
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_upd_1_b);
+ }
+
+ if (fwd_info->buf_upd_1_b &&
+ !fwd_info->buf_upd_1_b->data) {
+ fwd_info->buf_upd_1_b->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_1_b->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_1_b->len =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ fwd_info->buf_upd_1_b->ctxt = SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 4);
+ }
+ if (fwd_info->peripheral ==
+ PERIPHERAL_LPASS) {
+ if (!fwd_info->buf_upd_2_a) {
+ fwd_info->buf_upd_2_a =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ temp_fwd_buf =
+ fwd_info->buf_upd_2_a;
+ if (ZERO_OR_NULL_PTR(temp_fwd_buf))
+ goto err;
+ kmemleak_not_leak(temp_fwd_buf);
+ }
+
+ if (!fwd_info->buf_upd_2_a->data) {
+ fwd_info->buf_upd_2_a->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_a->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_a->len =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ fwd_info->buf_upd_2_a->ctxt =
+ SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 5);
+ }
+ if (!fwd_info->buf_upd_2_b) {
+ fwd_info->buf_upd_2_b =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ temp_fwd_buf =
+ fwd_info->buf_upd_2_b;
+ if (ZERO_OR_NULL_PTR(temp_fwd_buf))
+ goto err;
+ kmemleak_not_leak(temp_fwd_buf);
+ }
+
+ if (!fwd_info->buf_upd_2_b->data) {
+ fwd_info->buf_upd_2_b->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_b->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_b->len =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ fwd_info->buf_upd_2_b->ctxt =
+ SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 6);
+ }
+ }
+ }
+
+ if (driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (!fwd_info->buf_1->data_raw) {
+ fwd_info->buf_1->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_1->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_1->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+
+ if (!fwd_info->buf_2->data_raw) {
+ fwd_info->buf_2->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_2->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_2->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+
+ if (driver->feature[fwd_info->peripheral].
+ untag_header) {
+ if (fwd_info->buf_upd_1_a &&
+ !fwd_info->buf_upd_1_a->data_raw) {
+ fwd_info->buf_upd_1_a->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_1_a->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_1_a->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+
+ if (fwd_info->buf_upd_1_b &&
+ !fwd_info->buf_upd_1_b->data_raw) {
+ fwd_info->buf_upd_1_b->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_1_b->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_1_b->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+ if (fwd_info->peripheral == PERIPHERAL_LPASS
+ && !fwd_info->buf_upd_2_a->data_raw) {
+ fwd_info->buf_upd_2_a->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_a->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_a->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+ if (fwd_info->peripheral == PERIPHERAL_LPASS
+ && !fwd_info->buf_upd_2_b->data_raw) {
+ fwd_info->buf_upd_2_b->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_b->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_b->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+ }
+ }
+ }
+
+ if (fwd_info->type == TYPE_CMD &&
+ driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (!fwd_info->buf_1->data_raw) {
+ fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_1->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+ }
+
+ mutex_unlock(&fwd_info->buf_mutex);
+ return;
+
+err:
+ mutex_unlock(&fwd_info->buf_mutex);
+ diagfwd_buffers_exit(fwd_info);
+
+ return;
+}
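+
+/*
+ * Summary of the allocations above (illustrative): every channel gets
+ * buf_1; TYPE_DATA channels also get buf_2 and, when the peripheral
+ * advertises untag_header support, the user-PD buffers (buf_upd_1_a/_b,
+ * plus buf_upd_2_a/_b for LPASS). When the apps support HDLC encoding,
+ * each of these buffers additionally gets a data_raw area of the same
+ * size to hold the pre-encoding payload.
+ */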
+
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+ return;
+
+ mutex_lock(&fwd_info->buf_mutex);
+ if (fwd_info->buf_1) {
+ kfree(fwd_info->buf_1->data);
+ fwd_info->buf_1->data = NULL;
+ kfree(fwd_info->buf_1->data_raw);
+ fwd_info->buf_1->data_raw = NULL;
+ kfree(fwd_info->buf_1);
+ fwd_info->buf_1 = NULL;
+ }
+ if (fwd_info->buf_2) {
+ kfree(fwd_info->buf_2->data);
+ fwd_info->buf_2->data = NULL;
+ kfree(fwd_info->buf_2->data_raw);
+ fwd_info->buf_2->data_raw = NULL;
+ kfree(fwd_info->buf_2);
+ fwd_info->buf_2 = NULL;
+ }
+ if (fwd_info->buf_upd_1_a) {
+ kfree(fwd_info->buf_upd_1_a->data);
+ fwd_info->buf_upd_1_a->data = NULL;
+ kfree(fwd_info->buf_upd_1_a->data_raw);
+ fwd_info->buf_upd_1_a->data_raw = NULL;
+ kfree(fwd_info->buf_upd_1_a);
+ fwd_info->buf_upd_1_a = NULL;
+ }
+ if (fwd_info->buf_upd_1_b) {
+ kfree(fwd_info->buf_upd_1_b->data);
+ fwd_info->buf_upd_1_b->data = NULL;
+ kfree(fwd_info->buf_upd_1_b->data_raw);
+ fwd_info->buf_upd_1_b->data_raw = NULL;
+ kfree(fwd_info->buf_upd_1_b);
+ fwd_info->buf_upd_1_b = NULL;
+ }
+ if (fwd_info->buf_upd_2_a) {
+ kfree(fwd_info->buf_upd_2_a->data);
+ fwd_info->buf_upd_2_a->data = NULL;
+ kfree(fwd_info->buf_upd_2_a->data_raw);
+ fwd_info->buf_upd_2_a->data_raw = NULL;
+ kfree(fwd_info->buf_upd_2_a);
+ fwd_info->buf_upd_2_a = NULL;
+ }
+ if (fwd_info->buf_upd_2_b) {
+ kfree(fwd_info->buf_upd_2_b->data);
+ fwd_info->buf_upd_2_b->data = NULL;
+ kfree(fwd_info->buf_upd_2_b->data_raw);
+ fwd_info->buf_upd_2_b->data_raw = NULL;
+ kfree(fwd_info->buf_upd_2_b);
+ fwd_info->buf_upd_2_b = NULL;
+ }
+ mutex_unlock(&fwd_info->buf_mutex);
+}
+
+void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+ int i;
+
+ if (!fwd_info)
+ return;
+
+ if (!fwd_info->inited) {
+ pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return;
+ }
+
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (!fwd_info->buf_ptr[i])
+ fwd_info->buf_ptr[i] =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_ATOMIC);
+ if (!fwd_info->buf_ptr[i])
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_ptr[i]);
+ if (!fwd_info->buf_ptr[i]->data) {
+ fwd_info->buf_ptr[i]->data = kzalloc(PERIPHERAL_BUF_SZ,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_ptr[i]->data)
+ goto err;
+ fwd_info->buf_ptr[i]->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ return;
+
+err:
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ pr_err("diag:unable to allocate write buffers\n");
+ diagfwd_write_buffers_exit(fwd_info);
+
+}
+
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+ int i;
+
+ if (!fwd_info)
+ return;
+
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i]) {
+ kfree(fwd_info->buf_ptr[i]->data);
+ fwd_info->buf_ptr[i]->data = NULL;
+ kfree(fwd_info->buf_ptr[i]);
+ fwd_info->buf_ptr[i] = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+}
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
new file mode 100644
index 000000000000..00621c178906
--- /dev/null
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_PERIPHERAL_H
+#define DIAGFWD_PERIPHERAL_H
+
+#define PERIPHERAL_BUF_SZ 16384
+#define MAX_PERIPHERAL_BUF_SZ 32768
+#define MAX_PERIPHERAL_HDLC_BUF_SZ 65539
+
+#define TRANSPORT_UNKNOWN -1
+#define TRANSPORT_SMD 0
+#define TRANSPORT_SOCKET 1
+#define TRANSPORT_GLINK 2
+#define NUM_TRANSPORT 3
+#define NUM_WRITE_BUFFERS 2
+#define PERIPHERAL_MASK(x) \
+ ((x == PERIPHERAL_MODEM) ? DIAG_CON_MPSS : \
+ ((x == PERIPHERAL_LPASS) ? DIAG_CON_LPASS : \
+ ((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS : \
+ ((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : \
+ ((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : \
+	((x == PERIPHERAL_CDSP) ? DIAG_CON_CDSP : 0))))))
+
+#define PERIPHERAL_STRING(x) \
+ ((x == PERIPHERAL_MODEM) ? "MODEM" : \
+ ((x == PERIPHERAL_LPASS) ? "LPASS" : \
+ ((x == PERIPHERAL_WCNSS) ? "WCNSS" : \
+ ((x == PERIPHERAL_SENSORS) ? "SENSORS" : \
+ ((x == PERIPHERAL_WDSP) ? "WDSP" : \
+	((x == PERIPHERAL_CDSP) ? "CDSP" : "UNKNOWN"))))))
+
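+/*
+ * Worked example (illustrative only): the nested ternary chains above
+ * map a peripheral ID to its connection mask or printable name:
+ *
+ *	PERIPHERAL_MASK(PERIPHERAL_WCNSS)   -> DIAG_CON_WCNSS
+ *	PERIPHERAL_STRING(PERIPHERAL_CDSP)  -> "CDSP"
+ *
+ * Any other value evaluates to 0 or "UNKNOWN" respectively.
+ */
+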
+struct diagfwd_buf_t {
+ unsigned char *data;
+ unsigned char *data_raw;
+ uint32_t len;
+ uint32_t len_raw;
+ atomic_t in_busy;
+ int ctxt;
+};
+
+struct diag_channel_ops {
+ void (*open)(struct diagfwd_info *fwd_info);
+ void (*close)(struct diagfwd_info *fwd_info);
+ void (*read_done)(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+};
+
+struct diag_peripheral_ops {
+ void (*open)(void *ctxt);
+ void (*close)(void *ctxt);
+ int (*write)(void *ctxt, unsigned char *buf, int len);
+ int (*read)(void *ctxt, unsigned char *buf, int len);
+ void (*queue_read)(void *ctxt);
+};
+
+struct diagfwd_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t transport;
+ uint8_t inited;
+ uint8_t ch_open;
+ atomic_t opened;
+ unsigned long read_bytes;
+ unsigned long write_bytes;
+ spinlock_t write_buf_lock;
+ struct mutex buf_mutex;
+ struct mutex data_mutex;
+ void *ctxt;
+ struct diagfwd_buf_t *buf_1;
+ struct diagfwd_buf_t *buf_2;
+ struct diagfwd_buf_t *buf_upd_1_a;
+ struct diagfwd_buf_t *buf_upd_1_b;
+ struct diagfwd_buf_t *buf_upd_2_a;
+ struct diagfwd_buf_t *buf_upd_2_b;
+ struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
+ int cpd_len_1;
+ int cpd_len_2;
+ int upd_len_1_a;
+ int upd_len_1_b;
+ int upd_len_2_a;
+ int upd_len_2_b;
+ struct diag_peripheral_ops *p_ops;
+ struct diag_channel_ops *c_ops;
+};
+
+extern struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+int diagfwd_peripheral_init(void);
+void diagfwd_peripheral_exit(void);
+
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral);
+
+void diagfwd_open(uint8_t peripheral, uint8_t type);
+void diagfwd_early_open(uint8_t peripheral);
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info);
+void diagfwd_close(uint8_t peripheral, uint8_t type);
+
+int diag_md_get_peripheral(int ctxt);
+
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+ void *ctxt, struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt);
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+ struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt);
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num);
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
+
+/*
+ * The following functions are called by the channels
+ */
+int diagfwd_channel_open(struct diagfwd_info *fwd_info);
+int diagfwd_channel_close(struct diagfwd_info *fwd_info);
+void diagfwd_channel_read(struct diagfwd_info *fwd_info);
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, uint32_t len);
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr);
+
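+/*
+ * Usage sketch (illustrative; the my_* names are hypothetical): a
+ * transport plugs into this layer by filling in diag_peripheral_ops
+ * and registering each channel it services:
+ *
+ *	static struct diag_peripheral_ops my_ops = {
+ *		.open = my_open,
+ *		.close = my_close,
+ *		.write = my_write,
+ *		.read = my_read,
+ *		.queue_read = my_queue_read,
+ *	};
+ *
+ *	diagfwd_register(TRANSPORT_SOCKET, PERIPHERAL_MODEM, TYPE_DATA,
+ *			 my_ctxt, &my_ops, &my_fwd_ctxt);
+ *
+ * The forwarding core then calls back through these ops, as the SMD
+ * and socket transports in this patch do.
+ */
+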
+#endif
diff --git a/drivers/char/diag/diagfwd_smd.c b/drivers/char/diag/diagfwd_smd.c
new file mode 100644
index 000000000000..51ab58b99fdd
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smd.c
@@ -0,0 +1,898 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_smd.h"
+#include "diag_ipc_logging.h"
+
+struct diag_smd_info smd_data[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DATA,
+ .name = "MODEM_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DATA,
+ .name = "LPASS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DATA,
+ .name = "WCNSS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DATA,
+ .name = "SENSORS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .name = "DIAG_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DATA,
+ .name = "CDSP_DATA"
+ }
+};
+
+struct diag_smd_info smd_cntl[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CNTL,
+ .name = "MODEM_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CNTL,
+ .name = "LPASS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CNTL,
+ .name = "WCNSS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CNTL,
+ .name = "SENSORS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .name = "DIAG_CTRL"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CNTL,
+ .name = "CDSP_CNTL"
+ }
+};
+
+struct diag_smd_info smd_dci[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI,
+ .name = "MODEM_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI,
+ .name = "LPASS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI,
+ .name = "WCNSS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI,
+ .name = "SENSORS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .name = "DIAG_DCI_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI,
+ .name = "CDSP_DCI"
+ }
+};
+
+struct diag_smd_info smd_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CMD,
+ .name = "MODEM_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CMD,
+ .name = "LPASS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CMD,
+ .name = "WCNSS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CMD,
+ .name = "SENSORS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .name = "DIAG_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CMD,
+ .name = "CDSP_CMD"
+ }
+};
+
+struct diag_smd_info smd_dci_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI_CMD,
+ .name = "MODEM_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI_CMD,
+ .name = "LPASS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI_CMD,
+ .name = "WCNSS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI_CMD,
+ .name = "SENSORS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "DIAG_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "CDSP_DCI_CMD"
+ }
+};
+
+static void diag_state_open_smd(void *ctxt);
+static void diag_state_close_smd(void *ctxt);
+static void smd_notify(void *ctxt, unsigned event);
+static int diag_smd_write(void *ctxt, unsigned char *buf, int len);
+static int diag_smd_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_smd_queue_read(void *ctxt);
+
+static struct diag_peripheral_ops smd_ops = {
+ .open = diag_state_open_smd,
+ .close = diag_state_close_smd,
+ .write = diag_smd_write,
+ .read = diag_smd_read,
+ .queue_read = diag_smd_queue_read
+};
+
+static void diag_state_open_smd(void *ctxt)
+{
+ struct diag_smd_info *smd_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ smd_info = (struct diag_smd_info *)(ctxt);
+ atomic_set(&smd_info->diag_state, 1);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 1", smd_info->name);
+}
+
+static void diag_state_close_smd(void *ctxt)
+{
+ struct diag_smd_info *smd_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ smd_info = (struct diag_smd_info *)(ctxt);
+ atomic_set(&smd_info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 0", smd_info->name);
+ wake_up_interruptible(&smd_info->read_wait_q);
+ flush_workqueue(smd_info->wq);
+}
+
+static int smd_channel_probe(struct platform_device *pdev, uint8_t type)
+{
+ int r = 0;
+ int index = -1;
+ const char *channel_name = NULL;
+ struct diag_smd_info *smd_info = NULL;
+
+ switch (pdev->id) {
+ case SMD_APPS_MODEM:
+ index = PERIPHERAL_MODEM;
+ break;
+ case SMD_APPS_QDSP:
+ index = PERIPHERAL_LPASS;
+ break;
+ case SMD_APPS_WCNSS:
+ index = PERIPHERAL_WCNSS;
+ break;
+ case SMD_APPS_DSPS:
+ index = PERIPHERAL_SENSORS;
+ break;
+ default:
+ pr_debug("diag: In %s Received probe for invalid index %d",
+ __func__, pdev->id);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case TYPE_DATA:
+ smd_info = &smd_data[index];
+ channel_name = "DIAG";
+ break;
+ case TYPE_CNTL:
+ smd_info = &smd_cntl[index];
+ channel_name = "DIAG_CNTL";
+ break;
+ case TYPE_CMD:
+ smd_info = &smd_cmd[index];
+ channel_name = "DIAG_CMD";
+ break;
+ case TYPE_DCI:
+ smd_info = &smd_dci[index];
+ channel_name = "DIAG_2";
+ break;
+ case TYPE_DCI_CMD:
+ smd_info = &smd_dci_cmd[index];
+ channel_name = "DIAG_2_CMD";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (index == PERIPHERAL_WCNSS && type == TYPE_DATA)
+ channel_name = "APPS_RIVA_DATA";
+ else if (index == PERIPHERAL_WCNSS && type == TYPE_CNTL)
+ channel_name = "APPS_RIVA_CTRL";
+
+ if (!channel_name || !smd_info)
+ return -EIO;
+
+ r = smd_named_open_on_edge(channel_name, pdev->id, &smd_info->hdl,
+ smd_info, smd_notify);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pr_debug("diag: In %s, SMD port probed %s, id = %d, r = %d\n",
+ __func__, smd_info->name, pdev->id, r);
+
+ return 0;
+}
+
+static int smd_data_probe(struct platform_device *pdev)
+{
+ return smd_channel_probe(pdev, TYPE_DATA);
+}
+
+static int smd_cntl_probe(struct platform_device *pdev)
+{
+ return smd_channel_probe(pdev, TYPE_CNTL);
+}
+
+static int smd_cmd_probe(struct platform_device *pdev)
+{
+ return smd_channel_probe(pdev, TYPE_CMD);
+}
+
+static int smd_dci_probe(struct platform_device *pdev)
+{
+ return smd_channel_probe(pdev, TYPE_DCI);
+}
+
+static int smd_dci_cmd_probe(struct platform_device *pdev)
+{
+ return smd_channel_probe(pdev, TYPE_DCI_CMD);
+}
+
+static int smd_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int smd_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static const struct dev_pm_ops smd_dev_pm_ops = {
+ .runtime_suspend = smd_runtime_suspend,
+ .runtime_resume = smd_runtime_resume,
+};
+
+static struct platform_driver diag_smd_ch_driver = {
+ .probe = smd_data_probe,
+ .driver = {
+ .name = "DIAG",
+ .owner = THIS_MODULE,
+ .pm = &smd_dev_pm_ops,
+ },
+};
+
+static struct platform_driver diag_smd_lite_driver = {
+ .probe = smd_data_probe,
+ .driver = {
+ .name = "APPS_RIVA_DATA",
+ .owner = THIS_MODULE,
+ .pm = &smd_dev_pm_ops,
+ },
+};
+
+static struct platform_driver diag_smd_cntl_driver = {
+ .probe = smd_cntl_probe,
+ .driver = {
+ .name = "DIAG_CNTL",
+ .owner = THIS_MODULE,
+ .pm = &smd_dev_pm_ops,
+ },
+};
+
+static struct platform_driver diag_smd_lite_cntl_driver = {
+ .probe = smd_cntl_probe,
+ .driver = {
+ .name = "APPS_RIVA_CTRL",
+ .owner = THIS_MODULE,
+ .pm = &smd_dev_pm_ops,
+ },
+};
+
+static struct platform_driver diag_smd_lite_cmd_driver = {
+ .probe = smd_cmd_probe,
+ .driver = {
+ .name = "DIAG_CMD",
+ .owner = THIS_MODULE,
+ .pm = &smd_dev_pm_ops,
+ }
+};
+
+static struct platform_driver diag_smd_dci_driver = {
+ .probe = smd_dci_probe,
+ .driver = {
+ .name = "DIAG_2",
+ .owner = THIS_MODULE,
+ .pm = &smd_dev_pm_ops,
+ },
+};
+
+static struct platform_driver diag_smd_dci_cmd_driver = {
+ .probe = smd_dci_cmd_probe,
+ .driver = {
+ .name = "DIAG_2_CMD",
+ .owner = THIS_MODULE,
+ .pm = &smd_dev_pm_ops,
+ },
+};
+
+static void smd_open_work_fn(struct work_struct *work)
+{
+ struct diag_smd_info *smd_info = container_of(work,
+ struct diag_smd_info,
+ open_work);
+ if (!smd_info->inited)
+ return;
+
+ diagfwd_channel_open(smd_info->fwd_ctxt);
+ diagfwd_late_open(smd_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ smd_info->name);
+}
+
+static void smd_close_work_fn(struct work_struct *work)
+{
+ struct diag_smd_info *smd_info = container_of(work,
+ struct diag_smd_info,
+ close_work);
+ if (!smd_info->inited)
+ return;
+
+ diagfwd_channel_close(smd_info->fwd_ctxt);
+ wake_up_interruptible(&smd_info->read_wait_q);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ smd_info->name);
+}
+
+static void smd_read_work_fn(struct work_struct *work)
+{
+ struct diag_smd_info *smd_info = container_of(work,
+ struct diag_smd_info,
+ read_work);
+ if (!smd_info->inited) {
+ diag_ws_release();
+ return;
+ }
+
+ diagfwd_channel_read(smd_info->fwd_ctxt);
+}
+
+static void diag_smd_queue_read(void *ctxt)
+{
+ struct diag_smd_info *smd_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ smd_info = (struct diag_smd_info *)ctxt;
+ if (smd_info->inited && atomic_read(&smd_info->opened) &&
+ smd_info->hdl) {
+ wake_up_interruptible(&smd_info->read_wait_q);
+ queue_work(smd_info->wq, &(smd_info->read_work));
+ }
+}
+
+int diag_smd_check_state(void *ctxt)
+{
+ struct diag_smd_info *info = NULL;
+
+ if (!ctxt)
+ return 0;
+
+ info = (struct diag_smd_info *)ctxt;
+ return (int)(atomic_read(&info->diag_state));
+}
+
+void diag_smd_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+ struct diag_smd_info *smd_info = NULL;
+ void *prev = NULL;
+
+ if (!ctxt || !fwd_ctxt)
+ return;
+
+ smd_info = (struct diag_smd_info *)ctxt;
+ prev = smd_info->fwd_ctxt;
+ smd_info->fwd_ctxt = fwd_ctxt;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s prev: %pK fwd_ctxt: %pK\n",
+ smd_info->name, prev, smd_info->fwd_ctxt);
+}
+
+static void __diag_smd_init(struct diag_smd_info *smd_info)
+{
+	char wq_name[DIAG_SMD_NAME_SZ + 10];
+
+ if (!smd_info)
+ return;
+
+ init_waitqueue_head(&smd_info->read_wait_q);
+ mutex_init(&smd_info->lock);
+	strlcpy(wq_name, "DIAG_SMD_", sizeof(wq_name));
+	strlcat(wq_name, smd_info->name, sizeof(wq_name));
+ smd_info->wq = create_singlethread_workqueue(wq_name);
+ if (!smd_info->wq) {
+ pr_err("diag: In %s, unable to create workqueue for smd channel %s\n",
+ __func__, smd_info->name);
+ return;
+ }
+ INIT_WORK(&(smd_info->open_work), smd_open_work_fn);
+ INIT_WORK(&(smd_info->close_work), smd_close_work_fn);
+ INIT_WORK(&(smd_info->read_work), smd_read_work_fn);
+ smd_info->fifo_size = 0;
+ smd_info->hdl = NULL;
+ smd_info->fwd_ctxt = NULL;
+ atomic_set(&smd_info->opened, 0);
+ atomic_set(&smd_info->diag_state, 0);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s initialized fwd_ctxt: %pK\n",
+ smd_info->name, smd_info->fwd_ctxt);
+}
+
+int diag_smd_init(void)
+{
+ uint8_t peripheral;
+ struct diag_smd_info *smd_info = NULL;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ smd_info = &smd_cntl[peripheral];
+ __diag_smd_init(smd_info);
+ diagfwd_cntl_register(TRANSPORT_SMD, smd_info->peripheral,
+ (void *)smd_info, &smd_ops,
+ &smd_info->fwd_ctxt);
+ smd_info->inited = 1;
+ __diag_smd_init(&smd_data[peripheral]);
+ __diag_smd_init(&smd_cmd[peripheral]);
+ __diag_smd_init(&smd_dci[peripheral]);
+ __diag_smd_init(&smd_dci_cmd[peripheral]);
+ }
+
+ platform_driver_register(&diag_smd_cntl_driver);
+ platform_driver_register(&diag_smd_lite_cntl_driver);
+ platform_driver_register(&diag_smd_ch_driver);
+ platform_driver_register(&diag_smd_lite_driver);
+ platform_driver_register(&diag_smd_lite_cmd_driver);
+ platform_driver_register(&diag_smd_dci_driver);
+ platform_driver_register(&diag_smd_dci_cmd_driver);
+
+ return 0;
+}
+
+static void smd_late_init(struct diag_smd_info *smd_info)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+ if (!smd_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ smd_info->name);
+
+ diagfwd_register(TRANSPORT_SMD, smd_info->peripheral, smd_info->type,
+ (void *)smd_info, &smd_ops, &smd_info->fwd_ctxt);
+ fwd_info = smd_info->fwd_ctxt;
+ smd_info->inited = 1;
+	/*
+	 * The channel may already have been opened by the probe call on
+	 * behalf of another peripheral. Inform the diag fwd layer that the
+	 * channel is open.
+	 */
+ if (atomic_read(&smd_info->opened))
+ diagfwd_channel_open(smd_info->fwd_ctxt);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ smd_info->name);
+}
+
+int diag_smd_init_peripheral(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n",
+ __func__, peripheral);
+ return -EINVAL;
+ }
+
+ smd_late_init(&smd_data[peripheral]);
+ smd_late_init(&smd_dci[peripheral]);
+ smd_late_init(&smd_cmd[peripheral]);
+ smd_late_init(&smd_dci_cmd[peripheral]);
+
+ return 0;
+}
+
+static void __diag_smd_exit(struct diag_smd_info *smd_info)
+{
+ if (!smd_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ smd_info->name);
+
+ diagfwd_deregister(smd_info->peripheral, smd_info->type,
+ (void *)smd_info);
+ smd_info->fwd_ctxt = NULL;
+ smd_info->hdl = NULL;
+ if (smd_info->wq)
+ destroy_workqueue(smd_info->wq);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ smd_info->name);
+}
+
+void diag_smd_early_exit(void)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ __diag_smd_exit(&smd_cntl[i]);
+
+ platform_driver_unregister(&diag_smd_cntl_driver);
+ platform_driver_unregister(&diag_smd_lite_cntl_driver);
+}
+
+void diag_smd_exit(void)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ __diag_smd_exit(&smd_data[i]);
+ __diag_smd_exit(&smd_cmd[i]);
+ __diag_smd_exit(&smd_dci[i]);
+ __diag_smd_exit(&smd_dci_cmd[i]);
+ }
+
+ platform_driver_unregister(&diag_smd_ch_driver);
+ platform_driver_unregister(&diag_smd_lite_driver);
+ platform_driver_unregister(&diag_smd_lite_cmd_driver);
+ platform_driver_unregister(&diag_smd_dci_driver);
+ platform_driver_unregister(&diag_smd_dci_cmd_driver);
+}
+
+static int diag_smd_write_ext(struct diag_smd_info *smd_info,
+ unsigned char *buf, int len)
+{
+ int err = 0;
+ int offset = 0;
+ int write_len = 0;
+ int retry_count = 0;
+ int max_retries = 3;
+ uint8_t avail = 0;
+
+ if (!smd_info || !buf || len <= 0) {
+ pr_err_ratelimited("diag: In %s, invalid params, smd_info: %pK, buf: %pK, len: %d\n",
+ __func__, smd_info, buf, len);
+ return -EINVAL;
+ }
+
+ if (!smd_info->inited || !smd_info->hdl ||
+ !atomic_read(&smd_info->opened))
+ return -ENODEV;
+
+ mutex_lock(&smd_info->lock);
+ err = smd_write_start(smd_info->hdl, len);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, error calling smd_write_start, peripheral: %d, err: %d\n",
+ __func__, smd_info->peripheral, err);
+ goto fail;
+ }
+
+ while (offset < len) {
+		retry_count = 0;
+		avail = 0;
+ do {
+ if (smd_write_segment_avail(smd_info->hdl)) {
+ avail = 1;
+ break;
+ }
+			/*
+			 * The channel may be busy - the FIFO can be full.
+			 * Retry after some time. The value of 10000 was
+			 * chosen empirically as the optimal delay for the
+			 * peripherals to read data from the SMD channel.
+			 */
+ usleep_range(10000, 10100);
+ retry_count++;
+ } while (retry_count < max_retries);
+
+ if (!avail) {
+ err = -EAGAIN;
+ goto fail;
+ }
+
+ write_len = smd_write_segment(smd_info->hdl, buf + offset,
+ (len - offset));
+ offset += write_len;
+ write_len = 0;
+ }
+
+ err = smd_write_end(smd_info->hdl);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, error calling smd_write_end, peripheral: %d, err: %d\n",
+ __func__, smd_info->peripheral, err);
+ goto fail;
+ }
+
+fail:
+ mutex_unlock(&smd_info->lock);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s wrote to channel, write_len: %d, err: %d\n",
+ smd_info->name, offset, err);
+ return err;
+}
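+
+/*
+ * Sketch (illustrative) of the segmented-write protocol implemented
+ * above for payloads larger than the channel FIFO:
+ *
+ *	smd_write_start(hdl, len);
+ *	while (offset < len)
+ *		offset += smd_write_segment(hdl, buf + offset, len - offset);
+ *	smd_write_end(hdl);
+ *
+ * with a bounded sleep-and-retry whenever no segment space is free.
+ */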
+
+static int diag_smd_write(void *ctxt, unsigned char *buf, int len)
+{
+ int write_len = 0;
+ int retry_count = 0;
+ int max_retries = 3;
+ struct diag_smd_info *smd_info = NULL;
+
+ if (!ctxt || !buf)
+ return -EIO;
+
+ smd_info = (struct diag_smd_info *)ctxt;
+ if (!smd_info || !buf || len <= 0) {
+ pr_err_ratelimited("diag: In %s, invalid params, smd_info: %pK, buf: %pK, len: %d\n",
+ __func__, smd_info, buf, len);
+ return -EINVAL;
+ }
+
+ if (!smd_info->inited || !smd_info->hdl ||
+ !atomic_read(&smd_info->opened))
+ return -ENODEV;
+
+ if (len > smd_info->fifo_size)
+ return diag_smd_write_ext(smd_info, buf, len);
+
+ do {
+ mutex_lock(&smd_info->lock);
+ write_len = smd_write(smd_info->hdl, buf, len);
+ mutex_unlock(&smd_info->lock);
+ if (write_len == len)
+ break;
+		/*
+		 * The channel may be busy - the FIFO can be full. Retry
+		 * after some time. The value of 10000 was chosen
+		 * empirically as the optimal delay for the peripherals to
+		 * read data from the SMD channel.
+		 */
+ usleep_range(10000, 10100);
+ retry_count++;
+ } while (retry_count < max_retries);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to channel, write_len: %d\n",
+ smd_info->name, write_len);
+
+ if (write_len != len)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int diag_smd_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+ int pkt_len = 0;
+ int err = 0;
+ int total_recd_partial = 0;
+ int total_recd = 0;
+ uint8_t buf_full = 0;
+ unsigned char *temp_buf = NULL;
+ uint32_t read_len = 0;
+ struct diag_smd_info *smd_info = NULL;
+
+ if (!ctxt || !buf || buf_len <= 0)
+ return -EIO;
+
+ smd_info = (struct diag_smd_info *)ctxt;
+ if (!smd_info->hdl || !smd_info->inited ||
+ !atomic_read(&smd_info->opened))
+ return -EIO;
+
+	/*
+	 * Always try to read the data when a notification is received from
+	 * SMD. If the packet size is 0, release the wake source hold taken
+	 * earlier.
+	 */
+ err = wait_event_interruptible(smd_info->read_wait_q,
+ (smd_info->hdl != NULL) &&
+ (atomic_read(&smd_info->opened) == 1));
+ if (err) {
+ diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
+ return -ERESTARTSYS;
+ }
+
+	/*
+	 * Diag state is closed. Reset the buffers and release the wake
+	 * source hold taken earlier.
+	 */
+ if (atomic_read(&smd_info->diag_state) == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s closing read thread. diag state is closed\n",
+ smd_info->name);
+ diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
+ return 0;
+ }
+
+ if (!smd_info->hdl || !atomic_read(&smd_info->opened)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s stopping read, hdl: %pK, opened: %d\n",
+ smd_info->name, smd_info->hdl,
+ atomic_read(&smd_info->opened));
+ goto fail_return;
+ }
+
+ do {
+ total_recd_partial = 0;
+ temp_buf = buf + total_recd;
+ pkt_len = smd_cur_packet_size(smd_info->hdl);
+ if (pkt_len <= 0)
+ break;
+
+ if (total_recd + pkt_len > buf_len) {
+ buf_full = 1;
+ break;
+ }
+
+ while (total_recd_partial < pkt_len) {
+ read_len = smd_read_avail(smd_info->hdl);
+ if (!read_len) {
+ wait_event_interruptible(smd_info->read_wait_q,
+ ((atomic_read(&smd_info->opened)) &&
+ smd_read_avail(smd_info->hdl)));
+
+ if (!smd_info->hdl ||
+ !atomic_read(&smd_info->opened)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s exiting from wait",
+ smd_info->name);
+ goto fail_return;
+ }
+				/* data may be available now; recheck */
+				continue;
+			}
+
+ if (pkt_len < read_len)
+ goto fail_return;
+
+ smd_read(smd_info->hdl, temp_buf, read_len);
+ total_recd_partial += read_len;
+ total_recd += read_len;
+ temp_buf += read_len;
+ }
+ } while (pkt_len > 0);
+
+ if ((smd_info->type == TYPE_DATA && pkt_len) || buf_full)
+ err = queue_work(smd_info->wq, &(smd_info->read_work));
+
+ if (total_recd > 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
+ smd_info->name, total_recd);
+ diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, total_recd);
+ } else {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
+ smd_info->name, total_recd);
+ goto fail_return;
+ }
+ return 0;
+
+fail_return:
+ diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
+ return -EINVAL;
+}
+
+static void smd_notify(void *ctxt, unsigned event)
+{
+ struct diag_smd_info *smd_info = NULL;
+
+ smd_info = (struct diag_smd_info *)ctxt;
+ if (!smd_info)
+ return;
+
+ switch (event) {
+ case SMD_EVENT_OPEN:
+ atomic_set(&smd_info->opened, 1);
+ smd_info->fifo_size = smd_write_avail(smd_info->hdl);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s channel opened\n",
+ smd_info->name);
+ queue_work(smd_info->wq, &(smd_info->open_work));
+ break;
+ case SMD_EVENT_CLOSE:
+ atomic_set(&smd_info->opened, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s channel closed\n",
+ smd_info->name);
+ queue_work(smd_info->wq, &(smd_info->close_work));
+ break;
+ case SMD_EVENT_DATA:
+ diag_ws_on_notify();
+ queue_work(smd_info->wq, &(smd_info->read_work));
+ break;
+ }
+
+ wake_up_interruptible(&smd_info->read_wait_q);
+}
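+
+/*
+ * Event flow (illustrative summary): smd_notify() runs in SMD callback
+ * context, so it only updates atomic state and defers the real work to
+ * the channel workqueue:
+ *
+ *	SMD_EVENT_OPEN  -> opened = 1, open_work  -> diagfwd_channel_open()
+ *	SMD_EVENT_CLOSE -> opened = 0, close_work -> diagfwd_channel_close()
+ *	SMD_EVENT_DATA  -> diag_ws_on_notify(), read_work -> diagfwd_channel_read()
+ */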
+
diff --git a/drivers/char/diag/diagfwd_smd.h b/drivers/char/diag/diagfwd_smd.h
new file mode 100644
index 000000000000..44453734ae9c
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smd.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SMD_H
+#define DIAGFWD_SMD_H
+
+#define DIAG_SMD_NAME_SZ 24
+#define SMD_DRAIN_BUF_SIZE 4096
+
+struct diag_smd_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t inited;
+ atomic_t opened;
+ atomic_t diag_state;
+ uint32_t fifo_size;
+ smd_channel_t *hdl;
+ char name[DIAG_SMD_NAME_SZ];
+ struct mutex lock;
+ wait_queue_head_t read_wait_q;
+ struct workqueue_struct *wq;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct work_struct read_work;
+ struct diagfwd_info *fwd_ctxt;
+};
+
+extern struct diag_smd_info smd_data[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_cntl[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_dci[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_cmd[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_dci_cmd[NUM_PERIPHERALS];
+
+int diag_smd_init_peripheral(uint8_t peripheral);
+void diag_smd_exit(void);
+int diag_smd_init(void);
+void diag_smd_early_exit(void);
+void diag_smd_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_smd_check_state(void *ctxt);
+
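+/*
+ * Expected call order (sketch, inferred from diagfwd_smd.c):
+ *
+ *	diag_smd_init();		boot: cntl channels and drivers
+ *	diag_smd_init_peripheral(p);	when peripheral p announces itself
+ *	diag_smd_early_exit();		tear down the cntl channels
+ *	diag_smd_exit();		tear down the remaining channels
+ */
+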
+#endif
diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c
new file mode 100644
index 000000000000..62974383e0a7
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smux.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2012,2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/termios.h>
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/usbdiag.h>
+
+#include "diagchar.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_smux.h"
+
+struct diag_smux_info diag_smux[NUM_SMUX_DEV] = {
+ {
+ .id = SMUX_1,
+ .lcid = SMUX_USB_DIAG_0,
+ .dev_id = DIAGFWD_SMUX,
+ .name = "SMUX_1",
+ .read_buf = NULL,
+ .read_len = 0,
+ .in_busy = 0,
+ .enabled = 0,
+ .opened = 0,
+ },
+};
+
+static void diag_smux_event(void *priv, int event_type, const void *metadata)
+{
+ int len = 0;
+	int id = (int)(uintptr_t)priv;
+ unsigned char *rx_buf = NULL;
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return;
+
+ ch = &diag_smux[id];
+ if (metadata) {
+ len = ((struct smux_meta_read *)metadata)->len;
+ rx_buf = ((struct smux_meta_read *)metadata)->buffer;
+ }
+
+ switch (event_type) {
+ case SMUX_CONNECTED:
+ pr_info("diag: SMUX_CONNECTED received, ch: %d\n", ch->id);
+ ch->opened = 1;
+ ch->in_busy = 0;
+ break;
+ case SMUX_DISCONNECTED:
+ ch->opened = 0;
+ msm_smux_close(ch->lcid);
+ pr_info("diag: SMUX_DISCONNECTED received, ch: %d\n", ch->id);
+ break;
+ case SMUX_WRITE_DONE:
+ pr_debug("diag: SMUX Write done, ch: %d\n", ch->id);
+ diag_remote_dev_write_done(ch->dev_id, rx_buf, len, ch->id);
+ break;
+ case SMUX_WRITE_FAIL:
+ pr_info("diag: SMUX Write Failed, ch: %d\n", ch->id);
+ break;
+ case SMUX_READ_FAIL:
+ pr_info("diag: SMUX Read Failed, ch: %d\n", ch->id);
+ break;
+ case SMUX_READ_DONE:
+ ch->read_buf = rx_buf;
+ ch->read_len = len;
+ ch->in_busy = 1;
+ diag_remote_dev_read_done(ch->dev_id, ch->read_buf,
+ ch->read_len);
+ break;
+	}
+}
+
+static int diag_smux_init_ch(struct diag_smux_info *ch)
+{
+ if (!ch)
+ return -EINVAL;
+
+ if (!ch->enabled) {
+ pr_debug("diag: SMUX channel is not enabled id: %d\n", ch->id);
+ return -ENODEV;
+ }
+
+ if (ch->inited) {
+		pr_debug("diag: SMUX channel %d is already initialized\n",
+ ch->id);
+ return 0;
+ }
+
+ ch->read_buf = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+ if (!ch->read_buf)
+ return -ENOMEM;
+
+ ch->inited = 1;
+
+ return 0;
+}
+
+static int smux_get_rx_buffer(void *priv, void **pkt_priv, void **buf,
+ int size)
+{
+	int id = (int)(uintptr_t)priv;
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+
+ if (ch->in_busy) {
+ pr_debug("diag: read buffer for SMUX is BUSY\n");
+ return -EAGAIN;
+ }
+
+ *pkt_priv = (void *)0x1234;
+ *buf = ch->read_buf;
+ ch->in_busy = 1;
+ return 0;
+}
+
+static int smux_open(int id)
+{
+ int err = 0;
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+ if (ch->opened) {
+ pr_debug("diag: SMUX channel %d is already connected\n",
+ ch->id);
+ return 0;
+ }
+
+ err = diag_smux_init_ch(ch);
+ if (err) {
+ pr_err("diag: Unable to initialize SMUX channel %d, err: %d\n",
+ ch->id, err);
+ return err;
+ }
+
+	err = msm_smux_open(ch->lcid, (void *)(uintptr_t)ch->id,
+			    diag_smux_event, smux_get_rx_buffer);
+ smux_get_rx_buffer);
+ if (err) {
+ pr_err("diag: failed to open SMUX ch %d, err: %d\n",
+ ch->id, err);
+ return err;
+ }
+ msm_smux_tiocm_set(ch->lcid, TIOCM_DTR, 0);
+ ch->opened = 1;
+ pr_info("diag: SMUX ch %d is connected\n", ch->id);
+ return 0;
+}
+
+static int smux_close(int id)
+{
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+ if (!ch->enabled) {
+ pr_debug("diag: SMUX channel is not enabled id: %d\n", ch->id);
+ return -ENODEV;
+ }
+
+ msm_smux_close(ch->lcid);
+ ch->opened = 0;
+ ch->in_busy = 1;
+ kfree(ch->read_buf);
+ ch->read_buf = NULL;
+ return 0;
+}
+
+static int smux_queue_read(int id)
+{
+ return 0;
+}
+
+static int smux_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+ return msm_smux_write(ch->lcid, NULL, buf, len);
+}
+
+static int smux_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ diag_smux[id].in_busy = 0;
+ return 0;
+}
+
+static int diagfwd_smux_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int diagfwd_smux_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static const struct dev_pm_ops diagfwd_smux_dev_pm_ops = {
+ .runtime_suspend = diagfwd_smux_runtime_suspend,
+ .runtime_resume = diagfwd_smux_runtime_resume,
+};
+
+static int diagfwd_smux_probe(struct platform_device *pdev)
+{
+ if (!pdev)
+ return -EINVAL;
+
+ pr_debug("diag: SMUX probe called, pdev->id: %d\n", pdev->id);
+ if (pdev->id < 0 || pdev->id >= NUM_SMUX_DEV) {
+ pr_err("diag: No support for SMUX device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ diag_smux[pdev->id].enabled = 1;
+ return smux_open(pdev->id);
+}
+
+static int diagfwd_smux_remove(struct platform_device *pdev)
+{
+ if (!pdev)
+ return -EINVAL;
+
+	pr_debug("diag: SMUX remove called, pdev->id: %d\n", pdev->id);
+ if (pdev->id < 0 || pdev->id >= NUM_SMUX_DEV) {
+ pr_err("diag: No support for SMUX device %d\n", pdev->id);
+ return -EINVAL;
+ }
+ if (!diag_smux[pdev->id].enabled) {
+ pr_err("diag: SMUX channel %d is not enabled\n",
+ diag_smux[pdev->id].id);
+ return -ENODEV;
+ }
+ return smux_close(pdev->id);
+}
+
+static struct platform_driver msm_diagfwd_smux_driver = {
+ .probe = diagfwd_smux_probe,
+ .remove = diagfwd_smux_remove,
+ .driver = {
+ .name = "SMUX_DIAG",
+ .owner = THIS_MODULE,
+ .pm = &diagfwd_smux_dev_pm_ops,
+ },
+};
+
+static struct diag_remote_dev_ops diag_smux_fwd_ops = {
+ .open = smux_open,
+ .close = smux_close,
+ .queue_read = smux_queue_read,
+ .write = smux_write,
+ .fwd_complete = smux_fwd_complete,
+};
+
+int diag_smux_init(void)
+{
+ int i;
+ int err = 0;
+ struct diag_smux_info *ch = NULL;
+ char wq_name[DIAG_SMUX_NAME_SZ + 11];
+
+ for (i = 0; i < NUM_SMUX_DEV; i++) {
+ ch = &diag_smux[i];
+		strlcpy(wq_name, "DIAG_SMUX_", sizeof(wq_name));
+		strlcat(wq_name, ch->name, sizeof(wq_name));
+ ch->smux_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->smux_wq) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ err = diagfwd_bridge_register(ch->dev_id, ch->id,
+ &diag_smux_fwd_ops);
+ if (err) {
+ pr_err("diag: Unable to register SMUX ch %d with bridge\n",
+ ch->id);
+ goto fail;
+ }
+ }
+
+ err = platform_driver_register(&msm_diagfwd_smux_driver);
+ if (err) {
+ pr_err("diag: Unable to register SMUX device, err: %d\n", err);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ diag_smux_exit();
+ return err;
+}
+
+void diag_smux_exit(void)
+{
+	int i;
+	struct diag_smux_info *ch = NULL;
+
+	for (i = 0; i < NUM_SMUX_DEV; i++) {
+ ch = &diag_smux[i];
+ kfree(ch->read_buf);
+ ch->read_buf = NULL;
+ ch->enabled = 0;
+ ch->opened = 0;
+ ch->read_len = 0;
+ }
+ platform_driver_unregister(&msm_diagfwd_smux_driver);
+}
diff --git a/drivers/char/diag/diagfwd_smux.h b/drivers/char/diag/diagfwd_smux.h
new file mode 100644
index 000000000000..f2514a267d92
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smux.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2012,2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SMUX_H
+#define DIAGFWD_SMUX_H
+
+#include <linux/smux.h>
+
+#define SMUX_1 0
+#define NUM_SMUX_DEV 1
+
+#define DIAG_SMUX_NAME_SZ 24
+
+struct diag_smux_info {
+ int id;
+ int lcid;
+ int dev_id;
+ char name[DIAG_SMUX_NAME_SZ];
+ unsigned char *read_buf;
+ int read_len;
+ int in_busy;
+ int enabled;
+ int inited;
+ int opened;
+ struct work_struct read_work;
+ struct workqueue_struct *smux_wq;
+};
+
+extern struct diag_smux_info diag_smux[NUM_SMUX_DEV];
+
+int diag_smux_init(void);
+void diag_smux_exit(void);
+
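+/*
+ * Sketch (illustrative): diag_smux_init() creates a workqueue per
+ * channel and registers it with the bridge layer; the SMUX platform
+ * driver probe then opens the logical channel:
+ *
+ *	diagfwd_bridge_register(ch->dev_id, ch->id, &diag_smux_fwd_ops);
+ *	...
+ *	smux_open(pdev->id);	(from diagfwd_smux_probe)
+ */
+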
+#endif
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
new file mode 100644
index 000000000000..22a60cdff7e7
--- /dev/null
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -0,0 +1,1238 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/msm_ipc.h>
+#include <linux/socket.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <asm/current.h>
+#include <net/sock.h>
+#include <linux/ipc_router.h>
+#include <linux/notifier.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_socket.h"
+#include "diag_ipc_logging.h"
+
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#define DIAG_SVC_ID 0x1001
+
+#define MODEM_INST_BASE 0
+#define LPASS_INST_BASE 64
+#define WCNSS_INST_BASE 128
+#define SENSORS_INST_BASE 192
+#define CDSP_INST_BASE 256
+#define WDSP_INST_BASE 320
+
+#define INST_ID_CNTL 0
+#define INST_ID_CMD 1
+#define INST_ID_DATA 2
+#define INST_ID_DCI_CMD 3
+#define INST_ID_DCI 4
+
+struct diag_cntl_socket_info *cntl_socket;
+static uint64_t bootup_req[NUM_SOCKET_SUBSYSTEMS];
+
+struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DATA,
+ .name = "MODEM_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DATA,
+ .name = "LPASS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DATA,
+ .name = "WCNSS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DATA,
+ .name = "SENSORS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .name = "DIAG_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DATA,
+ .name = "CDSP_DATA"
+ }
+};
+
+struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CNTL,
+ .name = "MODEM_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CNTL,
+ .name = "LPASS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CNTL,
+ .name = "WCNSS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CNTL,
+ .name = "SENSORS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .name = "DIAG_CTRL"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CNTL,
+ .name = "CDSP_CNTL"
+ }
+};
+
+struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI,
+ .name = "MODEM_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI,
+ .name = "LPASS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI,
+ .name = "WCNSS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI,
+ .name = "SENSORS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .name = "DIAG_DCI_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI,
+ .name = "CDSP_DCI"
+ }
+};
+
+struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CMD,
+ .name = "MODEM_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CMD,
+ .name = "LPASS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CMD,
+ .name = "WCNSS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CMD,
+ .name = "SENSORS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .name = "DIAG_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_CMD,
+ .name = "CDSP_CMD"
+ }
+};
+
+struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI_CMD,
+ .name = "MODEM_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI_CMD,
+ .name = "LPASS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI_CMD,
+ .name = "WCNSS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI_CMD,
+ .name = "SENSORS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "DIAG_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_CDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "CDSP_DCI_CMD"
+ },
+};
+
+static void diag_state_open_socket(void *ctxt);
+static void diag_state_close_socket(void *ctxt);
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len);
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_socket_queue_read(void *ctxt);
+static void socket_init_work_fn(struct work_struct *work);
+static int socket_ready_notify(struct notifier_block *nb,
+ unsigned long action, void *data);
+
+static struct diag_peripheral_ops socket_ops = {
+ .open = diag_state_open_socket,
+ .close = diag_state_close_socket,
+ .write = diag_socket_write,
+ .read = diag_socket_read,
+ .queue_read = diag_socket_queue_read
+};
+
+static struct notifier_block socket_notify = {
+ .notifier_call = socket_ready_notify,
+};
+
+static void diag_state_open_socket(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return;
+
+ info = (struct diag_socket_info *)(ctxt);
+ atomic_set(&info->diag_state, 1);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 1", info->name);
+}
+
+static void diag_state_close_socket(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return;
+
+ info = (struct diag_socket_info *)(ctxt);
+ atomic_set(&info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 0", info->name);
+ wake_up_interruptible(&info->read_wait_q);
+ flush_workqueue(info->wq);
+}
+
+static void socket_data_ready(struct sock *sk_ptr)
+{
+ unsigned long flags;
+ struct diag_socket_info *info = NULL;
+
+ if (!sk_ptr) {
+		pr_err_ratelimited("diag: In %s, invalid sk_ptr\n", __func__);
+ return;
+ }
+
+ info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+ if (!info) {
+ pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->data_ready++;
+ spin_unlock_irqrestore(&info->lock, flags);
+ diag_ws_on_notify();
+
+ queue_work(info->wq, &(info->read_work));
+ wake_up_interruptible(&info->read_wait_q);
+}
+
+static void cntl_socket_data_ready(struct sock *sk_ptr)
+{
+ if (!sk_ptr || !cntl_socket) {
+ pr_err_ratelimited("diag: In %s, invalid ptrs. sk_ptr: %pK cntl_socket: %pK\n",
+ __func__, sk_ptr, cntl_socket);
+ return;
+ }
+
+ atomic_inc(&cntl_socket->data_ready);
+ wake_up_interruptible(&cntl_socket->read_wait_q);
+ queue_work(cntl_socket->wq, &(cntl_socket->read_work));
+}
+
+static void socket_flow_cntl(struct sock *sk_ptr)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!sk_ptr)
+ return;
+
+ info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+ if (!info) {
+ pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+ return;
+ }
+
+ atomic_inc(&info->flow_cnt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s flow controlled\n", info->name);
+ pr_debug("diag: In %s, channel %s flow controlled\n",
+ __func__, info->name);
+}
+
+static int lookup_server(struct diag_socket_info *info)
+{
+ int ret = 0;
+ struct server_lookup_args *args = NULL;
+ struct sockaddr_msm_ipc *srv_addr = NULL;
+
+ if (!info)
+ return -EINVAL;
+
+ args = kzalloc((sizeof(struct server_lookup_args) +
+ sizeof(struct msm_ipc_server_info)), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ kmemleak_not_leak(args);
+
+ args->lookup_mask = 0xFFFFFFFF;
+ args->port_name.service = info->svc_id;
+ args->port_name.instance = info->ins_id;
+ args->num_entries_in_array = 1;
+ args->num_entries_found = 0;
+
+ ret = kernel_sock_ioctl(info->hdl, IPC_ROUTER_IOCTL_LOOKUP_SERVER,
+ (unsigned long)args);
+ if (ret < 0) {
+ pr_err("diag: In %s, cannot find service for %s\n", __func__,
+ info->name);
+ kfree(args);
+ return -EFAULT;
+ }
+
+ srv_addr = &info->remote_addr;
+ srv_addr->family = AF_MSM_IPC;
+ srv_addr->address.addrtype = MSM_IPC_ADDR_ID;
+ srv_addr->address.addr.port_addr.node_id = args->srv_info[0].node_id;
+ srv_addr->address.addr.port_addr.port_id = args->srv_info[0].port_id;
+ ret = args->num_entries_found;
+ kfree(args);
+ if (ret < 1)
+ return -EIO;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s found server node: %d port: %d",
+ info->name, srv_addr->address.addr.port_addr.node_id,
+ srv_addr->address.addr.port_addr.port_id);
+ return 0;
+}
+
+static void __socket_open_channel(struct diag_socket_info *info)
+{
+ if (!info)
+ return;
+
+ if (!info->inited) {
+ pr_debug("diag: In %s, socket %s is not initialized\n",
+ __func__, info->name);
+ return;
+ }
+
+ if (atomic_read(&info->opened)) {
+ pr_debug("diag: In %s, socket %s already opened\n",
+ __func__, info->name);
+ return;
+ }
+
+ atomic_set(&info->opened, 1);
+ diagfwd_channel_open(info->fwd_ctxt);
+}
+
+static void socket_open_client(struct diag_socket_info *info)
+{
+ int ret = 0;
+
+ if (!info || info->port_type != PORT_TYPE_CLIENT)
+ return;
+
+ ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+ if (ret < 0 || !info->hdl) {
+ pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+ info->name);
+ return;
+ }
+
+ write_lock_bh(&info->hdl->sk->sk_callback_lock);
+ info->hdl->sk->sk_user_data = (void *)(info);
+ info->hdl->sk->sk_data_ready = socket_data_ready;
+ info->hdl->sk->sk_write_space = socket_flow_cntl;
+ write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+ ret = lookup_server(info);
+ if (ret) {
+ pr_err("diag: In %s, failed to lookup server, ret: %d\n",
+ __func__, ret);
+ return;
+ }
+ __socket_open_channel(info);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened client\n", info->name);
+}
+
+static void socket_open_server(struct diag_socket_info *info)
+{
+ int ret = 0;
+ struct sockaddr_msm_ipc srv_addr = { 0 };
+
+ if (!info)
+ return;
+
+ ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+ if (ret < 0 || !info->hdl) {
+ pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+ info->name);
+ return;
+ }
+
+ write_lock_bh(&info->hdl->sk->sk_callback_lock);
+ info->hdl->sk->sk_user_data = (void *)(info);
+ info->hdl->sk->sk_data_ready = socket_data_ready;
+ info->hdl->sk->sk_write_space = socket_flow_cntl;
+ write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+
+ srv_addr.family = AF_MSM_IPC;
+ srv_addr.address.addrtype = MSM_IPC_ADDR_NAME;
+ srv_addr.address.addr.port_name.service = info->svc_id;
+ srv_addr.address.addr.port_name.instance = info->ins_id;
+
+ ret = kernel_bind(info->hdl, (struct sockaddr *)&srv_addr,
+ sizeof(srv_addr));
+ if (ret) {
+ pr_err("diag: In %s, failed to bind, ch: %s, svc_id: %d ins_id: %d, err: %d\n",
+ __func__, info->name, info->svc_id, info->ins_id, ret);
+ return;
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened server svc: %d ins: %d",
+ info->name, info->svc_id, info->ins_id);
+}
+
+static void socket_init_work_fn(struct work_struct *work)
+{
+ struct diag_socket_info *info = container_of(work,
+ struct diag_socket_info,
+ init_work);
+ if (!info)
+ return;
+
+ if (!info->inited) {
+ pr_debug("diag: In %s, socket %s is not initialized\n",
+ __func__, info->name);
+ return;
+ }
+
+ switch (info->port_type) {
+ case PORT_TYPE_SERVER:
+ socket_open_server(info);
+ break;
+ case PORT_TYPE_CLIENT:
+ socket_open_client(info);
+ break;
+ default:
+ pr_err("diag: In %s, unknown type %d\n", __func__,
+ info->port_type);
+ break;
+ }
+}
+
+static void __socket_close_channel(struct diag_socket_info *info)
+{
+ if (!info || !info->hdl)
+ return;
+
+ if (!atomic_read(&info->opened))
+ return;
+
+ if (bootup_req[info->peripheral] == PEPIPHERAL_SSR_UP) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s is up, stopping cleanup: bootup_req = %d\n",
+ info->name, (int)bootup_req[info->peripheral]);
+ return;
+ }
+
+ memset(&info->remote_addr, 0, sizeof(struct sockaddr_msm_ipc));
+ diagfwd_channel_close(info->fwd_ctxt);
+
+ atomic_set(&info->opened, 0);
+
+ /* Don't close the server. Server should always remain open */
+ if (info->port_type != PORT_TYPE_SERVER) {
+ write_lock_bh(&info->hdl->sk->sk_callback_lock);
+ info->hdl->sk->sk_user_data = NULL;
+ info->hdl->sk->sk_data_ready = NULL;
+ write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+ sock_release(info->hdl);
+ info->hdl = NULL;
+ wake_up_interruptible(&info->read_wait_q);
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static void socket_close_channel(struct diag_socket_info *info)
+{
+ if (!info)
+ return;
+
+ __socket_close_channel(info);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static int cntl_socket_process_msg_server(uint32_t cmd, uint32_t svc_id,
+ uint32_t ins_id)
+{
+ uint8_t peripheral;
+ uint8_t found = 0;
+ struct diag_socket_info *info = NULL;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_cmd[peripheral];
+ if ((svc_id == info->svc_id) &&
+ (ins_id == info->ins_id)) {
+ found = 1;
+ break;
+ }
+
+ info = &socket_dci_cmd[peripheral];
+ if ((svc_id == info->svc_id) &&
+ (ins_id == info->ins_id)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EIO;
+
+ switch (cmd) {
+ case CNTL_CMD_NEW_SERVER:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received new server\n",
+ info->name);
+ diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+ info->type, (void *)info, &socket_ops,
+ &info->fwd_ctxt);
+ queue_work(info->wq, &(info->init_work));
+ break;
+ case CNTL_CMD_REMOVE_SERVER:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove server\n",
+ info->name);
+ socket_close_channel(info);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cntl_socket_process_msg_client(uint32_t cmd, uint32_t node_id,
+ uint32_t port_id)
+{
+ uint8_t peripheral;
+ uint8_t found = 0;
+ struct diag_socket_info *info = NULL;
+ struct msm_ipc_port_addr remote_port = {0};
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_data[peripheral];
+ remote_port = info->remote_addr.address.addr.port_addr;
+ if ((remote_port.node_id == node_id) &&
+ (remote_port.port_id == port_id)) {
+ found = 1;
+ break;
+ }
+
+ info = &socket_cntl[peripheral];
+ remote_port = info->remote_addr.address.addr.port_addr;
+ if ((remote_port.node_id == node_id) &&
+ (remote_port.port_id == port_id)) {
+ found = 1;
+ break;
+ }
+
+ info = &socket_dci[peripheral];
+ remote_port = info->remote_addr.address.addr.port_addr;
+ if ((remote_port.node_id == node_id) &&
+ (remote_port.port_id == port_id)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EIO;
+
+ switch (cmd) {
+ case CNTL_CMD_REMOVE_CLIENT:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove client\n",
+ info->name);
+ mutex_lock(&driver->diag_notifier_mutex);
+ socket_close_channel(info);
+ mutex_unlock(&driver->diag_notifier_mutex);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+struct restart_notifier_block {
+ unsigned processor;
+ char *name;
+ struct notifier_block nb;
+};
+
+static struct restart_notifier_block restart_notifiers[] = {
+ {SOCKET_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_ADSP, "adsp", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_SLPI, "slpi", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_CDSP, "cdsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+static void cntl_socket_read_work_fn(struct work_struct *work)
+{
+ union cntl_port_msg msg;
+ int ret = 0;
+ struct kvec iov = { 0 };
+ struct msghdr read_msg = { 0 };
+
+ if (!cntl_socket)
+ return;
+
+ ret = wait_event_interruptible(cntl_socket->read_wait_q,
+ (atomic_read(&cntl_socket->data_ready) > 0));
+ if (ret)
+ return;
+
+ do {
+ iov.iov_base = &msg;
+ iov.iov_len = sizeof(msg);
+ read_msg.msg_name = NULL;
+ read_msg.msg_namelen = 0;
+ ret = kernel_recvmsg(cntl_socket->hdl, &read_msg, &iov, 1,
+ sizeof(msg), MSG_DONTWAIT);
+ if (ret < 0) {
+			pr_debug("diag: In %s, error receiving data %d\n",
+ __func__, ret);
+ break;
+ }
+
+ atomic_dec(&cntl_socket->data_ready);
+
+ switch (msg.srv.cmd) {
+ case CNTL_CMD_NEW_SERVER:
+ case CNTL_CMD_REMOVE_SERVER:
+ cntl_socket_process_msg_server(msg.srv.cmd,
+ msg.srv.service,
+ msg.srv.instance);
+ break;
+ case CNTL_CMD_REMOVE_CLIENT:
+ cntl_socket_process_msg_client(msg.cli.cmd,
+ msg.cli.node_id,
+ msg.cli.port_id);
+ break;
+ }
+ } while (atomic_read(&cntl_socket->data_ready) > 0);
+}
+
+static void socket_read_work_fn(struct work_struct *work)
+{
+ struct diag_socket_info *info = container_of(work,
+ struct diag_socket_info,
+ read_work);
+
+ if (!info)
+ return;
+
+ if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
+ diagfwd_buffers_init(info->fwd_ctxt);
+
+ diagfwd_channel_read(info->fwd_ctxt);
+}
+
+static void diag_socket_queue_read(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return;
+
+ info = (struct diag_socket_info *)ctxt;
+ if (info->hdl && info->wq)
+ queue_work(info->wq, &(info->read_work));
+}
+
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt || !fwd_ctxt)
+ return;
+
+ info = (struct diag_socket_info *)ctxt;
+ info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_socket_check_state(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return 0;
+
+ info = (struct diag_socket_info *)ctxt;
+ return (int)(atomic_read(&info->diag_state));
+}
+
+static void __diag_socket_init(struct diag_socket_info *info)
+{
+ uint16_t ins_base = 0;
+ uint16_t ins_offset = 0;
+	char wq_name[DIAG_SOCKET_NAME_SZ + 10];
+
+	if (!info)
+ return;
+
+ init_waitqueue_head(&info->wait_q);
+ info->inited = 0;
+ atomic_set(&info->opened, 0);
+ atomic_set(&info->diag_state, 0);
+ info->pkt_len = 0;
+ info->pkt_read = 0;
+ info->hdl = NULL;
+ info->fwd_ctxt = NULL;
+ info->data_ready = 0;
+ atomic_set(&info->flow_cnt, 0);
+ spin_lock_init(&info->lock);
+	strlcpy(wq_name, "DIAG_SOCKET_", sizeof(wq_name));
+	strlcat(wq_name, info->name, sizeof(wq_name));
+ init_waitqueue_head(&info->read_wait_q);
+ info->wq = create_singlethread_workqueue(wq_name);
+ if (!info->wq) {
+ pr_err("diag: In %s, unable to create workqueue for socket channel %s\n",
+ __func__, info->name);
+ return;
+ }
+ INIT_WORK(&(info->init_work), socket_init_work_fn);
+ INIT_WORK(&(info->read_work), socket_read_work_fn);
+
+ switch (info->peripheral) {
+ case PERIPHERAL_MODEM:
+ ins_base = MODEM_INST_BASE;
+ break;
+ case PERIPHERAL_LPASS:
+ ins_base = LPASS_INST_BASE;
+ break;
+ case PERIPHERAL_WCNSS:
+ ins_base = WCNSS_INST_BASE;
+ break;
+ case PERIPHERAL_SENSORS:
+ ins_base = SENSORS_INST_BASE;
+ break;
+ case PERIPHERAL_WDSP:
+ ins_base = WDSP_INST_BASE;
+ break;
+ case PERIPHERAL_CDSP:
+ ins_base = CDSP_INST_BASE;
+ break;
+ }
+
+ switch (info->type) {
+ case TYPE_DATA:
+ ins_offset = INST_ID_DATA;
+ info->port_type = PORT_TYPE_SERVER;
+ break;
+ case TYPE_CNTL:
+ ins_offset = INST_ID_CNTL;
+ info->port_type = PORT_TYPE_SERVER;
+ break;
+ case TYPE_DCI:
+ ins_offset = INST_ID_DCI;
+ info->port_type = PORT_TYPE_SERVER;
+ break;
+ case TYPE_CMD:
+ ins_offset = INST_ID_CMD;
+ info->port_type = PORT_TYPE_CLIENT;
+ break;
+ case TYPE_DCI_CMD:
+ ins_offset = INST_ID_DCI_CMD;
+ info->port_type = PORT_TYPE_CLIENT;
+ break;
+ }
+
+ info->svc_id = DIAG_SVC_ID;
+ info->ins_id = ins_base + ins_offset;
+ info->inited = 1;
+}
+
+static void cntl_socket_init_work_fn(struct work_struct *work)
+{
+ int ret = 0;
+
+ if (!cntl_socket)
+ return;
+
+ ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &cntl_socket->hdl);
+ if (ret < 0 || !cntl_socket->hdl) {
+ pr_err("diag: In %s, cntl socket is not initialized, ret: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ write_lock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+ cntl_socket->hdl->sk->sk_user_data = (void *)cntl_socket;
+ cntl_socket->hdl->sk->sk_data_ready = cntl_socket_data_ready;
+ write_unlock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+
+ ret = kernel_sock_ioctl(cntl_socket->hdl,
+ IPC_ROUTER_IOCTL_BIND_CONTROL_PORT, 0);
+ if (ret < 0) {
+		pr_err("diag: In %s, could not bind as control port, ret: %d\n",
+ __func__, ret);
+ }
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized control sockets");
+}
+
+static int __diag_cntl_socket_init(void)
+{
+ cntl_socket = kzalloc(sizeof(struct diag_cntl_socket_info), GFP_KERNEL);
+ if (!cntl_socket)
+ return -ENOMEM;
+
+ cntl_socket->svc_id = DIAG_SVC_ID;
+ cntl_socket->ins_id = 1;
+ atomic_set(&cntl_socket->data_ready, 0);
+ init_waitqueue_head(&cntl_socket->read_wait_q);
+	cntl_socket->wq = create_singlethread_workqueue("DIAG_CNTL_SOCKET");
+	if (!cntl_socket->wq) {
+		kfree(cntl_socket);
+		cntl_socket = NULL;
+		return -ENOMEM;
+	}
+ INIT_WORK(&(cntl_socket->read_work), cntl_socket_read_work_fn);
+ INIT_WORK(&(cntl_socket->init_work), cntl_socket_init_work_fn);
+
+ return 0;
+}
+
+int diag_socket_init(void)
+{
+ int err = 0;
+ int i;
+ int peripheral = 0;
+ void *handle;
+ struct diag_socket_info *info = NULL;
+ struct restart_notifier_block *nb;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_cntl[peripheral];
+ __diag_socket_init(&socket_cntl[peripheral]);
+
+ diagfwd_cntl_register(TRANSPORT_SOCKET, peripheral,
+ (void *)info, &socket_ops, &(info->fwd_ctxt));
+
+ __diag_socket_init(&socket_data[peripheral]);
+ __diag_socket_init(&socket_cmd[peripheral]);
+ __diag_socket_init(&socket_dci[peripheral]);
+ __diag_socket_init(&socket_dci_cmd[peripheral]);
+ }
+
+ err = __diag_cntl_socket_init();
+ if (err) {
+ pr_err("diag: Unable to open control sockets, err: %d\n", err);
+ goto fail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+ nb = &restart_notifiers[i];
+ if (nb) {
+ handle = subsys_notif_register_notifier(nb->name,
+ &nb->nb);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s: registering notifier for '%s', handle=%p\n",
+ __func__, nb->name, handle);
+ }
+ }
+
+ register_ipcrtr_af_init_notifier(&socket_notify);
+fail:
+ return err;
+}
+
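+/*
+ * Notifier invoked once the IPC router address family is initialized.
+ * Only the server-type channels (CNTL, DATA, DCI) and the control
+ * socket are opened here; the client-type channels are brought up once
+ * the corresponding remote servers are announced via
+ * CNTL_CMD_NEW_SERVER.
+ */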
+static int socket_ready_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ uint8_t peripheral;
+ struct diag_socket_info *info = NULL;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "received notification from IPCR");
+
+ if (action != IPCRTR_AF_INIT) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "action not recognized by diag %lu\n", action);
+ return 0;
+ }
+
+ /* Initialize only the servers */
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_cntl[peripheral];
+ queue_work(info->wq, &(info->init_work));
+ info = &socket_data[peripheral];
+ queue_work(info->wq, &(info->init_work));
+ info = &socket_dci[peripheral];
+ queue_work(info->wq, &(info->init_work));
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized all servers");
+
+ queue_work(cntl_socket->wq, &(cntl_socket->init_work));
+
+ return 0;
+}
+
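+/*
+ * SSR notifier callback. Records the peripheral's restart progress in
+ * bootup_req: SUBSYS_BEFORE_SHUTDOWN marks it SSR_DOWN, and the first
+ * SUBSYS_AFTER_POWERUP following a shutdown marks it SSR_UP; a power-up
+ * with no recorded shutdown leaves the peripheral marked SSR_DOWN.
+ */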
+static int restart_notifier_cb(struct notifier_block *this, unsigned long code,
+ void *_cmd)
+{
+ struct restart_notifier_block *notifier;
+
+ notifier = container_of(this,
+ struct restart_notifier_block, nb);
+ if (!notifier) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: invalid notifier block\n", __func__);
+ return NOTIFY_DONE;
+ }
+
+ mutex_lock(&driver->diag_notifier_mutex);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s: ssr for processor %d ('%s')\n",
+ __func__, notifier->processor, notifier->name);
+
+ switch (code) {
+
+ case SUBSYS_BEFORE_SHUTDOWN:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
+ bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+ break;
+
+ case SUBSYS_AFTER_SHUTDOWN:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_AFTER_SHUTDOWN\n", __func__);
+ break;
+
+ case SUBSYS_BEFORE_POWERUP:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_BEFORE_POWERUP\n", __func__);
+ break;
+
+ case SUBSYS_AFTER_POWERUP:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_AFTER_POWERUP\n", __func__);
+ if (!bootup_req[notifier->processor]) {
+ bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+ break;
+ }
+ bootup_req[notifier->processor] = PEPIPHERAL_SSR_UP;
+ break;
+
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: code: %lu\n", code);
+ break;
+ }
+ mutex_unlock(&driver->diag_notifier_mutex);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: bootup_req[%s] = %d\n",
+ notifier->name, (int)bootup_req[notifier->processor]);
+
+ return NOTIFY_DONE;
+}
+
+int diag_socket_init_peripheral(uint8_t peripheral)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ info = &socket_data[peripheral];
+ diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+ info->type, (void *)info, &socket_ops,
+ &info->fwd_ctxt);
+
+ info = &socket_dci[peripheral];
+ diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+ info->type, (void *)info, &socket_ops,
+ &info->fwd_ctxt);
+ return 0;
+}
+
+static void __diag_socket_exit(struct diag_socket_info *info)
+{
+ if (!info)
+ return;
+
+ diagfwd_deregister(info->peripheral, info->type, (void *)info);
+ info->fwd_ctxt = NULL;
+ info->hdl = NULL;
+	if (info->wq)
+		destroy_workqueue(info->wq);
+}
+
+void diag_socket_early_exit(void)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ __diag_socket_exit(&socket_cntl[i]);
+}
+
+void diag_socket_exit(void)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ __diag_socket_exit(&socket_data[i]);
+ __diag_socket_exit(&socket_cmd[i]);
+ __diag_socket_exit(&socket_dci[i]);
+ __diag_socket_exit(&socket_dci_cmd[i]);
+ }
+}
+
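+/*
+ * Read packets from a socket channel into @buf. Each packet's length is
+ * probed first with an MSG_PEEK receive; a packet that does not fit in
+ * the remaining space is left queued and the read work is rescheduled
+ * rather than truncating it. The first packet seen on a server port
+ * also captures the client's address and opens the logical channel.
+ */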
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+ int err = 0;
+ int pkt_len = 0;
+ int read_len = 0;
+ int bytes_remaining = 0;
+ int total_recd = 0;
+ int loop_count = 0;
+ uint8_t buf_full = 0;
+ unsigned char *temp = NULL;
+ struct kvec iov = {0};
+ struct msghdr read_msg = {0};
+ struct sockaddr_msm_ipc src_addr = {0};
+ struct diag_socket_info *info = NULL;
+ unsigned long flags;
+
+ info = (struct diag_socket_info *)(ctxt);
+ if (!info)
+ return -ENODEV;
+
+	if (!buf || buf_len <= 0)
+		return -EINVAL;
+
+ temp = buf;
+ bytes_remaining = buf_len;
+
+ err = wait_event_interruptible(info->read_wait_q,
+ (info->data_ready > 0) || (!info->hdl) ||
+ (atomic_read(&info->diag_state) == 0));
+ if (err) {
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ return -ERESTARTSYS;
+ }
+
+ /*
+ * There is no need to continue reading over peripheral in this case.
+ * Release the wake source hold earlier.
+ */
+ if (atomic_read(&info->diag_state) == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s closing read thread. diag state is closed\n",
+ info->name);
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ return 0;
+ }
+
+ if (!info->hdl) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s closing read thread\n",
+ info->name);
+ goto fail;
+ }
+
+ do {
+ loop_count++;
+ iov.iov_base = temp;
+ iov.iov_len = bytes_remaining;
+ read_msg.msg_name = &src_addr;
+ read_msg.msg_namelen = sizeof(src_addr);
+
+ pkt_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1, 0,
+ MSG_PEEK);
+ if (pkt_len <= 0)
+ break;
+
+ if (pkt_len > bytes_remaining) {
+ buf_full = 1;
+ break;
+ }
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->data_ready--;
+ spin_unlock_irqrestore(&info->lock, flags);
+
+		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
+					 pkt_len, 0);
+		if (read_len <= 0) {
+			pr_err_ratelimited("diag: In %s, error receiving data, err: %d\n",
+					   __func__, read_len);
+			err = read_len;
+			goto fail;
+		}
+
+ if (!atomic_read(&info->opened) &&
+ info->port_type == PORT_TYPE_SERVER) {
+ /*
+ * This is the first packet from the client. Copy its
+ * address to the connection object. Consider this
+ * channel open for communication.
+ */
+ memcpy(&info->remote_addr, &src_addr, sizeof(src_addr));
+ if (info->ins_id == INST_ID_DCI)
+ atomic_set(&info->opened, 1);
+ else
+ __socket_open_channel(info);
+ }
+
+ temp += read_len;
+ total_recd += read_len;
+ bytes_remaining -= read_len;
+ } while (info->data_ready > 0);
+
+ if (buf_full || (info->type == TYPE_DATA && pkt_len))
+ err = queue_work(info->wq, &(info->read_work));
+
+ if (total_recd > 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
+ info->name, total_recd);
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ err = diagfwd_channel_read_done(info->fwd_ctxt,
+ buf, total_recd);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ if (err)
+ goto fail;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
+ info->name, total_recd);
+ goto fail;
+ }
+
+ diag_socket_queue_read(info);
+ return 0;
+
+fail:
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+ return -EIO;
+}
+
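+/*
+ * Send @len bytes to the channel's remote address without blocking.
+ * -EAGAIN from kernel_sendmsg() means the transport's in-flight limit
+ * was reached because the peripheral has not read the pending data yet,
+ * so that case is not logged as an error.
+ */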
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len)
+{
+ int err = 0;
+ int write_len = 0;
+ struct kvec iov = {0};
+ struct msghdr write_msg = {0};
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt || !buf || len <= 0)
+ return -EIO;
+
+ info = (struct diag_socket_info *)(ctxt);
+ if (!atomic_read(&info->opened) || !info->hdl)
+ return -ENODEV;
+
+ iov.iov_base = buf;
+ iov.iov_len = len;
+ write_msg.msg_name = &info->remote_addr;
+ write_msg.msg_namelen = sizeof(info->remote_addr);
+ write_msg.msg_flags |= MSG_DONTWAIT;
+ write_len = kernel_sendmsg(info->hdl, &write_msg, &iov, 1, len);
+ if (write_len < 0) {
+ err = write_len;
+ /*
+ * -EAGAIN means that the number of packets in flight is at
+		 * max capacity and the peripheral hasn't read the data.
+ */
+ if (err != -EAGAIN) {
+ pr_err_ratelimited("diag: In %s, error sending data, err: %d, ch: %s\n",
+ __func__, err, info->name);
+ }
+ } else if (write_len != len) {
+ err = write_len;
+ pr_err_ratelimited("diag: In %s, wrote partial packet to %s, len: %d, wrote: %d\n",
+ __func__, info->name, len, write_len);
+ }
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to socket, len: %d\n",
+ info->name, write_len);
+
+ return err;
+}
+
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
new file mode 100644
index 000000000000..a9487b1b3ac1
--- /dev/null
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SOCKET_H
+#define DIAGFWD_SOCKET_H
+
+#include <linux/socket.h>
+#include <linux/msm_ipc.h>
+
+#define DIAG_SOCKET_NAME_SZ 24
+
+#define DIAG_SOCK_MODEM_SVC_ID 64
+#define DIAG_SOCK_MODEM_INS_ID 3
+
+#define PORT_TYPE_SERVER 0
+#define PORT_TYPE_CLIENT 1
+
+#define PEPIPHERAL_AFTER_BOOT 0
+#define PEPIPHERAL_SSR_DOWN 1
+#define PEPIPHERAL_SSR_UP 2
+
+#define CNTL_CMD_NEW_SERVER 4
+#define CNTL_CMD_REMOVE_SERVER 5
+#define CNTL_CMD_REMOVE_CLIENT 6
+
+enum {
+ SOCKET_MODEM,
+ SOCKET_ADSP,
+ SOCKET_WCNSS,
+ SOCKET_SLPI,
+ SOCKET_CDSP,
+ SOCKET_APPS,
+ NUM_SOCKET_SUBSYSTEMS,
+};
+
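+/*
+ * Per-channel state for the socket transport. 'opened' tracks whether
+ * the logical diag channel is up, 'diag_state' gates the read path, and
+ * 'data_ready' counts pending read events and is updated under 'lock'.
+ */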
+struct diag_socket_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t port_type;
+ uint8_t inited;
+ atomic_t opened;
+ atomic_t diag_state;
+ uint32_t pkt_len;
+ uint32_t pkt_read;
+ uint32_t svc_id;
+ uint32_t ins_id;
+ uint32_t data_ready;
+ atomic_t flow_cnt;
+ char name[DIAG_SOCKET_NAME_SZ];
+ spinlock_t lock;
+ wait_queue_head_t wait_q;
+ struct sockaddr_msm_ipc remote_addr;
+ struct socket *hdl;
+ struct workqueue_struct *wq;
+ struct work_struct init_work;
+ struct work_struct read_work;
+ struct diagfwd_info *fwd_ctxt;
+ wait_queue_head_t read_wait_q;
+};
+
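+/*
+ * Layout of the IPC router control notifications read over the control
+ * socket: NEW_SERVER/REMOVE_SERVER messages carry the service and
+ * instance of the remote server, while REMOVE_CLIENT messages carry
+ * only the node and port of the departing client.
+ */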
+union cntl_port_msg {
+ struct {
+ uint32_t cmd;
+ uint32_t service;
+ uint32_t instance;
+ uint32_t node_id;
+ uint32_t port_id;
+ } srv;
+ struct {
+ uint32_t cmd;
+ uint32_t node_id;
+ uint32_t port_id;
+ } cli;
+};
+
+struct diag_cntl_socket_info {
+ uint32_t svc_id;
+ uint32_t ins_id;
+ atomic_t data_ready;
+ struct workqueue_struct *wq;
+ struct work_struct read_work;
+ struct work_struct init_work;
+ wait_queue_head_t read_wait_q;
+ struct socket *hdl;
+};
+
+extern struct diag_socket_info socket_data[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cntl[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cmd[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS];
+
+extern struct diag_cntl_socket_info *cntl_socket;
+
+int diag_socket_init(void);
+int diag_socket_init_peripheral(uint8_t peripheral);
+void diag_socket_exit(void);
+void diag_socket_early_exit(void);
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_socket_check_state(void *ctxt);
+#endif
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
new file mode 100644
index 000000000000..3d9fceddb893
--- /dev/null
+++ b/drivers/char/diag/diagmem.c
@@ -0,0 +1,294 @@
+/* Copyright (c) 2008-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/ratelimit.h>
+#include <linux/atomic.h>
+
+#include "diagchar.h"
+#include "diagmem.h"
+
+struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS] = {
+ {
+ .id = POOL_TYPE_COPY,
+ .name = "POOL_COPY",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_HDLC,
+ .name = "POOL_HDLC",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_USER,
+ .name = "POOL_USER",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MUX_APPS,
+ .name = "POOL_MUX_APPS",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_DCI,
+ .name = "POOL_DCI",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = POOL_TYPE_MDM,
+ .name = "POOL_MDM",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2,
+ .name = "POOL_MDM2",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM_DCI,
+ .name = "POOL_MDM_DCI",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2_DCI,
+ .name = "POOL_MDM2_DCI",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM_MUX,
+ .name = "POOL_MDM_MUX",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2_MUX,
+ .name = "POOL_MDM2_MUX",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM_DCI_WRITE,
+ .name = "POOL_MDM_DCI_WRITE",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2_DCI_WRITE,
+ .name = "POOL_MDM2_DCI_WRITE",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_QSC_MUX,
+ .name = "POOL_QSC_MUX",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ }
+#endif
+};
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize)
+{
+ if (pool_idx < 0 || pool_idx >= NUM_MEMORY_POOLS) {
+ pr_err("diag: Invalid pool index %d in %s\n", pool_idx,
+ __func__);
+ return;
+ }
+
+ diag_mempools[pool_idx].itemsize = itemsize;
+ diag_mempools[pool_idx].poolsize = poolsize;
+ pr_debug("diag: Mempool %s sizes: itemsize %d poolsize %d\n",
+ diag_mempools[pool_idx].name, diag_mempools[pool_idx].itemsize,
+ diag_mempools[pool_idx].poolsize);
+}
+
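+/*
+ * Allocate one item from the pool whose id matches @pool_type. The
+ * requested size must not exceed the pool's configured itemsize, and at
+ * most poolsize items may be outstanding at a time; the underlying
+ * allocation uses GFP_ATOMIC, so this is callable from atomic context.
+ */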
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
+{
+ void *buf = NULL;
+ int i = 0;
+ unsigned long flags;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (!driver)
+ return NULL;
+
+ for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+ mempool = &diag_mempools[i];
+ if (pool_type != mempool->id)
+ continue;
+ if (!mempool->pool) {
+ pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+ mempool->name);
+ break;
+ }
+ if (size == 0 || size > mempool->itemsize) {
+ pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
+ mempool->name, size);
+ break;
+ }
+ spin_lock_irqsave(&mempool->lock, flags);
+ if (mempool->count < mempool->poolsize) {
+ atomic_add(1, (atomic_t *)&mempool->count);
+ buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
+ kmemleak_not_leak(buf);
+ }
+ spin_unlock_irqrestore(&mempool->lock, flags);
+ if (!buf) {
+ pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
+ mempool->name,
+ size, mempool->itemsize,
+ mempool->count,
+ mempool->poolsize);
+ }
+ break;
+ }
+
+ return buf;
+}
+
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
+{
+ int i = 0;
+ unsigned long flags;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (!driver || !buf)
+ return;
+
+ for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+ mempool = &diag_mempools[i];
+ if (pool_type != mempool->id)
+ continue;
+ if (!mempool->pool) {
+ pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+ mempool->name);
+ break;
+ }
+ spin_lock_irqsave(&mempool->lock, flags);
+ if (mempool->count > 0) {
+ mempool_free(buf, mempool->pool);
+ atomic_add(-1, (atomic_t *)&mempool->count);
+ } else {
+ pr_err_ratelimited("diag: Attempting to free items from %s mempool which is already empty\n",
+ mempool->name);
+ }
+ spin_unlock_irqrestore(&mempool->lock, flags);
+ break;
+ }
+}
+
+void diagmem_init(struct diagchar_dev *driver, int index)
+{
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+ return;
+
+ if (index < 0 || index >= NUM_MEMORY_POOLS) {
+ pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+ return;
+ }
+
+ mempool = &diag_mempools[index];
+ if (mempool->pool) {
+ pr_debug("diag: mempool %s is already initialized\n",
+ mempool->name);
+ return;
+ }
+ if (mempool->itemsize <= 0 || mempool->poolsize <= 0) {
+ pr_err("diag: Unable to initialize %s mempool, itemsize: %d poolsize: %d\n",
+ mempool->name, mempool->itemsize,
+ mempool->poolsize);
+ return;
+ }
+
+ mempool->pool = mempool_create_kmalloc_pool(mempool->poolsize,
+ mempool->itemsize);
+ if (!mempool->pool)
+ pr_err("diag: cannot allocate %s mempool\n", mempool->name);
+ else
+ kmemleak_not_leak(mempool->pool);
+
+ spin_lock_init(&mempool->lock);
+}
+
+void diagmem_exit(struct diagchar_dev *driver, int index)
+{
+ unsigned long flags;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (!driver)
+ return;
+
+ if (index < 0 || index >= NUM_MEMORY_POOLS) {
+ pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+ return;
+ }
+
+ mempool = &diag_mempools[index];
+ spin_lock_irqsave(&mempool->lock, flags);
+ if (mempool->count == 0 && mempool->pool != NULL) {
+ mempool_destroy(mempool->pool);
+ mempool->pool = NULL;
+ } else {
+		pr_err("diag: Unable to destroy %s pool, count: %d\n",
+			mempool->name, mempool->count);
+ }
+ spin_unlock_irqrestore(&mempool->lock, flags);
+}
+
diff --git a/drivers/char/diag/diagmem.h b/drivers/char/diag/diagmem.h
new file mode 100644
index 000000000000..d097a3799e9a
--- /dev/null
+++ b/drivers/char/diag/diagmem.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGMEM_H
+#define DIAGMEM_H
+#include "diagchar.h"
+
+#define POOL_TYPE_COPY 0
+#define POOL_TYPE_HDLC 1
+#define POOL_TYPE_USER 2
+#define POOL_TYPE_MUX_APPS 3
+#define POOL_TYPE_DCI 4
+#define POOL_TYPE_LOCAL_LAST 5
+
+#define POOL_TYPE_REMOTE_BASE POOL_TYPE_LOCAL_LAST
+#define POOL_TYPE_MDM POOL_TYPE_REMOTE_BASE
+#define POOL_TYPE_MDM2 (POOL_TYPE_REMOTE_BASE + 1)
+#define POOL_TYPE_MDM_DCI (POOL_TYPE_REMOTE_BASE + 2)
+#define POOL_TYPE_MDM2_DCI (POOL_TYPE_REMOTE_BASE + 3)
+#define POOL_TYPE_MDM_MUX (POOL_TYPE_REMOTE_BASE + 4)
+#define POOL_TYPE_MDM2_MUX (POOL_TYPE_REMOTE_BASE + 5)
+#define POOL_TYPE_MDM_DCI_WRITE (POOL_TYPE_REMOTE_BASE + 6)
+#define POOL_TYPE_MDM2_DCI_WRITE (POOL_TYPE_REMOTE_BASE + 7)
+#define POOL_TYPE_QSC_MUX (POOL_TYPE_REMOTE_BASE + 8)
+#define POOL_TYPE_REMOTE_LAST (POOL_TYPE_REMOTE_BASE + 9)
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MEMORY_POOLS POOL_TYPE_REMOTE_LAST
+#else
+#define NUM_MEMORY_POOLS POOL_TYPE_LOCAL_LAST
+#endif
+
+#define DIAG_MEMPOOL_NAME_SZ 24
+#define DIAG_MEMPOOL_GET_NAME(x) (diag_mempools[x].name)
+
+struct diag_mempool_t {
+ int id;
+ char name[DIAG_MEMPOOL_NAME_SZ];
+ mempool_t *pool;
+ unsigned int itemsize;
+ unsigned int poolsize;
+ int count;
+ spinlock_t lock;
+} __packed;
+
+extern struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS];
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize);
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type);
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
+void diagmem_init(struct diagchar_dev *driver, int index);
+void diagmem_exit(struct diagchar_dev *driver, int index);
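+
+/*
+ * Typical call sequence (illustrative sketch only; the diag core picks
+ * the actual item and pool sizes at initialization time):
+ *
+ *	diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
+ *	diagmem_init(driver, POOL_TYPE_COPY);
+ *	buf = diagmem_alloc(driver, size, POOL_TYPE_COPY);
+ *	...
+ *	diagmem_free(driver, buf, POOL_TYPE_COPY);
+ *	diagmem_exit(driver, POOL_TYPE_COPY);
+ */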
+
+#endif
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dbf22719462f..21cff44b25f0 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -333,10 +333,25 @@ config HW_RANDOM_TPM
If unsure, say Y.
+config HW_RANDOM_MSM_LEGACY
+ tristate "Qualcomm MSM Random Number Generator support (LEGACY)"
+ depends on HW_RANDOM && ARCH_QCOM
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ default n
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Qualcomm MSM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msm_rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_MSM
tristate "Qualcomm SoCs Random Number Generator support"
depends on HW_RANDOM && ARCH_QCOM
- default HW_RANDOM
+ default n
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Qualcomm SoCs.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5ad397635128..4769472a93fb 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -32,4 +32,5 @@ obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986402eb..296b23960815 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013,2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,7 @@ static int msm_rng_probe(struct platform_device *pdev)
rng->hwrng.init = msm_rng_init,
rng->hwrng.cleanup = msm_rng_cleanup,
rng->hwrng.read = msm_rng_read,
+ rng->hwrng.quality = 700;
ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
if (ret) {
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
new file mode 100644
index 000000000000..ba82dbfd0625
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2011-2013, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+
+#include <linux/platform_data/qcom_crypto_device.h>
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET 0x0000
+#define PRNG_STATUS_OFFSET 0x0004
+#define PRNG_LFSR_CFG_OFFSET 0x0100
+#define PRNG_CONFIG_OFFSET 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS 0x0000DDDD
+#define PRNG_CONFIG_MASK 0xFFFFFFFD
+#define PRNG_HW_ENABLE 0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */
+
+struct msm_rng_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *prng_clk;
+ uint32_t qrng_perf_client;
+ struct mutex rng_lock;
+};
+
+static struct msm_rng_device msm_rng_device_info;
+static struct msm_rng_device *msm_rng_dev_cached;
+static struct mutex cached_rng_lock;
+
+static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+
+ switch (cmd) {
+ case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
+ pr_info("calling msm_rng_bus_scale(LOW)\n");
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_device_info.qrng_perf_client, 0);
+ if (ret)
+ pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
+ break;
+ default:
+		pr_err("Unsupported IOCTL call\n");
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Read random words directly from the hardware random bit generator
+ * FIFO and return them to the caller.
+ */
+static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
+ void *data, size_t max)
+{
+ struct platform_device *pdev;
+ void __iomem *base;
+ size_t currsize = 0;
+ u32 val;
+ u32 *retdata = data;
+ int ret;
+ int failed = 0;
+
+ pdev = msm_rng_dev->pdev;
+ base = msm_rng_dev->base;
+
+ /* no room for word data */
+ if (max < 4)
+ return 0;
+
+ mutex_lock(&msm_rng_dev->rng_lock);
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 1);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ /* enable PRNG clock */
+ ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock in callback\n");
+ goto err;
+ }
+ /* read random data from h/w */
+ do {
+ /* check status bit if data is available */
+ while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+ & 0x00000001)) {
+ if (failed == 10) {
+ pr_err("Data not available after retry\n");
+ break;
+ }
+ pr_err("msm_rng:Data not available!\n");
+ msleep_interruptible(10);
+ failed++;
+ }
+
+ /* read FIFO */
+ val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+ if (!val)
+ break; /* no data to read so just bail */
+
+ /* write data back to callers pointer */
+ *(retdata++) = val;
+ currsize += 4;
+ /* make sure we stay on 32bit boundary */
+ if ((max - currsize) < 4)
+ break;
+
+ } while (currsize < max);
+
+ /* vote to turn off clock */
+ clk_disable_unprepare(msm_rng_dev->prng_clk);
+err:
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 0);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ mutex_unlock(&msm_rng_dev->rng_lock);
+
+ val = 0L;
+ return currsize;
+}
+
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct msm_rng_device *msm_rng_dev;
+ int rv = 0;
+
+ msm_rng_dev = (struct msm_rng_device *)rng->priv;
+ rv = msm_rng_direct_read(msm_rng_dev, data, max);
+
+ return rv;
+}
+
+
+static struct hwrng msm_rng = {
+ .name = DRIVER_NAME,
+ .read = msm_rng_read,
+ .quality = 700,
+};
+
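+/*
+ * One-time hardware enable: program the LFSR clock configuration first,
+ * then set the enable bit in the config register. The explicit memory
+ * barriers order the two register writes, and the PRNG clock is kept on
+ * until the enable write has been issued.
+ */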
+static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
+{
+ unsigned long val = 0;
+ unsigned long reg_val = 0;
+ int ret = 0;
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 1);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ /* Enable the PRNG CLK */
+ ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+ if (ret) {
+ dev_err(&(msm_rng_dev->pdev)->dev,
+ "failed to enable clock in probe\n");
+ return -EPERM;
+ }
+
+ /* Enable PRNG h/w only if it is NOT ON */
+ val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
+ PRNG_HW_ENABLE;
+ /* PRNG H/W is not ON */
+ if (val != PRNG_HW_ENABLE) {
+ val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+ val &= PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+
+ /* The PRNG CONFIG register should be first written */
+ mb();
+
+ reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
+ & PRNG_CONFIG_MASK;
+ reg_val |= PRNG_HW_ENABLE;
+ writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
+
+ /* The PRNG clk should be disabled only after we enable the
+ * PRNG h/w by writing to the PRNG CONFIG register.
+ */
+ mb();
+ }
+ clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 0);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+
+ return 0;
+}
+
+static const struct file_operations msm_rng_fops = {
+ .unlocked_ioctl = msm_rng_ioctl,
+};
+static struct class *msm_rng_class;
+static struct cdev msm_rng_cdev;
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct msm_rng_device *msm_rng_dev = NULL;
+ void __iomem *base = NULL;
+ bool configure_qrng = true;
+ int error = 0;
+ int ret = 0;
+ struct device *dev;
+
+ struct msm_bus_scale_pdata *qrng_platform_support = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "invalid address\n");
+ error = -EFAULT;
+ goto err_exit;
+ }
+
+ msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
+ if (!msm_rng_dev) {
+ dev_err(&pdev->dev, "cannot allocate memory\n");
+ error = -ENOMEM;
+ goto err_exit;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ error = -ENOMEM;
+ goto err_iomap;
+ }
+ msm_rng_dev->base = base;
+
+ /* create a handle for clock control */
+ if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+ "qcom,msm-rng-iface-clk")))
+ msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+ "iface_clk");
+ else
+ msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(msm_rng_dev->prng_clk)) {
+ dev_err(&pdev->dev, "failed to register clock source\n");
+ error = -EPERM;
+ goto err_clk_get;
+ }
+
+ /* save away pdev and register driver data */
+ msm_rng_dev->pdev = pdev;
+ platform_set_drvdata(pdev, msm_rng_dev);
+
+ if (pdev->dev.of_node) {
+ /* Register bus client */
+ qrng_platform_support = msm_bus_cl_get_pdata(pdev);
+ msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
+ qrng_platform_support);
+ msm_rng_device_info.qrng_perf_client =
+ msm_rng_dev->qrng_perf_client;
+ if (!msm_rng_dev->qrng_perf_client)
+ pr_err("Unable to register bus client\n");
+ }
+
+ /* Enable rng h/w for the targets which can access the entire
+ * address space of PRNG.
+ */
+ if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+ "qcom,no-qrng-config")))
+ configure_qrng = false;
+ if (configure_qrng) {
+ error = msm_rng_enable_hw(msm_rng_dev);
+ if (error)
+ goto rollback_clk;
+ }
+
+ mutex_init(&msm_rng_dev->rng_lock);
+ mutex_init(&cached_rng_lock);
+
+ /* register with hwrng framework */
+ msm_rng.priv = (unsigned long) msm_rng_dev;
+ error = hwrng_register(&msm_rng);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ error = -EPERM;
+ goto rollback_clk;
+ }
+	ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register chrdev\n");
+		error = ret;
+		goto unregister_hwrng;
+	}
+
+	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
+	if (IS_ERR(msm_rng_class)) {
+		pr_err("class_create failed\n");
+		error = PTR_ERR(msm_rng_class);
+		goto unregister_chrdev;
+	}
+
+	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
+			    NULL, "msm-rng");
+	if (IS_ERR(dev)) {
+		pr_err("Device create failed\n");
+		error = PTR_ERR(dev);
+		goto destroy_class;
+	}
+	cdev_init(&msm_rng_cdev, &msm_rng_fops);
+	msm_rng_dev_cached = msm_rng_dev;
+	return error;
+
+destroy_class:
+	class_destroy(msm_rng_class);
+unregister_chrdev:
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+unregister_hwrng:
+	hwrng_unregister(&msm_rng);
+rollback_clk:
+ clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+ iounmap(msm_rng_dev->base);
+err_iomap:
+ kzfree(msm_rng_dev);
+err_exit:
+ return error;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+ struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+ unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+ hwrng_unregister(&msm_rng);
+ clk_put(msm_rng_dev->prng_clk);
+ iounmap(msm_rng_dev->base);
+ platform_set_drvdata(pdev, NULL);
+ if (msm_rng_dev->qrng_perf_client)
+ msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client);
+
+ kzfree(msm_rng_dev);
+ msm_rng_dev_cached = NULL;
+ return 0;
+}
+
+static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *rdata,
+ unsigned int dlen)
+{
+ int sizeread = 0;
+ int rv = -EFAULT;
+
+ if (!msm_rng_dev_cached) {
+ pr_err("%s: msm_rng_dev is not initialized.\n", __func__);
+ rv = -ENODEV;
+ goto err_exit;
+ }
+
+ if (!rdata) {
+ pr_err("%s: data buffer is null!\n", __func__);
+ rv = -EINVAL;
+ goto err_exit;
+ }
+
+ if (signal_pending(current) ||
+ mutex_lock_interruptible(&cached_rng_lock)) {
+ pr_err("%s: mutex lock interrupted!\n", __func__);
+ rv = -ERESTARTSYS;
+ goto err_exit;
+ }
+ sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
+
+ if (sizeread == dlen)
+ rv = 0;
+
+ mutex_unlock(&cached_rng_lock);
+err_exit:
+ return rv;
+
+}
+
+static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static struct rng_alg rng_algs[] = { {
+ .generate = qrng_get_random,
+ .seed = qrng_reset,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "qrng",
+ .cra_driver_name = "fips_hw_qrng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static const struct of_device_id qrng_match[] = {
+	{ .compatible = "qcom,msm-rng" },
+	{}
+};
+
+static struct platform_driver rng_driver = {
+ .probe = msm_rng_probe,
+ .remove = msm_rng_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qrng_match,
+ }
+};
+
+static int __init msm_rng_init(void)
+{
+ int ret;
+
+ msm_rng_dev_cached = NULL;
+ ret = platform_driver_register(&rng_driver);
+ if (ret) {
+ pr_err("%s: platform_driver_register error:%d\n",
+ __func__, ret);
+ goto err_exit;
+ }
+	ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	if (ret) {
+		pr_err("%s: crypto_register_rngs error:%d\n",
+			__func__, ret);
+		platform_driver_unregister(&rng_driver);
+	}
+
+err_exit:
+ return ret;
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+ crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+ platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_AUTHOR("The Linux Foundation");
+MODULE_DESCRIPTION("Qualcomm MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 8069b361b8dd..803b7840759a 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -59,7 +59,7 @@ static DEFINE_MUTEX(misc_mtx);
/*
* Assigned numbers, used for dynamic minors
*/
-#define DYNAMIC_MINORS 64 /* like dynamic majors */
+#define DYNAMIC_MINORS 96 /* like dynamic majors */
static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
new file mode 100644
index 000000000000..a61d273bfb65
--- /dev/null
+++ b/drivers/char/msm_smd_pkt.c
@@ -0,0 +1,1397 @@
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SMD Packet Driver -- Provides a binary SMD non-muxed packet port
+ * interface.
+ */
+
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/msm_smd_pkt.h>
+#include <linux/poll.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <asm/ioctls.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/ipc_logging.h>
+
+#define MODULE_NAME "msm_smdpkt"
+#define DEVICE_NAME "smdpkt"
+#define WAKEUPSOURCE_TIMEOUT (2000) /* two seconds */
+
+struct smd_pkt_dev {
+ struct list_head dev_list;
+ char dev_name[SMD_MAX_CH_NAME_LEN];
+ char ch_name[SMD_MAX_CH_NAME_LEN];
+ uint32_t edge;
+
+ struct cdev cdev;
+ struct device *devicep;
+ void *pil;
+
+ struct smd_channel *ch;
+ struct mutex ch_lock;
+ struct mutex rx_lock;
+ struct mutex tx_lock;
+ wait_queue_head_t ch_read_wait_queue;
+ wait_queue_head_t ch_write_wait_queue;
+ wait_queue_head_t ch_opened_wait_queue;
+
+ int i;
+ int ref_cnt;
+
+ int blocking_write;
+ int is_open;
+ int poll_mode;
+ unsigned ch_size;
+ uint open_modem_wait;
+
+ int has_reset;
+ int do_reset_notification;
+ struct completion ch_allocated;
+ struct wakeup_source pa_ws; /* Packet Arrival Wakeup Source */
+ struct work_struct packet_arrival_work;
+ spinlock_t pa_spinlock;
+ int ws_locked;
+};
+
+
+struct smd_pkt_driver {
+ struct list_head list;
+ int ref_cnt;
+ char pdriver_name[SMD_MAX_CH_NAME_LEN];
+ struct platform_driver driver;
+};
+
+static DEFINE_MUTEX(smd_pkt_driver_lock_lha1);
+static LIST_HEAD(smd_pkt_driver_list);
+
+struct class *smd_pkt_classp;
+static dev_t smd_pkt_number;
+static struct delayed_work loopback_work;
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp);
+static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp);
+static uint32_t is_modem_smsm_inited(void);
+
+static DEFINE_MUTEX(smd_pkt_dev_lock_lha1);
+static LIST_HEAD(smd_pkt_dev_list);
+static int num_smd_pkt_ports;
+
+#define SMD_PKT_IPC_LOG_PAGE_CNT 2
+static void *smd_pkt_ilctxt;
+
+static int msm_smd_pkt_debug_mask;
+module_param_named(debug_mask, msm_smd_pkt_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+enum {
+ SMD_PKT_STATUS = 1U << 0,
+ SMD_PKT_READ = 1U << 1,
+ SMD_PKT_WRITE = 1U << 2,
+ SMD_PKT_POLL = 1U << 5,
+};
+
+#define DEBUG
+
+#ifdef DEBUG
+
+#define SMD_PKT_LOG_STRING(x...) \
+do { \
+ if (smd_pkt_ilctxt) \
+ ipc_log_string(smd_pkt_ilctxt, "<SMD_PKT>: "x); \
+} while (0)
+
+#define D_STATUS(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_STATUS) \
+ pr_info("Status: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_READ(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_READ) \
+ pr_info("Read: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_WRITE(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_WRITE) \
+ pr_info("Write: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_POLL(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_POLL) \
+ pr_info("Poll: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define E_SMD_PKT_SSR(x) \
+do { \
+ if (x->do_reset_notification) \
+ pr_err("%s notifying reset for smd_pkt_dev id:%d\n", \
+ __func__, x->i); \
+} while (0)
+#else
+#define D_STATUS(x...) do {} while (0)
+#define D_READ(x...) do {} while (0)
+#define D_WRITE(x...) do {} while (0)
+#define D_POLL(x...) do {} while (0)
+#define E_SMD_PKT_SSR(x) do {} while (0)
+#endif
+
+static ssize_t open_timeout_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long tmp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ if (!kstrtoul(buf, 10, &tmp)) {
+ smd_pkt_devp->open_modem_wait = tmp;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return n;
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to convert: %s to an int\n",
+ __func__, buf);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
+
+static ssize_t open_timeout_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ smd_pkt_devp->open_modem_wait);
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store);
+
+/**
+ * loopback_edge_store() - Set the edge type for loopback device
+ * @d: Linux device structure
+ * @attr: Device attribute structure
+ * @buf: Input string
+ * @n: Length of the input string
+ *
+ * This function is used to set the loopback device edge runtime
+ * by writing to the loopback_edge node.
+ */
+static ssize_t loopback_edge_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long tmp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ if (!kstrtoul(buf, 10, &tmp)) {
+ smd_pkt_devp->edge = tmp;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return n;
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to convert: %s to an int\n",
+ __func__, buf);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
+
+/**
+ * loopback_edge_show() - Get the edge type for loopback device
+ * @d: Linux device structure
+ * @attr: Device attribute structure
+ * @buf: Output buffer
+ *
+ * This function is used to get the loopback device edge runtime
+ * by reading the loopback_edge node.
+ */
+static ssize_t loopback_edge_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ smd_pkt_devp->edge);
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(loopback_edge, 0664, loopback_edge_show,
+ loopback_edge_store);
+
+static int notify_reset(struct smd_pkt_dev *smd_pkt_devp)
+{
+ smd_pkt_devp->do_reset_notification = 0;
+
+ return -ENETRESET;
+}
+
+static void clean_and_signal(struct smd_pkt_dev *smd_pkt_devp)
+{
+ smd_pkt_devp->do_reset_notification = 1;
+ smd_pkt_devp->has_reset = 1;
+
+ smd_pkt_devp->is_open = 0;
+
+ wake_up(&smd_pkt_devp->ch_read_wait_queue);
+ wake_up(&smd_pkt_devp->ch_write_wait_queue);
+ wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
+ D_STATUS("%s smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+}
+
+static void loopback_probe_worker(struct work_struct *work)
+{
+	/*
+	 * Wait for the modem SMSM to be initialized so that the SMD
+	 * loopback channel can be allocated at the modem. Since the
+	 * wait needs to be done at most once, using msleep doesn't
+	 * degrade the performance.
+	 */
+	if (!is_modem_smsm_inited())
+		schedule_delayed_work(&loopback_work, msecs_to_jiffies(1000));
+	else
+		smsm_change_state(SMSM_APPS_STATE,
+				0, SMSM_SMD_LOOPBACK);
+}
+
+static void packet_arrival_worker(struct work_struct *work)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long flags;
+
+ smd_pkt_devp = container_of(work, struct smd_pkt_dev,
+ packet_arrival_work);
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->ch && smd_pkt_devp->ws_locked) {
+ D_READ("%s locking smd_pkt_dev id:%d wakeup source\n",
+ __func__, smd_pkt_devp->i);
+ /*
+ * Keep system awake long enough to allow userspace client
+ * to process the packet.
+ */
+ __pm_wakeup_event(&smd_pkt_devp->pa_ws, WAKEUPSOURCE_TIMEOUT);
+ }
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+}
+
+static long smd_pkt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct smd_pkt_dev *smd_pkt_devp;
+ uint32_t val;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp)
+ return -EINVAL;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ switch (cmd) {
+ case TIOCMGET:
+ D_STATUS("%s TIOCMGET command on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ ret = smd_tiocmget(smd_pkt_devp->ch);
+ break;
+ case TIOCMSET:
+ ret = get_user(val, (uint32_t *)arg);
+ if (ret) {
+ pr_err("Error getting TIOCMSET value\n");
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+ return ret;
+ }
+		D_STATUS("%s TIOCMSET command on smd_pkt_dev id:%d arg[0x%x]\n",
+ __func__, smd_pkt_devp->i, val);
+ ret = smd_tiocmset(smd_pkt_devp->ch, val, ~val);
+ break;
+ case SMD_PKT_IOCTL_BLOCKING_WRITE:
+ ret = get_user(smd_pkt_devp->blocking_write, (int *)arg);
+ break;
+ default:
+ pr_err_ratelimited("%s: Unrecognized ioctl command %d\n",
+ __func__, cmd);
+ ret = -ENOIOCTLCMD;
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return ret;
+}
+
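+/**
+ * smd_pkt_read() - read a complete packet from an SMD packet port
+ * @file: file structure of the port
+ * @_buf: user buffer to copy the packet into
+ * @count: size of @_buf
+ * @ppos: unused
+ *
+ * Blocks until a whole packet has arrived, then copies it to @_buf.
+ * Returns the packet length, -ETOOSMALL if the packet is larger than
+ * @count, or -ENETRESET (via notify_reset()) after a subsystem restart.
+ */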
+ssize_t smd_pkt_read(struct file *file,
+ char __user *_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r;
+ int bytes_read;
+ int pkt_size;
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long flags;
+ void *buf;
+
+ smd_pkt_devp = file->private_data;
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on NULL smd_pkt_dev\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return -EINVAL;
+ }
+
+ if (smd_pkt_devp->do_reset_notification) {
+ /* notify client that a reset occurred */
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ }
+ D_READ("Begin %s on smd_pkt_dev id:%d buffer_size %zu\n",
+ __func__, smd_pkt_devp->i, count);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+wait_for_packet:
+ r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue,
+ !smd_pkt_devp->ch ||
+ (smd_cur_packet_size(smd_pkt_devp->ch) > 0
+ && smd_read_avail(smd_pkt_devp->ch)) ||
+ smd_pkt_devp->has_reset);
+
+ mutex_lock(&smd_pkt_devp->rx_lock);
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+
+ if (!smd_pkt_devp->ch) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ /* qualify error message */
+ if (r != -ERESTARTSYS) {
+ /* we get this anytime a signal comes in */
+ pr_err_ratelimited("%s: wait_event_interruptible on smd_pkt_dev id:%d ret %i\n",
+ __func__, smd_pkt_devp->i, r);
+ }
+ kfree(buf);
+ return r;
+ }
+
+ /* Here we have a whole packet waiting for us */
+ pkt_size = smd_cur_packet_size(smd_pkt_devp->ch);
+
+ if (!pkt_size) {
+ pr_err_ratelimited("%s: No data on smd_pkt_dev id:%d, False wakeup\n",
+ __func__, smd_pkt_devp->i);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ goto wait_for_packet;
+ }
+
+	if (pkt_size < 0) {
+		mutex_unlock(&smd_pkt_devp->rx_lock);
+		pr_err_ratelimited("%s: Error %d obtaining packet size for Channel %s\n",
+			__func__, pkt_size, smd_pkt_devp->ch_name);
+		kfree(buf);
+		return pkt_size;
+	}
+
+ if ((uint32_t)pkt_size > count) {
+		pr_err_ratelimited("%s: failure on smd_pkt_dev id: %d - packet size %d > buffer size %zu\n",
+ __func__, smd_pkt_devp->i,
+ pkt_size, count);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ kfree(buf);
+ return -ETOOSMALL;
+ }
+
+ bytes_read = 0;
+ do {
+ r = smd_read(smd_pkt_devp->ch,
+ (buf + bytes_read),
+ (pkt_size - bytes_read));
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+			if (smd_pkt_devp->has_reset) {
+				E_SMD_PKT_SSR(smd_pkt_devp);
+				kfree(buf);
+				return notify_reset(smd_pkt_devp);
+			}
+ pr_err_ratelimited("%s Error while reading %d\n",
+ __func__, r);
+ kfree(buf);
+ return r;
+ }
+ bytes_read += r;
+ if (pkt_size != bytes_read)
+ wait_event(smd_pkt_devp->ch_read_wait_queue,
+ smd_read_avail(smd_pkt_devp->ch) ||
+ smd_pkt_devp->has_reset);
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+ } while (pkt_size != bytes_read);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->poll_mode &&
+ !smd_cur_packet_size(smd_pkt_devp->ch)) {
+ __pm_relax(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 0;
+ smd_pkt_devp->poll_mode = 0;
+ D_READ("%s unlocked smd_pkt_dev id:%d wakeup_source\n",
+ __func__, smd_pkt_devp->i);
+ }
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ r = copy_to_user(_buf, buf, bytes_read);
+ if (r) {
+ kfree(buf);
+ return -EFAULT;
+ }
+ D_READ("Finished %s on smd_pkt_dev id:%d %d bytes\n",
+ __func__, smd_pkt_devp->i, bytes_read);
+ kfree(buf);
+
+ /* check and wakeup read threads waiting on this device */
+ check_and_wakeup_reader(smd_pkt_devp);
+
+ return bytes_read;
+}
+
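+/**
+ * smd_pkt_write() - write a complete packet to an SMD packet port
+ * @file: file structure of the port
+ * @_buf: user buffer holding the packet
+ * @count: length of the packet
+ * @ppos: unused
+ *
+ * In non-blocking mode the write fails with -ENOMEM if the channel FIFO
+ * cannot hold the whole packet; in blocking mode the writer sleeps for
+ * FIFO space segment by segment. Returns -ENETRESET after a subsystem
+ * restart.
+ */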
+ssize_t smd_pkt_write(struct file *file,
+ const char __user *_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r = 0, bytes_written;
+ struct smd_pkt_dev *smd_pkt_devp;
+ DEFINE_WAIT(write_wait);
+ void *buf;
+
+ smd_pkt_devp = file->private_data;
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on NULL smd_pkt_dev\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return -EINVAL;
+ }
+
+ if (smd_pkt_devp->do_reset_notification || smd_pkt_devp->has_reset) {
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ /* notify client that a reset occurred */
+ return notify_reset(smd_pkt_devp);
+ }
+ D_WRITE("Begin %s on smd_pkt_dev id:%d data_size %zu\n",
+ __func__, smd_pkt_devp->i, count);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ r = copy_from_user(buf, _buf, count);
+ if (r) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (!smd_pkt_devp->blocking_write) {
+ if (smd_write_avail(smd_pkt_devp->ch) < count) {
+ pr_err_ratelimited("%s: Not enough space in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ kfree(buf);
+ return -ENOMEM;
+ }
+ }
+
+ r = smd_write_start(smd_pkt_devp->ch, count);
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ pr_err_ratelimited("%s: Error:%d in smd_pkt_dev id:%d @ smd_write_start\n",
+ __func__, r, smd_pkt_devp->i);
+ kfree(buf);
+ return r;
+ }
+
+ bytes_written = 0;
+ do {
+ prepare_to_wait(&smd_pkt_devp->ch_write_wait_queue,
+ &write_wait, TASK_UNINTERRUPTIBLE);
+ if (!smd_write_segment_avail(smd_pkt_devp->ch) &&
+ !smd_pkt_devp->has_reset) {
+ smd_enable_read_intr(smd_pkt_devp->ch);
+ schedule();
+ }
+ finish_wait(&smd_pkt_devp->ch_write_wait_queue, &write_wait);
+ smd_disable_read_intr(smd_pkt_devp->ch);
+
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+ r = smd_write_segment(smd_pkt_devp->ch,
+ (void *)(buf + bytes_written),
+ (count - bytes_written));
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+			if (smd_pkt_devp->has_reset) {
+				E_SMD_PKT_SSR(smd_pkt_devp);
+				kfree(buf);
+				return notify_reset(smd_pkt_devp);
+			}
+ pr_err_ratelimited("%s on smd_pkt_dev id:%d failed r:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ kfree(buf);
+ return r;
+ }
+ bytes_written += r;
+ } while (bytes_written != count);
+ smd_write_end(smd_pkt_devp->ch);
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ D_WRITE("Finished %s on smd_pkt_dev id:%d %zu bytes\n",
+ __func__, smd_pkt_devp->i, count);
+
+ kfree(buf);
+ return count;
+}
+
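+/*
+ * Poll for readable data. Entering poll switches the port to poll mode,
+ * so the packet-arrival wakeup source is released by the next read that
+ * drains the channel.
+ */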
+static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned int mask = 0;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on a NULL device\n", __func__);
+ return POLLERR;
+ }
+
+ smd_pkt_devp->poll_mode = 1;
+ poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait);
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (smd_pkt_devp->has_reset || !smd_pkt_devp->ch) {
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+ return POLLERR;
+ }
+
+ if (smd_read_avail(smd_pkt_devp->ch)) {
+ mask |= POLLIN | POLLRDNORM;
+ D_POLL("%s sets POLLIN for smd_pkt_dev id: %d\n",
+ __func__, smd_pkt_devp->i);
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return mask;
+}
+
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+ unsigned long flags;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ sz = smd_cur_packet_size(smd_pkt_devp->ch);
+ if (sz == 0) {
+ D_READ("%s: No packet in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+ if (!smd_read_avail(smd_pkt_devp->ch)) {
+ D_READ(
+ "%s: packet size is %d in smd_pkt_dev id:%d - but the data isn't here\n",
+ __func__, sz, smd_pkt_devp->i);
+ return;
+ }
+
+ /* here we have a packet of size sz ready */
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ __pm_stay_awake(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 1;
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ wake_up(&smd_pkt_devp->ch_read_wait_queue);
+ schedule_work(&smd_pkt_devp->packet_arrival_work);
+ D_READ("%s: wake_up smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+}
+
+static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ sz = smd_write_segment_avail(smd_pkt_devp->ch);
+ if (sz) {
+ D_WRITE("%s: %d bytes write space in smd_pkt_dev id:%d\n",
+ __func__, sz, smd_pkt_devp->i);
+ smd_disable_read_intr(smd_pkt_devp->ch);
+ wake_up(&smd_pkt_devp->ch_write_wait_queue);
+ }
+}
+
+static void ch_notify(void *priv, unsigned event)
+{
+ struct smd_pkt_dev *smd_pkt_devp = priv;
+
+ if (smd_pkt_devp->ch == 0) {
+ if (event != SMD_EVENT_CLOSE)
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ switch (event) {
+ case SMD_EVENT_DATA: {
+ D_STATUS("%s: DATA event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ check_and_wakeup_reader(smd_pkt_devp);
+ if (smd_pkt_devp->blocking_write)
+ check_and_wakeup_writer(smd_pkt_devp);
+ break;
+ }
+ case SMD_EVENT_OPEN:
+ D_STATUS("%s: OPEN event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ smd_pkt_devp->has_reset = 0;
+ smd_pkt_devp->is_open = 1;
+ wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
+ break;
+ case SMD_EVENT_CLOSE:
+ D_STATUS("%s: CLOSE event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ smd_pkt_devp->is_open = 0;
+ /* put port into reset state */
+ clean_and_signal(smd_pkt_devp);
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK"))
+ schedule_delayed_work(&loopback_work,
+ msecs_to_jiffies(1000));
+ break;
+ }
+}
+
+static int smd_pkt_dummy_probe(struct platform_device *pdev)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->edge == pdev->id
+ && !strcmp(pdev->name, smd_pkt_devp->ch_name)) {
+ complete_all(&smd_pkt_devp->ch_allocated);
+ D_STATUS("%s allocated SMD ch for smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ break;
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return 0;
+}
+
+static uint32_t is_modem_smsm_inited(void)
+{
+ uint32_t modem_state;
+ uint32_t ready_state = (SMSM_INIT | SMSM_SMDINIT);
+
+ modem_state = smsm_get_state(SMSM_MODEM_STATE);
+ return (modem_state & ready_state) == ready_state;
+}
+
+/**
+ * smd_pkt_add_driver() - Add a platform driver for an smd pkt device
+ *
+ * @smd_pkt_devp: pointer to the smd pkt device structure
+ *
+ * @returns: 0 on success, standard Linux error code otherwise
+ *
+ * This function registers a platform driver once for all smd pkt
+ * devices that share the same channel name, and increments the
+ * reference count for the second through nth devices.
+ */
+static int smd_pkt_add_driver(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int r = 0;
+ struct smd_pkt_driver *smd_pkt_driverp;
+ struct smd_pkt_driver *item;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__,
+ smd_pkt_devp->ch_name);
+
+ mutex_lock(&smd_pkt_driver_lock_lha1);
+ list_for_each_entry(item, &smd_pkt_driver_list, list) {
+ if (!strcmp(item->pdriver_name, smd_pkt_devp->ch_name)) {
+ D_STATUS("%s:%s Already Platform driver reg. cnt:%d\n",
+ __func__, smd_pkt_devp->ch_name, item->ref_cnt);
+ ++item->ref_cnt;
+ goto exit;
+ }
+ }
+
+ smd_pkt_driverp = kzalloc(sizeof(*smd_pkt_driverp), GFP_KERNEL);
+	if (!smd_pkt_driverp) {
+ pr_err("%s: kzalloc() failed for smd_pkt_driver[%s]\n",
+ __func__, smd_pkt_devp->ch_name);
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ smd_pkt_driverp->driver.probe = smd_pkt_dummy_probe;
+ scnprintf(smd_pkt_driverp->pdriver_name, SMD_MAX_CH_NAME_LEN,
+ "%s", smd_pkt_devp->ch_name);
+ smd_pkt_driverp->driver.driver.name = smd_pkt_driverp->pdriver_name;
+ smd_pkt_driverp->driver.driver.owner = THIS_MODULE;
+ r = platform_driver_register(&smd_pkt_driverp->driver);
+ if (r) {
+ pr_err("%s: %s Platform driver reg. failed\n",
+ __func__, smd_pkt_devp->ch_name);
+ kfree(smd_pkt_driverp);
+ goto exit;
+ }
+ ++smd_pkt_driverp->ref_cnt;
+ list_add(&smd_pkt_driverp->list, &smd_pkt_driver_list);
+
+exit:
+ D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name);
+ mutex_unlock(&smd_pkt_driver_lock_lha1);
+ return r;
+}
+
+/**
+ * smd_pkt_remove_driver() - Remove the platform driver for an smd pkt device
+ *
+ * @smd_pkt_devp: pointer to the smd pkt device structure
+ *
+ * This function decrements the reference count on the platform
+ * driver for an smd pkt device and unregisters the driver when
+ * the reference count drops to zero.
+ */
+static void smd_pkt_remove_driver(struct smd_pkt_dev *smd_pkt_devp)
+{
+ struct smd_pkt_driver *smd_pkt_driverp;
+ bool found_item = false;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__,
+ smd_pkt_devp->ch_name);
+ mutex_lock(&smd_pkt_driver_lock_lha1);
+ list_for_each_entry(smd_pkt_driverp, &smd_pkt_driver_list, list) {
+ if (!strcmp(smd_pkt_driverp->pdriver_name,
+ smd_pkt_devp->ch_name)) {
+ found_item = true;
+ D_STATUS("%s:%s Platform driver cnt:%d\n",
+ __func__, smd_pkt_devp->ch_name,
+ smd_pkt_driverp->ref_cnt);
+ if (smd_pkt_driverp->ref_cnt > 0)
+ --smd_pkt_driverp->ref_cnt;
+ else
+ pr_warn("%s reference count <= 0\n", __func__);
+ break;
+ }
+ }
+ if (!found_item)
+ pr_err("%s:%s No item found in list.\n",
+ __func__, smd_pkt_devp->ch_name);
+
+ if (found_item && smd_pkt_driverp->ref_cnt == 0) {
+ platform_driver_unregister(&smd_pkt_driverp->driver);
+ smd_pkt_driverp->driver.probe = NULL;
+ list_del(&smd_pkt_driverp->list);
+ kfree(smd_pkt_driverp);
+ }
+ mutex_unlock(&smd_pkt_driver_lock_lha1);
+ D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name);
+}
+
+int smd_pkt_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp;
+ const char *peripheral = NULL;
+
+ smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev);
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on a NULL device\n", __func__);
+ return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+
+ file->private_data = smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (smd_pkt_devp->ch == 0) {
+ unsigned open_wait_rem = smd_pkt_devp->open_modem_wait * 1000;
+
+ reinit_completion(&smd_pkt_devp->ch_allocated);
+
+ r = smd_pkt_add_driver(smd_pkt_devp);
+ if (r) {
+ pr_err_ratelimited("%s: %s Platform driver reg. failed\n",
+ __func__, smd_pkt_devp->ch_name);
+ goto out;
+ }
+
+ peripheral = smd_edge_to_pil_str(smd_pkt_devp->edge);
+ if (!IS_ERR_OR_NULL(peripheral)) {
+ smd_pkt_devp->pil = subsystem_get(peripheral);
+ if (IS_ERR(smd_pkt_devp->pil)) {
+ r = PTR_ERR(smd_pkt_devp->pil);
+ pr_err_ratelimited("%s failed on smd_pkt_dev id:%d - subsystem_get failed for %s\n",
+ __func__, smd_pkt_devp->i, peripheral);
+				/*
+				 * Sleep in order to reduce the frequency of
+				 * retries by user-space modules and to avoid
+				 * a possible watchdog bite.
+				 */
+ msleep(open_wait_rem);
+ goto release_pd;
+ }
+ }
+
+		/*
+		 * Wait for the modem SMSM to be initialized so the SMD
+		 * LOOPBACK channel can be allocated at the modem. Since
+		 * the wait needs to be done at most once, using msleep()
+		 * does not degrade performance.
+		 */
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) {
+ if (!is_modem_smsm_inited())
+ msleep(5000);
+ smsm_change_state(SMSM_APPS_STATE,
+ 0, SMSM_SMD_LOOPBACK);
+ msleep(100);
+ }
+
+ /*
+ * Wait for a packet channel to be allocated so we know
+ * the modem is ready enough.
+ */
+ if (open_wait_rem) {
+ r = wait_for_completion_interruptible_timeout(
+ &smd_pkt_devp->ch_allocated,
+ msecs_to_jiffies(open_wait_rem));
+ if (r >= 0)
+ open_wait_rem = jiffies_to_msecs(r);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ if (r == -ERESTARTSYS) {
+ pr_info_ratelimited("%s: wait on smd_pkt_dev id:%d allocation interrupted\n",
+ __func__, smd_pkt_devp->i);
+ goto release_pil;
+ }
+ if (r < 0) {
+ pr_err_ratelimited("%s: wait on smd_pkt_dev id:%d allocation failed rc:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ goto release_pil;
+ }
+ }
+
+ r = smd_named_open_on_edge(smd_pkt_devp->ch_name,
+ smd_pkt_devp->edge,
+ &smd_pkt_devp->ch,
+ smd_pkt_devp,
+ ch_notify);
+ if (r < 0) {
+ pr_err_ratelimited("%s: %s open failed %d\n", __func__,
+ smd_pkt_devp->ch_name, r);
+ goto release_pil;
+ }
+
+ open_wait_rem = max_t(unsigned, 2000, open_wait_rem);
+ r = wait_event_interruptible_timeout(
+ smd_pkt_devp->ch_opened_wait_queue,
+ smd_pkt_devp->is_open,
+ msecs_to_jiffies(open_wait_rem));
+ if (r == 0)
+ r = -ETIMEDOUT;
+
+ if (r < 0) {
+ /* close the ch to sync smd's state with smd_pkt */
+ smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = NULL;
+ }
+
+ if (r == -ERESTARTSYS) {
+ pr_info_ratelimited("%s: wait on smd_pkt_dev id:%d OPEN interrupted\n",
+ __func__, smd_pkt_devp->i);
+ } else if (r < 0) {
+ pr_err_ratelimited("%s: wait on smd_pkt_dev id:%d OPEN event failed rc:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ } else if (!smd_pkt_devp->is_open) {
+ pr_err_ratelimited("%s: Invalid OPEN event on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ r = -ENODEV;
+ } else {
+ smd_disable_read_intr(smd_pkt_devp->ch);
+ smd_pkt_devp->ch_size =
+ smd_write_avail(smd_pkt_devp->ch);
+ r = 0;
+ smd_pkt_devp->ref_cnt++;
+ D_STATUS("Finished %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ }
+ } else {
+ smd_pkt_devp->ref_cnt++;
+ }
+release_pil:
+ if (peripheral && (r < 0)) {
+ subsystem_put(smd_pkt_devp->pil);
+ smd_pkt_devp->pil = NULL;
+ }
+
+release_pd:
+ if (r < 0)
+ smd_pkt_remove_driver(smd_pkt_devp);
+out:
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return r;
+}
+
+int smd_pkt_release(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp = file->private_data;
+ unsigned long flags;
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on a NULL device\n", __func__);
+ return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ mutex_lock(&smd_pkt_devp->rx_lock);
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (smd_pkt_devp->ref_cnt > 0)
+ smd_pkt_devp->ref_cnt--;
+
+ if (smd_pkt_devp->ch != 0 && smd_pkt_devp->ref_cnt == 0) {
+ clean_and_signal(smd_pkt_devp);
+ r = smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = 0;
+ smd_pkt_devp->blocking_write = 0;
+ smd_pkt_devp->poll_mode = 0;
+ smd_pkt_remove_driver(smd_pkt_devp);
+ if (smd_pkt_devp->pil)
+ subsystem_put(smd_pkt_devp->pil);
+ smd_pkt_devp->has_reset = 0;
+ smd_pkt_devp->do_reset_notification = 0;
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->ws_locked) {
+ __pm_relax(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 0;
+ }
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ }
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ if (flush_work(&smd_pkt_devp->packet_arrival_work))
+ D_STATUS("%s: Flushed work for smd_pkt_dev id:%d\n", __func__,
+ smd_pkt_devp->i);
+
+ D_STATUS("Finished %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+
+ return r;
+}
+
+static const struct file_operations smd_pkt_fops = {
+ .owner = THIS_MODULE,
+ .open = smd_pkt_open,
+ .release = smd_pkt_release,
+ .read = smd_pkt_read,
+ .write = smd_pkt_write,
+ .poll = smd_pkt_poll,
+ .unlocked_ioctl = smd_pkt_ioctl,
+ .compat_ioctl = smd_pkt_ioctl,
+};
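+
+/*
+ * Minimal userspace usage sketch (hypothetical device node name; the
+ * real names come from the qcom,smdpkt-dev-name devicetree property).
+ * Each write() submits one SMD packet and each read() returns one
+ * complete packet:
+ *
+ *	int fd = open("/dev/smdpkt_loopback", O_RDWR);
+ *	write(fd, req, req_len);
+ *	read(fd, resp, sizeof(resp));
+ *	close(fd);
+ */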
+
+static int smd_pkt_init_add_device(struct smd_pkt_dev *smd_pkt_devp, int i)
+{
+ int r = 0;
+
+ smd_pkt_devp->i = i;
+
+ init_waitqueue_head(&smd_pkt_devp->ch_read_wait_queue);
+ init_waitqueue_head(&smd_pkt_devp->ch_write_wait_queue);
+ smd_pkt_devp->is_open = 0;
+ smd_pkt_devp->poll_mode = 0;
+ smd_pkt_devp->ws_locked = 0;
+ init_waitqueue_head(&smd_pkt_devp->ch_opened_wait_queue);
+
+ spin_lock_init(&smd_pkt_devp->pa_spinlock);
+ mutex_init(&smd_pkt_devp->ch_lock);
+ mutex_init(&smd_pkt_devp->rx_lock);
+ mutex_init(&smd_pkt_devp->tx_lock);
+ wakeup_source_init(&smd_pkt_devp->pa_ws, smd_pkt_devp->dev_name);
+ INIT_WORK(&smd_pkt_devp->packet_arrival_work, packet_arrival_worker);
+ init_completion(&smd_pkt_devp->ch_allocated);
+
+ cdev_init(&smd_pkt_devp->cdev, &smd_pkt_fops);
+ smd_pkt_devp->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smd_pkt_devp->cdev, (smd_pkt_number + i), 1);
+	if (IS_ERR_VALUE(r)) {
+		pr_err("%s: cdev_add() failed for smd_pkt_dev id:%d ret:%i\n",
+			__func__, i, r);
+		wakeup_source_trash(&smd_pkt_devp->pa_ws);
+		return r;
+	}
+
+ smd_pkt_devp->devicep =
+ device_create(smd_pkt_classp,
+ NULL,
+ (smd_pkt_number + i),
+ NULL,
+ smd_pkt_devp->dev_name);
+
+ if (IS_ERR_OR_NULL(smd_pkt_devp->devicep)) {
+ pr_err("%s: device_create() failed for smd_pkt_dev id:%d\n",
+ __func__, i);
+ r = -ENOMEM;
+ cdev_del(&smd_pkt_devp->cdev);
+ wakeup_source_trash(&smd_pkt_devp->pa_ws);
+ return r;
+ }
+ if (device_create_file(smd_pkt_devp->devicep,
+ &dev_attr_open_timeout))
+ pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n",
+ __func__, i);
+
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) {
+ if (device_create_file(smd_pkt_devp->devicep,
+ &dev_attr_loopback_edge))
+ pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n",
+ __func__, i);
+ }
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_add(&smd_pkt_devp->dev_list, &smd_pkt_dev_list);
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ return r;
+}
+
+static void smd_pkt_core_deinit(void)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ struct smd_pkt_dev *index;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry_safe(smd_pkt_devp, index, &smd_pkt_dev_list,
+ dev_list) {
+ cdev_del(&smd_pkt_devp->cdev);
+ list_del(&smd_pkt_devp->dev_list);
+ device_destroy(smd_pkt_classp,
+ MKDEV(MAJOR(smd_pkt_number), smd_pkt_devp->i));
+ kfree(smd_pkt_devp);
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ if (!IS_ERR_OR_NULL(smd_pkt_classp))
+ class_destroy(smd_pkt_classp);
+
+ unregister_chrdev_region(MAJOR(smd_pkt_number), num_smd_pkt_ports);
+}
+
+static int smd_pkt_alloc_chrdev_region(void)
+{
+ int r = alloc_chrdev_region(&smd_pkt_number,
+ 0,
+ num_smd_pkt_ports,
+ DEVICE_NAME);
+
+ if (IS_ERR_VALUE(r)) {
+ pr_err("%s: alloc_chrdev_region() failed ret:%i\n",
+ __func__, r);
+ return r;
+ }
+
+ smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(smd_pkt_classp)) {
+ pr_err("%s: class_create() failed ENOMEM\n", __func__);
+ r = -ENOMEM;
+ unregister_chrdev_region(MAJOR(smd_pkt_number),
+ num_smd_pkt_ports);
+ return r;
+ }
+
+ return 0;
+}
+
+static int parse_smdpkt_devicetree(struct device_node *node,
+ struct smd_pkt_dev *smd_pkt_devp)
+{
+ int edge;
+ char *key;
+ const char *ch_name;
+ const char *dev_name;
+ const char *remote_ss;
+
+ key = "qcom,smdpkt-remote";
+ remote_ss = of_get_property(node, key, NULL);
+ if (!remote_ss)
+ goto error;
+
+ edge = smd_remote_ss_to_edge(remote_ss);
+ if (edge < 0)
+ goto error;
+
+ smd_pkt_devp->edge = edge;
+ D_STATUS("%s: %s = %d", __func__, key, edge);
+
+ key = "qcom,smdpkt-port-name";
+ ch_name = of_get_property(node, key, NULL);
+ if (!ch_name)
+ goto error;
+
+ strlcpy(smd_pkt_devp->ch_name, ch_name, SMD_MAX_CH_NAME_LEN);
+ D_STATUS("%s ch_name = %s\n", __func__, ch_name);
+
+ key = "qcom,smdpkt-dev-name";
+ dev_name = of_get_property(node, key, NULL);
+ if (!dev_name)
+ goto error;
+
+ strlcpy(smd_pkt_devp->dev_name, dev_name, SMD_MAX_CH_NAME_LEN);
+ D_STATUS("%s dev_name = %s\n", __func__, dev_name);
+
+ return 0;
+
+error:
+ pr_err("%s: missing key: %s\n", __func__, key);
+ return -ENODEV;
+}
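+
+/*
+ * Illustrative devicetree fragment matching the keys parsed above (a
+ * sketch only; node and property values are examples, with the LOOPBACK
+ * port shown):
+ *
+ *	qcom,smdpkt {
+ *		compatible = "qcom,smdpkt";
+ *
+ *		qcom,smdpkt-loopback {
+ *			qcom,smdpkt-remote = "modem";
+ *			qcom,smdpkt-port-name = "LOOPBACK";
+ *			qcom,smdpkt-dev-name = "smdpkt_loopback";
+ *		};
+ *	};
+ */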
+
+static int smd_pkt_devicetree_init(struct platform_device *pdev)
+{
+ int ret;
+ int i = 0;
+ struct device_node *node;
+ struct smd_pkt_dev *smd_pkt_devp;
+ int subnode_num = 0;
+
+ for_each_child_of_node(pdev->dev.of_node, node)
+ ++subnode_num;
+
+ num_smd_pkt_ports = subnode_num;
+
+ ret = smd_pkt_alloc_chrdev_region();
+ if (ret) {
+ pr_err("%s: smd_pkt_alloc_chrdev_region() failed ret:%i\n",
+ __func__, ret);
+ return ret;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, node) {
+ smd_pkt_devp = kzalloc(sizeof(struct smd_pkt_dev), GFP_KERNEL);
+		if (!smd_pkt_devp) {
+ pr_err("%s: kzalloc() failed for smd_pkt_dev id:%d\n",
+ __func__, i);
+ ret = -ENOMEM;
+ goto error_destroy;
+ }
+
+ ret = parse_smdpkt_devicetree(node, smd_pkt_devp);
+ if (ret) {
+ pr_err(" failed to parse_smdpkt_devicetree %d\n", i);
+ kfree(smd_pkt_devp);
+ goto error_destroy;
+ }
+
+ ret = smd_pkt_init_add_device(smd_pkt_devp, i);
+ if (ret < 0) {
+ pr_err("add device failed for idx:%d ret=%d\n", i, ret);
+ kfree(smd_pkt_devp);
+ goto error_destroy;
+ }
+ i++;
+ }
+
+ INIT_DELAYED_WORK(&loopback_work, loopback_probe_worker);
+
+ D_STATUS("SMD Packet Port Driver Initialized.\n");
+ return 0;
+
+error_destroy:
+ smd_pkt_core_deinit();
+ return ret;
+}
+
+static int msm_smd_pkt_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (pdev) {
+ if (pdev->dev.of_node) {
+ D_STATUS("%s device tree implementation\n", __func__);
+ ret = smd_pkt_devicetree_init(pdev);
+ if (ret)
+ pr_err("%s: device tree init failed\n",
+ __func__);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id msm_smd_pkt_match_table[] = {
+ { .compatible = "qcom,smdpkt" },
+ {},
+};
+
+static struct platform_driver msm_smd_pkt_driver = {
+ .probe = msm_smd_pkt_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smd_pkt_match_table,
+ },
+};
+
+static int __init smd_pkt_init(void)
+{
+ int rc;
+
+ INIT_LIST_HEAD(&smd_pkt_dev_list);
+ INIT_LIST_HEAD(&smd_pkt_driver_list);
+ rc = platform_driver_register(&msm_smd_pkt_driver);
+ if (rc) {
+ pr_err("%s: msm_smd_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ smd_pkt_ilctxt = ipc_log_context_create(SMD_PKT_IPC_LOG_PAGE_CNT,
+ "smd_pkt", 0);
+ return 0;
+}
+
+static void __exit smd_pkt_cleanup(void)
+{
+ smd_pkt_core_deinit();
+}
+
+module_init(smd_pkt_init);
+module_exit(smd_pkt_cleanup);
+
+MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
new file mode 100644
index 000000000000..8161d77ca194
--- /dev/null
+++ b/drivers/char/rdbg.c
@@ -0,0 +1,1173 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/smsm.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+
+#define SMP2P_NUM_PROCS 16
+#define MAX_RETRIES 20
+
+#define SM_VERSION 1
+#define SM_BLOCKSIZE 128
+
+#define SMQ_MAGIC_INIT 0xFF00FF00
+#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1)
+#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2)
+
+enum SMQ_STATUS {
+ SMQ_SUCCESS = 0,
+ SMQ_ENOMEMORY = -1,
+ SMQ_EBADPARM = -2,
+ SMQ_UNDERFLOW = -3,
+ SMQ_OVERFLOW = -4
+};
+
+enum smq_type {
+ PRODUCER = 1,
+ CONSUMER = 2,
+ INVALID = 3
+};
+
+struct smq_block_map {
+ uint32_t index_read;
+ uint32_t num_blocks;
+ uint8_t *map;
+};
+
+struct smq_node {
+ uint16_t index_block;
+ uint16_t num_blocks;
+} __attribute__ ((__packed__));
+
+struct smq_hdr {
+ uint8_t producer_version;
+ uint8_t consumer_version;
+} __attribute__ ((__packed__));
+
+struct smq_out_state {
+ uint32_t init;
+ uint32_t index_check_queue_for_reset;
+ uint32_t index_sent_write;
+ uint32_t index_free_read;
+} __attribute__ ((__packed__));
+
+struct smq_out {
+ struct smq_out_state s;
+ struct smq_node sent[1];
+};
+
+struct smq_in_state {
+ uint32_t init;
+ uint32_t index_check_queue_for_reset_ack;
+ uint32_t index_sent_read;
+ uint32_t index_free_write;
+} __attribute__ ((__packed__));
+
+struct smq_in {
+ struct smq_in_state s;
+ struct smq_node free[1];
+};
+
+struct smq {
+ struct smq_hdr *hdr;
+ struct smq_out *out;
+ struct smq_in *in;
+ uint8_t *blocks;
+ uint32_t num_blocks;
+ struct mutex *lock;
+ uint32_t initialized;
+ struct smq_block_map block_map;
+ enum smq_type type;
+};
+
+struct gpio_info {
+ int gpio_base_id;
+ int irq_base_id;
+};
+
+struct rdbg_data {
+ struct device *device;
+ struct completion work;
+ struct gpio_info in;
+ struct gpio_info out;
+ bool device_initialized;
+ int gpio_out_offset;
+ bool device_opened;
+ void *smem_addr;
+ size_t smem_size;
+ struct smq producer_smrb;
+ struct smq consumer_smrb;
+ struct mutex write_mutex;
+};
+
+struct rdbg_device {
+ struct cdev cdev;
+ struct class *class;
+ dev_t dev_no;
+ int num_devices;
+ struct rdbg_data *rdbg_data;
+};
+
+static struct rdbg_device g_rdbg_instance = {
+	.num_devices = SMP2P_NUM_PROCS,
+};
+
+struct processor_specific_info {
+ char *name;
+ unsigned int smem_buffer_addr;
+ size_t smem_buffer_size;
+};
+
+static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
+ {0}, /*APPS*/
+ {"rdbg_modem", 0, 0}, /*MODEM*/
+ {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/
+ {0}, /*SMP2P_RESERVED_PROC_1*/
+ {"rdbg_wcnss", 0, 0}, /*WCNSS*/
+ {"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/
+ {NULL}, /*SMP2P_POWER_PROC*/
+ {NULL}, /*SMP2P_TZ_PROC*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL} /*SMP2P_REMOTE_MOCK_PROC*/
+};
+
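+/*
+ * Descriptive note on the block-map encoding: map[i] == 0 marks block i
+ * as free. For an n-block allocation starting at index "mark", the map
+ * entries are written as n, n-1, ..., 1, so smq_blockmap_put() can
+ * recover the run length from the first entry alone and the scan in
+ * smq_blockmap_get() can skip a whole allocated run in one step.
+ */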
+static int smq_blockmap_get(struct smq_block_map *block_map,
+ uint32_t *block_index, uint32_t n)
+{
+ uint32_t start;
+ uint32_t mark = 0;
+ uint32_t found = 0;
+ uint32_t i = 0;
+
+ start = block_map->index_read;
+
+ if (n == 1) {
+ do {
+ if (!block_map->map[block_map->index_read]) {
+ *block_index = block_map->index_read;
+ block_map->map[block_map->index_read] = 1;
+ block_map->index_read++;
+ block_map->index_read %= block_map->num_blocks;
+ return SMQ_SUCCESS;
+ }
+ block_map->index_read++;
+ } while (start != (block_map->index_read %=
+ block_map->num_blocks));
+ } else {
+ mark = block_map->num_blocks;
+
+ do {
+ if (!block_map->map[block_map->index_read]) {
+ if (mark > block_map->index_read) {
+ mark = block_map->index_read;
+ start = block_map->index_read;
+ found = 0;
+ }
+
+ found++;
+ if (found == n) {
+ *block_index = mark;
+ for (i = 0; i < n; i++)
+ block_map->map[mark + i] =
+ (uint8_t)(n - i);
+ block_map->index_read += block_map->map
+ [block_map->index_read] - 1;
+ return SMQ_SUCCESS;
+ }
+ } else {
+ found = 0;
+ block_map->index_read += block_map->map
+ [block_map->index_read] - 1;
+ mark = block_map->num_blocks;
+ }
+ block_map->index_read++;
+ } while (start != (block_map->index_read %=
+ block_map->num_blocks));
+ }
+
+ return SMQ_ENOMEMORY;
+}
+
+static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
+{
+ uint32_t num_blocks = block_map->map[i];
+
+ while (num_blocks--) {
+ block_map->map[i] = 0;
+ i++;
+ }
+}
+
+static int smq_blockmap_reset(struct smq_block_map *block_map)
+{
+ if (!block_map->map)
+ return SMQ_ENOMEMORY;
+ memset(block_map->map, 0, block_map->num_blocks + 1);
+ block_map->index_read = 0;
+
+ return SMQ_SUCCESS;
+}
+
+static int smq_blockmap_ctor(struct smq_block_map *block_map,
+ uint32_t num_blocks)
+{
+ if (num_blocks <= 1)
+ return SMQ_ENOMEMORY;
+
+ block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
+ if (!block_map->map)
+ return SMQ_ENOMEMORY;
+
+ block_map->num_blocks = num_blocks - 1;
+ smq_blockmap_reset(block_map);
+
+ return SMQ_SUCCESS;
+}
+
+static void smq_blockmap_dtor(struct smq_block_map *block_map)
+{
+ kfree(block_map->map);
+ block_map->map = NULL;
+}
+
+static int smq_free(struct smq *smq, void *data)
+{
+ struct smq_node node;
+ uint32_t index_block;
+ int err = SMQ_SUCCESS;
+
+ if (smq->lock)
+ mutex_lock(smq->lock);
+
+ if ((smq->hdr->producer_version != SM_VERSION) &&
+ (smq->out->s.init != SMQ_MAGIC_PRODUCER)) {
+ err = SMQ_UNDERFLOW;
+ goto bail;
+ }
+
+ index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
+ if (index_block >= smq->num_blocks) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ node.index_block = (uint16_t)index_block;
+ node.num_blocks = 0;
+ *((struct smq_node *)(smq->in->free + smq->in->
+ s.index_free_write)) = node;
+
+ smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
+ % smq->num_blocks;
+
+bail:
+ if (smq->lock)
+ mutex_unlock(smq->lock);
+ return err;
+}
+
+static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
+{
+ struct smq_node *node;
+ int err = SMQ_SUCCESS;
+ int more = 0;
+
+ if ((smq->hdr->producer_version != SM_VERSION) &&
+ (smq->out->s.init != SMQ_MAGIC_PRODUCER))
+ return SMQ_UNDERFLOW;
+
+ if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
+ err = SMQ_UNDERFLOW;
+ goto bail;
+ }
+
+ node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
+ if (node->index_block >= smq->num_blocks) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
+ % smq->num_blocks;
+
+ *pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
+ *pnsize = SM_BLOCKSIZE * node->num_blocks;
+
+	/* Ensure that reads and writes reach shared memory when they
+	 * complete rather than lingering in a cache, and that they are
+	 * not reordered, since the queue is shared between two cores.
+	 */
+ rmb();
+ if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
+ more = 1;
+
+bail:
+ *pbmore = more;
+ return err;
+}
+
+static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
+{
+ void *pv = 0;
+ int num_blocks;
+ uint32_t index_block = 0;
+ int err = SMQ_SUCCESS;
+ struct smq_node *node = NULL;
+
+ mutex_lock(smq->lock);
+
+ if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) &&
+ (smq->hdr->consumer_version == SM_VERSION)) {
+ if (smq->out->s.index_check_queue_for_reset ==
+ smq->in->s.index_check_queue_for_reset_ack) {
+ while (smq->out->s.index_free_read !=
+ smq->in->s.index_free_write) {
+ node = (struct smq_node *)(
+ smq->in->free +
+ smq->out->s.index_free_read);
+ if (node->index_block >= smq->num_blocks) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ smq->out->s.index_free_read =
+ (smq->out->s.index_free_read + 1)
+ % smq->num_blocks;
+
+ smq_blockmap_put(&smq->block_map,
+ node->index_block);
+				/* Ensure that reads and writes reach
+				 * shared memory when they complete
+				 * rather than lingering in a cache,
+				 * and that they are not reordered,
+				 * since the queue is shared between
+				 * two cores.
+				 */
+ rmb();
+ }
+ }
+ }
+
+ num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
+ err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
+ if (err != SMQ_SUCCESS)
+ goto bail;
+
+ pv = smq->blocks + (SM_BLOCKSIZE * index_block);
+
+ err = copy_from_user((void *)pv, (void *)pcb, nsize);
+ if (err != 0)
+ goto bail;
+
+ ((struct smq_node *)(smq->out->sent +
+ smq->out->s.index_sent_write))->index_block
+ = (uint16_t)index_block;
+ ((struct smq_node *)(smq->out->sent +
+ smq->out->s.index_sent_write))->num_blocks
+ = (uint16_t)num_blocks;
+
+ smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
+ % smq->num_blocks;
+
+bail:
+ if (err != SMQ_SUCCESS) {
+ if (pv)
+ smq_blockmap_put(&smq->block_map, index_block);
+ }
+ mutex_unlock(smq->lock);
+ return err;
+}
+
+static int smq_reset_producer_queue_internal(struct smq *smq,
+ uint32_t reset_num)
+{
+ int retval = 0;
+ uint32_t i;
+
+ if (smq->type != PRODUCER)
+ goto bail;
+
+ mutex_lock(smq->lock);
+ if (smq->out->s.index_check_queue_for_reset != reset_num) {
+ smq->out->s.index_check_queue_for_reset = reset_num;
+ for (i = 0; i < smq->num_blocks; i++)
+ (smq->out->sent + i)->index_block = 0xFFFF;
+
+ smq_blockmap_reset(&smq->block_map);
+ smq->out->s.index_sent_write = 0;
+ smq->out->s.index_free_read = 0;
+ retval = 1;
+ }
+ mutex_unlock(smq->lock);
+
+bail:
+ return retval;
+}
+
+static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
+{
+ int retval = 0;
+ uint32_t reset_num, i;
+
+ if ((p_cons->type != CONSUMER) ||
+ (p_cons->out->s.init != SMQ_MAGIC_PRODUCER) ||
+ (p_cons->hdr->producer_version != SM_VERSION))
+ goto bail;
+
+ reset_num = p_cons->out->s.index_check_queue_for_reset;
+ if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
+ p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
+ for (i = 0; i < p_cons->num_blocks; i++)
+ (p_cons->in->free + i)->index_block = 0xFFFF;
+
+ p_cons->in->s.index_sent_read = 0;
+ p_cons->in->s.index_free_write = 0;
+
+ retval = smq_reset_producer_queue_internal(p_prod, reset_num);
+ }
+
+bail:
+ return retval;
+}
+
+static int check_subsystem_debug_enabled(void *base_addr, int size)
+{
+ int num_blocks;
+ uint8_t *pb_orig;
+ uint8_t *pb;
+ struct smq smq;
+ int err = 0;
+
+ pb = pb_orig = (uint8_t *)base_addr;
+ pb += sizeof(struct smq_hdr);
+ pb = PTR_ALIGN(pb, 8);
+ size -= pb - (uint8_t *)pb_orig;
+ num_blocks = (int)((size - sizeof(struct smq_out_state) -
+ sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+ sizeof(struct smq_node) * 2));
+ if (num_blocks <= 0) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ pb += num_blocks * SM_BLOCKSIZE;
+ smq.out = (struct smq_out *)pb;
+ pb += sizeof(struct smq_out_state) + (num_blocks *
+ sizeof(struct smq_node));
+ smq.in = (struct smq_in *)pb;
+
+ if (smq.in->s.init != SMQ_MAGIC_CONSUMER) {
+ pr_err("%s, smq in consumer not initialized", __func__);
+ err = -ECOMM;
+ }
+
+bail:
+ return err;
+}
+
+static void smq_dtor(struct smq *smq)
+{
+ if (smq->initialized == SMQ_MAGIC_INIT) {
+ switch (smq->type) {
+ case PRODUCER:
+ smq->out->s.init = 0;
+ smq_blockmap_dtor(&smq->block_map);
+ break;
+ case CONSUMER:
+ smq->in->s.init = 0;
+ break;
+ default:
+ case INVALID:
+ break;
+ }
+
+ smq->initialized = 0;
+ }
+}
+
+/*
+ * The shared memory is used as a circular ring buffer in each direction.
+ * Thus we have a bi-directional shared memory channel between the AP
+ * and a subsystem. We call this SMQ. Each memory channel contains a header,
+ * data and a control mechanism that is used to synchronize read and write
+ * of data between the AP and the remote subsystem.
+ *
+ * Overall SMQ memory view:
+ *
+ * +------------------------------------------------+
+ * | SMEM buffer |
+ * |-----------------------+------------------------|
+ * |Producer: LA | Producer: Remote |
+ * |Consumer: Remote | subsystem |
+ * | subsystem | Consumer: LA |
+ * | | |
+ * | Producer| Consumer|
+ * +-----------------------+------------------------+
+ * | |
+ * | |
+ * | +--------------------------------------+
+ * | |
+ * | |
+ * v v
+ * +--------------------------------------------------------------+
+ * | Header | Data | Control |
+ * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ * | | b | b | b | | S |n |n | | S |n |n | |
+ * | Producer | l | l | l | | M |o |o | | M |o |o | |
+ * | Ver | o | o | o | | Q |d |d | | Q |d |d | |
+ * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... |
+ * | | k | k | k | | O | | | | I | | | |
+ * | Consumer | | | | | u |0 |1 | | n |0 |1 | |
+ * | Ver | 0 | 1 | 2 | | t | | | | | | | |
+ * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ * | |
+ * + |
+ * |
+ * +------------------------+
+ * |
+ * v
+ * +----+----+----+----+
+ * | SMQ Nodes |
+ * |----|----|----|----|
+ * Node # | 0 | 1 | 2 | ...|
+ * |----|----|----|----|
+ * Starting Block Index # | 0 | 3 | 8 | ...|
+ * |----|----|----|----|
+ * # of blocks | 3 | 5 | 1 | ...|
+ * +----+----+----+----+
+ *
+ * Header: Contains version numbers for software compatibility to ensure
+ * that both producers and consumers on the AP and subsystems know how to
+ * read from and write to the queue.
+ * Both the producer and consumer versions are 1.
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 1 byte | Producer Version |
+ * +---------+-------------------+
+ * | 1 byte | Consumer Version |
+ * +---------+-------------------+
+ *
+ * Data: The data portion contains multiple blocks [0..N] of a fixed size.
+ * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+ * Payload sent from the debug agent app is split (if necessary) and placed
+ * in these blocks. The first data block is placed at the next 8 byte aligned
+ * address after the header.
+ *
+ * The number of blocks for a given SMEM allocation is derived as follows:
+ * Number of Blocks = (Total Size - Alignment - Size of Header
+ *                     - Size of SMQOut state - Size of SMQIn state)
+ *                    / (SM_BLOCKSIZE + 2 * Size of SMQ Node)
+ *
+ * The producer maintains a private block map of each of these blocks to
+ * determine which of these blocks in the queue is available and which are free.
+ *
+ * Control:
+ * The control portion contains a list of nodes [0..N] where N is number
+ * of available data blocks. Each node identifies the data
+ * block indexes that contain a particular debug message to be transferred,
+ * and the number of blocks it took to hold the contents of the message.
+ *
+ * Each node has the following structure:
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 2 bytes |Starting Block Index|
+ * +---------+-------------------+
+ * | 2 bytes |Number of Blocks |
+ * +---------+-------------------+
+ *
+ * The producer and the consumer update different parts of the control channel
+ * (SMQOut / SMQIn) respectively. Each of these control data structures contains
+ * information about the last node that was written / read, and the actual nodes
+ * that were written/read.
+ *
+ * SMQOut Structure (R/W by producer, R by consumer):
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset |
+ * +---------+-------------------+
+ * | 4 bytes | Last Sent Index |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Read |
+ * +---------+-------------------+
+ *
+ * SMQIn Structure (R/W by consumer, R by producer):
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset ACK |
+ * +---------+-------------------+
+ * | 4 bytes | Last Read Index |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Write |
+ * +---------+-------------------+
+ *
+ * Magic Init Number:
+ * Both SMQ Out and SMQ In initialize this field with a predefined magic
+ * number so as to make sure that both the consumer and producer blocks
+ * have fully initialized and have valid data in the shared memory control area.
+ * Producer Magic #: 0xFF00FF01
+ * Consumer Magic #: 0xFF00FF02
+ */
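+/*
+ * Worked example (a sketch; sizes assume the 16 KB ADSP/CDSP SMEM
+ * allocation in proc_info above, halved per direction by
+ * initialize_smq(), and an 8-byte-aligned base address):
+ *
+ *	size per queue                  = 16384 / 2 = 8192 bytes
+ *	after the 8-byte-aligned header = 8184 bytes
+ *	Number of Blocks = (8184 - 16 - 16) / (128 + 2 * 4) = 59
+ *
+ * where 16 bytes each are the SMQOut/SMQIn state structures, 128 is
+ * SM_BLOCKSIZE and 4 bytes is one smq_node; every block needs one
+ * "sent" node and one "free" node, hence the 2 * 4 in the divisor.
+ */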
+static int smq_ctor(struct smq *smq, void *base_addr, int size,
+ enum smq_type type, struct mutex *lock_ptr)
+{
+ int num_blocks;
+ uint8_t *pb_orig;
+ uint8_t *pb;
+ uint32_t i;
+ int err;
+
+ if (smq->initialized == SMQ_MAGIC_INIT) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ if (!base_addr || !size) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ if (type == PRODUCER)
+ smq->lock = lock_ptr;
+
+ pb_orig = (uint8_t *)base_addr;
+ smq->hdr = (struct smq_hdr *)pb_orig;
+ pb = pb_orig;
+ pb += sizeof(struct smq_hdr);
+ pb = PTR_ALIGN(pb, 8);
+ size -= pb - (uint8_t *)pb_orig;
+ num_blocks = (int)((size - sizeof(struct smq_out_state) -
+ sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+ sizeof(struct smq_node) * 2));
+ if (num_blocks <= 0) {
+ err = SMQ_ENOMEMORY;
+ goto bail;
+ }
+
+ smq->blocks = pb;
+ smq->num_blocks = num_blocks;
+ pb += num_blocks * SM_BLOCKSIZE;
+ smq->out = (struct smq_out *)pb;
+ pb += sizeof(struct smq_out_state) + (num_blocks *
+ sizeof(struct smq_node));
+ smq->in = (struct smq_in *)pb;
+ smq->type = type;
+ if (type == PRODUCER) {
+ smq->hdr->producer_version = SM_VERSION;
+ for (i = 0; i < smq->num_blocks; i++)
+ (smq->out->sent + i)->index_block = 0xFFFF;
+
+ err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
+ if (err != SMQ_SUCCESS)
+ goto bail;
+
+ smq->out->s.index_sent_write = 0;
+ smq->out->s.index_free_read = 0;
+ if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+ smq->out->s.index_check_queue_for_reset += 1;
+ } else {
+ smq->out->s.index_check_queue_for_reset = 1;
+ smq->out->s.init = SMQ_MAGIC_PRODUCER;
+ }
+ } else {
+ smq->hdr->consumer_version = SM_VERSION;
+ for (i = 0; i < smq->num_blocks; i++)
+ (smq->in->free + i)->index_block = 0xFFFF;
+
+ smq->in->s.index_sent_read = 0;
+ smq->in->s.index_free_write = 0;
+ if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+ smq->in->s.index_check_queue_for_reset_ack =
+ smq->out->s.index_check_queue_for_reset;
+ } else {
+ smq->in->s.index_check_queue_for_reset_ack = 0;
+ }
+
+ smq->in->s.init = SMQ_MAGIC_CONSUMER;
+ }
+ smq->initialized = SMQ_MAGIC_INIT;
+ err = SMQ_SUCCESS;
+
+bail:
+ return err;
+}
+
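+/*
+ * Note: each call toggles the level of the next of the 32 outbound SMP2P
+ * GPIO lines in round-robin order; it is the edge, not the absolute
+ * value, that interrupts the remote subsystem (the inbound side likewise
+ * requests both rising and falling triggers in rdbg_open()).
+ */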
+static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
+{
+ int offset = rdbgdata->gpio_out_offset;
+ int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
+
+ gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
+ rdbgdata->gpio_out_offset = (offset + 1) % 32;
+
+ dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
+ __func__, val);
+}
+
+static irqreturn_t on_interrupt_from(int irq, void *ptr)
+{
+ struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
+
+ dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
+ __func__, irq);
+
+ complete(&(rdbgdata->work));
+ return IRQ_HANDLED;
+}
+
+static int initialize_smq(struct rdbg_data *rdbgdata)
+{
+ int err = 0;
+ unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
+
+ smem_consumer_buffer += (rdbgdata->smem_size/2);
+
+ if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
+ ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
+ dev_err(rdbgdata->device, "%s: smq producer allocation failed",
+ __func__);
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
+ ((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
+ dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed",
+ __func__);
+ err = -ENOMEM;
+ }
+
+bail:
+ return err;
+}
+
+static int rdbg_open(struct inode *inode, struct file *filp)
+{
+ int device_id = -1;
+ struct rdbg_device *device = &g_rdbg_instance;
+ struct rdbg_data *rdbgdata = NULL;
+ int err = 0;
+
+ if (!inode || !device->rdbg_data) {
+ pr_err("Memory not allocated yet");
+ err = -ENODEV;
+ goto bail;
+ }
+
+ device_id = MINOR(inode->i_rdev);
+ rdbgdata = &device->rdbg_data[device_id];
+
+ if (rdbgdata->device_opened) {
+ dev_err(rdbgdata->device, "%s: Device already opened",
+ __func__);
+ err = -EEXIST;
+ goto bail;
+ }
+
+ rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
+ if (!rdbgdata->smem_size) {
+ dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ rdbgdata->smem_addr = smem_find(proc_info[device_id].smem_buffer_addr,
+ rdbgdata->smem_size, 0, SMEM_ANY_HOST_FLAG);
+ if (!rdbgdata->smem_addr) {
+ dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
+ __func__);
+ err = -ENOMEM;
+ goto bail;
+ }
+ dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d",
+ __func__, (unsigned long)rdbgdata->smem_addr,
+ (unsigned int)rdbgdata->smem_size);
+
+ if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
+ rdbgdata->smem_size/2)) {
+ dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
+ __func__, proc_info[device_id].name);
+ err = -ECOMM;
+ goto bail;
+ }
+
+ init_completion(&rdbgdata->work);
+
+ err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ proc_info[device_id].name,
+ (void *)&device->rdbg_data[device_id]);
+ if (err) {
+ dev_err(rdbgdata->device,
+ "%s: Failed to register interrupt.Err=%d,irqid=%d.",
+ __func__, err, rdbgdata->in.irq_base_id);
+ goto irq_bail;
+ }
+
+ err = enable_irq_wake(rdbgdata->in.irq_base_id);
+ if (err < 0) {
+ dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
+ err);
+ err = 0;
+ }
+
+ mutex_init(&rdbgdata->write_mutex);
+
+ err = initialize_smq(rdbgdata);
+ if (err) {
+ dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
+ err);
+ goto smq_bail;
+ }
+
+ rdbgdata->device_opened = 1;
+
+ filp->private_data = (void *)rdbgdata;
+
+ return 0;
+
+smq_bail:
+ smq_dtor(&(rdbgdata->producer_smrb));
+ smq_dtor(&(rdbgdata->consumer_smrb));
+ mutex_destroy(&rdbgdata->write_mutex);
+irq_bail:
+ free_irq(rdbgdata->in.irq_base_id, (void *)
+ &device->rdbg_data[device_id]);
+bail:
+ return err;
+}
+
+static int rdbg_release(struct inode *inode, struct file *filp)
+{
+ int device_id = -1;
+ struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+ struct rdbg_data *rdbgdata = NULL;
+ int err = 0;
+
+ if (!inode || !rdbgdevice->rdbg_data) {
+ pr_err("Memory not allocated yet");
+ err = -ENODEV;
+ goto bail;
+ }
+
+ device_id = MINOR(inode->i_rdev);
+ rdbgdata = &rdbgdevice->rdbg_data[device_id];
+
+ if (rdbgdata->device_opened == 1) {
+ dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
+ proc_info[device_id].name);
+ rdbgdata->device_opened = 0;
+ complete(&(rdbgdata->work));
+ free_irq(rdbgdata->in.irq_base_id, (void *)
+ &rdbgdevice->rdbg_data[device_id]);
+ if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
+ smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+ producer_smrb));
+ if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
+ smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+ consumer_smrb));
+ mutex_destroy(&rdbgdata->write_mutex);
+ }
+
+ filp->private_data = NULL;
+
+bail:
+ return err;
+}
+
+static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *offset)
+{
+ int err = 0;
+ struct rdbg_data *rdbgdata = filp->private_data;
+ void *p_sent_buffer = NULL;
+ int nsize = 0;
+ int more = 0;
+
+ if (!rdbgdata) {
+ pr_err("Invalid argument");
+ err = -EINVAL;
+ goto bail;
+ }
+
+ dev_dbg(rdbgdata->device, "%s: In receive", __func__);
+ err = wait_for_completion_interruptible(&(rdbgdata->work));
+ if (err) {
+ dev_err(rdbgdata->device, "%s: Error in wait", __func__);
+ goto bail;
+ }
+
+ smq_check_queue_reset(&(rdbgdata->consumer_smrb),
+ &(rdbgdata->producer_smrb));
+	err = smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer,
+		&nsize, &more);
+	if (err != SMQ_SUCCESS) {
+		dev_err(rdbgdata->device, "%s: Error in smq_receive(). Err code = %d",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+
+ size = ((size < nsize) ? size : nsize);
+ err = copy_to_user(buf, p_sent_buffer, size);
+ if (err != 0) {
+ dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
+ __func__, err);
+ err = -ENODATA;
+ goto bail;
+ }
+
+ smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
+ err = size;
+ dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx",
+ __func__, (unsigned long) buf);
+
+bail:
+ return err;
+}
+
+static ssize_t rdbg_write(struct file *filp, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ int err = 0;
+ int num_retries = 0;
+ struct rdbg_data *rdbgdata = filp->private_data;
+
+ if (!rdbgdata) {
+ pr_err("Invalid argument");
+ err = -EINVAL;
+ goto bail;
+ }
+
+ do {
+ err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size);
+ dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.",
+ __func__, err);
+ } while (err != 0 && num_retries++ < MAX_RETRIES);
+
+ if (err != 0) {
+ err = -ECOMM;
+ goto bail;
+ }
+
+ send_interrupt_to_subsystem(rdbgdata);
+
+ err = size;
+
+bail:
+ return err;
+}
+
+
+static const struct file_operations rdbg_fops = {
+ .open = rdbg_open,
+ .read = rdbg_read,
+ .write = rdbg_write,
+ .release = rdbg_release,
+};
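+
+/*
+ * Minimal userspace usage sketch: device nodes are named after the
+ * proc_info table above (e.g. /dev/rdbg_adsp). write() queues a debug
+ * message and fires the outbound SMP2P interrupt; read() blocks until
+ * the subsystem interrupts back and one message is available:
+ *
+ *	int fd = open("/dev/rdbg_adsp", O_RDWR);
+ *	write(fd, cmd, cmd_len);
+ *	read(fd, resp, sizeof(resp));
+ *	close(fd);
+ */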
+
+static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
+{
+ struct device_node *node = NULL;
+ int cnt = 0;
+ int id = 0;
+
+ node = of_find_compatible_node(NULL, NULL, node_name);
+ if (node) {
+ cnt = of_gpio_count(node);
+ if (cnt && gpio_info_ptr) {
+ id = of_get_gpio(node, 0);
+ gpio_info_ptr->gpio_base_id = id;
+ gpio_info_ptr->irq_base_id = gpio_to_irq(id);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int __init rdbg_init(void)
+{
+ int err = 0;
+ struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+ int minor = 0;
+ int major = 0;
+ int minor_nodes_created = 0;
+
+ char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
+	/* +1 for the terminating NUL; "xx_out" is the longest suffix */
+	int max_len = strlen(rdbg_compatible_string) + strlen("xx_out") + 1;
+
+ char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);
+
+ if (!node_name) {
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ if (rdbgdevice->num_devices < 1 ||
+ rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
+ pr_err("rgdb: invalid num_devices");
+ err = -EDOM;
+ goto name_bail;
+ }
+
+ rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
+ sizeof(struct rdbg_data), GFP_KERNEL);
+ if (!rdbgdevice->rdbg_data) {
+ err = -ENOMEM;
+ goto name_bail;
+ }
+
+ err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
+ rdbgdevice->num_devices, "rdbgctl");
+ if (err) {
+ pr_err("Error in alloc_chrdev_region.");
+ goto data_bail;
+ }
+ major = MAJOR(rdbgdevice->dev_no);
+
+ cdev_init(&rdbgdevice->cdev, &rdbg_fops);
+ rdbgdevice->cdev.owner = THIS_MODULE;
+ err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
+ rdbgdevice->num_devices);
+ if (err) {
+ pr_err("Error in cdev_add");
+ goto chrdev_bail;
+ }
+
+ rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
+ if (IS_ERR(rdbgdevice->class)) {
+ err = PTR_ERR(rdbgdevice->class);
+ pr_err("Error in class_create");
+ goto cdev_bail;
+ }
+
+ for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+ if (!proc_info[minor].name)
+ continue;
+
+ if (snprintf(node_name, max_len, "%s%d_in",
+ rdbg_compatible_string, minor) <= 0) {
+ pr_err("Error in snprintf");
+ err = -ENOMEM;
+ goto device_bail;
+ }
+
+ if (register_smp2p(node_name,
+ &rdbgdevice->rdbg_data[minor].in)) {
+ pr_debug("No incoming device tree entry found for %s",
+ proc_info[minor].name);
+ continue;
+ }
+
+ if (snprintf(node_name, max_len, "%s%d_out",
+ rdbg_compatible_string, minor) <= 0) {
+ pr_err("Error in snprintf");
+ err = -ENOMEM;
+ goto device_bail;
+ }
+
+ if (register_smp2p(node_name,
+ &rdbgdevice->rdbg_data[minor].out)) {
+ pr_err("No outgoing device tree entry found for %s",
+ proc_info[minor].name);
+ err = -EINVAL;
+ goto device_bail;
+ }
+
+ rdbgdevice->rdbg_data[minor].device = device_create(
+ rdbgdevice->class, NULL, MKDEV(major, minor),
+ NULL, "%s", proc_info[minor].name);
+ if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
+ err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
+ pr_err("Error in device_create");
+ goto device_bail;
+ }
+ rdbgdevice->rdbg_data[minor].device_initialized = 1;
+ minor_nodes_created++;
+ dev_dbg(rdbgdevice->rdbg_data[minor].device,
+ "%s: created /dev/%s c %d %d'", __func__,
+ proc_info[minor].name, major, minor);
+ }
+
+ if (!minor_nodes_created) {
+ pr_err("No device tree entries found");
+ err = -EINVAL;
+ goto class_bail;
+ }
+
+ goto name_bail;
+
+device_bail:
+ for (--minor; minor >= 0; minor--) {
+ if (rdbgdevice->rdbg_data[minor].device_initialized)
+ device_destroy(rdbgdevice->class,
+ MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+ }
+class_bail:
+ class_destroy(rdbgdevice->class);
+cdev_bail:
+ cdev_del(&rdbgdevice->cdev);
+chrdev_bail:
+ unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+data_bail:
+ kfree(rdbgdevice->rdbg_data);
+name_bail:
+ kfree(node_name);
+bail:
+ return err;
+}
+
+static void __exit rdbg_exit(void)
+{
+ struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+ int minor;
+
+ for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+ if (rdbgdevice->rdbg_data[minor].device_initialized) {
+ device_destroy(rdbgdevice->class,
+ MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+ }
+ }
+ class_destroy(rdbgdevice->class);
+ cdev_del(&rdbgdevice->cdev);
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+ kfree(rdbgdevice->rdbg_data);
+}
+
+module_init(rdbg_init);
+module_exit(rdbg_exit);
+
+MODULE_DESCRIPTION("rdbg module");
+MODULE_LICENSE("GPL v2");