author	Karthikeyan Ramasubramanian <kramasub@codeaurora.org>	2016-01-08 14:44:02 -0700
committer	Rohit Vaswani <rvaswani@codeaurora.org>	2016-03-01 12:22:29 -0800
commit	987bbebd9aa667e648a8bdab9dcf9dfa600c4ad1 (patch)
tree	8d77747c7df82d1ce07e51bdd53953992f4d4cfa /drivers/soc
parent	c064e79d2974f0778a790928ca273b3f733a68c8 (diff)
soc: qcom: Add snapshot of G-Link driver
This snapshot is taken as of msm-3.18 commit e70ad0cd
(Promotion of kernel.lnx.3.18-151201.)

Signed-off-by: Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/qcom/Kconfig                   |   46
-rw-r--r--  drivers/soc/qcom/Makefile                  |    5
-rw-r--r--  drivers/soc/qcom/glink.c                   | 5713
-rw-r--r--  drivers/soc/qcom/glink_core_if.h           |  213
-rw-r--r--  drivers/soc/qcom/glink_debugfs.c           |  783
-rw-r--r--  drivers/soc/qcom/glink_loopback_commands.h |  104
-rw-r--r--  drivers/soc/qcom/glink_loopback_server.c   | 1296
-rw-r--r--  drivers/soc/qcom/glink_private.h           | 1009
-rw-r--r--  drivers/soc/qcom/glink_smd_xprt.c          | 1942
-rw-r--r--  drivers/soc/qcom/glink_smem_native_xprt.c  | 3028
-rw-r--r--  drivers/soc/qcom/glink_ssr.c               |  975
-rw-r--r--  drivers/soc/qcom/glink_xprt_if.h           |  201
-rw-r--r--  drivers/soc/qcom/tracer_pkt.c              |  255
-rw-r--r--  drivers/soc/qcom/tracer_pkt_private.h      |   50
14 files changed, 15620 insertions, 0 deletions
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index bfc443f7986a..6901d4edf39e 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -31,6 +31,45 @@ config MSM_SMD_DEBUG
what channels caused interrupt activity, and when internal state
change events occur.
+config MSM_GLINK
+ bool "Generic Link (G-Link)"
+ help
+ G-Link is a generic link transport that replaces SMD. It is used
+ within a System-on-Chip (SoC) for communication with both internal
+ processors and external peripherals. The actual physical transport
+ is handled by transport plug-ins that can be enabled and configured
+ individually.
+
+config MSM_GLINK_LOOPBACK_SERVER
+ bool "Generic Link (G-Link) Loopback Server"
+ help
+ The G-Link Loopback Server enables a loopback test framework to test
+ and validate the G-Link protocol stack. It allows both local and
+ remote clients to configure the loopback server and echoes back the
+ data received from the clients.
+
+config MSM_GLINK_SMD_XPRT
+ depends on MSM_SMD
+ depends on MSM_GLINK
+ bool "Generic Link (G-Link) SMD Transport"
+ help
+ G-Link SMD Transport is a G-Link Transport plug-in. It allows G-Link
+ communication with remote entities through an SMD physical transport
+ channel. The remote side is assumed to be pure SMD. The nature of
+ SMD limits this G-Link transport to only connecting with entities
+ internal to the System-on-Chip.
+
+config MSM_GLINK_SMEM_NATIVE_XPRT
+ depends on MSM_SMEM
+ depends on MSM_GLINK
+ bool "Generic Link (G-Link) SMEM Native Transport"
+ help
+ G-Link SMEM Native Transport is a G-Link Transport plug-in. It allows
+ G-Link communication to remote entities through a shared memory
+ physical transport. The nature of shared memory limits this G-Link
+ transport to only connecting with entities internal to the
+ System-on-Chip.
+
config QCOM_GSBI
tristate "QCOM General Serial Bus Interface"
depends on ARCH_QCOM
@@ -146,3 +185,10 @@ config QCOM_MEMORY_DUMP_V2
give a snapshot of the system at the time of the crash.
endif # ARCH_QCOM
+
+config TRACER_PKT
+ bool "Tracer Packet"
+ help
+ Tracer Packet helps in profiling the performance of inter-
+ processor communication protocols. The profiling information
+ can be logged into the tracer packet itself.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9655c66c56cc..82e9876273cf 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,5 +1,9 @@
obj-$(CONFIG_MSM_SMEM) += msm_smem.o smem_debug.o
obj-$(CONFIG_MSM_SMD) += msm_smd.o smd_debug.o smd_private.o smd_init_dt.o smsm_debug.o
+obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o glink_ssr.o
+obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER) += glink_loopback_server.o
+obj-$(CONFIG_MSM_GLINK_SMD_XPRT) += glink_smd_xprt.o
+obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT) += glink_smem_native_xprt.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
@@ -14,3 +18,4 @@ obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_MEMORY_DUMP) += memory_dump.o
obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
+obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
new file mode 100644
index 000000000000..f02a96137157
--- /dev/null
+++ b/drivers/soc/qcom/glink.c
@@ -0,0 +1,5713 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/arch_timer.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/rwsem.h>
+#include <linux/pm_qos.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+/* Number of internal IPC Logging log pages */
+#define NUM_LOG_PAGES 15
+#define GLINK_PM_QOS_HOLDOFF_MS 10
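+/* Default values used for the transport QoS configuration */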
+#define GLINK_QOS_DEF_NUM_TOKENS 10
+#define GLINK_QOS_DEF_NUM_PRIORITY 1
+#define GLINK_QOS_DEF_MTU 2048
+
+/**
+ * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
+ * @max_rate_kBps: Maximum rate supported by the priority bucket.
+ * @power_state: Transport power state for this priority bin.
+ * @tx_ready: List of channels ready for tx in the priority bucket.
+ * @active_ch_cnt: Active channels of this priority.
+ */
+struct glink_qos_priority_bin {
+ unsigned long max_rate_kBps;
+ uint32_t power_state;
+ struct list_head tx_ready;
+ uint32_t active_ch_cnt;
+};
+
+/**
+ * struct glink_core_xprt_ctx - transport representation structure
+ * @xprt_state_lhb0: controls read/write access to transport state
+ * @list_node: used to chain this transport in a global
+ * transport list
+ * @name: name of this transport
+ * @edge: what this transport connects to
+ * @id: the id to use for channel migration
+ * @versions: array of transport versions this implementation
+ * supports
+ * @versions_entries: number of entries in @versions
+ * @local_version_idx: local version index into @versions this
+ * transport is currently running
+ * @remote_version_idx: remote version index into @versions this
+ * transport is currently running
+ * @l_features: Features negotiated by the local side
+ * @capabilities: Capabilities of underlying transport
+ * @ops: transport defined implementation of common
+ * operations
+ * @local_state: value from local_channel_state_e representing
+ * the local state of this transport
+ * @remote_neg_completed: is the version negotiation with the remote end
+ * completed
+ * @xprt_ctx_lock_lhb1: lock to protect @next_lcid, @channels and
+ * @free_lcid_list
+ * @next_lcid: logical channel identifier to assign to the next
+ * created channel
+ * @free_lcid_list: lcids freed by deleted channels, available for reuse
+ * @max_cid: maximum number of channel identifiers supported
+ * @max_iid: maximum number of intent identifiers supported
+ * @tx_work: work item to process @tx_ready
+ * @tx_wq: workqueue to run @tx_work
+ * @channels: list of all existing channels on this transport
+ * @mtu: MTU supported by this transport.
+ * @token_count: Number of tokens to be assigned per assignment.
+ * @curr_qos_rate_kBps: Aggregate of currently supported QoS requests.
+ * @threshold_rate_kBps: Maximum Rate allocated for QoS traffic.
+ * @num_priority: Number of priority buckets in the transport.
+ * @tx_ready_lock_lhb2: lock to protect @tx_ready
+ * @active_high_prio: Highest priority of active channels.
+ * @prio_bin: Pointer to priority buckets.
+ * @pm_qos_req: power management QoS request for TX path
+ * @qos_req_active: a vote is active with the PM QoS system
+ * @tx_path_activity: transmit activity has occurred
+ * @pm_qos_work: removes PM QoS vote due to inactivity
+ * @xprt_dbgfs_lock_lhb3: debugfs channel structure lock
+ */
+struct glink_core_xprt_ctx {
+ struct rwref_lock xprt_state_lhb0;
+ struct list_head list_node;
+ char name[GLINK_NAME_SIZE];
+ char edge[GLINK_NAME_SIZE];
+ uint16_t id;
+ const struct glink_core_version *versions;
+ size_t versions_entries;
+ uint32_t local_version_idx;
+ uint32_t remote_version_idx;
+ uint32_t l_features;
+ uint32_t capabilities;
+ struct glink_transport_if *ops;
+ enum transport_state_e local_state;
+ bool remote_neg_completed;
+
+ spinlock_t xprt_ctx_lock_lhb1;
+ struct list_head channels;
+ uint32_t next_lcid;
+ struct list_head free_lcid_list;
+
+ uint32_t max_cid;
+ uint32_t max_iid;
+ struct work_struct tx_work;
+ struct workqueue_struct *tx_wq;
+
+ size_t mtu;
+ uint32_t token_count;
+ unsigned long curr_qos_rate_kBps;
+ unsigned long threshold_rate_kBps;
+ uint32_t num_priority;
+ spinlock_t tx_ready_lock_lhb2;
+ uint32_t active_high_prio;
+ struct glink_qos_priority_bin *prio_bin;
+
+ struct pm_qos_request pm_qos_req;
+ bool qos_req_active;
+ bool tx_path_activity;
+ struct delayed_work pm_qos_work;
+
+ struct mutex xprt_dbgfs_lock_lhb3;
+};
+
+/**
+ * struct channel_ctx - Local channel context
+ * @ch_state_lhc0: controls read/write access to channel state
+ * @port_list_node: channel list node used by transport "channels" list
+ * @tx_ready_list_node: channels that have data ready to transmit
+ * @name: name of the channel
+ *
+ * @user_priv: user opaque data type passed into glink_open()
+ * @notify_rx: RX notification function
+ * @notify_tx_done: TX-done notification function (remote side is done)
+ * @notify_state: Channel state (connected / disconnected) notifications
+ * @notify_rx_intent_req: Request from remote side for an intent
+ * @notify_rxv: RX notification function (for io buffer chain)
+ * @notify_rx_sigs: RX signal change notification
+ * @notify_rx_abort: Channel close RX Intent aborted
+ * @notify_tx_abort: Channel close TX aborted
+ * @notify_rx_tracer_pkt: Receive notification for tracer packet
+ * @notify_remote_rx_intent: Receive notification for remote-queued RX intent
+ *
+ * @transport_ptr: Transport this channel uses
+ * @lcid: Local channel ID
+ * @rcid: Remote channel ID
+ * @local_open_state: Local channel state
+ * @remote_opened: Remote channel state (opened or closed)
+ * @int_req_ack: Remote side intent request ACK state
+ * @int_req_ack_complete: Intent tracking completion - received remote ACK
+ * @int_req_complete: Intent tracking completion - received intent
+ *
+ * @local_rx_intent_lst_lock_lhc1: RX intent list lock
+ * @local_rx_intent_list: Active RX Intents queued by client
+ * @local_rx_intent_ntfy_list: Client notified, waiting for rx_done()
+ * @local_rx_intent_free_list: Available intent container structure
+ *
+ * @rmt_rx_intent_lst_lock_lhc2: Remote RX intent list lock
+ * @rmt_rx_intent_list: Remote RX intent list
+ *
+ * @max_used_liid: Maximum Local Intent ID used
+ * @dummy_riid: Dummy remote intent ID
+ *
+ * @tx_lists_lock_lhc3: TX list lock
+ * @tx_active: Ready to transmit
+ *
+ * @tx_pending_rmt_done_lock_lhc4: Remote-done list lock
+ * @tx_pending_remote_done: Transmitted, waiting for remote done
+ * @lsigs: Local signals
+ * @rsigs: Remote signals
+ * @pending_delete: waiting for channel to be deleted
+ * @no_migrate: The local client does not want to
+ * migrate transports
+ * @local_xprt_req: The transport the local side requested
+ * @local_xprt_resp: The response to @local_xprt_req
+ * @remote_xprt_req: The transport the remote side requested
+ * @remote_xprt_resp: The response to @remote_xprt_req
+ * @curr_priority: Channel's current priority.
+ * @initial_priority: Channel's initial priority.
+ * @token_count: Tokens for consumption by packet.
+ * @txd_len: Transmitted data size in the current
+ * token assignment cycle.
+ * @token_start_time: Time at which tokens are assigned.
+ * @req_rate_kBps: Current QoS request by the channel.
+ * @tx_intent_cnt: Intent count to transmit soon in future.
+ * @tx_cnt: Packets to be picked by tx scheduler.
+ */
+struct channel_ctx {
+ struct rwref_lock ch_state_lhc0;
+ struct list_head port_list_node;
+ struct list_head tx_ready_list_node;
+ char name[GLINK_NAME_SIZE];
+
+ /* user info */
+ void *user_priv;
+ void (*notify_rx)(void *handle, const void *priv, const void *pkt_priv,
+ const void *ptr, size_t size);
+ void (*notify_tx_done)(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr);
+ void (*notify_state)(void *handle, const void *priv, unsigned event);
+ bool (*notify_rx_intent_req)(void *handle, const void *priv,
+ size_t req_size);
+ void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+ void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset,
+ size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset,
+ size_t *size));
+ void (*notify_rx_sigs)(void *handle, const void *priv,
+ uint32_t old_sigs, uint32_t new_sigs);
+ void (*notify_rx_abort)(void *handle, const void *priv,
+ const void *pkt_priv);
+ void (*notify_tx_abort)(void *handle, const void *priv,
+ const void *pkt_priv);
+ void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr, size_t size);
+ void (*notify_remote_rx_intent)(void *handle, const void *priv,
+ size_t size);
+
+ /* internal port state */
+ struct glink_core_xprt_ctx *transport_ptr;
+ uint32_t lcid;
+ uint32_t rcid;
+ enum local_channel_state_e local_open_state;
+ bool remote_opened;
+ bool int_req_ack;
+ struct completion int_req_ack_complete;
+ struct completion int_req_complete;
+
+ spinlock_t local_rx_intent_lst_lock_lhc1;
+ struct list_head local_rx_intent_list;
+ struct list_head local_rx_intent_ntfy_list;
+ struct list_head local_rx_intent_free_list;
+
+ spinlock_t rmt_rx_intent_lst_lock_lhc2;
+ struct list_head rmt_rx_intent_list;
+
+ uint32_t max_used_liid;
+ uint32_t dummy_riid;
+
+ spinlock_t tx_lists_lock_lhc3;
+ struct list_head tx_active;
+
+ spinlock_t tx_pending_rmt_done_lock_lhc4;
+ struct list_head tx_pending_remote_done;
+
+ uint32_t lsigs;
+ uint32_t rsigs;
+ bool pending_delete;
+
+ bool no_migrate;
+ uint16_t local_xprt_req;
+ uint16_t local_xprt_resp;
+ uint16_t remote_xprt_req;
+ uint16_t remote_xprt_resp;
+
+ uint32_t curr_priority;
+ uint32_t initial_priority;
+ uint32_t token_count;
+ size_t txd_len;
+ unsigned long token_start_time;
+ unsigned long req_rate_kBps;
+ uint32_t tx_intent_cnt;
+ uint32_t tx_cnt;
+};
+
+static struct glink_core_if core_impl;
+static void *log_ctx;
+static unsigned glink_debug_mask = QCOM_GLINK_INFO;
+module_param_named(debug_mask, glink_debug_mask,
+ uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static unsigned glink_pm_qos;
+module_param_named(pm_qos_enable, glink_pm_qos,
+ uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+
+static LIST_HEAD(transport_list);
+
+/*
+ * Used while notifying clients about link state events. The callback
+ * information must be stored temporarily, and all existing accesses to the
+ * transport list occur in non-IRQ context, so transport_list_lock is
+ * defined as a mutex.
+ */
+static DEFINE_MUTEX(transport_list_lock_lha0);
+
+struct link_state_notifier_info {
+ struct list_head list;
+ char transport[GLINK_NAME_SIZE];
+ char edge[GLINK_NAME_SIZE];
+ void (*glink_link_state_notif_cb)(
+ struct glink_link_state_cb_info *cb_info, void *priv);
+ void *priv;
+};
+static LIST_HEAD(link_state_notifier_list);
+static DEFINE_MUTEX(link_state_notifier_lock_lha1);
+
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+ const char *name,
+ bool initial_xprt,
+ uint16_t *best_id);
+
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt);
+
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t lcid);
+
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t rcid);
+
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info);
+
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info);
+
+static void tx_work_func(struct work_struct *work);
+
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ const char *name);
+
+static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t riid);
+
+static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t *riid_ptr, size_t *intent_size);
+
+static struct glink_core_rx_intent *ch_push_local_rx_intent(
+ struct channel_ctx *ctx, const void *pkt_priv, size_t size);
+
+static void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent(
+ struct channel_ctx *ctx, uint32_t liid);
+
+static void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *intent_ptr);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+ struct channel_ctx *ctx, const void *ptr);
+
+static void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *liid_ptr, bool reuse);
+
+static struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+ struct channel_ctx *ctx);
+
+static void ch_purge_intent_lists(struct channel_ctx *ctx);
+
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+ struct channel_ctx *ctx,
+ uint32_t rcid);
+
+static bool ch_is_fully_opened(struct channel_ctx *ctx);
+static bool ch_is_fully_closed(struct channel_ctx *ctx);
+
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(struct channel_ctx *ctx,
+ uint32_t riid);
+
+static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+ struct glink_core_tx_pkt *tx_pkt);
+
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+ *if_ptr, uint32_t rcid, bool granted);
+
+static bool glink_core_remote_close_common(struct channel_ctx *ctx);
+
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+ enum glink_link_state link_state);
+
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_cancel_worker(struct work_struct *work);
+static bool ch_update_local_state(struct channel_ctx *ctx,
+ enum local_channel_state_e lstate);
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate);
+static void glink_core_deinit_xprt_qos_cfg(
+ struct glink_core_xprt_ctx *xprt_ptr);
+
+#define glink_prio_to_power_state(xprt_ctx, priority) \
+ ((xprt_ctx)->prio_bin[priority].power_state)
+
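+/*
+ * A channel is considered active for tx when it has packets queued for the
+ * scheduler or has signalled an intent to queue one soon.
+ */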
+#define GLINK_GET_CH_TX_STATE(ctx) \
+ ((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
+
+/**
+ * glink_ssr() - Clean up locally for SSR by simulating remote close
+ * @subsystem: The name of the subsystem being restarted
+ *
+ * Call into the transport using the ssr(if_ptr) function to allow it to
+ * clean up any necessary structures, then simulate a remote close from
+ * subsystem for all channels on that edge.
+ *
+ * Return: 0 on success, -ENODEV if no matching open transport is found.
+ */
+int glink_ssr(const char *subsystem)
+{
+ int ret = 0;
+ bool transport_found = false;
+ struct glink_core_xprt_ctx *xprt_ctx = NULL;
+ struct channel_ctx *ch_ctx, *temp_ch_ctx;
+ uint32_t i;
+ unsigned long flags;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt_ctx, &transport_list, list_node) {
+ if (!strcmp(subsystem, xprt_ctx->edge) &&
+ xprt_is_fully_opened(xprt_ctx)) {
+ GLINK_INFO_XPRT(xprt_ctx, "%s: SSR\n", __func__);
+ spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb2,
+ flags);
+ for (i = 0; i < xprt_ctx->num_priority; i++)
+ list_for_each_entry_safe(ch_ctx, temp_ch_ctx,
+ &xprt_ctx->prio_bin[i].tx_ready,
+ tx_ready_list_node)
+ list_del_init(
+ &ch_ctx->tx_ready_list_node);
+ spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb2,
+ flags);
+
+ xprt_ctx->ops->ssr(xprt_ctx->ops);
+ transport_found = true;
+ }
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+
+ if (!transport_found)
+ ret = -ENODEV;
+
+ return ret;
+}
+EXPORT_SYMBOL(glink_ssr);
+
+/**
+ * glink_core_ch_close_ack_common() - handles the common operations during
+ * close ack.
+ * @ctx: Pointer to channel instance.
+ *
+ * Return: True if the channel is fully closed after the state change,
+ * false otherwise.
+ */
+static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx)
+{
+ bool is_fully_closed;
+
+ if (ctx == NULL)
+ return false;
+ is_fully_closed = ch_update_local_state(ctx, GLINK_CHANNEL_CLOSED);
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n",
+ __func__);
+
+ if (ctx->notify_state) {
+ ctx->notify_state(ctx, ctx->user_priv,
+ GLINK_LOCAL_DISCONNECTED);
+ ch_purge_intent_lists(ctx);
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: notify state: GLINK_LOCAL_DISCONNECTED\n",
+ __func__);
+ }
+
+ return is_fully_closed;
+}
+
+/**
+ * glink_core_remote_close_common() - Handles the common operations during
+ * a remote close.
+ * @ctx: Pointer to channel instance.
+ *
+ * Return: True if the channel is fully closed after the state change,
+ * false otherwise.
+ */
+static bool glink_core_remote_close_common(struct channel_ctx *ctx)
+{
+ bool is_fully_closed;
+
+ if (ctx == NULL)
+ return false;
+ is_fully_closed = ch_update_rmt_state(ctx, false);
+ ctx->rcid = 0;
+
+ if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
+ ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+ if (ctx->notify_state)
+ ctx->notify_state(ctx, ctx->user_priv,
+ GLINK_REMOTE_DISCONNECTED);
+ GLINK_INFO_CH(ctx,
+ "%s: %s: GLINK_REMOTE_DISCONNECTED\n",
+ __func__, "notify state");
+ }
+
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+ GLINK_INFO_CH(ctx,
+ "%s: %s, %s\n", __func__,
+ "Did not send GLINK_REMOTE_DISCONNECTED",
+ "local state is already CLOSED");
+
+ complete_all(&ctx->int_req_complete);
+ ch_purge_intent_lists(ctx);
+
+ return is_fully_closed;
+}
+
+/**
+ * glink_qos_calc_rate_kBps() - Calculate the transmit rate in kBps
+ * @pkt_size: Worst case packet size per transmission.
+ * @interval_us: Packet transmit interval in us.
+ *
+ * This function is used to calculate the transmission rate of
+ * a channel in kBps.
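+ *
+ * For example, a worst-case 1024-byte packet sent every 5000 us works out
+ * to 1024 * 1000000 / (5000 * 1024) = 200 kBps (illustrative values).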
+ *
+ * Return: Transmission rate in kBps.
+ */
+static unsigned long glink_qos_calc_rate_kBps(size_t pkt_size,
+ unsigned long interval_us)
+{
+ unsigned long rate_kBps, rem;
+
+ rate_kBps = pkt_size * USEC_PER_SEC;
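+ /* do_div() divides rate_kBps in place; the remainder is not used */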
+ rem = do_div(rate_kBps, (interval_us * 1024));
+ return rate_kBps;
+}
+
+/**
+ * glink_qos_check_feasibility() - Feasibility test on a QoS Request
+ * @xprt_ctx: Transport in which the QoS request is made.
+ * @req_rate_kBps: QoS Request.
+ *
+ * This function is used to perform the schedulability test on a QoS request
+ * over a specific transport.
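+ *
+ * For example, with a 1000 kBps threshold and 800 kBps already granted, a
+ * further 300 kBps request is rejected with -EBUSY (illustrative values).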
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_check_feasibility(struct glink_core_xprt_ctx *xprt_ctx,
+ unsigned long req_rate_kBps)
+{
+ unsigned long new_rate_kBps;
+
+ if (xprt_ctx->num_priority == GLINK_QOS_DEF_NUM_PRIORITY)
+ return -EOPNOTSUPP;
+
+ new_rate_kBps = xprt_ctx->curr_qos_rate_kBps + req_rate_kBps;
+ if (new_rate_kBps > xprt_ctx->threshold_rate_kBps) {
+ GLINK_ERR_XPRT(xprt_ctx,
+ "New_rate(%lu + %lu) > threshold_rate(%lu)\n",
+ xprt_ctx->curr_qos_rate_kBps, req_rate_kBps,
+ xprt_ctx->threshold_rate_kBps);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * glink_qos_update_ch_prio() - Update the channel priority
+ * @ctx: Channel context whose priority is updated.
+ * @new_priority: New priority of the channel.
+ *
+ * This function is called to update the channel priority during QoS request,
+ * QoS Cancel or Priority evaluation by packet scheduler. This function must
+ * be called with transport's tx_ready_lock_lhb2 lock and channel's
+ * tx_lists_lock_lhc3 locked.
+ */
+static void glink_qos_update_ch_prio(struct channel_ctx *ctx,
+ uint32_t new_priority)
+{
+ uint32_t old_priority;
+
+ if (unlikely(!ctx))
+ return;
+
+ old_priority = ctx->curr_priority;
+ if (!list_empty(&ctx->tx_ready_list_node)) {
+ ctx->transport_ptr->prio_bin[old_priority].active_ch_cnt--;
+ list_move(&ctx->tx_ready_list_node,
+ &ctx->transport_ptr->prio_bin[new_priority].tx_ready);
+ ctx->transport_ptr->prio_bin[new_priority].active_ch_cnt++;
+ }
+ ctx->curr_priority = new_priority;
+}
+
+/**
+ * glink_qos_assign_priority() - Assign priority to a channel
+ * @ctx: Channel for which the priority has to be assigned.
+ * @req_rate_kBps: QoS request by the channel.
+ *
+ * This function is used to assign a priority to the channel depending on its
+ * QoS Request.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_assign_priority(struct channel_ctx *ctx,
+ unsigned long req_rate_kBps)
+{
+ int ret;
+ uint32_t i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ if (ctx->req_rate_kBps) {
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2,
+ flags);
+ GLINK_ERR_CH(ctx, "%s: QoS Request already exists\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = glink_qos_check_feasibility(ctx->transport_ptr, req_rate_kBps);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2,
+ flags);
+ return ret;
+ }
+
+ spin_lock(&ctx->tx_lists_lock_lhc3);
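+ /*
+ * Walk down from the highest bucket to the lowest-priority bucket that
+ * still satisfies the requested rate; bucket 0 is the default
+ * (non-QoS) priority.
+ */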
+ i = ctx->transport_ptr->num_priority - 1;
+ while (i > 0 &&
+ ctx->transport_ptr->prio_bin[i-1].max_rate_kBps >= req_rate_kBps)
+ i--;
+
+ ctx->initial_priority = i;
+ glink_qos_update_ch_prio(ctx, i);
+ ctx->req_rate_kBps = req_rate_kBps;
+ if (i > 0) {
+ ctx->transport_ptr->curr_qos_rate_kBps += req_rate_kBps;
+ ctx->token_count = ctx->transport_ptr->token_count;
+ ctx->txd_len = 0;
+ ctx->token_start_time = arch_counter_get_cntpct();
+ }
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ return 0;
+}
+
+/**
+ * glink_qos_reset_priority() - Reset the channel priority
+ * @ctx: Channel for which the priority is reset.
+ *
+ * This function is used to reset the channel priority when the QoS request
+ * is cancelled by the channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_reset_priority(struct channel_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ if (ctx->initial_priority > 0) {
+ ctx->initial_priority = 0;
+ glink_qos_update_ch_prio(ctx, 0);
+ ctx->transport_ptr->curr_qos_rate_kBps -= ctx->req_rate_kBps;
+ ctx->txd_len = 0;
+ ctx->req_rate_kBps = 0;
+ }
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ return 0;
+}
+
+/**
+ * glink_qos_ch_vote_xprt() - Vote for the transport when a channel is active
+ * @ctx: Channel context which is active.
+ *
+ * This function is called to vote for the transport either when the channel
+ * is transmitting or when it shows an intention to transmit soon. This
+ * function must be called with transport's tx_ready_lock_lhb2 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_vote_xprt(struct channel_ctx *ctx)
+{
+ uint32_t prio;
+
+ if (unlikely(!ctx || !ctx->transport_ptr))
+ return -EINVAL;
+
+ prio = ctx->curr_priority;
+ ctx->transport_ptr->prio_bin[prio].active_ch_cnt++;
+
+ if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt == 1 &&
+ ctx->transport_ptr->active_high_prio < prio) {
+ /*
+ * One active channel in this priority and this is the
+ * highest active priority bucket
+ */
+ ctx->transport_ptr->active_high_prio = prio;
+ return ctx->transport_ptr->ops->power_vote(
+ ctx->transport_ptr->ops,
+ glink_prio_to_power_state(ctx->transport_ptr,
+ prio));
+ }
+ return 0;
+}
+
+/**
+ * glink_qos_ch_unvote_xprt() - Unvote the transport when a channel is inactive
+ * @ctx: Channel context which is inactive.
+ *
+ * This function is called to unvote for the transport when all the
+ * packets queued by the channel have been transmitted by the scheduler. This
+ * function must be called with transport's tx_ready_lock_lhb2 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_unvote_xprt(struct channel_ctx *ctx)
+{
+ uint32_t prio;
+
+ if (unlikely(!ctx || !ctx->transport_ptr))
+ return -EINVAL;
+
+ prio = ctx->curr_priority;
+ ctx->transport_ptr->prio_bin[prio].active_ch_cnt--;
+
+ if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt ||
+ ctx->transport_ptr->active_high_prio > prio)
+ return 0;
+
+ /*
+ * No active channel in this priority and this is the
+ * highest active priority bucket
+ */
+ while (prio > 0) {
+ prio--;
+ if (!ctx->transport_ptr->prio_bin[prio].active_ch_cnt)
+ continue;
+
+ ctx->transport_ptr->active_high_prio = prio;
+ return ctx->transport_ptr->ops->power_vote(
+ ctx->transport_ptr->ops,
+ glink_prio_to_power_state(ctx->transport_ptr,
+ prio));
+ }
+ return ctx->transport_ptr->ops->power_unvote(ctx->transport_ptr->ops);
+}
+
+/**
+ * glink_qos_add_ch_tx_intent() - Add the channel's intention to transmit soon
+ * @ctx: Channel context which is going to be active.
+ *
+ * This function is called to update the channel state when it intends to
+ * transmit soon. This function must be called with transport's
+ * tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_add_ch_tx_intent(struct channel_ctx *ctx)
+{
+ bool active_tx;
+
+ if (unlikely(!ctx))
+ return -EINVAL;
+
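+ /* Sample the tx state first so we vote only on the idle -> active edge */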
+ active_tx = GLINK_GET_CH_TX_STATE(ctx);
+ ctx->tx_intent_cnt++;
+ if (!active_tx)
+ glink_qos_ch_vote_xprt(ctx);
+ return 0;
+}
+
+/**
+ * glink_qos_do_ch_tx() - Update the channel's state that it is transmitting
+ * @ctx: Channel context which is transmitting.
+ *
+ * This function is called to update the channel state when it is queueing a
+ * packet to transmit. This function must be called with transport's
+ * tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_do_ch_tx(struct channel_ctx *ctx)
+{
+ bool active_tx;
+
+ if (unlikely(!ctx))
+ return -EINVAL;
+
+ active_tx = GLINK_GET_CH_TX_STATE(ctx);
+ ctx->tx_cnt++;
+ if (ctx->tx_intent_cnt)
+ ctx->tx_intent_cnt--;
+ if (!active_tx)
+ glink_qos_ch_vote_xprt(ctx);
+ return 0;
+}
+
+/**
+ * glink_qos_done_ch_tx() - Update the channel's state when transmission is done
+ * @ctx: Channel context for which all packets are transmitted.
+ *
+ * This function is called to update the channel state when all packets in its
+ * transmit queue are successfully transmitted. This function must be called
+ * with transport's tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3
+ * locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_done_ch_tx(struct channel_ctx *ctx)
+{
+ bool active_tx;
+
+ if (unlikely(!ctx))
+ return -EINVAL;
+
+ WARN_ON(ctx->tx_cnt == 0);
+ ctx->tx_cnt = 0;
+ active_tx = GLINK_GET_CH_TX_STATE(ctx);
+ if (!active_tx)
+ glink_qos_ch_unvote_xprt(ctx);
+ return 0;
+}
+
+/**
+ * tx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec: Pointer to the beginning of the linear buffer.
+ * @offset: Offset into the buffer whose address is needed.
+ * @size: Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is transmitted.
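+ * The reported size runs to the end of the buffer, so a single call
+ * returns the entire remaining payload.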
+ *
+ * Return: Address of the buffer which is at offset "offset" from the beginning
+ * of the buffer.
+ */
+static void *tx_linear_vbuf_provider(void *iovec, size_t offset, size_t *size)
+{
+ struct glink_core_tx_pkt *tx_info = (struct glink_core_tx_pkt *)iovec;
+
+ if (unlikely(!iovec || !size))
+ return NULL;
+
+ if (offset >= tx_info->size)
+ return NULL;
+
+ if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, tx_info->data, offset)))
+ return NULL;
+
+ *size = tx_info->size - offset;
+
+ return (void *)tx_info->data + offset;
+}
+
+/**
+ * linearize_vector() - Linearize the vector buffer
+ * @iovec: Pointer to the vector buffer.
+ * @size: Size of data in the vector buffer.
+ * @vbuf_provider: Virtual address-space Buffer Provider for the vector.
+ * @pbuf_provider: Physical address-space Buffer Provider for the vector.
+ *
+ * This function is used to linearize the vector buffer provided by the
+ * transport when the client has registered to receive only the vector
+ * buffer.
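+ *
+ * At least one buffer provider must be supplied; @vbuf_provider takes
+ * precedence when both are given.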
+ *
+ * Return: Address of the linear buffer on success, NULL on a copy error,
+ * or an ERR_PTR() on allocation failure.
+ */
+static void *linearize_vector(void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *buf_size),
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *buf_size))
+{
+ void *bounce_buf;
+ void *pdata;
+ void *vdata;
+ size_t data_size;
+ size_t offset = 0;
+
+ bounce_buf = kmalloc(size, GFP_KERNEL);
+ if (!bounce_buf)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ if (vbuf_provider) {
+ vdata = vbuf_provider(iovec, offset, &data_size);
+ } else {
+ pdata = pbuf_provider(iovec, offset, &data_size);
+ vdata = phys_to_virt((unsigned long)pdata);
+ }
+
+ if (!vdata)
+ break;
+
+ if (OVERFLOW_ADD_UNSIGNED(size_t, data_size, offset)) {
+ GLINK_ERR("%s: overflow data_size %zu + offset %zu\n",
+ __func__, data_size, offset);
+ goto err;
+ }
+
+ memcpy(bounce_buf + offset, vdata, data_size);
+ offset += data_size;
+ } while (offset < size);
+
+ if (offset != size) {
+ GLINK_ERR("%s: Error size_copied %zu != total_size %zu\n",
+ __func__, offset, size);
+ goto err;
+ }
+ return bounce_buf;
+
+err:
+ kfree(bounce_buf);
+ return NULL;
+}
+
+/**
+ * xprt_lcid_to_ch_ctx_get() - lookup a channel by local id
+ * @xprt_ctx: Transport to search for a matching channel.
+ * @lcid: Local channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @lcid or NULL if a matching channel
+ * is not found.
+ */
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t lcid)
+{
+ struct channel_ctx *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+ if (entry->lcid == lcid) {
+ rwref_get(&entry->ch_state_lhc0);
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ return entry;
+ }
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+ return NULL;
+}
+
+/**
+ * xprt_rcid_to_ch_ctx_get() - lookup a channel by remote id
+ * @xprt_ctx: Transport to search for a matching channel.
+ * @rcid: Remote channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @rcid or NULL if a matching channel
+ * is not found.
+ */
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t rcid)
+{
+ struct channel_ctx *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+ if (entry->rcid == rcid) {
+ rwref_get(&entry->ch_state_lhc0);
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ return entry;
+ }
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+ return NULL;
+}
+
+/**
+ * ch_check_duplicate_riid() - Checks for duplicate riid
+ * @ctx: Local channel context
+ * @riid: Remote intent ID
+ *
+ * This function checks whether @riid is already present in the remote RX
+ * intent list.
+ *
+ * Return: True if the riid is found, false otherwise.
+ */
+bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ list_for_each_entry(intent, &ctx->rmt_rx_intent_list, list) {
+ if (riid == intent->id) {
+ spin_unlock_irqrestore(
+ &ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return false;
+}
+
+/**
+ * ch_pop_remote_rx_intent() - Finds a matching RX intent
+ * @ctx: Local channel context
+ * @size: Size of the requested intent
+ * @riid_ptr: Pointer to return value of remote intent ID
+ * @intent_size: Pointer to return the size of the matching intent
+ *
+ * This function searches for an RX intent whose size is greater than or
+ * equal to the requested size.
+ *
+ * Return: 0 on success, -EINVAL on invalid input, -EAGAIN if no matching
+ * intent is found.
+ */
+int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t *riid_ptr, size_t *intent_size)
+{
+ struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *intent_tmp;
+ unsigned long flags;
+
+ if (GLINK_MAX_PKT_SIZE < size) {
+ GLINK_ERR_CH(ctx, "%s: R[]:%zu Invalid size.\n", __func__,
+ size);
+ return -EINVAL;
+ }
+
+ if (riid_ptr == NULL)
+ return -EINVAL;
+
+ *riid_ptr = 0;
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+ *riid_ptr = ++ctx->dummy_riid;
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2,
+ flags);
+ return 0;
+ }
+ list_for_each_entry_safe(intent, intent_tmp, &ctx->rmt_rx_intent_list,
+ list) {
+ if (intent->intent_size >= size) {
+ list_del(&intent->list);
+ GLINK_DBG_CH(ctx,
+ "%s: R[%u]:%zu Removed remote intent\n",
+ __func__,
+ intent->id,
+ intent->intent_size);
+ *riid_ptr = intent->id;
+ *intent_size = intent->intent_size;
+ kfree(intent);
+ spin_unlock_irqrestore(
+ &ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return -EAGAIN;
+}
+
+/**
+ * ch_push_remote_rx_intent() - Registers a remote RX intent
+ * @ctx: Local channel context
+ * @size: Size of Intent
+ * @riid: Remote intent ID
+ *
+ * This function adds a remote RX intent to the remote RX intent list.
+ */
+void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t riid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+ gfp_t gfp_flag;
+
+ if (GLINK_MAX_PKT_SIZE < size) {
+ GLINK_ERR_CH(ctx, "%s: R[%u]:%zu Invalid size.\n", __func__,
+ riid, size);
+ return;
+ }
+
+ if (ch_check_duplicate_riid(ctx, riid)) {
+ GLINK_ERR_CH(ctx, "%s: R[%d]:%zu Duplicate RIID found\n",
+ __func__, riid, size);
+ return;
+ }
+
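+ /*
+ * Transports that auto-queue RX intents may push them from atomic
+ * context, so avoid a sleeping allocation in that case.
+ */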
+ gfp_flag = (ctx->transport_ptr->capabilities & GCAP_AUTO_QUEUE_RX_INT) ?
+ GFP_ATOMIC : GFP_KERNEL;
+ intent = kzalloc(sizeof(struct glink_core_rx_intent), gfp_flag);
+ if (!intent) {
+ GLINK_ERR_CH(ctx,
+ "%s: R[%u]:%zu Memory allocation for intent failed\n",
+ __func__, riid, size);
+ return;
+ }
+ intent->id = riid;
+ intent->intent_size = size;
+
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);
+
+ complete_all(&ctx->int_req_complete);
+ if (ctx->notify_remote_rx_intent)
+ ctx->notify_remote_rx_intent(ctx, ctx->user_priv, size);
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+
+ GLINK_DBG_CH(ctx, "%s: R[%u]:%zu Pushed remote intent\n", __func__,
+ intent->id,
+ intent->intent_size);
+}
+
+/**
+ * ch_push_local_rx_intent() - Create an rx_intent
+ * @ctx: Local channel context
+ * @pkt_priv: Opaque private pointer provided by client to be returned later
+ * @size: Size of intent
+ *
+ * This function creates a local intent and adds it to the local
+ * intent list.
+ */
+struct glink_core_rx_intent *ch_push_local_rx_intent(struct channel_ctx *ctx,
+ const void *pkt_priv, size_t size)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+ int ret;
+
+ if (GLINK_MAX_PKT_SIZE < size) {
+ GLINK_ERR_CH(ctx,
+ "%s: L[]:%zu Invalid size\n", __func__, size);
+ return NULL;
+ }
+
+ intent = ch_get_free_local_rx_intent(ctx);
+ if (!intent) {
+ if (ctx->max_used_liid >= ctx->transport_ptr->max_iid) {
+ GLINK_ERR_CH(ctx,
+ "%s: All intents are in USE max_iid[%d]",
+ __func__, ctx->transport_ptr->max_iid);
+ return NULL;
+ }
+
+ intent = kzalloc(sizeof(struct glink_core_rx_intent),
+ GFP_KERNEL);
+ if (!intent) {
+ GLINK_ERR_CH(ctx,
+ "%s: Memory Allocation for local rx_intent failed",
+ __func__);
+ return NULL;
+ }
+ intent->id = ++ctx->max_used_liid;
+ }
+
+ /* transport is responsible for allocating/reserving for the intent */
+ ret = ctx->transport_ptr->ops->allocate_rx_intent(
+ ctx->transport_ptr->ops, size, intent);
+ if (ret < 0) {
+ /* intent data allocation failure */
+ GLINK_ERR_CH(ctx, "%s: unable to allocate intent sz[%zu] %d",
+ __func__, size, ret);
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_add_tail(&intent->list,
+ &ctx->local_rx_intent_free_list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return NULL;
+ }
+
+ intent->pkt_priv = pkt_priv;
+ intent->intent_size = size;
+ intent->write_offset = 0;
+ intent->pkt_size = 0;
+ intent->bounce_buf = NULL;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+ intent->id,
+ intent->intent_size);
+ return intent;
+}
+
+/**
+ * ch_remove_local_rx_intent() - Find and remove RX Intent from list
+ * @ctx: Local channel context
+ * @liid: Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, the intent
+ * is deleted from the list.
+ */
+void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid)
+{
+ struct glink_core_rx_intent *intent, *tmp_intent;
+ unsigned long flags;
+
+ if (ctx->transport_ptr->max_iid < liid) {
+ GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+ liid);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+ list) {
+ if (liid == intent->id) {
+ list_del(&intent->list);
+ list_add_tail(&intent->list,
+ &ctx->local_rx_intent_free_list);
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ GLINK_DBG_CH(ctx,
+ "%s: L[%u]:%zu moved intent to Free/unused list\n",
+ __func__,
+ intent->id,
+ intent->intent_size);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ liid);
+}
+
+/**
+ * ch_get_dummy_rx_intent() - Get a dummy rx_intent
+ * @ctx: Local channel context
+ * @liid: Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel and
+ * returns either a matching intent or allocates a dummy one if no matching
+ * intents can be found.
+ *
+ * Return: Pointer to an intent on success, NULL on allocation failure
+ */
+struct glink_core_rx_intent *ch_get_dummy_rx_intent(struct channel_ctx *ctx,
+ uint32_t liid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ if (!list_empty(&ctx->local_rx_intent_list)) {
+ intent = list_first_entry(&ctx->local_rx_intent_list,
+ struct glink_core_rx_intent, list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return intent;
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+ intent = ch_get_free_local_rx_intent(ctx);
+ if (!intent) {
+ intent = kzalloc(sizeof(struct glink_core_rx_intent),
+ GFP_ATOMIC);
+ if (!intent) {
+ GLINK_ERR_CH(ctx,
+ "%s: Memory Allocation for local rx_intent failed",
+ __func__);
+ return NULL;
+ }
+ intent->id = ++ctx->max_used_liid;
+ }
+ intent->intent_size = 0;
+ intent->write_offset = 0;
+ intent->pkt_size = 0;
+ intent->bounce_buf = NULL;
+ intent->pkt_priv = NULL;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+ intent->id,
+ intent->intent_size);
+ return intent;
+}
+
+/**
+ * ch_get_local_rx_intent() - Search for an rx_intent
+ * @ctx: Local channel context
+ * @liid: Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, a pointer to
+ * the intent is returned.
+ *
+ * Return: Pointer to the intent if it is found, else NULL
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent(struct channel_ctx *ctx,
+ uint32_t liid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+
+ if (ctx->transport_ptr->max_iid < liid) {
+ GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+ liid);
+ return NULL;
+ }
+
+ if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+ return ch_get_dummy_rx_intent(ctx, liid);
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+ if (liid == intent->id) {
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1, flags);
+ return intent;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ liid);
+ return NULL;
+}
+
+/**
+ * ch_set_local_rx_intent_notified() - Add an rx intent to the local intent
+ * notified list
+ * @ctx: Local channel context
+ * @intent_ptr: Pointer to the local intent
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent. If found, the function moves the intent
+ * from the local_rx_intent_list to the local_rx_intent_ntfy_list.
+ */
+void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *intent_ptr)
+{
+ struct glink_core_rx_intent *tmp_intent, *intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+ list) {
+ if (intent == intent_ptr) {
+ list_del(&intent->list);
+ list_add_tail(&intent->list,
+ &ctx->local_rx_intent_ntfy_list);
+ GLINK_DBG_CH(ctx,
+ "%s: L[%u]:%zu Moved intent %s",
+ __func__,
+ intent_ptr->id,
+ intent_ptr->intent_size,
+ "from local to notify list\n");
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ intent_ptr->id);
+}
+
+/**
+ * ch_get_local_rx_intent_notified() - Find rx intent in local notified list
+ * @ctx: Local channel context
+ * @ptr: Pointer to the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent.
+ *
+ * Return: Pointer to the intent if it is found, else NULL.
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+ struct channel_ctx *ctx, const void *ptr)
+{
+ struct glink_core_rx_intent *ptr_intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry(ptr_intent, &ctx->local_rx_intent_ntfy_list,
+ list) {
+ if (ptr_intent->data == ptr || ptr_intent->iovec == ptr ||
+ ptr_intent->bounce_buf == ptr) {
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return ptr_intent;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: Local intent not found\n", __func__);
+ return NULL;
+}
+
+/**
+ * ch_remove_local_rx_intent_notified() - Remove an rx intent from the local
+ * intent notified list
+ * @ctx: Local channel context
+ * @liid_ptr: Pointer to the rx intent
+ * @reuse: Reuse the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent. If found, the intent is removed from the
+ * local_rx_intent_ntfy_list and added back to the local_rx_intent_list
+ * when @reuse is true, or to the local_rx_intent_free_list otherwise.
+ */
+void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *liid_ptr, bool reuse)
+{
+ struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->local_rx_intent_ntfy_list, list) {
+ if (ptr_intent == liid_ptr) {
+ list_del(&ptr_intent->list);
+ GLINK_DBG_CH(ctx,
+ "%s: L[%u]:%zu Removed intent from notify list\n",
+ __func__,
+ ptr_intent->id,
+ ptr_intent->intent_size);
+ kfree(ptr_intent->bounce_buf);
+ ptr_intent->bounce_buf = NULL;
+ ptr_intent->write_offset = 0;
+ ptr_intent->pkt_size = 0;
+ if (reuse)
+ list_add_tail(&ptr_intent->list,
+ &ctx->local_rx_intent_list);
+ else
+ list_add_tail(&ptr_intent->list,
+ &ctx->local_rx_intent_free_list);
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ liid_ptr->id);
+}
+
+/**
+ * ch_get_free_local_rx_intent() - Return an rx intent from the local intent
+ * free list
+ * @ctx: Local channel context
+ *
+ * This function parses the local_rx_intent_free list for a specific channel
+ * and checks for a free, unused intent. If found, a pointer to the free
+ * intent is returned, else NULL.
+ */
+struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+ struct channel_ctx *ctx)
+{
+ struct glink_core_rx_intent *ptr_intent = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ if (!list_empty(&ctx->local_rx_intent_free_list)) {
+ ptr_intent = list_first_entry(&ctx->local_rx_intent_free_list,
+ struct glink_core_rx_intent,
+ list);
+ list_del(&ptr_intent->list);
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ return ptr_intent;
+}
+
+/**
+ * ch_purge_intent_lists() - Remove all intents for a channel
+ *
+ * @ctx: Local channel context
+ *
+ * This function parses the local intent lists for a specific channel and
+ * removes and frees all intents.
+ */
+void ch_purge_intent_lists(struct channel_ctx *ctx)
+{
+ struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+ struct glink_core_tx_pkt *tx_info, *tx_info_temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ list_for_each_entry_safe(tx_info, tx_info_temp, &ctx->tx_active,
+ list_node) {
+ ctx->notify_tx_abort(ctx, ctx->user_priv,
+ tx_info->pkt_priv);
+ rwref_put(&tx_info->pkt_ref);
+ }
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->local_rx_intent_list, list) {
+ ctx->notify_rx_abort(ctx, ctx->user_priv,
+ ptr_intent->pkt_priv);
+ list_del(&ptr_intent->list);
+ kfree(ptr_intent);
+ }
+
+ if (!list_empty(&ctx->local_rx_intent_ntfy_list))
+ /*
+ * The client is still processing an rx_notify() call and has
+ * not yet called glink_rx_done() to return the pointer to us.
+ * glink_rx_done() will do the appropriate cleanup when this
+ * call occurs, but log a message here just for internal state
+ * tracking.
+ */
+ GLINK_INFO_CH(ctx, "%s: waiting on glink_rx_done()\n",
+ __func__);
+
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->local_rx_intent_free_list, list) {
+ list_del(&ptr_intent->list);
+ kfree(ptr_intent);
+ }
+ ctx->max_used_liid = 0;
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->rmt_rx_intent_list, list) {
+ list_del(&ptr_intent->list);
+ kfree(ptr_intent);
+ }
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+}
+
+/**
+ * ch_get_tx_pending_remote_done() - Look up a packet that is waiting for
+ * the remote-done notification.
+ * @ctx: Pointer to the channel context
+ * @riid: riid of the transmit packet
+ *
+ * This function searches the tx_pending_remote_done list for a packet
+ * matching @riid.
+ *
+ * The tx_lists_lock_lhc3 lock needs to be held while calling this function.
+ *
+ * Return: Pointer to the tx packet, or NULL if it is not found or its
+ * transmission is incomplete.
+ */
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(
+ struct channel_ctx *ctx, uint32_t riid)
+{
+ struct glink_core_tx_pkt *tx_pkt;
+ unsigned long flags;
+
+ if (!ctx) {
+ GLINK_ERR("%s: Invalid context pointer", __func__);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ list_for_each_entry(tx_pkt, &ctx->tx_pending_remote_done, list_done) {
+ if (tx_pkt->riid == riid) {
+ if (tx_pkt->size_remaining) {
+ GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete",
+ __func__, riid);
+ tx_pkt = NULL;
+ }
+ spin_unlock_irqrestore(
+ &ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ return tx_pkt;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+ GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
+ __func__, riid);
+ return NULL;
+}
+
+/**
+ * ch_remove_tx_pending_remote_done() - Removes a packet transmit context for a
+ * packet that is waiting for the remote-done notification
+ * @ctx: Pointer to the channel context
+ * @tx_pkt: Pointer to the transmit packet
+ *
+ * This function parses the tx_pending_remote_done list and removes the
+ * packet that matches @tx_pkt.
+ */
+void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+ struct glink_core_tx_pkt *tx_pkt)
+{
+ struct glink_core_tx_pkt *local_tx_pkt, *tmp_tx_pkt;
+ unsigned long flags;
+
+ if (!ctx || !tx_pkt) {
+ GLINK_ERR("%s: Invalid input", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
+ &ctx->tx_pending_remote_done, list_done) {
+ if (tx_pkt == local_tx_pkt) {
+ list_del_init(&tx_pkt->list_done);
+ GLINK_DBG_CH(ctx,
+ "%s: R[%u] Removed Tx packet for intent\n",
+ __func__,
+ tx_pkt->riid);
+ rwref_put(&tx_pkt->pkt_ref);
+ spin_unlock_irqrestore(
+ &ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+ GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found", __func__,
+ tx_pkt->riid);
+}
+
+/**
+ * glink_add_free_lcid_list() - add the lcid of a channel that is about to be
+ * deleted to the free lcid list
+ * @ctx: Pointer to channel context.
+ */
+static void glink_add_free_lcid_list(struct channel_ctx *ctx)
+{
+ struct channel_lcid *free_lcid;
+ unsigned long flags;
+
+ free_lcid = kzalloc(sizeof(*free_lcid), GFP_KERNEL);
+ if (!free_lcid) {
+ GLINK_ERR(
+ "%s: allocation failed on xprt:edge [%s:%s] for lcid [%d]\n",
+ __func__, ctx->transport_ptr->name,
+ ctx->transport_ptr->edge, ctx->lcid);
+ return;
+ }
+ free_lcid->lcid = ctx->lcid;
+ spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+ list_add_tail(&free_lcid->list_node,
+ &ctx->transport_ptr->free_lcid_list);
+ spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+}
+
+/**
+ * glink_ch_ctx_release() - Free the channel context
+ * @ch_st_lock: handle to the rwref_lock associated with the channel
+ *
+ * This should only be called when the reference count associated with the
+ * channel goes to zero.
+ */
+static void glink_ch_ctx_release(struct rwref_lock *ch_st_lock)
+{
+ struct channel_ctx *ctx = container_of(ch_st_lock, struct channel_ctx,
+ ch_state_lhc0);
+ ctx->transport_ptr = NULL;
+ kfree(ctx);
+ GLINK_INFO("%s: freed the channel ctx in pid [%d]\n", __func__,
+ current->pid);
+ ctx = NULL;
+}
+
+/**
+ * ch_name_to_ch_ctx_create() - lookup a channel by name, create the channel if
+ * it is not found.
+ * @xprt_ctx: Transport to search for a matching channel.
+ * @name: Name of the desired channel.
+ *
+ * Return: The channel corresponding to @name, NULL if a matching channel was
+ * not found AND a new channel could not be created.
+ */
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ const char *name)
+{
+ struct channel_ctx *entry;
+ struct channel_ctx *ctx;
+ struct channel_ctx *temp;
+ unsigned long flags;
+ struct channel_lcid *flcid;
+
+ ctx = kzalloc(sizeof(struct channel_ctx), GFP_KERNEL);
+ if (!ctx) {
+		GLINK_ERR_XPRT(xprt_ctx,
+			"%s: Failed to allocate ctx, checking if one exists\n",
+			__func__);
+ goto check_ctx;
+ }
+
+ ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+ strlcpy(ctx->name, name, GLINK_NAME_SIZE);
+ rwref_lock_init(&ctx->ch_state_lhc0, glink_ch_ctx_release);
+ INIT_LIST_HEAD(&ctx->tx_ready_list_node);
+ init_completion(&ctx->int_req_ack_complete);
+ init_completion(&ctx->int_req_complete);
+ INIT_LIST_HEAD(&ctx->local_rx_intent_list);
+ INIT_LIST_HEAD(&ctx->local_rx_intent_ntfy_list);
+ INIT_LIST_HEAD(&ctx->local_rx_intent_free_list);
+ spin_lock_init(&ctx->local_rx_intent_lst_lock_lhc1);
+ INIT_LIST_HEAD(&ctx->rmt_rx_intent_list);
+ spin_lock_init(&ctx->rmt_rx_intent_lst_lock_lhc2);
+ INIT_LIST_HEAD(&ctx->tx_active);
+ spin_lock_init(&ctx->tx_pending_rmt_done_lock_lhc4);
+ INIT_LIST_HEAD(&ctx->tx_pending_remote_done);
+ spin_lock_init(&ctx->tx_lists_lock_lhc3);
+
+check_ctx:
+ rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+ if (xprt_ctx->local_state != GLINK_XPRT_OPENED) {
+ kfree(ctx);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ return NULL;
+ }
+ spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
+ port_list_node)
+ if (!strcmp(entry->name, name) && !entry->pending_delete) {
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ kfree(ctx);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ return entry;
+ }
+
+ if (ctx) {
+ if (list_empty(&xprt_ctx->free_lcid_list)) {
+ if (xprt_ctx->next_lcid > xprt_ctx->max_cid) {
+ /* no more channels available */
+ GLINK_ERR_XPRT(xprt_ctx,
+ "%s: unable to exceed %u channels\n",
+ __func__, xprt_ctx->max_cid);
+ spin_unlock_irqrestore(
+ &xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ kfree(ctx);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ return NULL;
+ } else {
+ ctx->lcid = xprt_ctx->next_lcid++;
+ }
+ } else {
+ flcid = list_first_entry(&xprt_ctx->free_lcid_list,
+ struct channel_lcid, list_node);
+ ctx->lcid = flcid->lcid;
+ list_del(&flcid->list_node);
+ kfree(flcid);
+ }
+
+ list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
+
+ GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
+ "%s: local:GLINK_CHANNEL_CLOSED\n",
+ __func__);
+ }
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb3);
+ if (ctx != NULL)
+ glink_debugfs_add_channel(ctx, xprt_ctx);
+ mutex_unlock(&xprt_ctx->xprt_dbgfs_lock_lhb3);
+ return ctx;
+}
+
+/**
+ * ch_add_rcid() - add a remote channel identifier to an existing channel
+ * @xprt_ctx: Transport the channel resides on.
+ * @ctx: Channel receiving the identifier.
+ * @rcid: The remote channel identifier.
+ */
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+ struct channel_ctx *ctx,
+ uint32_t rcid)
+{
+ ctx->rcid = rcid;
+}
+
+/**
+ * ch_update_local_state() - Update the local channel state
+ * @ctx: Pointer to channel context.
+ * @lstate: Local channel state.
+ *
+ * Return: True if the channel is fully closed as a result of this update,
+ * false otherwise.
+ */
+static bool ch_update_local_state(struct channel_ctx *ctx,
+ enum local_channel_state_e lstate)
+{
+ bool is_fully_closed;
+
+ rwref_write_get(&ctx->ch_state_lhc0);
+ ctx->local_open_state = lstate;
+ is_fully_closed = ch_is_fully_closed(ctx);
+ rwref_write_put(&ctx->ch_state_lhc0);
+
+ return is_fully_closed;
+}
+
+/**
+ * ch_update_rmt_state() - Update the remote channel state
+ * @ctx:	Pointer to channel context.
+ * @rstate:	Remote channel state.
+ *
+ * Return: True if the channel is fully closed as a result of this update,
+ * false otherwise.
+ */
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate)
+{
+ bool is_fully_closed;
+
+ rwref_write_get(&ctx->ch_state_lhc0);
+ ctx->remote_opened = rstate;
+ is_fully_closed = ch_is_fully_closed(ctx);
+ rwref_write_put(&ctx->ch_state_lhc0);
+
+ return is_fully_closed;
+}
+
+/**
+ * ch_is_fully_opened() - Verify that a channel is fully open
+ * @ctx: Pointer to channel context
+ *
+ * Return: True if open, else false
+ */
+static bool ch_is_fully_opened(struct channel_ctx *ctx)
+{
+ if (ctx->remote_opened && ctx->local_open_state == GLINK_CHANNEL_OPENED)
+ return true;
+
+ return false;
+}
+
+/**
+ * ch_is_fully_closed() - Verify that a channel is closed on both sides
+ * @ctx: Pointer to channel context
+ *
+ * Return: True if fully closed on both sides, else false
+ */
+static bool ch_is_fully_closed(struct channel_ctx *ctx)
+{
+ if (!ctx->remote_opened &&
+ ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+ return true;
+
+ return false;
+}
+
+/**
+ * find_open_transport() - find a specific open transport
+ * @edge: Edge the transport is on.
+ * @name: Name of the transport (or NULL if no preference)
+ * @initial_xprt: The specified transport is the start for migration
+ * @best_id: The best transport found for this connection
+ *
+ * Find an open transport corresponding to the specified @name and @edge. @edge
+ * is expected to be valid. @name is expected to be NULL (unspecified) or
+ * valid. If @name is not specified, then the best transport found on the
+ * specified edge will be returned.
+ *
+ * Return: Transport with the specified name on the specified edge, if open.
+ * NULL if the transport exists but is not fully open. ERR_PTR(-ENODEV) if no
+ * such transport exists.
+ */
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+ const char *name,
+ bool initial_xprt,
+ uint16_t *best_id)
+{
+ struct glink_core_xprt_ctx *xprt;
+ struct glink_core_xprt_ctx *best_xprt;
+ struct glink_core_xprt_ctx *ret;
+ bool first = true;
+
+ ret = (struct glink_core_xprt_ctx *)ERR_PTR(-ENODEV);
+ *best_id = USHRT_MAX;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node) {
+ if (strcmp(edge, xprt->edge))
+ continue;
+ if (first) {
+ first = false;
+ ret = NULL;
+ }
+ if (!xprt_is_fully_opened(xprt))
+ continue;
+
+ if (xprt->id < *best_id) {
+ *best_id = xprt->id;
+ best_xprt = xprt;
+ }
+
+ /*
+		 * Braces are required in this instance because the else will
+ * attach to the wrong if otherwise.
+ */
+ if (name) {
+ if (!strcmp(name, xprt->name))
+ ret = xprt;
+ } else {
+ ret = best_xprt;
+ }
+ }
+
+ mutex_unlock(&transport_list_lock_lha0);
+
+ if (IS_ERR_OR_NULL(ret))
+ return ret;
+ if (!initial_xprt)
+ *best_id = ret->id;
+
+ return ret;
+}
+
+/**
+ * xprt_is_fully_opened() - check the open status of a transport
+ * @xprt: Transport being checked.
+ *
+ * Return: True if the transport is fully opened, false otherwise.
+ */
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt)
+{
+ if (xprt->remote_neg_completed &&
+ xprt->local_state == GLINK_XPRT_OPENED)
+ return true;
+
+ return false;
+}
+
+/**
+ * glink_dummy_notify_rx_intent_req() - Dummy RX Request
+ *
+ * @handle: Channel handle (ignored)
+ * @priv: Private data pointer (ignored)
+ * @req_size: Requested size (ignored)
+ *
+ * Dummy RX intent request if client does not implement the optional callback
+ * function.
+ *
+ * Return: False
+ */
+static bool glink_dummy_notify_rx_intent_req(void *handle, const void *priv,
+ size_t req_size)
+{
+ return false;
+}
+
+/**
+ * glink_dummy_notify_rx_sigs() - Dummy signal callback
+ *
+ * @handle: Channel handle (ignored)
+ * @priv: Private data pointer (ignored)
+ * @old_sigs:	Previous signal values (ignored)
+ * @new_sigs:	New signal values (ignored)
+ *
+ * Dummy signal callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_sigs(void *handle, const void *priv,
+ uint32_t old_sigs, uint32_t new_sigs)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_rx_abort() - Dummy rx abort callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @pkt_priv:	Private intent data pointer (ignored)
+ *
+ * Dummy rx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_abort(void *handle, const void *priv,
+ const void *pkt_priv)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_tx_abort() - Dummy tx abort callback
+ *
+ * @handle: Channel handle (ignored)
+ * @priv: Private data pointer (ignored)
+ * @pkt_priv: Private intent data pointer (ignored)
+ *
+ * Dummy tx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_tx_abort(void *handle, const void *priv,
+ const void *pkt_priv)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * dummy_poll() - a dummy poll() for transports that don't define one
+ * @if_ptr: The transport interface handle for this transport.
+ * @lcid: The channel to poll.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_reuse_rx_intent() - a dummy reuse_rx_intent() for transports that
+ * don't define one
+ * @if_ptr: The transport interface handle for this transport.
+ * @intent: The intent to reuse.
+ *
+ * Return: Success.
+ */
+static int dummy_reuse_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ return 0;
+}
+
+/**
+ * dummy_mask_rx_irq() - a dummy mask_rx_irq() for transports that don't define
+ * one
+ * @if_ptr: The transport interface handle for this transport.
+ * @lcid: The local channel id for this channel.
+ * @mask: True to mask the irq, false to unmask.
+ * @pstruct: Platform defined structure with data necessary for masking.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+ bool mask, void *pstruct)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_wait_link_down() - a dummy wait_link_down() for transports that don't
+ * define one
+ * @if_ptr: The transport interface handle for this transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_wait_link_down(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_allocate_rx_intent() - a dummy RX intent allocation function that does
+ * not allocate anything
+ * @if_ptr: The transport the intent is associated with.
+ * @size: Size of intent.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: Success.
+ */
+static int dummy_allocate_rx_intent(struct glink_transport_if *if_ptr,
+ size_t size, struct glink_core_rx_intent *intent)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_tracer_pkt() - a dummy tracer packet tx cmd for transports
+ * that don't define one
+ * @if_ptr: The transport interface handle for this transport.
+ * @lcid: The channel in which the tracer packet is transmitted.
+ * @pctx: Context of the packet to be transmitted.
+ *
+ * Return: 0.
+ */
+static int dummy_tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ pctx->size_remaining = 0;
+ return 0;
+}
+
+/**
+ * dummy_deallocate_rx_intent() - a dummy rx intent deallocation function that
+ * does not deallocate anything
+ * @if_ptr: The transport the intent is associated with.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: Success.
+ */
+static int dummy_deallocate_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_intent() - dummy local rx intent request
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The intent size to encode.
+ * @liid: The local intent id to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_done() - dummy rx done command
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @liid: The local intent id to encode.
+ * @reuse: Reuse the consumed intent.
+ */
+static void dummy_tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * dummy_tx() - dummy tx() that does not send anything
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written, i.e. zero.
+ */
+static int dummy_tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_rx_intent_req() - dummy rx intent request function
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The requested intent size to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_remote_rx_intent_req_ack() - dummy rx intent request ack
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @granted: The request response to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_remote_rx_intent_req_ack(
+ struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_set_sigs() - dummy signals ack transmit function
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @sigs: The signals to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_set_sigs(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t sigs)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_close() - dummy channel close transmit function
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_ch_close(struct glink_transport_if *if_ptr,
+ uint32_t lcid)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_remote_close_ack() - dummy channel close ack sending function
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ */
+static void dummy_tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * dummy_get_power_vote_ramp_time() - Dummy Power vote ramp time
+ * @if_ptr: The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static unsigned long dummy_get_power_vote_ramp_time(
+ struct glink_transport_if *if_ptr, uint32_t state)
+{
+ return (unsigned long)-EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_vote() - Dummy Power vote operation
+ * @if_ptr: The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_vote(struct glink_transport_if *if_ptr,
+ uint32_t state)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_unvote() - Dummy Power unvote operation
+ * @if_ptr:	The transport to transmit on.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_unvote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * notif_if_up_all_xprts() - Check and notify existing transport state if up
+ * @notif_info: Data structure containing transport information to be notified.
+ *
+ * This function is called when the client registers a notifier to know about
+ * the state of a transport. This function matches the existing transports with
+ * the transport in the "notif_info" parameter. When a matching transport is
+ * found, the callback function in the "notif_info" parameter is called with
+ * the state of the matching transport.
+ *
+ * If an edge or transport is not defined, then all edges and/or transports
+ * will be matched and will receive up notifications.
+ */
+static void notif_if_up_all_xprts(
+ struct link_state_notifier_info *notif_info)
+{
+ struct glink_core_xprt_ctx *xprt_ptr;
+ struct glink_link_state_cb_info cb_info;
+
+ cb_info.link_state = GLINK_LINK_STATE_UP;
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt_ptr, &transport_list, list_node) {
+ if (strlen(notif_info->edge) &&
+ strcmp(notif_info->edge, xprt_ptr->edge))
+ continue;
+
+ if (strlen(notif_info->transport) &&
+ strcmp(notif_info->transport, xprt_ptr->name))
+ continue;
+
+ if (!xprt_is_fully_opened(xprt_ptr))
+ continue;
+
+ cb_info.transport = xprt_ptr->name;
+ cb_info.edge = xprt_ptr->edge;
+ notif_info->glink_link_state_notif_cb(&cb_info,
+ notif_info->priv);
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+}
+
+/**
+ * check_link_notifier_and_notify() - Check and notify clients about link state
+ * @xprt_ptr: Transport whose state to be notified.
+ * @link_state: State of the transport to be notified.
+ *
+ * This function is called when the state of the transport changes. This
+ * function matches the transport with the clients that have registered to
+ * be notified about the state changes. When a matching client notifier is
+ * found, the callback function in the client notifier is called with the
+ * new state of the transport.
+ */
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+ enum glink_link_state link_state)
+{
+ struct link_state_notifier_info *notif_info;
+ struct glink_link_state_cb_info cb_info;
+
+ cb_info.link_state = link_state;
+ mutex_lock(&link_state_notifier_lock_lha1);
+ list_for_each_entry(notif_info, &link_state_notifier_list, list) {
+ if (strlen(notif_info->edge) &&
+ strcmp(notif_info->edge, xprt_ptr->edge))
+ continue;
+
+ if (strlen(notif_info->transport) &&
+ strcmp(notif_info->transport, xprt_ptr->name))
+ continue;
+
+ cb_info.transport = xprt_ptr->name;
+ cb_info.edge = xprt_ptr->edge;
+ notif_info->glink_link_state_notif_cb(&cb_info,
+ notif_info->priv);
+ }
+ mutex_unlock(&link_state_notifier_lock_lha1);
+}
+
+/**
+ * glink_open() - Open a G-Link channel
+ *
+ * @cfg:	Open configuration structure (the structure is copied before
+ *		glink_open returns). All unused fields should be zero-filled.
+ *
+ * Clients must not call this from a link state callback context; it should
+ * be invoked from the client's own thread.
+ *
+ * Return: Pointer to channel on success, ERR_PTR() with a standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg)
+{
+ struct channel_ctx *ctx = NULL;
+ struct glink_core_xprt_ctx *transport_ptr;
+ size_t len;
+ int ret;
+ uint16_t best_id;
+
+ if (!cfg->edge || !cfg->name) {
+ GLINK_ERR("%s: !cfg->edge || !cfg->name\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ len = strlen(cfg->edge);
+ if (len == 0 || len >= GLINK_NAME_SIZE) {
+ GLINK_ERR("%s: [EDGE] len == 0 || len >= GLINK_NAME_SIZE\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ len = strlen(cfg->name);
+ if (len == 0 || len >= GLINK_NAME_SIZE) {
+ GLINK_ERR("%s: [NAME] len == 0 || len >= GLINK_NAME_SIZE\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cfg->transport) {
+ len = strlen(cfg->transport);
+ if (len == 0 || len >= GLINK_NAME_SIZE) {
+ GLINK_ERR("%s: [TRANSPORT] len == 0 || %s\n",
+ __func__,
+ "len >= GLINK_NAME_SIZE");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* confirm required notification parameters */
+ if (!(cfg->notify_rx || cfg->notify_rxv) || !cfg->notify_tx_done
+ || !cfg->notify_state
+ || ((cfg->options & GLINK_OPT_RX_INTENT_NOTIF)
+ && !cfg->notify_remote_rx_intent)) {
+ GLINK_ERR("%s: Incorrect notification parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* find transport */
+ transport_ptr = find_open_transport(cfg->edge, cfg->transport,
+ cfg->options & GLINK_OPT_INITIAL_XPORT,
+ &best_id);
+ if (IS_ERR_OR_NULL(transport_ptr)) {
+		GLINK_ERR("%s:%s %s: Error %d - unable to find transport\n",
+				cfg->transport, cfg->edge, __func__,
+				(int)PTR_ERR(transport_ptr));
+ return ERR_PTR(-ENODEV);
+ }
+
+ /*
+ * look for an existing port structure which can occur in
+ * reopen and remote-open-first cases
+ */
+ ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name);
+ if (ctx == NULL) {
+ GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
+ cfg->transport, cfg->edge, __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* port already exists */
+ if (ctx->local_open_state != GLINK_CHANNEL_CLOSED) {
+ /* not ready to be re-opened */
+ GLINK_INFO_CH_XPRT(ctx, transport_ptr,
+ "%s: Channel not ready to be re-opened. State: %u\n",
+ __func__, ctx->local_open_state);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* initialize port structure */
+ ctx->user_priv = cfg->priv;
+ ctx->notify_rx = cfg->notify_rx;
+ ctx->notify_tx_done = cfg->notify_tx_done;
+ ctx->notify_state = cfg->notify_state;
+ ctx->notify_rx_intent_req = cfg->notify_rx_intent_req;
+ ctx->notify_rxv = cfg->notify_rxv;
+ ctx->notify_rx_sigs = cfg->notify_rx_sigs;
+ ctx->notify_rx_abort = cfg->notify_rx_abort;
+ ctx->notify_tx_abort = cfg->notify_tx_abort;
+ ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
+ ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+
+ if (!ctx->notify_rx_intent_req)
+ ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
+ if (!ctx->notify_rx_sigs)
+ ctx->notify_rx_sigs = glink_dummy_notify_rx_sigs;
+ if (!ctx->notify_rx_abort)
+ ctx->notify_rx_abort = glink_dummy_notify_rx_abort;
+ if (!ctx->notify_tx_abort)
+ ctx->notify_tx_abort = glink_dummy_notify_tx_abort;
+
+ ctx->local_xprt_req = best_id;
+ ctx->no_migrate = cfg->transport &&
+ !(cfg->options & GLINK_OPT_INITIAL_XPORT);
+ ctx->transport_ptr = transport_ptr;
+ ctx->local_open_state = GLINK_CHANNEL_OPENING;
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
+ __func__);
+
+ /* start local-open sequence */
+ ret = ctx->transport_ptr->ops->tx_cmd_ch_open(ctx->transport_ptr->ops,
+ ctx->lcid, cfg->name, best_id);
+ if (ret) {
+ /* failure to send open command (transport failure) */
+ ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+ GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n",
+ __func__, ret);
+ return ERR_PTR(ret);
+ }
+
+ GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
+ __func__, ctx);
+
+ return ctx;
+}
+EXPORT_SYMBOL(glink_open);
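+
+/*
+ * Example: minimal glink_open()/glink_close() usage. This is an
+ * illustrative sketch only, not part of this driver: the edge and
+ * channel names below are hypothetical, and the client_notify_*
+ * callbacks are assumed to match the notify_* signatures expected by
+ * struct glink_open_config.
+ *
+ *	struct glink_open_config cfg;
+ *	void *handle;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.edge = "example-edge";
+ *	cfg.name = "example-channel";
+ *	cfg.notify_rx = client_notify_rx;
+ *	cfg.notify_tx_done = client_notify_tx_done;
+ *	cfg.notify_state = client_notify_state;
+ *
+ *	handle = glink_open(&cfg);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *	...
+ *	glink_close(handle);
+ */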
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (ctx == NULL)
+ return -EINVAL;
+
+ return ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_channel_id_for_handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (ctx == NULL)
+ return NULL;
+
+ return ctx->name;
+}
+EXPORT_SYMBOL(glink_get_channel_name_for_handle);
+
+/**
+ * glink_delete_ch_from_list() - delete the channel from the list
+ * @ctx: Pointer to channel context.
+ * @add_flcid: Boolean value to decide whether the lcid should be added or not.
+ *
+ * This function deletes the channel from the list along with the debugfs
+ * information associated with it. It also adds the channel lcid to the free
+ * lcid list, except when the channel is deleted as part of SSR/unregister.
+ * It can only be called when the channel is fully closed.
+ */
+static void glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
+{
+	unsigned long flags;
+
+ spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+ if (!list_empty(&ctx->port_list_node))
+ list_del_init(&ctx->port_list_node);
+ spin_unlock_irqrestore(
+ &ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+ if (add_flcid)
+ glink_add_free_lcid_list(ctx);
+ mutex_lock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb3);
+ glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
+ mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb3);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle: handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return: 0 on success; -EINVAL for invalid handle, -EBUSY if close is
+ * already in progress, standard Linux error code otherwise.
+ */
+int glink_close(void *handle)
+{
+ struct glink_core_xprt_ctx *xprt_ctx = NULL;
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret = 0;
+ unsigned long flags;
+
+ if (!ctx)
+ return -EINVAL;
+
+ GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+ return 0;
+
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
+ /* close already pending */
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ if (!list_empty(&ctx->tx_ready_list_node))
+ list_del_init(&ctx->tx_ready_list_node);
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: local:%u->GLINK_CHANNEL_CLOSING\n",
+ __func__, ctx->local_open_state);
+ ctx->local_open_state = GLINK_CHANNEL_CLOSING;
+
+ ctx->pending_delete = true;
+ complete_all(&ctx->int_req_complete);
+
+ if (ctx->transport_ptr->local_state != GLINK_XPRT_DOWN) {
+ glink_qos_reset_priority(ctx);
+ ret = ctx->transport_ptr->ops->tx_cmd_ch_close(
+ ctx->transport_ptr->ops,
+ ctx->lcid);
+ } else if (!strcmp(ctx->transport_ptr->name, "dummy")) {
+ /*
+ * This check will avoid any race condition when clients call
+ * glink_close before the dummy xprt swapping happens in link
+ * down scenario.
+ */
+ ret = 0;
+ xprt_ctx = ctx->transport_ptr;
+ rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+ glink_core_ch_close_ack_common(ctx);
+ if (ch_is_fully_closed(ctx)) {
+ glink_delete_ch_from_list(ctx, false);
+ rwref_put(&xprt_ctx->xprt_state_lhb0);
+ if (list_empty(&xprt_ctx->channels))
+ /* For the xprt reference */
+ rwref_put(&xprt_ctx->xprt_state_lhb0);
+ } else {
+ GLINK_ERR_CH(ctx,
+ "channel Not closed yet local state [%d] remote_state [%d]\n",
+ ctx->local_open_state, ctx->remote_opened);
+ }
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(glink_close);
+
+/**
+ * glink_tx_pkt_release() - Release a packet's transmit information
+ * @tx_pkt_ref: Packet information which needs to be released.
+ *
+ * This function is called when all references to the packet information
+ * have been dropped.
+ */
+static void glink_tx_pkt_release(struct rwref_lock *tx_pkt_ref)
+{
+ struct glink_core_tx_pkt *tx_info = container_of(tx_pkt_ref,
+ struct glink_core_tx_pkt,
+ pkt_ref);
+
+	if (!list_empty(&tx_info->list_done))
+ list_del_init(&tx_info->list_done);
+ if (!list_empty(&tx_info->list_node))
+ list_del_init(&tx_info->list_node);
+ kfree(tx_info);
+}
+
+/**
+ * glink_tx_common() - Common TX implementation
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that will be returned to client with
+ * notify_tx_done notification
+ * @data: pointer to the data
+ * @iovec:	pointer to the vector (alternative to @data for vector form)
+ * @size:	size of data
+ * @vbuf_provider: Virtual Address-space Buffer Provider for the tx buffer.
+ * @pbuf_provider: Physical Address-space Buffer Provider for the tx buffer.
+ * @tx_flags: Flags to indicate transmit options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ * transmit operation (not fully opened); -EAGAIN if remote side
+ * has not provided a receive intent that is big enough.
+ */
+static int glink_tx_common(void *handle, void *pkt_priv,
+ void *data, void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+ uint32_t tx_flags)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ uint32_t riid;
+ int ret = 0;
+ struct glink_core_tx_pkt *tx_info;
+ size_t intent_size;
+ bool is_atomic =
+ tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
+ enum local_channel_state_e ch_st;
+ unsigned long flags;
+
+ if (!size)
+ return -EINVAL;
+
+ if (!ctx)
+ return -EINVAL;
+
+ rwref_get(&ctx->ch_state_lhc0);
+ if (!(vbuf_provider || pbuf_provider)) {
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EINVAL;
+ }
+
+ if (!ch_is_fully_opened(ctx)) {
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EBUSY;
+ }
+
+ if (size > GLINK_MAX_PKT_SIZE) {
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EINVAL;
+ }
+
+ if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
+ if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EOPNOTSUPP;
+ }
+ tracer_pkt_log_event(data, GLINK_CORE_TX);
+ }
+
+ /* find matching rx intent (first-fit algorithm for now) */
+ if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size)) {
+ if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
+ /* no rx intent available */
+ GLINK_ERR_CH(ctx,
+ "%s: R[%u]:%zu Intent not present for lcid\n",
+ __func__, riid, size);
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EAGAIN;
+ }
+ if (is_atomic && !(ctx->transport_ptr->capabilities &
+ GCAP_AUTO_QUEUE_RX_INT)) {
+ GLINK_ERR_CH(ctx,
+ "%s: Cannot request intent in atomic context\n",
+ __func__);
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EINVAL;
+ }
+
+ /* request intent of correct size */
+ reinit_completion(&ctx->int_req_ack_complete);
+ ret = ctx->transport_ptr->ops->tx_cmd_rx_intent_req(
+ ctx->transport_ptr->ops, ctx->lcid, size);
+ if (ret) {
+ GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
+ __func__, ret);
+ rwref_put(&ctx->ch_state_lhc0);
+ return ret;
+ }
+
+ while (ch_pop_remote_rx_intent(ctx, size, &riid,
+ &intent_size)) {
+ if (is_atomic) {
+ GLINK_ERR_CH(ctx,
+ "%s Intent of size %zu not ready\n",
+ __func__, size);
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EAGAIN;
+ }
+
+ /* wait for the remote intent req ack */
+ wait_for_completion(&ctx->int_req_ack_complete);
+ if (!ctx->int_req_ack) {
+ GLINK_ERR_CH(ctx,
+ "%s: Intent Request with size: %zu %s",
+ __func__, size,
+ "not granted for lcid\n");
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EAGAIN;
+ }
+
+ /* wait for the rx_intent from remote side */
+ wait_for_completion(&ctx->int_req_complete);
+ reinit_completion(&ctx->int_req_complete);
+ ch_st = ctx->local_open_state;
+ if (ch_st == GLINK_CHANNEL_CLOSING ||
+ ch_st == GLINK_CHANNEL_CLOSED) {
+ GLINK_ERR_CH(ctx,
+ "%s: Channel closed while waiting for intent\n",
+ __func__);
+ rwref_put(&ctx->ch_state_lhc0);
+ return -EBUSY;
+ }
+ }
+ }
+
+ if (!is_atomic) {
+ spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2,
+ flags);
+ glink_pm_qos_vote(ctx->transport_ptr);
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2,
+ flags);
+ }
+
+ GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
+ __func__, riid, intent_size,
+ data ? data : iovec, size, current->pid);
+ tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+ is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!tx_info) {
+ GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+ ch_push_remote_rx_intent(ctx, intent_size, riid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return -ENOMEM;
+ }
+ rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
+ INIT_LIST_HEAD(&tx_info->list_done);
+ INIT_LIST_HEAD(&tx_info->list_node);
+ tx_info->pkt_priv = pkt_priv;
+ tx_info->data = data;
+ tx_info->riid = riid;
+ tx_info->rcid = ctx->rcid;
+ tx_info->size = size;
+ tx_info->size_remaining = size;
+ tx_info->tracer_pkt = tx_flags & GLINK_TX_TRACER_PKT ? true : false;
+ tx_info->iovec = iovec ? iovec : (void *)tx_info;
+ tx_info->vprovider = vbuf_provider;
+ tx_info->pprovider = pbuf_provider;
+ tx_info->intent_size = intent_size;
+
+ /* schedule packet for transmit */
+ if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
+ (ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+ ret = xprt_single_threaded_tx(ctx->transport_ptr,
+ ctx, tx_info);
+ else
+ xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+
+ rwref_put(&ctx->ch_state_lhc0);
+ return ret;
+}
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that will be returned to client with
+ * notify_tx_done notification
+ * @data: pointer to the data
+ * @size: size of data
+ * @tx_flags: Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ * transmit operation (not fully opened); -EAGAIN if remote side
+ * has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+ uint32_t tx_flags)
+{
+ return glink_tx_common(handle, pkt_priv, data, NULL, size,
+ tx_linear_vbuf_provider, NULL, tx_flags);
+}
+EXPORT_SYMBOL(glink_tx);
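+
+/*
+ * Example: transmitting a buffer on a fully-opened channel. This is an
+ * illustrative sketch only. GLINK_TX_REQ_INTENT requests an rx intent
+ * from the remote side when none of sufficient size is queued; without
+ * it, glink_tx() fails fast with -EAGAIN. On success, the data buffer
+ * is expected to remain valid until the notify_tx_done() callback.
+ *
+ *	ret = glink_tx(handle, pkt_priv, data, size, GLINK_TX_REQ_INTENT);
+ *	if (ret)
+ *		... transmit was not scheduled; buffer may be freed ...
+ */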
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv:	opaque data value that is returned when a packet is received
+ * @size:	maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ struct glink_core_rx_intent *intent_ptr;
+ int ret = 0;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ /* Can only queue rx intents if channel is fully opened */
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ intent_ptr = ch_push_local_rx_intent(ctx, pkt_priv, size);
+ if (!intent_ptr) {
+ GLINK_ERR_CH(ctx,
+ "%s: Intent pointer allocation failed size[%zu]\n",
+ __func__, size);
+ return -ENOMEM;
+ }
+ GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
+ intent_ptr->intent_size);
+
+ if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+ return ret;
+
+ /* notify remote side of rx intent */
+ ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
+ ctx->transport_ptr->ops, ctx->lcid, size, intent_ptr->id);
+ if (ret)
+ /* unable to transmit, dequeue intent */
+ ch_remove_local_rx_intent(ctx, intent_ptr->id);
+
+ return ret;
+}
+EXPORT_SYMBOL(glink_queue_rx_intent);
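+
+/*
+ * Example: queueing an rx intent so that the remote side can transmit.
+ * This is an illustrative sketch only; the 1024-byte size is an
+ * arbitrary assumption.
+ *
+ *	ret = glink_queue_rx_intent(handle, client_priv, 1024);
+ *	if (ret)
+ *		... intent was not queued; a remote tx of this size
+ *		    cannot complete ...
+ */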
+
+/**
+ * glink_rx_intent_exists() - Check if an intent exists.
+ *
+ * @handle: handle returned by glink_open()
+ * @size: size of an intent to check or 0 for any intent
+ *
+ * Return: TRUE if an intent exists with a size greater than or equal to the
+ * requested size, else FALSE
+ */
+bool glink_rx_intent_exists(void *handle, size_t size)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+
+ if (!ctx || !ch_is_fully_opened(ctx))
+ return false;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+ if (size <= intent->intent_size) {
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1, flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+ return false;
+}
+EXPORT_SYMBOL(glink_rx_intent_exists);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle: handle returned by glink_open()
+ * @ptr: data pointer provided in the notify_rx() call
+ * @reuse: if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ struct glink_core_rx_intent *liid_ptr;
+ uint32_t id;
+ int ret = 0;
+
+ liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
+
+ if (IS_ERR_OR_NULL(liid_ptr)) {
+ /* invalid pointer */
+ GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+ return -EINVAL;
+ }
+
+ GLINK_INFO_PERF_CH(ctx, "%s: L[%u]: data[%p]. TID %u\n",
+ __func__, liid_ptr->id, ptr, current->pid);
+ id = liid_ptr->id;
+ if (reuse) {
+ ret = ctx->transport_ptr->ops->reuse_rx_intent(
+ ctx->transport_ptr->ops, liid_ptr);
+ if (ret) {
+ GLINK_ERR_CH(ctx, "%s: Intent reuse err %d for %p\n",
+ __func__, ret, ptr);
+ ret = -ENOBUFS;
+ reuse = false;
+ ctx->transport_ptr->ops->deallocate_rx_intent(
+ ctx->transport_ptr->ops, liid_ptr);
+ }
+ } else {
+ ctx->transport_ptr->ops->deallocate_rx_intent(
+ ctx->transport_ptr->ops, liid_ptr);
+ }
+ ch_remove_local_rx_intent_notified(ctx, liid_ptr, reuse);
+ /* send rx done */
+ ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
+ ctx->lcid, id, reuse);
+
+ return ret;
+}
+EXPORT_SYMBOL(glink_rx_done);
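+
+/*
+ * Example: returning a receive buffer from a client's notify_rx()
+ * callback. This is an illustrative sketch only and the notify_rx()
+ * signature shown is an assumption. Passing reuse=true re-queues the
+ * same intent instead of deallocating it.
+ *
+ *	static void client_notify_rx(void *handle, const void *priv,
+ *				     const void *pkt_priv, const void *ptr,
+ *				     size_t size)
+ *	{
+ *		... consume size bytes at ptr ...
+ *		glink_rx_done(handle, ptr, true);
+ *	}
+ */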
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that will be returned to client with
+ * notify_tx_done notification
+ * @iovec: pointer to the vector (must remain valid until notify_tx_done
+ * notification)
+ * @size: size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ *		in virtual address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ *		in physical address space
+ * @tx_flags: Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ * transmit operation (not fully opened); -EAGAIN if remote side has
+ * not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+ void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+ uint32_t tx_flags)
+{
+ return glink_tx_common(handle, pkt_priv, NULL, iovec, size,
+ vbuf_provider, pbuf_provider, tx_flags);
+}
+EXPORT_SYMBOL(glink_txv);
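+
+/*
+ * Example: a client-defined two-fragment vector and a virtual
+ * address-space buffer provider for glink_txv(). This is an
+ * illustrative sketch only; struct example_iovec is hypothetical. The
+ * provider returns the address of the fragment containing @offset and
+ * reports how many contiguous bytes are available there.
+ *
+ *	struct example_iovec {
+ *		void *frag[2];
+ *		size_t frag_size[2];
+ *	};
+ *
+ *	static void *example_vbuf_provider(void *iovec, size_t offset,
+ *					   size_t *size)
+ *	{
+ *		struct example_iovec *v = iovec;
+ *
+ *		if (offset < v->frag_size[0]) {
+ *			*size = v->frag_size[0] - offset;
+ *			return v->frag[0] + offset;
+ *		}
+ *		offset -= v->frag_size[0];
+ *		if (offset < v->frag_size[1]) {
+ *			*size = v->frag_size[1] - offset;
+ *			return v->frag[1] + offset;
+ *		}
+ *		return NULL;
+ *	}
+ *
+ *	ret = glink_txv(handle, pkt_priv, &vec, total_size,
+ *			example_vbuf_provider, NULL, 0);
+ */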
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle: handle returned by glink_open()
+ * @sigs: modified signal value
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ ctx->lsigs = sigs;
+
+ ret = ctx->transport_ptr->ops->tx_cmd_set_sigs(ctx->transport_ptr->ops,
+ ctx->lcid, ctx->lsigs);
+ GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL(glink_sigs_set);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (!ctx || !sigs)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ *sigs = ctx->lsigs;
+ return 0;
+}
+EXPORT_SYMBOL(glink_sigs_local_get);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void *handle, uint32_t *sigs)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (!ctx || !sigs)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ *sigs = ctx->rsigs;
+ return 0;
+}
+EXPORT_SYMBOL(glink_sigs_remote_get);
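+
+/*
+ * Example: setting the local signals and reading back both sides. This
+ * is an illustrative sketch only; the signal bit layout is defined by
+ * the transport/clients and the value below is an arbitrary
+ * assumption.
+ *
+ *	uint32_t sigs;
+ *
+ *	ret = glink_sigs_set(handle, 0x1);
+ *	ret = glink_sigs_local_get(handle, &sigs);
+ *	ret = glink_sigs_remote_get(handle, &sigs);
+ */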
+
+/**
+ * glink_register_link_state_cb() - Register for link state notification
+ * @link_info: Data structure containing the link identification and callback.
+ * @priv: Private information to be passed with the callback.
+ *
+ * This function is used to register a notifier to receive updates about a
+ * link's/transport's state. This notifier must be registered before
+ * attempting to open a channel.
+ *
+ * Return: a reference to the notifier handle on success, ERR_PTR() with a
+ * standard Linux error code on failure.
+ */
+void *glink_register_link_state_cb(struct glink_link_info *link_info,
+ void *priv)
+{
+ struct link_state_notifier_info *notif_info;
+
+ if (!link_info || !link_info->glink_link_state_notif_cb)
+ return ERR_PTR(-EINVAL);
+
+ notif_info = kzalloc(sizeof(*notif_info), GFP_KERNEL);
+ if (!notif_info) {
+ GLINK_ERR("%s: Error allocating link state notifier info\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (link_info->transport)
+ strlcpy(notif_info->transport, link_info->transport,
+ GLINK_NAME_SIZE);
+
+ if (link_info->edge)
+ strlcpy(notif_info->edge, link_info->edge, GLINK_NAME_SIZE);
+ notif_info->priv = priv;
+ notif_info->glink_link_state_notif_cb =
+ link_info->glink_link_state_notif_cb;
+
+ mutex_lock(&link_state_notifier_lock_lha1);
+ list_add_tail(&notif_info->list, &link_state_notifier_list);
+ mutex_unlock(&link_state_notifier_lock_lha1);
+
+ notif_if_up_all_xprts(notif_info);
+ return notif_info;
+}
+EXPORT_SYMBOL(glink_register_link_state_cb);
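+
+/*
+ * Example: registering for link-state notifications before opening a
+ * channel. This is an illustrative sketch only and the edge name is
+ * hypothetical. Leaving .transport unset matches any transport on the
+ * edge.
+ *
+ *	static void client_link_state_cb(
+ *		struct glink_link_state_cb_info *cb_info, void *priv)
+ *	{
+ *		if (cb_info->link_state == GLINK_LINK_STATE_UP)
+ *			... safe to glink_open() on cb_info->edge ...
+ *	}
+ *
+ *	struct glink_link_info link_info = {
+ *		.edge = "example-edge",
+ *		.glink_link_state_notif_cb = client_link_state_cb,
+ *	};
+ *	void *notif_handle;
+ *
+ *	notif_handle = glink_register_link_state_cb(&link_info, NULL);
+ *	if (IS_ERR(notif_handle))
+ *		return PTR_ERR(notif_handle);
+ */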
+
+/**
+ * glink_unregister_link_state_cb() - Unregister the link state notification
+ * @notif_handle: Handle to be unregistered.
+ *
+ * This function is used to unregister a notifier to stop receiving the updates
+ * about a link's/transport's state.
+ */
+void glink_unregister_link_state_cb(void *notif_handle)
+{
+ struct link_state_notifier_info *notif_info, *tmp_notif_info;
+
+ if (IS_ERR_OR_NULL(notif_handle))
+ return;
+
+ mutex_lock(&link_state_notifier_lock_lha1);
+ list_for_each_entry_safe(notif_info, tmp_notif_info,
+ &link_state_notifier_list, list) {
+ if (notif_info == notif_handle) {
+ list_del(&notif_info->list);
+ mutex_unlock(&link_state_notifier_lock_lha1);
+ kfree(notif_info);
+ return;
+ }
+ }
+ mutex_unlock(&link_state_notifier_lock_lha1);
+}
+EXPORT_SYMBOL(glink_unregister_link_state_cb);
+
+/**
+ * glink_qos_latency() - Register the latency QoS requirement
+ * @handle: Channel handle in which the latency is required.
+ * @latency_us: Latency requirement in units of micro-seconds.
+ * @pkt_size: Worst case packet size for which the latency is required.
+ *
+ * This function is used to register the latency requirement for a channel
+ * and ensures that the latency requirement for this channel is met without
+ * impacting the existing latency requirements of other channels.
+ *
+ * Return: 0 if QoS request is achievable, standard Linux error codes on error
+ */
+int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+ unsigned long req_rate_kBps;
+
+ if (!ctx || !latency_us || !pkt_size)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ req_rate_kBps = glink_qos_calc_rate_kBps(pkt_size, latency_us);
+
+ ret = glink_qos_assign_priority(ctx, req_rate_kBps);
+ if (ret < 0)
+ GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
+ __func__, latency_us, pkt_size);
+
+ return ret;
+}
+EXPORT_SYMBOL(glink_qos_latency);
+
+/**
+ * glink_qos_cancel() - Cancel or unregister the QoS request
+ * @handle: Channel handle for which the QoS request is cancelled.
+ *
+ * This function is used to cancel/unregister the QoS requests for a channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_cancel(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ ret = glink_qos_reset_priority(ctx);
+ return ret;
+}
+EXPORT_SYMBOL(glink_qos_cancel);
+
+/**
+ * glink_qos_start() - Start of the transmission requiring QoS
+ * @handle: Channel handle in which the transmit activity is performed.
+ *
+ * This function is called by clients to notify G-Link of the start of a
+ * transmission that requires a certain QoS. The clients
+ * must account for the QoS ramp time to ensure meeting the QoS.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_start(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+ unsigned long flags;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ ret = glink_qos_add_ch_tx_intent(ctx);
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ return ret;
+}
+EXPORT_SYMBOL(glink_qos_start);
+
+/**
+ * glink_qos_get_ramp_time() - Get the QoS ramp time
+ * @handle: Channel handle for which the QoS ramp time is required.
+ * @pkt_size: Worst case packet size.
+ *
+ * This function is called by the clients to obtain the ramp time required
+ * to meet the QoS requirements.
+ *
+ * Return: QoS ramp time is returned in units of micro-seconds on success,
+ * standard Linux error codes cast to unsigned long on error.
+ */
+unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (!ctx)
+ return (unsigned long)-EINVAL;
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ return (unsigned long)-EBUSY;
+ }
+
+ return ctx->transport_ptr->ops->get_power_vote_ramp_time(
+ ctx->transport_ptr->ops,
+ glink_prio_to_power_state(ctx->transport_ptr,
+ ctx->initial_priority));
+}
+EXPORT_SYMBOL(glink_qos_get_ramp_time);
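+
+/*
+ * Example: a QoS-aware transmit sequence. This is an illustrative
+ * sketch only; the 500 us latency and 8192-byte packet size are
+ * arbitrary assumptions. The client registers its requirement, allows
+ * for the power ramp time, brackets the transfer with
+ * glink_qos_start(), and cancels the request when done.
+ *
+ *	ret = glink_qos_latency(handle, 500, 8192);
+ *	ramp_us = glink_qos_get_ramp_time(handle, 8192);
+ *	... wait ramp_us microseconds ...
+ *	ret = glink_qos_start(handle);
+ *	ret = glink_tx(handle, pkt_priv, data, 8192, GLINK_TX_REQ_INTENT);
+ *	...
+ *	ret = glink_qos_cancel(handle);
+ */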
+
+/**
+ * glink_rpm_rx_poll() - Poll and receive any available events
+ * @handle: Channel handle in which this operation is performed.
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a return value > 0 is returned indicating that some events
+ * were processed, clients should only use the notification functions passed
+ * into glink_open() to determine if an entire packet has been received since
+ * some events may be internal details that are not visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx))
+ return -EBUSY;
+
+ if (!ctx->transport_ptr ||
+ !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+ return -EOPNOTSUPP;
+
+ return ctx->transport_ptr->ops->poll(ctx->transport_ptr->ops,
+ ctx->lcid);
+}
+EXPORT_SYMBOL(glink_rpm_rx_poll);
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle: Channel handle in which this operation is performed.
+ * @mask: Flag to mask or unmask the interrupt.
+ * @pstruct: Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * "mask" set to true indicates masking the interrupt and when set to false
+ * indicates unmasking the interrupt.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (!ch_is_fully_opened(ctx))
+ return -EBUSY;
+
+ if (!ctx->transport_ptr ||
+ !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+ return -EOPNOTSUPP;
+
+ return ctx->transport_ptr->ops->mask_rx_irq(ctx->transport_ptr->ops,
+ ctx->lcid, mask, pstruct);
+
+}
+EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
+
+/**
+ * glink_wait_link_down() - Get status of link
+ * @handle: Channel handle in which this operation is performed
+ *
+ * This function will query the transport for its status, to allow clients to
+ * proceed in cleanup operations.
+ *
+ * Return: Transport-specific link status; standard Linux error code if the
+ * operation is unsupported.
+ */
+int glink_wait_link_down(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+ if (!ctx)
+ return -EINVAL;
+ if (!ctx->transport_ptr)
+ return -EOPNOTSUPP;
+
+ return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
+}
+EXPORT_SYMBOL(glink_wait_link_down);
+
+/**
+ * glink_xprt_ctx_release() - Free the transport context
+ * @xprt_st_lock:	Handle to the rwref_lock associated with the transport
+ *
+ * This should only be called when the reference count associated with the
+ * transport goes to zero.
+ */
+void glink_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+ struct glink_dbgfs xprt_rm_dbgfs;
+ struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+ struct glink_core_xprt_ctx, xprt_state_lhb0);
+
+	GLINK_INFO("%s: freeing transport [%s->%s] context\n", __func__,
+ xprt_ctx->name,
+ xprt_ctx->edge);
+ xprt_rm_dbgfs.curr_name = xprt_ctx->name;
+ xprt_rm_dbgfs.par_name = "xprt";
+ glink_debugfs_remove_recur(&xprt_rm_dbgfs);
+	GLINK_INFO("%s: xprt debugfs removed\n", __func__);
+ destroy_workqueue(xprt_ctx->tx_wq);
+ glink_core_deinit_xprt_qos_cfg(xprt_ctx);
+ kfree(xprt_ctx);
+ xprt_ctx = NULL;
+}
+
+/**
+ * glink_dummy_xprt_ctx_release() - free the dummy transport context
+ * @xprt_st_lock: Handle to the rwref_lock associated with the transport.
+ *
+ * The release function is called when all the channels on this dummy
+ * transport are closed and the reference count goes to zero.
+ */
+static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+ struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+ struct glink_core_xprt_ctx, xprt_state_lhb0);
+
+	GLINK_INFO("%s: freeing transport [%s->%s] context\n", __func__,
+ xprt_ctx->name,
+ xprt_ctx->edge);
+ kfree(xprt_ctx);
+}
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name: Name of the transport.
+ * @id: Assigned id.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+ if (!strcmp(name, "smem")) {
+ *id = SMEM_XPRT_ID;
+ return 0;
+ }
+ if (!strcmp(name, "mailbox")) {
+ *id = SMEM_XPRT_ID;
+ return 0;
+ }
+ if (!strcmp(name, "smd_trans")) {
+ *id = SMD_TRANS_XPRT_ID;
+ return 0;
+ }
+ if (!strcmp(name, "lloop")) {
+ *id = LLOOP_XPRT_ID;
+ return 0;
+ }
+ if (!strcmp(name, "mock")) {
+ *id = MOCK_XPRT_ID;
+ return 0;
+ }
+ if (!strcmp(name, "mock_low")) {
+ *id = MOCK_XPRT_LOW_ID;
+ return 0;
+ }
+ if (!strcmp(name, "mock_high")) {
+ *id = MOCK_XPRT_HIGH_ID;
+ return 0;
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL(glink_xprt_name_to_id);
+
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle: The handle to the qos related node in DT.
+ * @cfg: The transport configuration to be filled.
+ *
+ * Return: 0 on Success, standard Linux error otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+ struct glink_core_transport_cfg *cfg)
+{
+ int rc, i;
+ char *key;
+ uint32_t num_flows;
+ uint32_t *arr32;
+
+ if (!phandle) {
+ GLINK_ERR("%s: phandle is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ key = "qcom,mtu-size";
+ rc = of_property_read_u32(phandle, key, (uint32_t *)&cfg->mtu);
+ if (rc) {
+ GLINK_ERR("%s: missing key %s\n", __func__, key);
+ return -ENODEV;
+ }
+
+ key = "qcom,tput-stats-cycle";
+ rc = of_property_read_u32(phandle, key, &cfg->token_count);
+ if (rc) {
+ GLINK_ERR("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto error;
+ }
+
+ key = "qcom,flow-info";
+ if (!of_find_property(phandle, key, &num_flows)) {
+ GLINK_ERR("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto error;
+ }
+
+ num_flows /= sizeof(uint32_t);
+ if (num_flows % 2) {
+ GLINK_ERR("%s: Invalid flow info length\n", __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ num_flows /= 2;
+ cfg->num_flows = num_flows;
+
+ cfg->flow_info = kmalloc_array(num_flows, sizeof(*(cfg->flow_info)),
+ GFP_KERNEL);
+ if (!cfg->flow_info) {
+ GLINK_ERR("%s: Memory allocation for flow info failed\n",
+ __func__);
+ rc = -ENOMEM;
+ goto error;
+ }
+ arr32 = kmalloc_array(num_flows * 2, sizeof(uint32_t), GFP_KERNEL);
+ if (!arr32) {
+ GLINK_ERR("%s: Memory allocation for temporary array failed\n",
+ __func__);
+ rc = -ENOMEM;
+ goto temp_mem_alloc_fail;
+ }
+
+ of_property_read_u32_array(phandle, key, arr32, num_flows * 2);
+
+ for (i = 0; i < num_flows; i++) {
+ cfg->flow_info[i].mtu_tx_time_us = arr32[2 * i];
+ cfg->flow_info[i].power_state = arr32[2 * i + 1];
+ }
+
+ kfree(arr32);
+ of_node_put(phandle);
+ return 0;
+
+temp_mem_alloc_fail:
+ kfree(cfg->flow_info);
+error:
+ cfg->mtu = 0;
+ cfg->token_count = 0;
+ cfg->num_flows = 0;
+ cfg->flow_info = NULL;
+ return rc;
+}
+EXPORT_SYMBOL(of_get_glink_core_qos_cfg);
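+
+/*
+ * Example: a device tree fragment consumed by
+ * of_get_glink_core_qos_cfg(). This is an illustrative sketch only;
+ * the node name and values are arbitrary assumptions. qcom,flow-info
+ * is a list of <mtu-tx-time-us power-state> pairs, one pair per flow.
+ *
+ *	qcom,glink-qos-config {
+ *		qcom,mtu-size = <0x800>;
+ *		qcom,tput-stats-cycle = <0xa>;
+ *		qcom,flow-info = <0x10 0x0>,
+ *				 <0x8 0x1>;
+ *	};
+ */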
+
+/**
+ * glink_core_init_xprt_qos_cfg() - Initialize a transport's QoS configuration
+ * @xprt_ptr: Transport to be initialized with QoS configuration.
+ * @cfg: Data structure containing QoS configuration.
+ *
+ * This function is used during the transport registration to initialize it
+ * with QoS configuration.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_core_init_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr,
+ struct glink_core_transport_cfg *cfg)
+{
+ int i;
+
+ xprt_ptr->mtu = cfg->mtu ? cfg->mtu : GLINK_QOS_DEF_MTU;
+ xprt_ptr->num_priority = cfg->num_flows ? cfg->num_flows :
+ GLINK_QOS_DEF_NUM_PRIORITY;
+ xprt_ptr->token_count = cfg->token_count ? cfg->token_count :
+ GLINK_QOS_DEF_NUM_TOKENS;
+
+	xprt_ptr->prio_bin = kcalloc(xprt_ptr->num_priority,
+				     sizeof(struct glink_qos_priority_bin),
+				     GFP_KERNEL);
+ if (!xprt_ptr->prio_bin) {
+ GLINK_ERR("%s: unable to allocate priority bins\n", __func__);
+ return -ENOMEM;
+ }
+ for (i = 1; i < xprt_ptr->num_priority; i++) {
+ xprt_ptr->prio_bin[i].max_rate_kBps =
+ glink_qos_calc_rate_kBps(xprt_ptr->mtu,
+ cfg->flow_info[i].mtu_tx_time_us);
+ xprt_ptr->prio_bin[i].power_state =
+ cfg->flow_info[i].power_state;
+ INIT_LIST_HEAD(&xprt_ptr->prio_bin[i].tx_ready);
+ }
+ xprt_ptr->prio_bin[0].max_rate_kBps = 0;
+ if (cfg->flow_info)
+ xprt_ptr->prio_bin[0].power_state =
+ cfg->flow_info[0].power_state;
+ INIT_LIST_HEAD(&xprt_ptr->prio_bin[0].tx_ready);
+ xprt_ptr->threshold_rate_kBps =
+ xprt_ptr->prio_bin[xprt_ptr->num_priority - 1].max_rate_kBps;
+
+ return 0;
+}
+
+/**
+ * glink_core_deinit_xprt_qos_cfg() - Reset a transport's QoS configuration
+ * @xprt_ptr: Transport to be deinitialized.
+ *
+ * This function is used during the time of transport unregistration to
+ * de-initialize the QoS configuration from a transport.
+ */
+static void glink_core_deinit_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ kfree(xprt_ptr->prio_bin);
+ xprt_ptr->prio_bin = NULL;
+ xprt_ptr->mtu = 0;
+ xprt_ptr->num_priority = 0;
+ xprt_ptr->token_count = 0;
+ xprt_ptr->threshold_rate_kBps = 0;
+}
+
+/**
+ * glink_core_register_transport() - register a new transport
+ * @if_ptr: The interface to the transport.
+ * @cfg: Description and configuration of the transport.
+ *
+ * Return: 0 on success, -EINVAL for invalid input.
+ */
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+ struct glink_core_transport_cfg *cfg)
+{
+ struct glink_core_xprt_ctx *xprt_ptr;
+ size_t len;
+ uint16_t id;
+ int ret;
+
+ if (!if_ptr || !cfg || !cfg->name || !cfg->edge)
+ return -EINVAL;
+
+ len = strlen(cfg->name);
+ if (len == 0 || len >= GLINK_NAME_SIZE)
+ return -EINVAL;
+
+ len = strlen(cfg->edge);
+ if (len == 0 || len >= GLINK_NAME_SIZE)
+ return -EINVAL;
+
+ if (cfg->versions_entries < 1)
+ return -EINVAL;
+
+ ret = glink_xprt_name_to_id(cfg->name, &id);
+ if (ret)
+ return ret;
+
+ xprt_ptr = kzalloc(sizeof(struct glink_core_xprt_ctx), GFP_KERNEL);
+ if (xprt_ptr == NULL)
+ return -ENOMEM;
+
+ xprt_ptr->id = id;
+ rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+ glink_xprt_ctx_release);
+ strlcpy(xprt_ptr->name, cfg->name, GLINK_NAME_SIZE);
+ strlcpy(xprt_ptr->edge, cfg->edge, GLINK_NAME_SIZE);
+ xprt_ptr->versions = cfg->versions;
+ xprt_ptr->versions_entries = cfg->versions_entries;
+ xprt_ptr->local_version_idx = cfg->versions_entries - 1;
+ xprt_ptr->remote_version_idx = cfg->versions_entries - 1;
+ xprt_ptr->l_features =
+ cfg->versions[cfg->versions_entries - 1].features;
+ if (!if_ptr->poll)
+ if_ptr->poll = dummy_poll;
+ if (!if_ptr->mask_rx_irq)
+ if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+ if (!if_ptr->reuse_rx_intent)
+ if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+ if (!if_ptr->wait_link_down)
+ if_ptr->wait_link_down = dummy_wait_link_down;
+ if (!if_ptr->tx_cmd_tracer_pkt)
+ if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt;
+ if (!if_ptr->get_power_vote_ramp_time)
+ if_ptr->get_power_vote_ramp_time =
+ dummy_get_power_vote_ramp_time;
+ if (!if_ptr->power_vote)
+ if_ptr->power_vote = dummy_power_vote;
+ if (!if_ptr->power_unvote)
+ if_ptr->power_unvote = dummy_power_unvote;
+ xprt_ptr->capabilities = 0;
+ xprt_ptr->ops = if_ptr;
+ spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+ xprt_ptr->next_lcid = 1; /* 0 reserved for default unconfigured */
+ INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+ xprt_ptr->max_cid = cfg->max_cid;
+ xprt_ptr->max_iid = cfg->max_iid;
+ xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ xprt_ptr->remote_neg_completed = false;
+ INIT_LIST_HEAD(&xprt_ptr->channels);
+ ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg);
+ if (ret < 0) {
+ kfree(xprt_ptr);
+ return ret;
+ }
+ spin_lock_init(&xprt_ptr->tx_ready_lock_lhb2);
+ mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb3);
+ INIT_WORK(&xprt_ptr->tx_work, tx_work_func);
+ xprt_ptr->tx_wq = create_singlethread_workqueue("glink_tx");
+ if (IS_ERR_OR_NULL(xprt_ptr->tx_wq)) {
+ GLINK_ERR("%s: unable to allocate workqueue\n", __func__);
+ glink_core_deinit_xprt_qos_cfg(xprt_ptr);
+ kfree(xprt_ptr);
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&xprt_ptr->pm_qos_work, glink_pm_qos_cancel_worker);
+ pm_qos_add_request(&xprt_ptr->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ if_ptr->glink_core_priv = xprt_ptr;
+ if_ptr->glink_core_if_ptr = &core_impl;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_add_tail(&xprt_ptr->list_node, &transport_list);
+ mutex_unlock(&transport_list_lock_lha0);
+ glink_debugfs_add_xprt(xprt_ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL(glink_core_register_transport);
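+
+/*
+ * Illustrative registration sketch (the transport plug-in below is
+ * hypothetical; my_versions, my_if, my_negotiate_features and node are
+ * assumptions, not part of this driver):
+ *
+ *	static struct glink_core_version my_versions[] = {
+ *		{ .version = 1, .features = 0,
+ *		  .negotiate_features = my_negotiate_features },
+ *	};
+ *	static struct glink_transport_if my_if = { ... };
+ *	struct glink_core_transport_cfg cfg = {
+ *		.name = "my_xprt",
+ *		.edge = "remote_proc",
+ *		.versions = my_versions,
+ *		.versions_entries = ARRAY_SIZE(my_versions),
+ *		.max_cid = 0xFF,
+ *		.max_iid = 0xFF,
+ *	};
+ *
+ *	of_get_glink_core_qos_cfg(node, &cfg);	(optional QoS config from DT)
+ *	ret = glink_core_register_transport(&my_if, &cfg);
+ */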
+
+/**
+ * glink_core_unregister_transport() - unregister a transport
+ *
+ * @if_ptr: The interface to the transport.
+ */
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+ GLINK_DBG_XPRT(xprt_ptr, "%s: destroying transport\n", __func__);
+ if (xprt_ptr->local_state != GLINK_XPRT_DOWN) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: link_down should have been called before this\n",
+ __func__);
+ return;
+ }
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_del(&xprt_ptr->list_node);
+ mutex_unlock(&transport_list_lock_lha0);
+ flush_delayed_work(&xprt_ptr->pm_qos_work);
+ pm_qos_remove_request(&xprt_ptr->pm_qos_req);
+ rwref_put(&xprt_ptr->xprt_state_lhb0);
+}
+EXPORT_SYMBOL(glink_core_unregister_transport);
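+
+/*
+ * Illustrative teardown sketch: unregistration is rejected unless the
+ * transport has already reported link-down (local_state must be
+ * GLINK_XPRT_DOWN), so a plug-in (my_if is hypothetical) would do:
+ *
+ *	my_if.glink_core_if_ptr->link_down(&my_if);
+ *	glink_core_unregister_transport(&my_if);
+ */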
+
+/**
+ * glink_core_link_up() - transport link-up notification
+ *
+ * @if_ptr: pointer to transport interface
+ */
+static void glink_core_link_up(struct glink_transport_if *if_ptr)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+ /* start local negotiation */
+ xprt_ptr->local_state = GLINK_XPRT_NEGOTIATING;
+ xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
+ xprt_ptr->l_features =
+ xprt_ptr->versions[xprt_ptr->local_version_idx].features;
+ if_ptr->tx_cmd_version(if_ptr,
+ xprt_ptr->versions[xprt_ptr->local_version_idx].version,
+ xprt_ptr->versions[xprt_ptr->local_version_idx].features);
+}
+
+/**
+ * glink_core_link_down() - transport link-down notification
+ *
+ * @if_ptr: pointer to transport interface
+ */
+static void glink_core_link_down(struct glink_transport_if *if_ptr)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+ rwref_write_get(&xprt_ptr->xprt_state_lhb0);
+ xprt_ptr->next_lcid = 1;
+ xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
+ xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1;
+ xprt_ptr->l_features =
+ xprt_ptr->versions[xprt_ptr->local_version_idx].features;
+ xprt_ptr->remote_neg_completed = false;
+ rwref_write_put(&xprt_ptr->xprt_state_lhb0);
+ GLINK_DBG_XPRT(xprt_ptr,
+ "%s: Flushing work from tx_wq. Thread: %u\n", __func__,
+ current->pid);
+ flush_workqueue(xprt_ptr->tx_wq);
+ glink_core_channel_cleanup(xprt_ptr);
+ check_link_notifier_and_notify(xprt_ptr, GLINK_LINK_STATE_DOWN);
+}
+
+/**
+ * glink_create_dummy_xprt_ctx() - create a dummy transport that replaces all
+ * the transport interface functions with a dummy
+ * @orig_xprt_ctx: Pointer to the original transport context.
+ *
+ * The dummy transport is used only when it is swapped with the actual transport
+ * pointer in the SSR/unregister case.
+ *
+ * Return: Pointer to dummy transport context.
+ */
+static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
+ struct glink_core_xprt_ctx *orig_xprt_ctx)
+{
+ struct glink_core_xprt_ctx *xprt_ptr;
+ struct glink_transport_if *if_ptr;
+
+ xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL);
+ if (!xprt_ptr)
+ return ERR_PTR(-ENOMEM);
+ if_ptr = kmalloc(sizeof(*if_ptr), GFP_KERNEL);
+ if (!if_ptr) {
+ kfree(xprt_ptr);
+ return ERR_PTR(-ENOMEM);
+ }
+ rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+ glink_dummy_xprt_ctx_release);
+
+ strlcpy(xprt_ptr->name, "dummy", GLINK_NAME_SIZE);
+ strlcpy(xprt_ptr->edge, orig_xprt_ctx->edge, GLINK_NAME_SIZE);
+ if_ptr->poll = dummy_poll;
+ if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+ if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+ if_ptr->wait_link_down = dummy_wait_link_down;
+ if_ptr->allocate_rx_intent = dummy_allocate_rx_intent;
+ if_ptr->deallocate_rx_intent = dummy_deallocate_rx_intent;
+ if_ptr->tx_cmd_local_rx_intent = dummy_tx_cmd_local_rx_intent;
+ if_ptr->tx_cmd_local_rx_done = dummy_tx_cmd_local_rx_done;
+ if_ptr->tx = dummy_tx;
+ if_ptr->tx_cmd_rx_intent_req = dummy_tx_cmd_rx_intent_req;
+ if_ptr->tx_cmd_remote_rx_intent_req_ack =
+ dummy_tx_cmd_remote_rx_intent_req_ack;
+ if_ptr->tx_cmd_set_sigs = dummy_tx_cmd_set_sigs;
+ if_ptr->tx_cmd_ch_close = dummy_tx_cmd_ch_close;
+ if_ptr->tx_cmd_ch_remote_close_ack = dummy_tx_cmd_ch_remote_close_ack;
+
+ xprt_ptr->ops = if_ptr;
+ spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+ INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+ xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ xprt_ptr->remote_neg_completed = false;
+ INIT_LIST_HEAD(&xprt_ptr->channels);
+ spin_lock_init(&xprt_ptr->tx_ready_lock_lhb2);
+ mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb3);
+ return xprt_ptr;
+}
+
+/**
+ * glink_core_channel_cleanup() - cleanup all channels for the transport
+ *
+ * @xprt_ptr: pointer to transport context
+ *
+ * This function should be called either from link_down handling or from SSR.
+ */
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ unsigned long flags, d_flags;
+ struct channel_ctx *ctx, *tmp_ctx;
+ struct channel_lcid *temp_lcid, *temp_lcid1;
+ struct glink_core_xprt_ctx *dummy_xprt_ctx;
+
+ dummy_xprt_ctx = glink_create_dummy_xprt_ctx(xprt_ptr);
+ if (IS_ERR_OR_NULL(dummy_xprt_ctx)) {
+ GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
+ return;
+ }
+
+ rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+ spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
+ port_list_node) {
+ rwref_get(&ctx->ch_state_lhc0);
+ if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
+ ctx->local_open_state == GLINK_CHANNEL_OPENING) {
+ rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+ spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+ d_flags);
+ list_move_tail(&ctx->port_list_node,
+ &dummy_xprt_ctx->channels);
+ spin_unlock_irqrestore(
+ &dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ ctx->transport_ptr = dummy_xprt_ctx;
+ } else {
+ /* local state is in either CLOSED or CLOSING */
+ spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
+ flags);
+ glink_core_remote_close_common(ctx);
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
+ glink_core_ch_close_ack_common(ctx);
+ /* Channel should be fully closed now. Delete here */
+ if (ch_is_fully_closed(ctx))
+ glink_delete_ch_from_list(ctx, false);
+ spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+ }
+ list_for_each_entry_safe(temp_lcid, temp_lcid1,
+ &xprt_ptr->free_lcid_list, list_node) {
+ list_del(&temp_lcid->list_node);
+		kfree(temp_lcid);
+ }
+ spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+
+ spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ list_for_each_entry_safe(ctx, tmp_ctx, &dummy_xprt_ctx->channels,
+ port_list_node) {
+ rwref_get(&ctx->ch_state_lhc0);
+ spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+ d_flags);
+ glink_core_remote_close_common(ctx);
+ spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+ d_flags);
+ rwref_put(&ctx->ch_state_lhc0);
+ }
+ spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ rwref_put(&dummy_xprt_ctx->xprt_state_lhb0);
+}
+
+/**
+ * glink_core_rx_cmd_version() - receive version/features from remote system
+ *
+ * @if_ptr: pointer to transport interface
+ * @r_version: remote version
+ * @r_features: remote features
+ *
+ * This function is called in response to a remote-initiated version/feature
+ * negotiation sequence.
+ */
+static void glink_core_rx_cmd_version(struct glink_transport_if *if_ptr,
+ uint32_t r_version, uint32_t r_features)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+ const struct glink_core_version *versions = xprt_ptr->versions;
+ bool neg_complete = false;
+ uint32_t l_version;
+
+ if (xprt_is_fully_opened(xprt_ptr)) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Negotiation already complete\n", __func__);
+ return;
+ }
+
+ l_version = versions[xprt_ptr->remote_version_idx].version;
+
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+ l_version, xprt_ptr->l_features, r_version, r_features);
+
+ if (l_version > r_version) {
+ /* Find matching version */
+ while (true) {
+ uint32_t rver_idx;
+
+ if (xprt_ptr->remote_version_idx == 0) {
+ /* version negotiation failed */
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Transport negotiation failed\n",
+ __func__);
+ l_version = 0;
+ xprt_ptr->l_features = 0;
+ break;
+ }
+ --xprt_ptr->remote_version_idx;
+ rver_idx = xprt_ptr->remote_version_idx;
+
+ if (versions[rver_idx].version <= r_version) {
+ /* found a potential match */
+ l_version = versions[rver_idx].version;
+ xprt_ptr->l_features =
+ versions[rver_idx].features;
+ break;
+ }
+ }
+ }
+
+ if (l_version == r_version) {
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: Remote and local version are matched %x:%08x\n",
+ __func__, r_version, r_features);
+ if (xprt_ptr->l_features != r_features) {
+ uint32_t rver_idx = xprt_ptr->remote_version_idx;
+
+ xprt_ptr->l_features = versions[rver_idx]
+ .negotiate_features(if_ptr,
+ &xprt_ptr->versions[rver_idx],
+ r_features);
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: negotiate features %x:%08x\n",
+ __func__, l_version, xprt_ptr->l_features);
+ }
+ neg_complete = true;
+ }
+ if_ptr->tx_cmd_version_ack(if_ptr, l_version, xprt_ptr->l_features);
+
+ if (neg_complete) {
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: Remote negotiation complete %x:%08x\n", __func__,
+ l_version, xprt_ptr->l_features);
+
+ if (xprt_ptr->local_state == GLINK_XPRT_OPENED) {
+ xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+ l_version,
+ xprt_ptr->l_features);
+ }
+ if_ptr->glink_core_priv->remote_neg_completed = true;
+ if (xprt_is_fully_opened(xprt_ptr))
+ check_link_notifier_and_notify(xprt_ptr,
+ GLINK_LINK_STATE_UP);
+ }
+}
+
+/**
+ * glink_core_rx_cmd_version_ack() - receive negotiation ack from remote system
+ *
+ * @if_ptr: pointer to transport interface
+ * @r_version: remote version response
+ * @r_features: remote features response
+ *
+ * This function is called in response to a local-initiated version/feature
+ * negotiation sequence and is the counter-offer from the remote side based
+ * upon the initial version and feature set requested.
+ */
+static void glink_core_rx_cmd_version_ack(struct glink_transport_if *if_ptr,
+ uint32_t r_version, uint32_t r_features)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+ const struct glink_core_version *versions = xprt_ptr->versions;
+ uint32_t l_version;
+ bool neg_complete = false;
+
+ if (xprt_is_fully_opened(xprt_ptr)) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Negotiation already complete\n", __func__);
+ return;
+ }
+
+ l_version = versions[xprt_ptr->local_version_idx].version;
+
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+ l_version, xprt_ptr->l_features, r_version, r_features);
+
+ if (l_version > r_version) {
+ /* find matching version */
+ while (true) {
+ uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+ if (xprt_ptr->local_version_idx == 0) {
+ /* version negotiation failed */
+ xprt_ptr->local_state = GLINK_XPRT_FAILED;
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Transport negotiation failed\n",
+ __func__);
+ l_version = 0;
+ xprt_ptr->l_features = 0;
+ break;
+ }
+ --xprt_ptr->local_version_idx;
+ lver_idx = xprt_ptr->local_version_idx;
+
+ if (versions[lver_idx].version <= r_version) {
+ /* found a potential match */
+ l_version = versions[lver_idx].version;
+ xprt_ptr->l_features =
+ versions[lver_idx].features;
+ break;
+ }
+ }
+ } else if (l_version == r_version) {
+ if (xprt_ptr->l_features != r_features) {
+ /* version matches, negotiate features */
+ uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+ xprt_ptr->l_features = versions[lver_idx]
+ .negotiate_features(if_ptr,
+ &versions[lver_idx],
+ r_features);
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: negotiation features %x:%08x\n",
+ __func__, l_version, xprt_ptr->l_features);
+ } else {
+ neg_complete = true;
+ }
+ } else {
+ /*
+ * r_version > l_version
+ *
+ * Remote responded with a version greater than what we
+ * requested which is invalid and is treated as failure of the
+ * negotiation algorithm.
+ */
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: [local]%x:%08x [remote]%x:%08x neg failure\n",
+ __func__, l_version, xprt_ptr->l_features, r_version,
+ r_features);
+ xprt_ptr->local_state = GLINK_XPRT_FAILED;
+ l_version = 0;
+ xprt_ptr->l_features = 0;
+ }
+
+ if (neg_complete) {
+ /* negotiation complete */
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: Local negotiation complete %x:%08x\n",
+ __func__, l_version, xprt_ptr->l_features);
+
+ if (xprt_ptr->remote_neg_completed) {
+ xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+ l_version,
+ xprt_ptr->l_features);
+ }
+
+ xprt_ptr->local_state = GLINK_XPRT_OPENED;
+ if (xprt_is_fully_opened(xprt_ptr))
+ check_link_notifier_and_notify(xprt_ptr,
+ GLINK_LINK_STATE_UP);
+ } else {
+ if_ptr->tx_cmd_version(if_ptr, l_version, xprt_ptr->l_features);
+ }
+}
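+
+/*
+ * Worked example of the negotiation above (illustrative): with a local
+ * version table {1, 2, 5}, the link-up handshake first proposes version 5.
+ * If the remote acks with r_version = 2, l_version (5) > r_version, so
+ * local_version_idx steps down until versions[idx].version <= 2, selecting
+ * version 2, and tx_cmd_version() re-proposes it. Negotiation completes
+ * once both sides agree on the version and feature set.
+ */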
+
+/**
+ * find_l_ctx_get() - find a local channel context based on a remote one
+ * @r_ctx: The remote channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding local ctx or NULL if not found.
+ */
+static struct channel_ctx *find_l_ctx_get(struct channel_ctx *r_ctx)
+{
+ struct glink_core_xprt_ctx *xprt;
+ struct channel_ctx *ctx;
+ unsigned long flags;
+ struct channel_ctx *l_ctx = NULL;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node)
+ if (!strcmp(r_ctx->transport_ptr->edge, xprt->edge)) {
+ rwref_write_get(&xprt->xprt_state_lhb0);
+ if (xprt->local_state != GLINK_XPRT_OPENED) {
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ continue;
+ }
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(ctx, &xprt->channels,
+ port_list_node)
+ if (!strcmp(ctx->name, r_ctx->name) &&
+ ctx->local_xprt_req &&
+ ctx->local_xprt_resp) {
+ l_ctx = ctx;
+ rwref_get(&l_ctx->ch_state_lhc0);
+ }
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+ flags);
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+
+ return l_ctx;
+}
+
+/**
+ * find_r_ctx_get() - find a remote channel context based on a local one
+ * @l_ctx: The local channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding remote ctx or NULL if not found.
+ */
+static struct channel_ctx *find_r_ctx_get(struct channel_ctx *l_ctx)
+{
+ struct glink_core_xprt_ctx *xprt;
+ struct channel_ctx *ctx;
+ unsigned long flags;
+ struct channel_ctx *r_ctx = NULL;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node)
+ if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge)) {
+ rwref_write_get(&xprt->xprt_state_lhb0);
+ if (xprt->local_state != GLINK_XPRT_OPENED) {
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ continue;
+ }
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(ctx, &xprt->channels,
+ port_list_node)
+ if (!strcmp(ctx->name, l_ctx->name) &&
+ ctx->remote_xprt_req &&
+ ctx->remote_xprt_resp) {
+ r_ctx = ctx;
+ rwref_get(&r_ctx->ch_state_lhc0);
+ }
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+ flags);
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+
+ return r_ctx;
+}
+
+/**
+ * will_migrate() - will a channel migrate to a different transport
+ * @l_ctx: The local channel to migrate.
+ * @r_ctx: The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.
+ *
+ * Return: Bool indicating if migration will occur.
+ */
+static bool will_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+ uint16_t new_xprt;
+ bool migrate = false;
+
+ if (!r_ctx)
+ r_ctx = find_r_ctx_get(l_ctx);
+ else
+ rwref_get(&r_ctx->ch_state_lhc0);
+ if (!r_ctx)
+ return migrate;
+
+ if (!l_ctx)
+ l_ctx = find_l_ctx_get(r_ctx);
+ else
+ rwref_get(&l_ctx->ch_state_lhc0);
+ if (!l_ctx)
+ goto exit;
+
+ if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+ l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+ goto exit;
+ if (l_ctx->no_migrate)
+ goto exit;
+
+ if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+ l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+ new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+ if (new_xprt == l_ctx->transport_ptr->id)
+ goto exit;
+
+ migrate = true;
+exit:
+ if (l_ctx)
+ rwref_put(&l_ctx->ch_state_lhc0);
+ if (r_ctx)
+ rwref_put(&r_ctx->ch_state_lhc0);
+
+ return migrate;
+}
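+
+/*
+ * Worked example (illustrative): a channel currently on transport id 3
+ * with l_ctx->local_xprt_req = 1 and r_ctx->remote_xprt_req = 2 yields
+ * new_xprt = max(1, 2) = 2. Since 2 != 3, will_migrate() returns true.
+ * max() picks the higher (less preferred) of the two requested ids, i.e.
+ * the best transport that both sides are willing to accept.
+ */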
+
+/**
+ * ch_migrate() - migrate a channel to a different transport
+ * @l_ctx: The local channel to migrate.
+ * @r_ctx: The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.
+ *
+ * Return: Bool indicating if migration occurred.
+ */
+static bool ch_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+ uint16_t new_xprt;
+ struct glink_core_xprt_ctx *xprt;
+ unsigned long flags;
+ struct channel_lcid *flcid;
+ uint16_t best_xprt = USHRT_MAX;
+ struct channel_ctx *ctx_clone;
+ bool migrated = false;
+
+ if (!r_ctx)
+ r_ctx = find_r_ctx_get(l_ctx);
+ else
+ rwref_get(&r_ctx->ch_state_lhc0);
+ if (!r_ctx)
+ return migrated;
+
+ if (!l_ctx)
+ l_ctx = find_l_ctx_get(r_ctx);
+ else
+ rwref_get(&l_ctx->ch_state_lhc0);
+ if (!l_ctx) {
+ rwref_put(&r_ctx->ch_state_lhc0);
+ return migrated;
+ }
+
+ if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+ l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+ goto exit;
+ if (l_ctx->no_migrate)
+ goto exit;
+
+ if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+ l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+ new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+ if (new_xprt == l_ctx->transport_ptr->id)
+ goto exit;
+
+ ctx_clone = kmalloc(sizeof(*ctx_clone), GFP_KERNEL);
+ if (!ctx_clone)
+ goto exit;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node)
+ if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+ if (xprt->id == new_xprt)
+ break;
+ mutex_unlock(&transport_list_lock_lha0);
+
+ spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+ list_del_init(&l_ctx->port_list_node);
+ spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+
+ memcpy(ctx_clone, l_ctx, sizeof(*ctx_clone));
+ ctx_clone->local_xprt_req = 0;
+ ctx_clone->local_xprt_resp = 0;
+ ctx_clone->remote_xprt_req = 0;
+ ctx_clone->remote_xprt_resp = 0;
+ ctx_clone->notify_state = NULL;
+ ctx_clone->local_open_state = GLINK_CHANNEL_CLOSING;
+ rwref_lock_init(&ctx_clone->ch_state_lhc0, glink_ch_ctx_release);
+ init_completion(&ctx_clone->int_req_ack_complete);
+ init_completion(&ctx_clone->int_req_complete);
+ spin_lock_init(&ctx_clone->local_rx_intent_lst_lock_lhc1);
+ spin_lock_init(&ctx_clone->rmt_rx_intent_lst_lock_lhc2);
+ INIT_LIST_HEAD(&ctx_clone->tx_ready_list_node);
+ INIT_LIST_HEAD(&ctx_clone->local_rx_intent_list);
+ INIT_LIST_HEAD(&ctx_clone->local_rx_intent_ntfy_list);
+ INIT_LIST_HEAD(&ctx_clone->local_rx_intent_free_list);
+ INIT_LIST_HEAD(&ctx_clone->rmt_rx_intent_list);
+ INIT_LIST_HEAD(&ctx_clone->tx_active);
+ spin_lock_init(&ctx_clone->tx_pending_rmt_done_lock_lhc4);
+ INIT_LIST_HEAD(&ctx_clone->tx_pending_remote_done);
+ spin_lock_init(&ctx_clone->tx_lists_lock_lhc3);
+ spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+ list_add_tail(&ctx_clone->port_list_node,
+ &l_ctx->transport_ptr->channels);
+ spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+
+ l_ctx->transport_ptr->ops->tx_cmd_ch_close(l_ctx->transport_ptr->ops,
+ l_ctx->lcid);
+
+ l_ctx->transport_ptr = xprt;
+ l_ctx->local_xprt_req = 0;
+ l_ctx->local_xprt_resp = 0;
+ if (new_xprt != r_ctx->transport_ptr->id) {
+ r_ctx->local_xprt_req = 0;
+ r_ctx->local_xprt_resp = 0;
+ r_ctx->remote_xprt_req = 0;
+ r_ctx->remote_xprt_resp = 0;
+
+ l_ctx->remote_xprt_req = 0;
+ l_ctx->remote_xprt_resp = 0;
+ l_ctx->remote_opened = false;
+
+ rwref_write_get(&xprt->xprt_state_lhb0);
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ if (list_empty(&xprt->free_lcid_list)) {
+ l_ctx->lcid = xprt->next_lcid++;
+ } else {
+ flcid = list_first_entry(&xprt->free_lcid_list,
+ struct channel_lcid, list_node);
+ l_ctx->lcid = flcid->lcid;
+ list_del(&flcid->list_node);
+ kfree(flcid);
+ }
+ list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ } else {
+ l_ctx->lcid = r_ctx->lcid;
+ l_ctx->rcid = r_ctx->rcid;
+ l_ctx->remote_opened = r_ctx->remote_opened;
+ l_ctx->remote_xprt_req = r_ctx->remote_xprt_req;
+ l_ctx->remote_xprt_resp = r_ctx->remote_xprt_resp;
+ glink_delete_ch_from_list(r_ctx, false);
+
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+ }
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node)
+ if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+ if (xprt->id < best_xprt)
+ best_xprt = xprt->id;
+ mutex_unlock(&transport_list_lock_lha0);
+ l_ctx->local_open_state = GLINK_CHANNEL_OPENING;
+ l_ctx->local_xprt_req = best_xprt;
+ l_ctx->transport_ptr->ops->tx_cmd_ch_open(l_ctx->transport_ptr->ops,
+ l_ctx->lcid, l_ctx->name, best_xprt);
+
+ migrated = true;
+exit:
+ rwref_put(&l_ctx->ch_state_lhc0);
+ rwref_put(&r_ctx->ch_state_lhc0);
+
+ return migrated;
+}
+
+/**
+ * calculate_xprt_resp() - calculate the response to a remote xprt request
+ * @r_ctx: The channel the remote xprt request is for.
+ *
+ * Return: The calculated response.
+ */
+static uint16_t calculate_xprt_resp(struct channel_ctx *r_ctx)
+{
+ struct channel_ctx *l_ctx;
+
+ l_ctx = find_l_ctx_get(r_ctx);
+ if (!l_ctx) {
+ r_ctx->remote_xprt_resp = r_ctx->transport_ptr->id;
+ } else if (r_ctx->remote_xprt_req == r_ctx->transport_ptr->id) {
+ r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+ } else {
+ if (!l_ctx->local_xprt_req)
+ r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+ else if (l_ctx->no_migrate)
+ r_ctx->remote_xprt_resp = l_ctx->local_xprt_req;
+ else
+ r_ctx->remote_xprt_resp = max(l_ctx->local_xprt_req,
+ r_ctx->remote_xprt_req);
+ }
+
+ if (l_ctx)
+ rwref_put(&l_ctx->ch_state_lhc0);
+
+ return r_ctx->remote_xprt_resp;
+}
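+
+/*
+ * Worked example (illustrative): a remote open arrives on transport id 2
+ * with remote_xprt_req = 1. With no matching local channel, the response
+ * is the current transport id (2). With a local channel requesting
+ * local_xprt_req = 3 and migration allowed, the response is max(3, 1) = 3.
+ * A no_migrate local channel pins the response to its own local_xprt_req.
+ */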
+
+/**
+ * glink_core_rx_cmd_ch_remote_open() - Remote-initiated open command
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @name: Channel name
+ * @req_xprt: Requested transport to migrate to
+ */
+static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
+ uint32_t rcid, const char *name, uint16_t req_xprt)
+{
+ struct channel_ctx *ctx;
+ uint16_t xprt_resp;
+ bool do_migrate;
+
+ ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name);
+ if (ctx == NULL) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+			"%s: unable to create ctx for rcid %u, name '%s'\n",
+ __func__, rcid, name);
+ return;
+ }
+
+ /* port already exists */
+ if (ctx->remote_opened) {
+ GLINK_ERR_CH(ctx,
+ "%s: Duplicate remote open for rcid %u, name '%s'\n",
+ __func__, rcid, name);
+ return;
+ }
+
+ ctx->remote_opened = true;
+ ch_add_rcid(if_ptr->glink_core_priv, ctx, rcid);
+ ctx->transport_ptr = if_ptr->glink_core_priv;
+
+ ctx->remote_xprt_req = req_xprt;
+ xprt_resp = calculate_xprt_resp(ctx);
+
+ do_migrate = will_migrate(NULL, ctx);
+ GLINK_INFO_CH(ctx, "%s: remote: CLOSED->OPENED ; xprt req:resp %u:%u\n",
+ __func__, req_xprt, xprt_resp);
+
+ if_ptr->tx_cmd_ch_remote_open_ack(if_ptr, rcid, xprt_resp);
+ if (!do_migrate && ch_is_fully_opened(ctx))
+ ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+
+ if (do_migrate)
+ ch_migrate(NULL, ctx);
+}
+
+/**
+ * glink_core_rx_cmd_ch_open_ack() - Receive ack to previously sent open request
+ *
+ * @if_ptr: Pointer to transport instance
+ * @lcid: Local Channel ID
+ * @xprt_resp: Response to the transport migration request
+ */
+static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint16_t xprt_resp)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+ if (!ctx) {
+ /* unknown LCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid lcid %u received\n", __func__,
+ (unsigned)lcid);
+ return;
+ }
+
+ if (ctx->local_open_state != GLINK_CHANNEL_OPENING) {
+ GLINK_ERR_CH(ctx,
+ "%s: unexpected open ack receive for lcid. Current state: %u. Thread: %u\n",
+ __func__, ctx->local_open_state, current->pid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ ctx->local_xprt_resp = xprt_resp;
+ if (!ch_migrate(ctx, NULL)) {
+ ctx->local_open_state = GLINK_CHANNEL_OPENED;
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: local:GLINK_CHANNEL_OPENING_WAIT->GLINK_CHANNEL_OPENED\n",
+ __func__);
+
+ if (ch_is_fully_opened(ctx)) {
+ ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: notify state: GLINK_CONNECTED\n",
+ __func__);
+ }
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_ch_remote_close() - Receive remote close command
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ */
+static void glink_core_rx_cmd_ch_remote_close(
+ struct glink_transport_if *if_ptr, uint32_t rcid)
+{
+ struct channel_ctx *ctx;
+ bool is_ch_fully_closed;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid %u received\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ if (!ctx->remote_opened) {
+ GLINK_ERR_CH(ctx,
+ "%s: unexpected remote close receive for rcid %u\n",
+ __func__, (unsigned)rcid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+ GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
+
+ is_ch_fully_closed = glink_core_remote_close_common(ctx);
+
+ ctx->pending_delete = true;
+ if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
+
+ if (is_ch_fully_closed) {
+ glink_delete_ch_from_list(ctx, true);
+ flush_workqueue(ctx->transport_ptr->tx_wq);
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_ch_close_ack() - Receive ack for a locally-requested close
+ *
+ * @if_ptr: Pointer to transport instance
+ * @lcid: Local Channel ID
+ */
+static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid)
+{
+ struct channel_ctx *ctx;
+ bool is_ch_fully_closed;
+
+ ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+ if (!ctx) {
+ /* unknown LCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid lcid %u received\n", __func__,
+ (unsigned)lcid);
+ return;
+ }
+
+ if (ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+ GLINK_ERR_CH(ctx,
+ "%s: unexpected close ack receive for lcid %u\n",
+ __func__, (unsigned)lcid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ is_ch_fully_closed = glink_core_ch_close_ack_common(ctx);
+ if (is_ch_fully_closed) {
+ glink_delete_ch_from_list(ctx, true);
+ flush_workqueue(ctx->transport_ptr->tx_wq);
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_remote_rx_intent_put() - Receive remote RX intent
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @riid: Remote Intent ID
+ * @size: Size of the remote intent
+ */
+static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t riid, size_t size)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown rcid received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ ch_push_remote_rx_intent(ctx, size, riid);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_remote_rx_intent_req() - Receive a request for rx_intent
+ * from remote side
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @size: size of the intent
+ *
+ * The function searches for the local channel to which the request for an
+ * rx_intent has arrived and notifies the local channel of the request through
+ * the notify_rx_intent_req callback registered by the local channel.
+ */
+static void glink_core_rx_cmd_remote_rx_intent_req(
+ struct glink_transport_if *if_ptr, uint32_t rcid, size_t size)
+{
+ struct channel_ctx *ctx;
+ bool cb_ret;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+ if (!ctx->notify_rx_intent_req) {
+ GLINK_ERR_CH(ctx,
+ "%s: Notify function not defined for local channel",
+ __func__);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ cb_ret = ctx->notify_rx_intent_req(ctx, ctx->user_priv, size);
+ if_ptr->tx_cmd_remote_rx_intent_req_ack(if_ptr, ctx->lcid, cb_ret);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_rx_intent_req_ack() - Receive ack from remote side
+ *					for a local rx_intent request
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @granted: Whether the rx_intent request was granted
+ *
+ * This function receives the ack for a local rx_intent request from the
+ * remote side.
+ */
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+ *if_ptr, uint32_t rcid, bool granted)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: Invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+ ctx->int_req_ack = granted;
+ complete_all(&ctx->int_req_ack_complete);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_get_pkt_ctx() - lookup RX intent structure
+ *
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @liid: Local RX Intent ID
+ *
+ * Note that this function is designed to always be followed by a call to
+ * glink_core_rx_put_pkt_ctx() to complete an RX operation by the transport.
+ *
+ * Return: Pointer to RX intent structure (or NULL if none found)
+ */
+static struct glink_core_rx_intent *glink_core_rx_get_pkt_ctx(
+ struct glink_transport_if *if_ptr, uint32_t rcid, uint32_t liid)
+{
+ struct channel_ctx *ctx;
+ struct glink_core_rx_intent *intent_ptr;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return NULL;
+ }
+
+ /* match pending intent */
+ intent_ptr = ch_get_local_rx_intent(ctx, liid);
+ if (intent_ptr == NULL) {
+ GLINK_ERR_CH(ctx,
+ "%s: L[%u]: No matching rx intent\n",
+ __func__, liid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return NULL;
+ }
+
+ rwref_put(&ctx->ch_state_lhc0);
+ return intent_ptr;
+}
+
+/**
+ * glink_core_rx_put_pkt_ctx() - complete an RX operation on an intent
+ *
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @intent_ptr: Pointer to the RX intent
+ * @complete: Packet has been completely received
+ *
+ * Note that this function should always be preceded by a call to
+ * glink_core_rx_get_pkt_ctx().
+ */
+void glink_core_rx_put_pkt_ctx(struct glink_transport_if *if_ptr,
+ uint32_t rcid, struct glink_core_rx_intent *intent_ptr, bool complete)
+{
+ struct channel_ctx *ctx;
+
+ if (!complete) {
+ GLINK_DBG_XPRT(if_ptr->glink_core_priv,
+ "%s: rcid[%u] liid[%u] pkt_size[%zu] write_offset[%zu] Fragment received\n",
+ __func__, rcid, intent_ptr->id,
+ intent_ptr->pkt_size,
+ intent_ptr->write_offset);
+ return;
+ }
+
+ /* packet complete */
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ if (unlikely(intent_ptr->tracer_pkt)) {
+ tracer_pkt_log_event(intent_ptr->data, GLINK_CORE_RX);
+ ch_set_local_rx_intent_notified(ctx, intent_ptr);
+ if (ctx->notify_rx_tracer_pkt)
+ ctx->notify_rx_tracer_pkt(ctx, ctx->user_priv,
+ intent_ptr->pkt_priv, intent_ptr->data,
+ intent_ptr->pkt_size);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ GLINK_PERF_CH(ctx, "%s: L[%u]: data[%p] size[%zu]\n",
+ __func__, intent_ptr->id,
+ intent_ptr->data ? intent_ptr->data : intent_ptr->iovec,
+ intent_ptr->write_offset);
+ if (!intent_ptr->data && !ctx->notify_rxv) {
+ /* Received a vector, but client can't handle a vector */
+ intent_ptr->bounce_buf = linearize_vector(intent_ptr->iovec,
+ intent_ptr->pkt_size,
+ intent_ptr->vprovider,
+ intent_ptr->pprovider);
+ if (IS_ERR_OR_NULL(intent_ptr->bounce_buf)) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: Error %ld linearizing vector\n", __func__,
+ PTR_ERR(intent_ptr->bounce_buf));
+ BUG();
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+ }
+
+ ch_set_local_rx_intent_notified(ctx, intent_ptr);
+ if (ctx->notify_rx && (intent_ptr->data || intent_ptr->bounce_buf)) {
+ ctx->notify_rx(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+ intent_ptr->data ?
+ intent_ptr->data : intent_ptr->bounce_buf,
+ intent_ptr->pkt_size);
+ } else if (ctx->notify_rxv) {
+ ctx->notify_rxv(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+ intent_ptr->iovec, intent_ptr->pkt_size,
+ intent_ptr->vprovider, intent_ptr->pprovider);
+ } else {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: Unable to process rx data\n", __func__);
+ BUG();
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
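+
+/*
+ * Illustrative transport RX path (sketch; my_xprt_rx_fragment and its
+ * arguments are hypothetical). A transport pairs the two calls above for
+ * every fragment it receives:
+ *
+ *	static void my_xprt_rx_fragment(struct glink_transport_if *if_ptr,
+ *		uint32_t rcid, uint32_t liid, const void *frag,
+ *		size_t frag_len, bool last)
+ *	{
+ *		struct glink_core_rx_intent *intent;
+ *
+ *		intent = if_ptr->glink_core_if_ptr->rx_get_pkt_ctx(if_ptr,
+ *								rcid, liid);
+ *		if (!intent)
+ *			return;
+ *		memcpy(intent->data + intent->write_offset, frag, frag_len);
+ *		intent->write_offset += frag_len;
+ *		if_ptr->glink_core_if_ptr->rx_put_pkt_ctx(if_ptr, rcid,
+ *							intent, last);
+ *	}
+ */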
+
+/**
+ * glink_core_rx_cmd_tx_done() - Receive Transmit Done Command
+ * @if_ptr: Pointer to transport interface
+ * @rcid: Remote channel ID
+ * @riid: Remote intent ID
+ * @reuse: Reuse the consumed intent
+ */
+void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t riid, bool reuse)
+{
+ struct channel_ctx *ctx;
+ struct glink_core_tx_pkt *tx_pkt;
+ unsigned long flags;
+ size_t intent_size;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid %u received\n", __func__,
+ rcid);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ tx_pkt = ch_get_tx_pending_remote_done(ctx, riid);
+ if (IS_ERR_OR_NULL(tx_pkt)) {
+ /*
+ * FUTURE - in the case of a zero-copy transport, this is a
+ * fatal protocol failure since memory corruption could occur
+ * in this case. Prevent this by adding code in glink_close()
+ * to recall any buffers in flight / wait for them to be
+ * returned.
+ */
+ GLINK_ERR_CH(ctx, "%s: R[%u]: No matching tx\n",
+ __func__,
+ (unsigned)riid);
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ /* notify client */
+ ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv,
+ tx_pkt->data ? tx_pkt->data : tx_pkt->iovec);
+ intent_size = tx_pkt->intent_size;
+ ch_remove_tx_pending_remote_done(ctx, tx_pkt);
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ if (reuse)
+ ch_push_remote_rx_intent(ctx, intent_size, riid);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * xprt_schedule_tx() - Schedules packet for transmit.
+ * @xprt_ptr: Transport to send packet on.
+ * @ch_ptr: Channel to send packet on.
+ * @tx_info: Packet to transmit.
+ */
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ if (list_empty(&ch_ptr->tx_ready_list_node))
+ list_add_tail(&ch_ptr->tx_ready_list_node,
+ &xprt_ptr->prio_bin[ch_ptr->curr_priority].tx_ready);
+
+ spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+ list_add_tail(&tx_info->list_node, &ch_ptr->tx_active);
+ glink_qos_do_ch_tx(ch_ptr);
+ if (unlikely(tx_info->tracer_pkt))
+ tracer_pkt_log_event((void *)(tx_info->data),
+ GLINK_QUEUE_TO_SCHEDULER);
+
+ spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+
+ queue_work(xprt_ptr->tx_wq, &xprt_ptr->tx_work);
+}
+
+/**
+ * xprt_single_threaded_tx() - Transmit in the context of sender.
+ * @xprt_ptr: Transport to send packet on.
+ * @ch_ptr: Channel to send packet on.
+ * @tx_info: Packet to transmit.
+ */
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+ do {
+ ret = xprt_ptr->ops->tx(ch_ptr->transport_ptr->ops,
+ ch_ptr->lcid, tx_info);
+ } while (ret == -EAGAIN);
+ if (ret < 0 || tx_info->size_remaining) {
+ GLINK_ERR_CH(ch_ptr, "%s: Error %d writing data\n",
+ __func__, ret);
+ kfree(tx_info);
+ } else {
+ list_add_tail(&tx_info->list_done,
+ &ch_ptr->tx_pending_remote_done);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+ return ret;
+}
+
+/**
+ * glink_scheduler_eval_prio() - Evaluate the channel priority
+ * @ctx: Channel whose priority is evaluated.
+ * @xprt_ctx: Transport that the channel is part of.
+ *
+ * This function is called by the packet scheduler to measure the traffic
+ * rate observed in the channel and compare it against the traffic rate
+ * requested by the channel. The comparison result is used to evaluate the
+ * priority of the channel.
+ */
+static void glink_scheduler_eval_prio(struct channel_ctx *ctx,
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ unsigned long token_end_time;
+ unsigned long token_consume_time, rem;
+ unsigned long obs_rate_kBps;
+
+ if (ctx->initial_priority == 0)
+ return;
+
+ if (ctx->token_count)
+ return;
+
+ token_end_time = arch_counter_get_cntpct();
+
+ token_consume_time = NSEC_PER_SEC;
+ rem = do_div(token_consume_time, arch_timer_get_rate());
+ token_consume_time = (token_end_time - ctx->token_start_time) *
+ token_consume_time;
+ rem = do_div(token_consume_time, 1000);
+ obs_rate_kBps = glink_qos_calc_rate_kBps(ctx->txd_len,
+ token_consume_time);
+ if (obs_rate_kBps > ctx->req_rate_kBps) {
+ GLINK_INFO_CH(ctx, "%s: Obs. Rate (%lu) > Req. Rate (%lu)\n",
+ __func__, obs_rate_kBps, ctx->req_rate_kBps);
+ glink_qos_update_ch_prio(ctx, 0);
+ } else {
+ glink_qos_update_ch_prio(ctx, ctx->initial_priority);
+ }
+
+ ctx->token_count = xprt_ctx->token_count;
+ ctx->txd_len = 0;
+ ctx->token_start_time = arch_counter_get_cntpct();
+}
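+
+/*
+ * Worked example (illustrative numbers): with a 19.2 MHz arch counter,
+ * NSEC_PER_SEC / arch_timer_get_rate() is ~52 ns per tick. If the tokens
+ * were consumed over 192000 ticks, token_consume_time is roughly
+ * 192000 * 52 / 1000 ~= 10000 us. Transmitting txd_len = 65536 bytes in
+ * that window gives an observed rate of ~6553 kB/s, which is compared
+ * against req_rate_kBps to decide whether to demote the channel to
+ * priority 0 or restore its initial priority.
+ */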
+
+/**
+ * glink_scheduler_tx() - Transmit operation by the scheduler
+ * @ctx: Channel which is scheduled for transmission.
+ * @xprt_ctx: Transport context in which the transmission is performed.
+ *
+ * This function is called by the scheduler after scheduling a channel for
+ * transmission over the transport.
+ *
+ * Return: return value as returned by the transport on success,
+ * standard Linux error codes on failure.
+ */
+static int glink_scheduler_tx(struct channel_ctx *ctx,
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ unsigned long flags;
+ struct glink_core_tx_pkt *tx_info;
+ size_t txd_len = 0;
+ size_t tx_len = 0;
+ uint32_t num_pkts = 0;
+	int ret = 0;
+
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ while (txd_len < xprt_ctx->mtu &&
+ !list_empty(&ctx->tx_active)) {
+ tx_info = list_first_entry(&ctx->tx_active,
+ struct glink_core_tx_pkt, list_node);
+ rwref_get(&tx_info->pkt_ref);
+
+ spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
+ if (list_empty(&tx_info->list_done))
+ list_add(&tx_info->list_done,
+ &ctx->tx_pending_remote_done);
+ spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ if (unlikely(tx_info->tracer_pkt)) {
+ tracer_pkt_log_event((void *)(tx_info->data),
+ GLINK_SCHEDULER_TX);
+ ret = xprt_ctx->ops->tx_cmd_tracer_pkt(xprt_ctx->ops,
+ ctx->lcid, tx_info);
+ } else {
+ tx_len = tx_info->size_remaining <
+ (xprt_ctx->mtu - txd_len) ?
+ tx_info->size_remaining :
+ (xprt_ctx->mtu - txd_len);
+ tx_info->tx_len = tx_len;
+ ret = xprt_ctx->ops->tx(xprt_ctx->ops,
+ ctx->lcid, tx_info);
+ }
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ if (ret == -EAGAIN) {
+ /*
+ * transport unable to send at the moment and will call
+ * tx_resume() when it can send again.
+ */
+ rwref_put(&tx_info->pkt_ref);
+ break;
+ } else if (ret < 0) {
+ /*
+ * General failure code that indicates that the
+ * transport is unable to recover. In this case, the
+ * communication failure will be detected at a higher
+ * level and a subsystem restart of the affected system
+ * will be triggered.
+ */
+ GLINK_ERR_XPRT(xprt_ctx,
+ "%s: unrecoverable xprt failure %d\n",
+ __func__, ret);
+ rwref_put(&tx_info->pkt_ref);
+ break;
+ } else if (!ret && tx_info->size_remaining) {
+ /*
+ * Transport unable to send any data on this channel.
+ * Break out of the loop so that the scheduler can
+ * continue with the next channel.
+ */
+ break;
+ } else {
+ txd_len += tx_len;
+ }
+
+ if (!tx_info->size_remaining) {
+ num_pkts++;
+ list_del_init(&tx_info->list_node);
+ rwref_put(&tx_info->pkt_ref);
+ }
+ }
+
+ ctx->txd_len += txd_len;
+ if (txd_len) {
+ if (num_pkts >= ctx->token_count)
+ ctx->token_count = 0;
+ else if (num_pkts)
+ ctx->token_count -= num_pkts;
+ else
+ ctx->token_count--;
+ }
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ return ret;
+}
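+
+/*
+ * Illustrative token accounting: with token_count = 4, a pass that fully
+ * transmits two packets leaves two tokens; a pass that sends only part of
+ * one large packet (num_pkts == 0 but txd_len > 0) still charges a single
+ * token. Once the count reaches 0, glink_scheduler_eval_prio() above
+ * re-evaluates the channel priority and refills the tokens.
+ */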
+
+/**
+ * tx_work_func() - Transmit worker
+ * @work: Linux work structure
+ */
+static void tx_work_func(struct work_struct *work)
+{
+ struct glink_core_xprt_ctx *xprt_ptr =
+ container_of(work, struct glink_core_xprt_ctx, tx_work);
+ struct channel_ctx *ch_ptr;
+ uint32_t prio;
+ uint32_t tx_ready_head_prio;
+ int ret;
+ struct channel_ctx *tx_ready_head = NULL;
+ bool transmitted_successfully = true;
+ unsigned long flags;
+
+ GLINK_PERF("%s: worker starting\n", __func__);
+
+ while (1) {
+ prio = xprt_ptr->num_priority - 1;
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ while (list_empty(&xprt_ptr->prio_bin[prio].tx_ready)) {
+ if (prio == 0) {
+ spin_unlock_irqrestore(
+ &xprt_ptr->tx_ready_lock_lhb2, flags);
+ return;
+ }
+ prio--;
+ }
+ glink_pm_qos_vote(xprt_ptr);
+ ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
+ struct channel_ctx, tx_ready_list_node);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+
+ if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
+ tx_ready_head = ch_ptr;
+ tx_ready_head_prio = prio;
+ }
+
+ if (ch_ptr == tx_ready_head && !transmitted_successfully) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Unable to send data on this transport.\n",
+ __func__);
+ break;
+ }
+ transmitted_successfully = false;
+
+ ret = glink_scheduler_tx(ch_ptr, xprt_ptr);
+ if (ret == -EAGAIN) {
+ /*
+ * transport unable to send at the moment and will call
+ * tx_resume() when it can send again.
+ */
+ break;
+ } else if (ret < 0) {
+ /*
+ * General failure code that indicates that the
+ * transport is unable to recover. In this case, the
+ * communication failure will be detected at a higher
+ * level and a subsystem restart of the affected system
+ * will be triggered.
+ */
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: unrecoverable xprt failure %d\n",
+ __func__, ret);
+ break;
+ } else if (!ret) {
+ /*
+ * Transport unable to send any data on this channel,
+ * but didn't return an error. Move to the next channel
+ * and continue.
+ */
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2,
+ flags);
+ continue;
+ }
+
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+
+ glink_scheduler_eval_prio(ch_ptr, xprt_ptr);
+ if (list_empty(&ch_ptr->tx_active)) {
+ list_del_init(&ch_ptr->tx_ready_list_node);
+ glink_qos_done_ch_tx(ch_ptr);
+ }
+
+ spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+
+ tx_ready_head = NULL;
+ transmitted_successfully = true;
+ }
+ glink_pm_qos_unvote(xprt_ptr);
+ GLINK_PERF("%s: worker exiting\n", __func__);
+}
+
+static void glink_core_tx_resume(struct glink_transport_if *if_ptr)
+{
+ queue_work(if_ptr->glink_core_priv->tx_wq,
+ &if_ptr->glink_core_priv->tx_work);
+}
+
+/**
+ * glink_pm_qos_vote() - Add Power Management QoS Vote
+ * @xprt_ptr: Transport for power vote
+ *
+ * Note - must be called with tx_ready_lock_lhb2 locked.
+ */
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ if (glink_pm_qos && !xprt_ptr->qos_req_active) {
+ GLINK_PERF("%s: qos vote %u us\n", __func__, glink_pm_qos);
+ pm_qos_update_request(&xprt_ptr->pm_qos_req, glink_pm_qos);
+ xprt_ptr->qos_req_active = true;
+ }
+ xprt_ptr->tx_path_activity = true;
+}
+
+/**
+ * glink_pm_qos_unvote() - Schedule Power Management QoS Vote Removal
+ * @xprt_ptr: Transport for power vote removal
+ *
+ * Note - must be called with tx_ready_lock_lhb2 locked.
+ */
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ xprt_ptr->tx_path_activity = false;
+ if (xprt_ptr->qos_req_active) {
+ GLINK_PERF("%s: qos unvote\n", __func__);
+ schedule_delayed_work(&xprt_ptr->pm_qos_work,
+ msecs_to_jiffies(GLINK_PM_QOS_HOLDOFF_MS));
+ }
+}
+
+/**
+ * glink_pm_qos_cancel_worker() - Remove Power Management QoS Vote
+ * @work: Delayed work structure
+ *
+ * Removes PM QoS vote if no additional transmit activity has occurred between
+ * the unvote and when this worker runs.
+ */
+static void glink_pm_qos_cancel_worker(struct work_struct *work)
+{
+ struct glink_core_xprt_ctx *xprt_ptr;
+ unsigned long flags;
+
+ xprt_ptr = container_of(to_delayed_work(work),
+ struct glink_core_xprt_ctx, pm_qos_work);
+
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ if (!xprt_ptr->tx_path_activity) {
+ /* no more tx activity */
+ GLINK_PERF("%s: qos off\n", __func__);
+ pm_qos_update_request(&xprt_ptr->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+ xprt_ptr->qos_req_active = false;
+ }
+ xprt_ptr->tx_path_activity = false;
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+}
+
+/**
+ * glink_core_rx_cmd_remote_sigs() - Receive remote channel signal command
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @sigs: Signals bitmask received from the remote side
+ */
+static void glink_core_rx_cmd_remote_sigs(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t sigs)
+{
+ struct channel_ctx *ctx;
+ uint32_t old_sigs;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid %u received\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ old_sigs = ctx->rsigs;
+ ctx->rsigs = sigs;
+ if (ctx->notify_rx_sigs) {
+ ctx->notify_rx_sigs(ctx, ctx->user_priv, old_sigs, ctx->rsigs);
+ GLINK_INFO_CH(ctx, "%s: notify rx sigs old:0x%x new:0x%x\n",
+ __func__, old_sigs, ctx->rsigs);
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+static struct glink_core_if core_impl = {
+ .link_up = glink_core_link_up,
+ .link_down = glink_core_link_down,
+ .rx_cmd_version = glink_core_rx_cmd_version,
+ .rx_cmd_version_ack = glink_core_rx_cmd_version_ack,
+ .rx_cmd_ch_remote_open = glink_core_rx_cmd_ch_remote_open,
+ .rx_cmd_ch_open_ack = glink_core_rx_cmd_ch_open_ack,
+ .rx_cmd_ch_remote_close = glink_core_rx_cmd_ch_remote_close,
+ .rx_cmd_ch_close_ack = glink_core_rx_cmd_ch_close_ack,
+ .rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx,
+ .rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx,
+ .rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put,
+ .rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req,
+ .rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack,
+ .rx_cmd_tx_done = glink_core_rx_cmd_tx_done,
+ .tx_resume = glink_core_tx_resume,
+ .rx_cmd_remote_sigs = glink_core_rx_cmd_remote_sigs,
+};
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list iterator
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i)
+{
+ if (xprt_i == NULL)
+ return;
+
+ mutex_lock(&transport_list_lock_lha0);
+ xprt_i->xprt_list = &transport_list;
+ xprt_i->i_curr = list_entry(&transport_list,
+ struct glink_core_xprt_ctx, list_node);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_init);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i: pointer to the transport context iterator.
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i)
+{
+ if (xprt_i == NULL)
+ return;
+
+ xprt_i->xprt_list = NULL;
+ xprt_i->i_curr = NULL;
+ mutex_unlock(&transport_list_lock_lha0);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_end);
+
+/**
+ * glink_xprt_ctx_iterator_next() - retrieve the next element in the transport context list
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: pointer to the transport context structure
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+ struct xprt_ctx_iterator *xprt_i)
+{
+ struct glink_core_xprt_ctx *xprt_ctx = NULL;
+
+ if (xprt_i == NULL)
+ return xprt_ctx;
+
+ if (list_empty(xprt_i->xprt_list))
+ return xprt_ctx;
+
+ list_for_each_entry_continue(xprt_i->i_curr,
+ xprt_i->xprt_list, list_node) {
+ xprt_ctx = xprt_i->i_curr;
+ break;
+ }
+ return xprt_ctx;
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_next);
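+
+/*
+ * Illustrative usage (sketch): a consumer such as the debugfs code can
+ * walk the transports as below; the transport list lock is held from
+ * _init() until _end():
+ *
+ *	struct xprt_ctx_iterator it;
+ *	struct glink_core_xprt_ctx *xprt;
+ *
+ *	glink_xprt_ctx_iterator_init(&it);
+ *	while ((xprt = glink_xprt_ctx_iterator_next(&it)))
+ *		pr_info("xprt %s on edge %s\n", glink_get_xprt_name(xprt),
+ *			glink_get_xprt_edge_name(xprt));
+ *	glink_xprt_ctx_iterator_end(&it);
+ */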
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the transport
+ */
+char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ if (xprt_ctx == NULL)
+ return NULL;
+
+ return xprt_ctx->name;
+}
+EXPORT_SYMBOL(glink_get_xprt_name);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ * of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: Name of the remote processor/edge
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ if (xprt_ctx == NULL)
+ return NULL;
+ return xprt_ctx->edge;
+}
+EXPORT_SYMBOL(glink_get_xprt_edge_name);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: Name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ if (xprt_ctx == NULL)
+ return NULL;
+
+ return glink_get_xprt_state_string(xprt_ctx->local_state);
+}
+EXPORT_SYMBOL(glink_get_xprt_state);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ * of local transport in glink
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: pointer to the glink_core_version
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+	const struct glink_core_version *ver = NULL;
+
+	if (xprt_ctx == NULL)
+ return ver;
+
+ ver = &xprt_ctx->versions[xprt_ctx->local_version_idx];
+ return ver;
+}
+EXPORT_SYMBOL(glink_get_xprt_version_features);
+
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter: pointer to the channel context iterator.
+ * @xprt: pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt)
+{
+ unsigned long flags;
+
+ if (ch_iter == NULL || xprt == NULL)
+ return;
+
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ ch_iter->ch_list = &(xprt->channels);
+ ch_iter->i_curr = list_entry(&(xprt->channels),
+ struct channel_ctx, port_list_node);
+ ch_iter->ch_list_flags = flags;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_init);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter: pointer to the channel context iterator.
+ * @xprt: pointer to the transport context that holds the channel list.
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt)
+{
+ if (ch_iter == NULL || xprt == NULL)
+ return;
+
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+ ch_iter->ch_list_flags);
+ ch_iter->ch_list = NULL;
+ ch_iter->i_curr = NULL;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_end);
+
+/**
+ * glink_ch_ctx_iterator_next() - retrieve the next element in the channel context list
+ * @c_i: pointer to the channel context iterator.
+ *
+ * Return: pointer to the channel context structure
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *c_i)
+{
+ struct channel_ctx *ch_ctx = NULL;
+
+ if (c_i == NULL)
+ return ch_ctx;
+
+ if (list_empty(c_i->ch_list))
+ return ch_ctx;
+
+ list_for_each_entry_continue(c_i->i_curr,
+ c_i->ch_list, port_list_node) {
+ ch_ctx = c_i->i_curr;
+ break;
+ }
+ return ch_ctx;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_next);
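+
+/*
+ * Illustrative usage (sketch): channels of one transport can be walked the
+ * same way. Note that the channel list lock is a spinlock taken with IRQs
+ * disabled between _init() and _end(), so the loop body must not sleep:
+ *
+ *	struct ch_ctx_iterator ci;
+ *	struct channel_ctx *ch;
+ *
+ *	glink_ch_ctx_iterator_init(&ci, xprt);
+ *	while ((ch = glink_ch_ctx_iterator_next(&ci)))
+ *		pr_info("ch %s lcid %d\n", glink_get_ch_name(ch),
+ *			glink_get_ch_lcid(ch));
+ *	glink_ch_ctx_iterator_end(&ci, xprt);
+ */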
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return NULL;
+
+ return ch_ctx->name;
+}
+EXPORT_SYMBOL(glink_get_ch_name);
+
+/**
+ * glink_get_ch_edge_name() - get the edge on which the channel is created
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the edge, NULL in case of invalid input
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return NULL;
+
+ return ch_ctx->transport_ptr->edge;
+}
+EXPORT_SYMBOL(glink_get_ch_edge_name);
+
+/**
+ * glink_get_ch_lcid() - get the local channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: local channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ return ch_ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_ch_lcid);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ return ch_ctx->rcid;
+}
+EXPORT_SYMBOL(glink_get_ch_rcid);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: Name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return NULL;
+
+ return glink_get_ch_state_string(ch_ctx->local_open_state);
+}
+EXPORT_SYMBOL(glink_get_ch_lstate);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: true if the remote side is opened, false otherwise
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return false;
+
+ return ch_ctx->remote_opened;
+}
+EXPORT_SYMBOL(glink_get_ch_rstate);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ * the channel belongs
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return NULL;
+
+ return ch_ctx->transport_ptr->name;
+}
+EXPORT_SYMBOL(glink_get_ch_xprt_name);
+
+/**
+ * glink_get_ch_tx_pkt_count() - get the total number of packets sent
+ * through this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of packets transmitted, -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ /* FUTURE: packet stats not yet implemented */
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL(glink_get_ch_tx_pkt_count);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ *				received at this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of packets received, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ /* FUTURE: packet stats not yet implemented */
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL(glink_get_ch_rx_pkt_count);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ * at local side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx)
+{
+ struct glink_core_rx_intent *intent;
+ int ilrx_count = 0;
+
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ list_for_each_entry(intent, &ch_ctx->local_rx_intent_list, list)
+ ilrx_count++;
+
+ return ilrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_lintents_queued);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ * from remote side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx)
+{
+ struct glink_core_rx_intent *intent;
+ int irrx_count = 0;
+
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ list_for_each_entry(intent, &ch_ctx->rmt_rx_intent_list, list)
+ irrx_count++;
+
+ return irrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_rintents_queued);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx: pointer to the channel context.
+ * @ch_ctx_i: pointer to a structure that will contain intent details
+ *
+ * This function is used to get all the channel intent details including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+ struct glink_ch_intent_info *ch_ctx_i)
+{
+ if (ch_ctx == NULL || ch_ctx_i == NULL)
+ return;
+
+ ch_ctx_i->li_lst_lock = &ch_ctx->local_rx_intent_lst_lock_lhc1;
+ ch_ctx_i->li_avail_list = &ch_ctx->local_rx_intent_list;
+ ch_ctx_i->li_used_list = &ch_ctx->local_rx_intent_ntfy_list;
+ ch_ctx_i->ri_lst_lock = &ch_ctx->rmt_rx_intent_lst_lock_lhc2;
+ ch_ctx_i->ri_list = &ch_ctx->rmt_rx_intent_list;
+}
+EXPORT_SYMBOL(glink_get_ch_intent_info);
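+
+/*
+ * Example (illustrative sketch, not part of this snapshot): use the
+ * intent info above to find the largest remote intent while holding the
+ * lock the structure hands back. "ch" is a hypothetical valid channel
+ * context. Compiled out deliberately.
+ */
+#if 0
+static size_t example_largest_remote_intent(struct channel_ctx *ch)
+{
+	struct glink_ch_intent_info info;
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	size_t largest = 0;
+
+	glink_get_ch_intent_info(ch, &info);
+	spin_lock_irqsave(info.ri_lst_lock, flags);
+	list_for_each_entry(intent, info.ri_list, list)
+		if (intent->intent_size > largest)
+			largest = intent->intent_size;
+	spin_unlock_irqrestore(info.ri_lst_lock, flags);
+	return largest;
+}
+#endif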
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned glink_get_debug_mask(void)
+{
+ return glink_debug_mask;
+}
+EXPORT_SYMBOL(glink_get_debug_mask);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void)
+{
+ return log_ctx;
+}
+EXPORT_SYMBOL(glink_get_log_ctx);
+
+static int glink_init(void)
+{
+ log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+ if (!log_ctx)
+ GLINK_ERR("%s: unable to create log context\n", __func__);
+ glink_debugfs_init();
+
+ return 0;
+}
+arch_initcall(glink_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h
new file mode 100644
index 000000000000..93c59d9c4aa1
--- /dev/null
+++ b/drivers/soc/qcom/glink_core_if.h
@@ -0,0 +1,213 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_CORE_IF_H_
+#define _SOC_QCOM_GLINK_CORE_IF_H_
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include "glink_private.h"
+
+/* Local Channel state */
+enum local_channel_state_e {
+ GLINK_CHANNEL_CLOSED = 0,
+ GLINK_CHANNEL_OPENING,
+ GLINK_CHANNEL_OPENED,
+ GLINK_CHANNEL_CLOSING,
+};
+
+/* Transport Negotiation State */
+enum transport_state_e {
+ GLINK_XPRT_DOWN,
+ GLINK_XPRT_NEGOTIATING,
+ GLINK_XPRT_OPENED,
+ GLINK_XPRT_FAILED,
+};
+
+struct channel_ctx;
+struct glink_core_xprt_ctx;
+struct glink_transport_if;
+struct glink_core_version;
+
+/**
+ * struct glink_core_version - Individual version element
+ *
+ * @version:  supported version
+ * @features: all supported features for version
+ * @negotiate_features: callback to negotiate the feature set with the
+ *			remote side; returns the agreed set of features
+ */
+struct glink_core_version {
+ uint32_t version;
+ uint32_t features;
+
+ uint32_t (*negotiate_features)(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version_ptr,
+ uint32_t features);
+};
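+
+/*
+ * Example (illustrative sketch, not part of this snapshot): a minimal
+ * negotiate_features callback that grants only the intersection of the
+ * features requested by the remote side and those this version
+ * advertises. Compiled out deliberately.
+ */
+#if 0
+static uint32_t example_negotiate_features(struct glink_transport_if *if_ptr,
+		const struct glink_core_version *version_ptr,
+		uint32_t features)
+{
+	return features & version_ptr->features;
+}
+#endif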
+
+/**
+ * struct glink_core_rx_intent - RX intent
+ *
+ * @data: pointer to the data (may be NULL for zero-copy)
+ * @id: remote or local intent ID
+ * @pkt_size: total size of packet
+ * @write_offset: next write offset (initially 0)
+ * @intent_size: size of the original intent (do not modify)
+ * @tracer_pkt: Flag to indicate if the data is a tracer packet
+ * @iovec: Pointer to vector buffer if the transport passes a vector buffer
+ * @vprovider: Virtual address-space buffer provider for a vector buffer
+ * @pprovider: Physical address-space buffer provider for a vector buffer
+ * @pkt_priv: G-Link core owned packet-private data
+ * @list: G-Link core owned list node
+ * @bounce_buf: Pointer to the temporary/internal bounce buffer
+ */
+struct glink_core_rx_intent {
+ void *data;
+ uint32_t id;
+ size_t pkt_size;
+ size_t write_offset;
+ size_t intent_size;
+ bool tracer_pkt;
+ void *iovec;
+ void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+ void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+
+ /* G-Link-Core-owned elements - please ignore */
+ struct list_head list;
+ const void *pkt_priv;
+ void *bounce_buf;
+};
+
+/**
+ * struct glink_core_flow_info - Flow specific Information
+ * @mtu_tx_time_us: Time to transmit an MTU in microseconds.
+ * @power_state: Power state associated with the traffic flow.
+ */
+struct glink_core_flow_info {
+ unsigned long mtu_tx_time_us;
+ uint32_t power_state;
+};
+
+/**
+ * struct glink_core_transport_cfg - configuration of a new transport
+ * @name: Name of the transport.
+ * @edge: Subsystem the transport connects to.
+ * @versions: Array of transport versions supported.
+ * @versions_entries: Number of entries in @versions.
+ * @max_cid: Maximum number of channel identifiers supported.
+ * @max_iid: Maximum number of intent identifiers supported.
+ * @mtu: MTU supported by this transport.
+ * @num_flows: Number of traffic flows/priority buckets.
+ * @flow_info: Information about each flow/priority.
+ * @token_count: Number of tokens per assignment.
+ */
+struct glink_core_transport_cfg {
+ const char *name;
+ const char *edge;
+ const struct glink_core_version *versions;
+ size_t versions_entries;
+ uint32_t max_cid;
+ uint32_t max_iid;
+
+ size_t mtu;
+ uint32_t num_flows;
+ struct glink_core_flow_info *flow_info;
+ uint32_t token_count;
+};
+
+struct glink_core_if {
+ /* Negotiation */
+ void (*link_up)(struct glink_transport_if *if_ptr);
+ void (*link_down)(struct glink_transport_if *if_ptr);
+ void (*rx_cmd_version)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+ void (*rx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+
+ /* channel management */
+ void (*rx_cmd_ch_remote_open)(struct glink_transport_if *if_ptr,
+ uint32_t rcid, const char *name, uint16_t req_xprt);
+ void (*rx_cmd_ch_open_ack)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint16_t xprt_resp);
+ void (*rx_cmd_ch_remote_close)(struct glink_transport_if *if_ptr,
+ uint32_t rcid);
+ void (*rx_cmd_ch_close_ack)(struct glink_transport_if *if_ptr,
+ uint32_t lcid);
+
+ /* channel data */
+ struct glink_core_rx_intent * (*rx_get_pkt_ctx)(
+ struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t liid);
+ void (*rx_put_pkt_ctx)(struct glink_transport_if *if_ptr, uint32_t rcid,
+ struct glink_core_rx_intent *intent_ptr, bool complete);
+ void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t riid, size_t size);
+ void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid,
+ uint32_t riid, bool reuse);
+ void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr,
+ uint32_t rcid, size_t size);
+ void (*rx_cmd_rx_intent_req_ack)(struct glink_transport_if *if_ptr,
+ uint32_t rcid, bool granted);
+ void (*rx_cmd_remote_sigs)(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t sigs);
+
+ /* channel scheduling */
+ void (*tx_resume)(struct glink_transport_if *if_ptr);
+};
+
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+ struct glink_core_transport_cfg *cfg);
+
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr);
+
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle: The handle to the qos related node in DT.
+ * @cfg: The transport configuration to be filled.
+ *
+ * Return: 0 on Success, standard Linux error otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+ struct glink_core_transport_cfg *cfg);
+
+/**
+ * rx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec:	Pointer to the beginning of the linear buffer.
+ * @offset:	Offset into the buffer whose address is needed.
+ * @size:	Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is received while the client has
+ * registered to receive vector buffers.
+ *
+ * Return: Address of the buffer which is at offset "offset" from the beginning
+ * of the buffer.
+ */
+static inline void *rx_linear_vbuf_provider(void *iovec, size_t offset,
+ size_t *size)
+{
+ struct glink_core_rx_intent *rx_info =
+ (struct glink_core_rx_intent *)iovec;
+
+ if (unlikely(!iovec || !size))
+ return NULL;
+
+ if (unlikely(offset >= rx_info->pkt_size))
+ return NULL;
+
+ if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, rx_info->data, offset)))
+ return NULL;
+
+ *size = rx_info->pkt_size - offset;
+ return rx_info->data + offset;
+}
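+
+/*
+ * Example (illustrative sketch, not part of this snapshot): a client
+ * registered for vector buffers can consume a packet chunk by chunk
+ * through the provider, regardless of whether the transport handed over
+ * a linear or a vector buffer. "intent" is a hypothetical received
+ * intent. Compiled out deliberately.
+ */
+#if 0
+static size_t example_count_rx_bytes(struct glink_core_rx_intent *intent)
+{
+	size_t offset = 0;
+	size_t chunk = 0;
+
+	/* the provider returns NULL once offset reaches pkt_size */
+	while (intent->vprovider(intent->iovec, offset, &chunk))
+		offset += chunk;
+	return offset;
+}
+#endif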
+
+#endif /* _SOC_QCOM_GLINK_CORE_IF_H_ */
diff --git a/drivers/soc/qcom/glink_debugfs.c b/drivers/soc/qcom/glink_debugfs.c
new file mode 100644
index 000000000000..8e65e4ac9b8e
--- /dev/null
+++ b/drivers/soc/qcom/glink_debugfs.c
@@ -0,0 +1,783 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <soc/qcom/glink.h>
+#include "glink_private.h"
+#include "glink_core_if.h"
+
+
+static const char * const ss_string[] = {
+ [GLINK_DBGFS_MPSS] = "mpss",
+ [GLINK_DBGFS_APSS] = "apss",
+ [GLINK_DBGFS_LPASS] = "lpass",
+ [GLINK_DBGFS_DSPS] = "dsps",
+ [GLINK_DBGFS_RPM] = "rpm",
+ [GLINK_DBGFS_WCNSS] = "wcnss",
+ [GLINK_DBGFS_LLOOP] = "lloop",
+ [GLINK_DBGFS_MOCK] = "mock"
+};
+
+static const char * const xprt_string[] = {
+ [GLINK_DBGFS_SMEM] = "smem",
+ [GLINK_DBGFS_SMD] = "smd",
+ [GLINK_DBGFS_XLLOOP] = "lloop",
+ [GLINK_DBGFS_XMOCK] = "mock",
+ [GLINK_DBGFS_XMOCK_LOW] = "mock_low",
+ [GLINK_DBGFS_XMOCK_HIGH] = "mock_high"
+};
+
+static const char * const ch_st_string[] = {
+ [GLINK_CHANNEL_CLOSED] = "CLOSED",
+ [GLINK_CHANNEL_OPENING] = "OPENING",
+ [GLINK_CHANNEL_OPENED] = "OPENED",
+ [GLINK_CHANNEL_CLOSING] = "CLOSING",
+};
+
+static const char * const xprt_st_string[] = {
+ [GLINK_XPRT_DOWN] = "DOWN",
+ [GLINK_XPRT_NEGOTIATING] = "NEGOT",
+ [GLINK_XPRT_OPENED] = "OPENED",
+ [GLINK_XPRT_FAILED] = "FAILED"
+};
+
+#if defined(CONFIG_DEBUG_FS)
+#define GLINK_DBGFS_NAME_SIZE (2 * GLINK_NAME_SIZE + 1)
+
+struct glink_dbgfs_dent {
+ struct list_head list_node;
+ char par_name[GLINK_DBGFS_NAME_SIZE];
+ char self_name[GLINK_DBGFS_NAME_SIZE];
+ struct dentry *parent;
+ struct dentry *self;
+ spinlock_t file_list_lock_lhb0;
+ struct list_head file_list;
+};
+
+static struct dentry *dent;
+static LIST_HEAD(dent_list);
+static DEFINE_MUTEX(dent_list_lock_lha0);
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	struct glink_dbgfs_data *dfs_d = s->private;
+
+ dfs_d->o_func(s);
+ return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+#endif
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum value
+ * @enum_id: enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id)
+{
+ if (enum_id >= ARRAY_SIZE(ss_string))
+ return NULL;
+
+ return ss_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_ss_enum_string);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum value
+ * @enum_id: enum id of a specific transport.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id)
+{
+ if (enum_id >= ARRAY_SIZE(xprt_string))
+ return NULL;
+ return xprt_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_enum_string);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport state based on enum value
+ * @enum_id: enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(
+ enum transport_state_e enum_id)
+{
+ if (enum_id >= ARRAY_SIZE(xprt_st_string))
+ return NULL;
+
+ return xprt_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_state_string);
+
+/**
+ * glink_get_ch_state_string() - get the name of the channel state based on enum value
+ * @enum_id: enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(
+ enum local_channel_state_e enum_id)
+{
+ if (enum_id >= ARRAY_SIZE(ch_st_string))
+ return NULL;
+
+ return ch_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_ch_state_string);
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * glink_dfs_create_file() - create the debugfs file
+ * @name: debugfs file name
+ * @parent: pointer to the parent dentry structure
+ * @show: pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dbgfs_data: pointer to any private data that needs to be associated with
+ *		this debugfs file.
+ * @b_free_req: boolean to decide whether @dbgfs_data should be freed when
+ *		the file is deleted.
+ *
+ * Return: pointer to the allocated glink_dbgfs_data structure or
+ *	NULL in case of an error.
+ *
+ * This function creates a debugfs file under the parent directory
+ */
+static struct glink_dbgfs_data *glink_dfs_create_file(const char *name,
+ struct dentry *parent, void (*show)(struct seq_file *s),
+ void *dbgfs_data, bool b_free_req)
+{
+ struct dentry *file;
+ struct glink_dbgfs_data *dfs_d;
+
+ dfs_d = kzalloc(sizeof(struct glink_dbgfs_data), GFP_KERNEL);
+ if (dfs_d == NULL)
+ return NULL;
+
+ dfs_d->o_func = show;
+ if (dbgfs_data != NULL) {
+ dfs_d->priv_data = dbgfs_data;
+ dfs_d->b_priv_free_req = b_free_req;
+ }
+ file = debugfs_create_file(name, 0400, parent, dfs_d, &debug_ops);
+ if (!file)
+ GLINK_DBG("%s: unable to create file '%s'\n", __func__,
+ name);
+ dfs_d->dent = file;
+ return dfs_d;
+}
+
+/**
+ * write_ch_intent() - write channel intent details
+ * @s: pointer to the sequential file
+ * @intent: pointer to the glink core intent structure
+ * @i_type: type of intent
+ * @count: serial number of the intent.
+ *
+ * This function is a helper of glink_dfs_update_ch_intent() that prints
+ * out the details of a specific intent.
+ */
+static void write_ch_intent(struct seq_file *s,
+ struct glink_core_rx_intent *intent,
+ char *i_type, unsigned int count)
+{
+ char *intent_type;
+ /*
+ * formatted, human readable channel state output, ie:
+ * TYPE |SN |ID |PKT_SIZE|W_OFFSET|INT_SIZE|
+ * --------------------------------------------------------------
+ * LOCAL_LIST|#2 |1 |0 |0 |8 |
+ */
+ if (count == 1) {
+ intent_type = i_type;
+ seq_puts(s,
+ "\n--------------------------------------------------------\n");
+ } else {
+ intent_type = "";
+ }
+ seq_printf(s, "%-20s|#%-5d|%-6u|%-10zu|%-10zu|%-10zu|\n",
+ intent_type,
+ count,
+ intent->id,
+ intent->pkt_size,
+ intent->write_offset,
+ intent->intent_size);
+}
+
+/**
+ * glink_dfs_update_ch_intent() - writes the intent details of a specific
+ * channel to the corresponding debugfs file
+ * @s: pointer to the sequential file
+ *
+ * This function extracts the intent details of a channel & prints them to
+ * the corresponding debugfs file of that channel.
+ */
+static void glink_dfs_update_ch_intent(struct seq_file *s)
+{
+ struct glink_dbgfs_data *dfs_d;
+ struct channel_ctx *ch_ctx;
+ struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *intent_temp;
+ struct glink_ch_intent_info ch_intent_info;
+ unsigned long flags;
+ unsigned int count = 0;
+
+ dfs_d = s->private;
+ ch_ctx = dfs_d->priv_data;
+ if (ch_ctx != NULL) {
+ glink_get_ch_intent_info(ch_ctx, &ch_intent_info);
+ seq_puts(s,
+ "---------------------------------------------------------------\n");
+ seq_printf(s, "%-20s|%-6s|%-6s|%-10s|%-10s|%-10s|\n",
+ "INTENT TYPE",
+ "SN",
+ "ID",
+ "PKT_SIZE",
+ "W_OFFSET",
+ "INT_SIZE");
+ seq_puts(s,
+ "---------------------------------------------------------------\n");
+ spin_lock_irqsave(ch_intent_info.li_lst_lock, flags);
+ list_for_each_entry_safe(intent, intent_temp,
+ ch_intent_info.li_avail_list, list) {
+ count++;
+ write_ch_intent(s, intent, "LOCAL_AVAIL_LIST", count);
+ }
+
+ count = 0;
+ list_for_each_entry_safe(intent, intent_temp,
+ ch_intent_info.li_used_list, list) {
+ count++;
+ write_ch_intent(s, intent, "LOCAL_USED_LIST", count);
+ }
+ spin_unlock_irqrestore(ch_intent_info.li_lst_lock, flags);
+
+ count = 0;
+ spin_lock_irqsave(ch_intent_info.ri_lst_lock, flags);
+ list_for_each_entry_safe(intent, intent_temp,
+ ch_intent_info.ri_list, list) {
+ count++;
+ write_ch_intent(s, intent, "REMOTE_LIST", count);
+ }
+ spin_unlock_irqrestore(ch_intent_info.ri_lst_lock,
+ flags);
+ seq_puts(s,
+ "---------------------------------------------------------------\n");
+ }
+}
+
+/**
+ * glink_dfs_update_ch_stats() - writes statistics of a specific
+ * channel to the corresponding debugfs file
+ * @s: pointer to the sequential file
+ *
+ * This function extracts other statistics of a channel & prints them to
+ * the corresponding debugfs file of that channel
+ */
+static void glink_dfs_update_ch_stats(struct seq_file *s)
+{
+ /* FUTURE: add channel statistics */
+ seq_puts(s, "not yet implemented\n");
+}
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel specific files & folders
+ *				in debugfs when the channel is fully closed
+ * @ch_ctx: pointer to the channel context
+ * @xprt_ctx: pointer to the transport context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx)
+{
+ struct glink_dbgfs ch_rm_dbgfs;
+ char *edge_name;
+ char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+ char *xprt_name;
+
+ ch_rm_dbgfs.curr_name = glink_get_ch_name(ch_ctx);
+ edge_name = glink_get_xprt_edge_name(xprt_ctx);
+ xprt_name = glink_get_xprt_name(xprt_ctx);
+ if (!xprt_name || !edge_name) {
+ GLINK_ERR("%s: Invalid xprt_name or edge_name for ch '%s'\n",
+ __func__, ch_rm_dbgfs.curr_name);
+ return;
+ }
+ snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+ edge_name, xprt_name);
+ ch_rm_dbgfs.par_name = curr_dir_name;
+ glink_debugfs_remove_recur(&ch_rm_dbgfs);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+/**
+ * glink_debugfs_add_channel() - create channel specific files & folders in
+ *				debugfs when a channel is added
+ * @ch_ctx: pointer to the channel context
+ * @xprt_ctx: pointer to the transport context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ struct glink_dbgfs ch_dbgfs;
+ char *ch_name;
+ char *edge_name;
+ char *xprt_name;
+ char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+ if (ch_ctx == NULL) {
+ GLINK_ERR("%s: Channel Context is NULL\n", __func__);
+ return;
+ }
+
+ ch_name = glink_get_ch_name(ch_ctx);
+ edge_name = glink_get_xprt_edge_name(xprt_ctx);
+ xprt_name = glink_get_xprt_name(xprt_ctx);
+ if (!xprt_name || !edge_name) {
+ GLINK_ERR("%s: Invalid xprt_name or edge_name for ch '%s'\n",
+ __func__, ch_name);
+ return;
+ }
+ snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+ edge_name, xprt_name);
+
+ ch_dbgfs.curr_name = curr_dir_name;
+ ch_dbgfs.par_name = "channel";
+ ch_dbgfs.b_dir_create = true;
+ glink_debugfs_create(ch_name, NULL, &ch_dbgfs, NULL, false);
+
+ ch_dbgfs.par_name = ch_dbgfs.curr_name;
+ ch_dbgfs.curr_name = ch_name;
+ ch_dbgfs.b_dir_create = false;
+ glink_debugfs_create("stats", glink_dfs_update_ch_stats,
+ &ch_dbgfs, (void *)ch_ctx, false);
+ glink_debugfs_create("intents", glink_dfs_update_ch_intent,
+ &ch_dbgfs, (void *)ch_ctx, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+/**
+ * glink_debugfs_add_xprt() - create transport specific files & folders in
+ *			debugfs when a new transport is registered
+ * @xprt_ctx: pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ struct glink_dbgfs xprt_dbgfs;
+ char *xprt_name;
+ char *edge_name;
+ char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+	if (xprt_ctx == NULL) {
+		GLINK_ERR("%s: Transport Context is NULL\n", __func__);
+		return;
+	}
+ xprt_name = glink_get_xprt_name(xprt_ctx);
+ edge_name = glink_get_xprt_edge_name(xprt_ctx);
+ if (!xprt_name || !edge_name) {
+ GLINK_ERR("%s: xprt name or edge name is NULL\n", __func__);
+ return;
+ }
+ snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+ edge_name, xprt_name);
+ xprt_dbgfs.par_name = "glink";
+ xprt_dbgfs.curr_name = "xprt";
+ xprt_dbgfs.b_dir_create = true;
+ glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+ xprt_dbgfs.curr_name = "channel";
+ glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+/**
+ * glink_dfs_create_channel_list() - create & update the channel details
+ * @s: pointer to seq_file
+ *
+ * This function updates the channel details in the debugfs
+ * file present in /glink/channel/channels
+ */
+static void glink_dfs_create_channel_list(struct seq_file *s)
+{
+ struct xprt_ctx_iterator xprt_iter;
+ struct ch_ctx_iterator ch_iter;
+
+ struct glink_core_xprt_ctx *xprt_ctx;
+ struct channel_ctx *ch_ctx;
+ int count = 0;
+ /*
+ * formatted, human readable channel state output, ie:
+ * NAME |LCID|RCID|XPRT|EDGE|LSTATE |RSTATE|LINT-Q|RINT-Q|
+ * --------------------------------------------------------------------
+ * LOCAL_LOOPBACK_CLNT|2 |1 |lloop|local|OPENED|OPENED|5 |6 |
+ * N.B. Number of TX & RX Packets not implemented yet. -ENOSYS is printed
+ */
+ seq_printf(s, "%-20s|%-4s|%-4s|%-10s|%-6s|%-7s|%-7s|%-5s|%-5s|\n",
+ "NAME",
+ "LCID",
+ "RCID",
+ "XPRT",
+ "EDGE",
+ "LSTATE",
+ "RSTATE",
+ "LINTQ",
+ "RINTQ");
+ seq_puts(s,
+ "-------------------------------------------------------------------------------\n");
+ glink_xprt_ctx_iterator_init(&xprt_iter);
+ xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+ while (xprt_ctx != NULL) {
+ glink_ch_ctx_iterator_init(&ch_iter, xprt_ctx);
+ ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+ while (ch_ctx != NULL) {
+ count++;
+ seq_printf(s, "%-20s|%-4i|%-4i|%-10s|%-6s|%-7s|",
+ glink_get_ch_name(ch_ctx),
+ glink_get_ch_lcid(ch_ctx),
+ glink_get_ch_rcid(ch_ctx),
+ glink_get_ch_xprt_name(ch_ctx),
+ glink_get_ch_edge_name(ch_ctx),
+ glink_get_ch_lstate(ch_ctx));
+ seq_printf(s, "%-7s|%-5i|%-5i|\n",
+ (glink_get_ch_rstate(ch_ctx) ? "OPENED" : "CLOSED"),
+ glink_get_ch_lintents_queued(ch_ctx),
+ glink_get_ch_rintents_queued(ch_ctx));
+
+ ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+ }
+ glink_ch_ctx_iterator_end(&ch_iter, xprt_ctx);
+ xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+ }
+
+ glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_create_xprt_list() - create & update the transport details
+ * @s: pointer to seq_file
+ *
+ * This function updates the transport details in the debugfs file present
+ * in /glink/xprt/xprts
+ */
+static void glink_dfs_create_xprt_list(struct seq_file *s)
+{
+ struct xprt_ctx_iterator xprt_iter;
+ struct glink_core_xprt_ctx *xprt_ctx;
+ const struct glink_core_version *gver;
+ uint32_t version;
+ uint32_t features;
+ int count = 0;
+ /*
+ * formatted, human readable channel state output, ie:
+ * XPRT_NAME|REMOTE |STATE|VERSION |FEATURES|
+ * ---------------------------------------------
+ * smd_trans|lpass |2 |0 |1 |
+ * smem |mpss |0 |0 |0 |
+ */
+ seq_printf(s, "%-20s|%-20s|%-6s|%-8s|%-8s|\n",
+ "XPRT_NAME",
+ "REMOTE",
+ "STATE",
+ "VERSION",
+ "FEATURES");
+ seq_puts(s,
+ "-------------------------------------------------------------------------------\n");
+ glink_xprt_ctx_iterator_init(&xprt_iter);
+ xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+ while (xprt_ctx != NULL) {
+ count++;
+ seq_printf(s, "%-20s|%-20s|",
+ glink_get_xprt_name(xprt_ctx),
+ glink_get_xprt_edge_name(xprt_ctx));
+ gver = glink_get_xprt_version_features(xprt_ctx);
+ if (gver != NULL) {
+ version = gver->version;
+ features = gver->features;
+ seq_printf(s, "%-6s|%-8i|%-8i|\n",
+ glink_get_xprt_state(xprt_ctx),
+ version,
+ features);
+ } else {
+ seq_printf(s, "%-6s|%-8i|%-8i|\n",
+ glink_get_xprt_state(xprt_ctx),
+ -ENODATA,
+ -ENODATA);
+ }
+ xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+ }
+
+ glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_update_list() - update the internally maintained dentry linked list
+ * @curr_dent: pointer to the current dentry object
+ * @parent: pointer to the parent dentry object
+ * @curr: current directory name
+ * @par_dir: parent directory name
+ */
+void glink_dfs_update_list(struct dentry *curr_dent, struct dentry *parent,
+ const char *curr, const char *par_dir)
+{
+	struct glink_dbgfs_dent *dbgfs_dent_s;
+
+	if (curr_dent != NULL) {
+ dbgfs_dent_s = kzalloc(sizeof(struct glink_dbgfs_dent),
+ GFP_KERNEL);
+ if (dbgfs_dent_s != NULL) {
+ INIT_LIST_HEAD(&dbgfs_dent_s->file_list);
+ spin_lock_init(&dbgfs_dent_s->file_list_lock_lhb0);
+ dbgfs_dent_s->parent = parent;
+ dbgfs_dent_s->self = curr_dent;
+ strlcpy(dbgfs_dent_s->self_name,
+ curr, strlen(curr) + 1);
+ strlcpy(dbgfs_dent_s->par_name, par_dir,
+ strlen(par_dir) + 1);
+ mutex_lock(&dent_list_lock_lha0);
+ list_add_tail(&dbgfs_dent_s->list_node, &dent_list);
+ mutex_unlock(&dent_list_lock_lha0);
+ }
+ } else {
+ GLINK_DBG("%s:create directory failed for par:curr [%s:%s]\n",
+ __func__, par_dir, curr);
+ }
+}
+
+/**
+ * glink_remove_dfs_entry() - remove the entries from dent_list
+ * @entry: pointer to the glink_dbgfs_dent structure
+ *
+ * This function removes the entries from the internally maintained
+ * linked list of dentries. It also deletes the file list and associated memory
+ * if present.
+ */
+void glink_remove_dfs_entry(struct glink_dbgfs_dent *entry)
+{
+ struct glink_dbgfs_data *fentry, *fentry_temp;
+ unsigned long flags;
+
+ if (entry == NULL)
+ return;
+ if (!list_empty(&entry->file_list)) {
+ spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+ list_for_each_entry_safe(fentry, fentry_temp,
+ &entry->file_list, flist) {
+ if (fentry->b_priv_free_req)
+ kfree(fentry->priv_data);
+ list_del(&fentry->flist);
+ kfree(fentry);
+ fentry = NULL;
+ }
+ spin_unlock_irqrestore(&entry->file_list_lock_lhb0, flags);
+ }
+ list_del(&entry->list_node);
+ kfree(entry);
+ entry = NULL;
+}
+
+/**
+ * glink_debugfs_remove_recur() - remove the directory & files recursively
+ * @rm_dfs: pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories below the given directory.
+ * This also takes care of freeing any memory associated with the debugfs file.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *rm_dfs)
+{
+ const char *c_dir_name;
+ const char *p_dir_name;
+ struct glink_dbgfs_dent *entry, *entry_temp;
+ struct dentry *par_dent = NULL;
+
+ if (rm_dfs == NULL)
+ return;
+
+ c_dir_name = rm_dfs->curr_name;
+ p_dir_name = rm_dfs->par_name;
+
+ mutex_lock(&dent_list_lock_lha0);
+ list_for_each_entry_safe(entry, entry_temp, &dent_list, list_node) {
+ if (!strcmp(entry->par_name, c_dir_name)) {
+ glink_remove_dfs_entry(entry);
+ } else if (!strcmp(entry->self_name, c_dir_name)
+ && !strcmp(entry->par_name, p_dir_name)) {
+ par_dent = entry->self;
+ glink_remove_dfs_entry(entry);
+ }
+ }
+ mutex_unlock(&dent_list_lock_lha0);
+ if (par_dent != NULL)
+ debugfs_remove_recursive(par_dent);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+/**
+ * glink_debugfs_create() - create the debugfs file
+ * @name: debugfs file name
+ * @show: pointer to the actual function which will be invoked upon
+ * opening this file.
+ * @dir: pointer to a glink_dbgfs structure
+ * @dbgfs_data: pointer to any private data that needs to be associated with
+ *		this debugfs file
+ * @b_free_req: boolean value to decide whether to free the memory associated
+ *		with @dbgfs_data during deletion of the file
+ *
+ * Return: pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs file
+ * and calls glink_dfs_create_file(). Anybody who intends to allocate memory
+ * for dbgfs_data and requires it to be freed on deletion needs to set
+ * b_free_req to true. Otherwise, there will be a memory leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+ void (*show)(struct seq_file *),
+ struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req)
+{
+ struct dentry *parent = NULL;
+ struct dentry *dent = NULL;
+ struct glink_dbgfs_dent *entry;
+ struct glink_dbgfs_data *file_data;
+ const char *c_dir_name;
+ const char *p_dir_name;
+ unsigned long flags;
+
+ if (dir == NULL) {
+		GLINK_ERR("%s: debugfs_dir structure is null\n", __func__);
+ return NULL;
+ }
+ c_dir_name = dir->curr_name;
+ p_dir_name = dir->par_name;
+
+ mutex_lock(&dent_list_lock_lha0);
+ list_for_each_entry(entry, &dent_list, list_node)
+ if (!strcmp(entry->par_name, p_dir_name)
+ && !strcmp(entry->self_name, c_dir_name)) {
+ parent = entry->self;
+ break;
+ }
+ mutex_unlock(&dent_list_lock_lha0);
+ p_dir_name = c_dir_name;
+ c_dir_name = name;
+ if (parent != NULL) {
+ if (dir->b_dir_create) {
+ dent = debugfs_create_dir(name, parent);
+ if (dent != NULL)
+ glink_dfs_update_list(dent, parent,
+ c_dir_name, p_dir_name);
+ } else {
+ file_data = glink_dfs_create_file(name, parent, show,
+ dbgfs_data, b_free_req);
+ spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+ if (file_data != NULL)
+ list_add_tail(&file_data->flist,
+ &entry->file_list);
+ spin_unlock_irqrestore(&entry->file_list_lock_lhb0,
+ flags);
+ }
+ } else {
+ GLINK_DBG("%s: parent dentry is null for [%s]\n",
+ __func__, name);
+ }
+ return dent;
+}
+EXPORT_SYMBOL(glink_debugfs_create);
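+
+/*
+ * Example (illustrative sketch, not part of this snapshot): create a
+ * hypothetical "foo" directory under the root "glink" directory and a
+ * "status" file inside it, following the same par_name/curr_name
+ * pattern that glink_debugfs_init() uses below. example_show() and its
+ * output are purely illustrative. Compiled out deliberately.
+ */
+#if 0
+static void example_show(struct seq_file *s)
+{
+	seq_puts(s, "example status\n");
+}
+
+static void example_add_debugfs(void)
+{
+	struct glink_dbgfs dfs;
+
+	/* create the "foo" directory under /sys/kernel/debug/glink */
+	dfs.par_name = "root";
+	dfs.curr_name = "glink";
+	dfs.b_dir_create = true;
+	glink_debugfs_create("foo", NULL, &dfs, NULL, false);
+
+	/* create the "status" file inside the new directory */
+	dfs.par_name = "glink";
+	dfs.curr_name = "foo";
+	dfs.b_dir_create = false;
+	glink_debugfs_create("status", example_show, &dfs, NULL, false);
+}
+#endif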
+
+/**
+ * glink_debugfs_init() - initialize the glink debugfs directory structure
+ *
+ * Return: 0 in success otherwise appropriate error code
+ *
+ * This function initializes the debugfs directory for glink
+ */
+int glink_debugfs_init(void)
+{
+ struct glink_dbgfs dbgfs;
+
+ /* fake parent name */
+ dent = debugfs_create_dir("glink", NULL);
+ if (IS_ERR_OR_NULL(dent))
+ return PTR_ERR(dent);
+
+ glink_dfs_update_list(dent, NULL, "glink", "root");
+
+ dbgfs.b_dir_create = true;
+ dbgfs.curr_name = "glink";
+ dbgfs.par_name = "root";
+ glink_debugfs_create("xprt", NULL, &dbgfs, NULL, false);
+ glink_debugfs_create("channel", NULL, &dbgfs, NULL, false);
+
+ dbgfs.curr_name = "channel";
+ dbgfs.par_name = "glink";
+ dbgfs.b_dir_create = false;
+ glink_debugfs_create("channels", glink_dfs_create_channel_list,
+ &dbgfs, NULL, false);
+ dbgfs.curr_name = "xprt";
+ glink_debugfs_create("xprts", glink_dfs_create_xprt_list,
+ &dbgfs, NULL, false);
+
+ return 0;
+}
+EXPORT_SYMBOL(glink_debugfs_init);
+
+/**
+ * glink_debugfs_exit() - removes the glink debugfs directory
+ *
+ * This function recursively remove all the debugfs directories
+ * starting from dent
+ */
+void glink_debugfs_exit(void)
+{
+ if (dent != NULL)
+ debugfs_remove_recursive(dent);
+}
+EXPORT_SYMBOL(glink_debugfs_exit);
+#else
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs) { }
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+ struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+ struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+int glink_debugfs_init(void) { return 0; }
+EXPORT_SYMBOL(glink_debugfs_init);
+
+void glink_debugfs_exit(void) { }
+EXPORT_SYMBOL(glink_debugfs_exit);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/soc/qcom/glink_loopback_commands.h b/drivers/soc/qcom/glink_loopback_commands.h
new file mode 100644
index 000000000000..b4375ddbd1a8
--- /dev/null
+++ b/drivers/soc/qcom/glink_loopback_commands.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _GLINK_LOOPBACK_COMMANDS_H_
+#define _GLINK_LOOPBACK_COMMANDS_H_
+
+#define MAX_NAME_LEN 32
+
+enum request_type {
+ OPEN = 1,
+ CLOSE,
+ QUEUE_RX_INTENT_CONFIG,
+ TX_CONFIG,
+ RX_DONE_CONFIG,
+};
+
+struct req_hdr {
+ uint32_t req_id;
+ uint32_t req_type;
+ uint32_t req_size;
+};
+
+struct open_req {
+ uint32_t delay_ms;
+ uint32_t name_len;
+ char ch_name[MAX_NAME_LEN];
+};
+
+struct close_req {
+ uint32_t delay_ms;
+ uint32_t name_len;
+ char ch_name[MAX_NAME_LEN];
+};
+
+struct queue_rx_intent_config_req {
+ uint32_t num_intents;
+ uint32_t intent_size;
+ uint32_t random_delay;
+ uint32_t delay_ms;
+ uint32_t name_len;
+ char ch_name[MAX_NAME_LEN];
+};
+
+enum transform_type {
+ NO_TRANSFORM = 0,
+ PACKET_COUNT,
+ CHECKSUM,
+};
+
+struct tx_config_req {
+ uint32_t random_delay;
+ uint32_t delay_ms;
+ uint32_t echo_count;
+ uint32_t transform_type;
+ uint32_t name_len;
+ char ch_name[MAX_NAME_LEN];
+};
+
+struct rx_done_config_req {
+ uint32_t random_delay;
+ uint32_t delay_ms;
+ uint32_t name_len;
+ char ch_name[MAX_NAME_LEN];
+};
+
+union req_payload {
+ struct open_req open;
+ struct close_req close;
+ struct queue_rx_intent_config_req q_rx_int_conf;
+ struct tx_config_req tx_conf;
+ struct rx_done_config_req rx_done_conf;
+};
+
+struct req {
+ struct req_hdr hdr;
+ union req_payload payload;
+};
+
+struct resp {
+ uint32_t req_id;
+ uint32_t req_type;
+ uint32_t response;
+};
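+
+/*
+ * Example (illustrative sketch, not part of this snapshot): how a client
+ * might fill in an OPEN request for the loopback server. The channel
+ * name and req_id are hypothetical. Compiled out deliberately.
+ */
+#if 0
+static void example_fill_open_req(struct req *pkt, uint32_t req_id)
+{
+	memset(pkt, 0, sizeof(*pkt));
+	pkt->hdr.req_id = req_id;
+	pkt->hdr.req_type = OPEN;
+	pkt->hdr.req_size = sizeof(struct open_req);
+	pkt->payload.open.delay_ms = 0;
+	pkt->payload.open.name_len =
+		strlcpy(pkt->payload.open.ch_name,
+			"LOOPBACK_DATA_CLNT", MAX_NAME_LEN);
+}
+#endif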
+
+/*
+ * Tracer Packet Event IDs for Loopback Client/Server.
+ * This being a client of G-Link, the tracer packet events start
+ * from 256.
+ */
+enum loopback_tracer_pkt_events {
+ LOOPBACK_SRV_TX = 256,
+ LOOPBACK_SRV_RX = 257,
+ LOOPBACK_CLNT_TX = 258,
+ LOOPBACK_CLNT_RX = 259,
+};
+#endif
diff --git a/drivers/soc/qcom/glink_loopback_server.c b/drivers/soc/qcom/glink_loopback_server.c
new file mode 100644
index 000000000000..5d95ae7a5081
--- /dev/null
+++ b/drivers/soc/qcom/glink_loopback_server.c
@@ -0,0 +1,1296 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/uio.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_loopback_commands.h"
+
+
+/* Number of internal IPC Logging log pages */
+#define GLINK_LBSRV_NUM_LOG_PAGES 3
+
+static void *glink_lbsrv_log_ctx;
+
+#define GLINK_LBSRV_IPC_LOG_STR(x...) do { \
+ if (glink_lbsrv_log_ctx) \
+ ipc_log_string(glink_lbsrv_log_ctx, x); \
+} while (0)
+
+#define LBSRV_INFO(x...) GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x)
+
+#define LBSRV_ERR(x...) do { \
+ pr_err("<LBSRV> " x); \
+ GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x); \
+} while (0)
+
+enum ch_type {
+ CTL,
+ DATA,
+};
+
+enum buf_type {
+ LINEAR,
+ VECTOR,
+};
+
+struct tx_config_info {
+ uint32_t random_delay;
+ uint32_t delay_ms;
+ uint32_t echo_count;
+ uint32_t transform_type;
+};
+
+struct rx_done_config_info {
+ uint32_t random_delay;
+ uint32_t delay_ms;
+};
+
+struct rmt_rx_intent_req_work_info {
+ size_t req_intent_size;
+ struct delayed_work work;
+ struct ch_info *work_ch_info;
+};
+
+struct queue_rx_intent_work_info {
+ uint32_t req_id;
+ bool deferred;
+ struct ch_info *req_ch_info;
+ uint32_t num_intents;
+ uint32_t intent_size;
+ uint32_t random_delay;
+ uint32_t delay_ms;
+ struct delayed_work work;
+ struct ch_info *work_ch_info;
+};
+
+struct lbsrv_vec {
+ uint32_t num_bufs;
+ struct kvec vec[0];
+};
+
+struct tx_work_info {
+ struct tx_config_info tx_config;
+ struct delayed_work work;
+ struct ch_info *tx_ch_info;
+ void *data;
+ bool tracer_pkt;
+ uint32_t buf_type;
+ size_t size;
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+};
+
+struct rx_done_work_info {
+ struct delayed_work work;
+ struct ch_info *rx_done_ch_info;
+ void *ptr;
+};
+
+struct rx_work_info {
+ struct ch_info *rx_ch_info;
+ void *pkt_priv;
+ void *ptr;
+ bool tracer_pkt;
+ uint32_t buf_type;
+ size_t size;
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+ struct delayed_work work;
+};
+
+struct ch_info {
+ struct list_head list;
+ struct mutex ch_info_lock;
+ char name[MAX_NAME_LEN];
+ char edge[GLINK_NAME_SIZE];
+ char transport[GLINK_NAME_SIZE];
+ void *handle;
+ bool fully_opened;
+ uint32_t type;
+ struct delayed_work open_work;
+ struct delayed_work close_work;
+ struct tx_config_info tx_config;
+ struct rx_done_config_info rx_done_config;
+ struct queue_rx_intent_work_info *queue_rx_intent_work_info;
+};
+
+struct ctl_ch_info {
+ char name[MAX_NAME_LEN];
+ char edge[GLINK_NAME_SIZE];
+ char transport[GLINK_NAME_SIZE];
+};
+
+static struct ctl_ch_info ctl_ch_tbl[] = {
+ {"LOCAL_LOOPBACK_SRV", "local", "lloop"},
+ {"LOOPBACK_CTL_APSS", "mpss", "smem"},
+ {"LOOPBACK_CTL_APSS", "lpass", "smem"},
+ {"LOOPBACK_CTL_APSS", "dsps", "smem"},
+ {"LOOPBACK_CTL_APSS", "spss", "mailbox"},
+};
+
+static DEFINE_MUTEX(ctl_ch_list_lock);
+static LIST_HEAD(ctl_ch_list);
+static DEFINE_MUTEX(data_ch_list_lock);
+static LIST_HEAD(data_ch_list);
+
+struct workqueue_struct *glink_lbsrv_wq;
+
+/**
+ * struct link_state_work_info - Information about work handling link state
+ *				 updates
+ * @edge: Remote subsystem name in the link.
+ * @transport: Name of the transport/link.
+ * @link_state: State of the transport/link.
+ * @work: Reference to the work item.
+ */
+struct link_state_work_info {
+ char edge[GLINK_NAME_SIZE];
+ char transport[GLINK_NAME_SIZE];
+ enum glink_link_state link_state;
+ struct delayed_work work;
+};
+
+static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
+ void *priv);
+static struct glink_link_info glink_lbsrv_link_info = {
+ NULL, NULL, glink_lbsrv_link_state_cb};
+static void *glink_lbsrv_link_state_notif_handle;
+
+static void glink_lbsrv_open_worker(struct work_struct *work);
+static void glink_lbsrv_close_worker(struct work_struct *work);
+static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work);
+static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work);
+static void glink_lbsrv_rx_worker(struct work_struct *work);
+static void glink_lbsrv_rx_done_worker(struct work_struct *work);
+static void glink_lbsrv_tx_worker(struct work_struct *work);
+
+int glink_lbsrv_send_response(void *handle, uint32_t req_id, uint32_t req_type,
+ uint32_t response)
+{
+ struct resp *resp_pkt = kzalloc(sizeof(struct resp), GFP_KERNEL);
+
+ if (!resp_pkt) {
+ LBSRV_ERR("%s: Error allocating response packet\n", __func__);
+ return -ENOMEM;
+ }
+
+ resp_pkt->req_id = req_id;
+ resp_pkt->req_type = req_type;
+ resp_pkt->response = response;
+
+ return glink_tx(handle, (void *)LINEAR, (void *)resp_pkt,
+ sizeof(struct resp), 0);
+}
+
+static uint32_t calc_delay_ms(uint32_t random_delay, uint32_t delay_ms)
+{
+ uint32_t tmp_delay_ms;
+
+ if (random_delay && delay_ms)
+ tmp_delay_ms = prandom_u32() % delay_ms;
+ else if (random_delay)
+ tmp_delay_ms = prandom_u32();
+ else
+ tmp_delay_ms = delay_ms;
+
+ return tmp_delay_ms;
+}
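+
+/*
+ * Worked examples (illustrative): calc_delay_ms(1, 100) returns a
+ * pseudo-random delay in [0, 99] ms, calc_delay_ms(1, 0) returns an
+ * unbounded pseudo-random value, and calc_delay_ms(0, 100) always
+ * returns exactly 100 ms.
+ */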
+
+static int create_ch_info(char *name, char *edge, char *transport,
+ uint32_t type, struct ch_info **ret_ch_info)
+{
+ struct ch_info *tmp_ch_info;
+
+ tmp_ch_info = kzalloc(sizeof(struct ch_info), GFP_KERNEL);
+ if (!tmp_ch_info) {
+		LBSRV_ERR("%s: Error allocating ch_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&tmp_ch_info->list);
+ mutex_init(&tmp_ch_info->ch_info_lock);
+ strlcpy(tmp_ch_info->name, name, MAX_NAME_LEN);
+ strlcpy(tmp_ch_info->edge, edge, GLINK_NAME_SIZE);
+ strlcpy(tmp_ch_info->transport, transport, GLINK_NAME_SIZE);
+ tmp_ch_info->type = type;
+ INIT_DELAYED_WORK(&tmp_ch_info->open_work,
+ glink_lbsrv_open_worker);
+ INIT_DELAYED_WORK(&tmp_ch_info->close_work,
+ glink_lbsrv_close_worker);
+ tmp_ch_info->tx_config.echo_count = 1;
+
+ if (type == CTL) {
+ mutex_lock(&ctl_ch_list_lock);
+ list_add_tail(&tmp_ch_info->list, &ctl_ch_list);
+ mutex_unlock(&ctl_ch_list_lock);
+ } else if (type == DATA) {
+ mutex_lock(&data_ch_list_lock);
+ list_add_tail(&tmp_ch_info->list, &data_ch_list);
+ mutex_unlock(&data_ch_list_lock);
+ } else {
+ LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
+ edge, name, __func__, type);
+ kfree(tmp_ch_info);
+ return -EINVAL;
+ }
+ *ret_ch_info = tmp_ch_info;
+ return 0;
+}
+
+struct ch_info *lookup_ch_list(char *name, char *edge, char *transport,
+ uint32_t type)
+{
+ struct list_head *ch_list;
+ struct mutex *lock;
+ struct ch_info *tmp_ch_info;
+
+ if (type == DATA) {
+ ch_list = &data_ch_list;
+ lock = &data_ch_list_lock;
+ } else if (type == CTL) {
+ ch_list = &ctl_ch_list;
+ lock = &ctl_ch_list_lock;
+ } else {
+ LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
+ edge, name, __func__, type);
+ return NULL;
+ }
+
+ mutex_lock(lock);
+ list_for_each_entry(tmp_ch_info, ch_list, list) {
+ if (!strcmp(name, tmp_ch_info->name) &&
+ !strcmp(edge, tmp_ch_info->edge) &&
+ !strcmp(transport, tmp_ch_info->transport)) {
+ mutex_unlock(lock);
+ return tmp_ch_info;
+ }
+ }
+ mutex_unlock(lock);
+ return NULL;
+}
+
+int glink_lbsrv_handle_open_req(struct ch_info *rx_ch_info,
+ struct open_req req)
+{
+ struct ch_info *tmp_ch_info;
+ int ret;
+ char name[MAX_NAME_LEN];
+ char *temp;
+
+ strlcpy(name, req.ch_name, MAX_NAME_LEN);
+ if (!strcmp(rx_ch_info->transport, "lloop")) {
+ temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+ if (temp)
+ *temp = '\0';
+ strlcat(name, "_SRV", MAX_NAME_LEN);
+ }
+ LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ name, __func__, req.delay_ms);
+ tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+ rx_ch_info->transport, DATA);
+ if (tmp_ch_info)
+ goto queue_open_work;
+
+ ret = create_ch_info(name, rx_ch_info->edge, rx_ch_info->transport,
+ DATA, &tmp_ch_info);
+ if (ret)
+ return ret;
+queue_open_work:
+ queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->open_work,
+ msecs_to_jiffies(req.delay_ms));
+ return 0;
+}
+
+int glink_lbsrv_handle_close_req(struct ch_info *rx_ch_info,
+ struct close_req req)
+{
+ struct ch_info *tmp_ch_info;
+ char name[MAX_NAME_LEN];
+ char *temp;
+
+ strlcpy(name, req.ch_name, MAX_NAME_LEN);
+ if (!strcmp(rx_ch_info->transport, "lloop")) {
+ temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+ if (temp)
+ *temp = '\0';
+ strlcat(name, "_SRV", MAX_NAME_LEN);
+ }
+ LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ name, __func__, req.delay_ms);
+ tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+ rx_ch_info->transport, DATA);
+ if (tmp_ch_info)
+ queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->close_work,
+ msecs_to_jiffies(req.delay_ms));
+ return 0;
+}
+
+int glink_lbsrv_handle_queue_rx_intent_config_req(struct ch_info *rx_ch_info,
+ struct queue_rx_intent_config_req req, uint32_t req_id)
+{
+ struct ch_info *tmp_ch_info;
+ struct queue_rx_intent_work_info *tmp_work_info;
+ char name[MAX_NAME_LEN];
+ char *temp;
+ uint32_t delay_ms;
+
+ strlcpy(name, req.ch_name, MAX_NAME_LEN);
+ if (!strcmp(rx_ch_info->transport, "lloop")) {
+ temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+ if (temp)
+ *temp = '\0';
+ strlcat(name, "_SRV", MAX_NAME_LEN);
+ }
+ LBSRV_INFO("%s:%s:%s %s: num_intents[%d] size[%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge, name, __func__,
+ req.num_intents, req.intent_size);
+ tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+ rx_ch_info->transport, DATA);
+ if (!tmp_ch_info) {
+ LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ name, __func__);
+ return -EINVAL;
+ }
+
+ tmp_work_info = kzalloc(sizeof(struct queue_rx_intent_work_info),
+ GFP_KERNEL);
+ if (!tmp_work_info) {
+ LBSRV_ERR("%s: Error allocating work_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ tmp_work_info->req_id = req_id;
+ tmp_work_info->req_ch_info = rx_ch_info;
+ tmp_work_info->num_intents = req.num_intents;
+ tmp_work_info->intent_size = req.intent_size;
+ tmp_work_info->random_delay = req.random_delay;
+ tmp_work_info->delay_ms = req.delay_ms;
+ INIT_DELAYED_WORK(&tmp_work_info->work,
+ glink_lbsrv_queue_rx_intent_worker);
+ tmp_work_info->work_ch_info = tmp_ch_info;
+
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ if (tmp_ch_info->fully_opened) {
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ delay_ms = calc_delay_ms(tmp_work_info->random_delay,
+ tmp_work_info->delay_ms);
+ queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+ msecs_to_jiffies(delay_ms));
+
+ if (tmp_work_info->random_delay || tmp_work_info->delay_ms)
+ glink_lbsrv_send_response(rx_ch_info->handle, req_id,
+ QUEUE_RX_INTENT_CONFIG, 0);
+ } else {
+ tmp_work_info->deferred = true;
+ tmp_ch_info->queue_rx_intent_work_info = tmp_work_info;
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+
+ glink_lbsrv_send_response(rx_ch_info->handle, req_id,
+ QUEUE_RX_INTENT_CONFIG, 0);
+ }
+
+ return 0;
+}
+
+int glink_lbsrv_handle_tx_config_req(struct ch_info *rx_ch_info,
+ struct tx_config_req req)
+{
+ struct ch_info *tmp_ch_info;
+ char name[MAX_NAME_LEN];
+ char *temp;
+
+ strlcpy(name, req.ch_name, MAX_NAME_LEN);
+ if (!strcmp(rx_ch_info->transport, "lloop")) {
+ temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+ if (temp)
+ *temp = '\0';
+ strlcat(name, "_SRV", MAX_NAME_LEN);
+ }
+ LBSRV_INFO("%s:%s:%s %s: echo_count[%d] transform[%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge, name, __func__,
+ req.echo_count, req.transform_type);
+ tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+ rx_ch_info->transport, DATA);
+ if (!tmp_ch_info) {
+ LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ name, __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ tmp_ch_info->tx_config.random_delay = req.random_delay;
+ tmp_ch_info->tx_config.delay_ms = req.delay_ms;
+ tmp_ch_info->tx_config.echo_count = req.echo_count;
+ tmp_ch_info->tx_config.transform_type = req.transform_type;
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ return 0;
+}
+
+int glink_lbsrv_handle_rx_done_config_req(struct ch_info *rx_ch_info,
+ struct rx_done_config_req req)
+{
+ struct ch_info *tmp_ch_info;
+ char name[MAX_NAME_LEN];
+ char *temp;
+
+ strlcpy(name, req.ch_name, MAX_NAME_LEN);
+ if (!strcmp(rx_ch_info->transport, "lloop")) {
+ temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+ if (temp)
+ *temp = '\0';
+ strlcat(name, "_SRV", MAX_NAME_LEN);
+ }
+ LBSRV_INFO("%s:%s:%s %s: delay_ms[%d] random_delay[%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge, name,
+ __func__, req.delay_ms, req.random_delay);
+ tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+ rx_ch_info->transport, DATA);
+ if (!tmp_ch_info) {
+ LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ name, __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ tmp_ch_info->rx_done_config.random_delay = req.random_delay;
+ tmp_ch_info->rx_done_config.delay_ms = req.delay_ms;
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ return 0;
+}
+
+/**
+ * glink_lbsrv_handle_req() - Handle the request commands received from clients
+ *
+ * @rx_ch_info: Channel info on which the request is received
+ * @pkt: Request structure received from the client
+ *
+ * This function handles all supported request types received from a client
+ * and sends the response back to the client
+ */
+void glink_lbsrv_handle_req(struct ch_info *rx_ch_info, struct req pkt)
+{
+ int ret;
+
+ LBSRV_INFO("%s:%s:%s %s: Request packet type[%d]:id[%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__, pkt.hdr.req_type,
+ pkt.hdr.req_id);
+ switch (pkt.hdr.req_type) {
+ case OPEN:
+ ret = glink_lbsrv_handle_open_req(rx_ch_info,
+ pkt.payload.open);
+ break;
+ case CLOSE:
+ ret = glink_lbsrv_handle_close_req(rx_ch_info,
+ pkt.payload.close);
+ break;
+ case QUEUE_RX_INTENT_CONFIG:
+ ret = glink_lbsrv_handle_queue_rx_intent_config_req(
+ rx_ch_info, pkt.payload.q_rx_int_conf, pkt.hdr.req_id);
+ break;
+ case TX_CONFIG:
+ ret = glink_lbsrv_handle_tx_config_req(rx_ch_info,
+ pkt.payload.tx_conf);
+ break;
+ case RX_DONE_CONFIG:
+ ret = glink_lbsrv_handle_rx_done_config_req(rx_ch_info,
+ pkt.payload.rx_done_conf);
+ break;
+ default:
+ LBSRV_ERR("%s:%s:%s %s: Invalid Request type [%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__, pkt.hdr.req_type);
+ ret = -1;
+ break;
+ }
+
+ if (pkt.hdr.req_type != QUEUE_RX_INTENT_CONFIG)
+ glink_lbsrv_send_response(rx_ch_info->handle, pkt.hdr.req_id,
+ pkt.hdr.req_type, ret);
+}
+
+static void *glink_lbsrv_vbuf_provider(void *iovec, size_t offset,
+ size_t *buf_size)
+{
+ struct lbsrv_vec *tmp_vec_info = (struct lbsrv_vec *)iovec;
+ uint32_t i;
+ size_t temp_size = 0;
+
+ for (i = 0; i < tmp_vec_info->num_bufs; i++) {
+ temp_size += tmp_vec_info->vec[i].iov_len;
+ if (offset >= temp_size)
+ continue;
+ *buf_size = temp_size - offset;
+ return (void *)tmp_vec_info->vec[i].iov_base +
+ tmp_vec_info->vec[i].iov_len - *buf_size;
+ }
+ *buf_size = 0;
+ return NULL;
+}
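+
+/*
+ * Worked example (illustrative): for a vector of two buffers of 8 and
+ * 4 bytes, an offset of 10 lands in the second buffer; temp_size reaches
+ * 12, *buf_size becomes 2 and the returned address is iov_base + 2 of
+ * that buffer.
+ */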
+
+static void glink_lbsrv_free_data(void *data, uint32_t buf_type)
+{
+ struct lbsrv_vec *tmp_vec_info;
+ uint32_t i;
+
+ if (buf_type == LINEAR) {
+ kfree(data);
+ } else {
+ tmp_vec_info = (struct lbsrv_vec *)data;
+ for (i = 0; i < tmp_vec_info->num_bufs; i++) {
+ kfree(tmp_vec_info->vec[i].iov_base);
+ tmp_vec_info->vec[i].iov_base = NULL;
+ }
+ kfree(tmp_vec_info);
+ }
+}
+
+static void *copy_linear_data(struct rx_work_info *tmp_rx_work_info)
+{
+ char *data;
+ struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+
+ data = kmalloc(tmp_rx_work_info->size, GFP_KERNEL);
+ if (data)
+ memcpy(data, tmp_rx_work_info->ptr, tmp_rx_work_info->size);
+ else
+ LBSRV_ERR("%s:%s:%s %s: Error allocating the data\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ return data;
+}
+
+static void *copy_vector_data(struct rx_work_info *tmp_rx_work_info)
+{
+ uint32_t num_bufs = 0;
+ struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+ struct lbsrv_vec *tmp_vec_info;
+ void *buf, *pbuf, *dest_buf;
+ size_t offset = 0;
+ size_t buf_size;
+ uint32_t i;
+
+ do {
+ if (tmp_rx_work_info->vbuf_provider)
+ buf = tmp_rx_work_info->vbuf_provider(
+ tmp_rx_work_info->ptr, offset, &buf_size);
+ else
+ buf = tmp_rx_work_info->pbuf_provider(
+ tmp_rx_work_info->ptr, offset, &buf_size);
+ if (!buf)
+ break;
+ offset += buf_size;
+ num_bufs++;
+ } while (buf);
+
+ tmp_vec_info = kzalloc(sizeof(*tmp_vec_info) +
+ num_bufs * sizeof(struct kvec), GFP_KERNEL);
+ if (!tmp_vec_info) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating vector info\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ return NULL;
+ }
+ tmp_vec_info->num_bufs = num_bufs;
+
+ offset = 0;
+ for (i = 0; i < num_bufs; i++) {
+ if (tmp_rx_work_info->vbuf_provider) {
+ buf = tmp_rx_work_info->vbuf_provider(
+ tmp_rx_work_info->ptr, offset, &buf_size);
+ } else {
+ pbuf = tmp_rx_work_info->pbuf_provider(
+ tmp_rx_work_info->ptr, offset, &buf_size);
+ buf = phys_to_virt((unsigned long)pbuf);
+ }
+ dest_buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!dest_buf) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating data\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ goto out_copy_vector_data;
+ }
+ memcpy(dest_buf, buf, buf_size);
+ tmp_vec_info->vec[i].iov_base = dest_buf;
+ tmp_vec_info->vec[i].iov_len = buf_size;
+ offset += buf_size;
+ }
+ return tmp_vec_info;
+out_copy_vector_data:
+ glink_lbsrv_free_data((void *)tmp_vec_info, VECTOR);
+ return NULL;
+}
+
+static void *glink_lbsrv_copy_data(struct rx_work_info *tmp_rx_work_info)
+{
+ if (tmp_rx_work_info->buf_type == LINEAR)
+ return copy_linear_data(tmp_rx_work_info);
+ else
+ return copy_vector_data(tmp_rx_work_info);
+}
+
+static int glink_lbsrv_handle_data(struct rx_work_info *tmp_rx_work_info)
+{
+ void *data;
+ int ret;
+ struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+ struct tx_work_info *tmp_tx_work_info;
+ struct rx_done_work_info *tmp_rx_done_work_info;
+ uint32_t delay_ms;
+
+ data = glink_lbsrv_copy_data(tmp_rx_work_info);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_handle_data;
+ }
+
+ tmp_rx_done_work_info = kmalloc(sizeof(struct rx_done_work_info),
+ GFP_KERNEL);
+ if (!tmp_rx_done_work_info) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating rx_done_work_info\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
+ ret = -ENOMEM;
+ goto out_handle_data;
+ }
+ INIT_DELAYED_WORK(&tmp_rx_done_work_info->work,
+ glink_lbsrv_rx_done_worker);
+ tmp_rx_done_work_info->rx_done_ch_info = rx_ch_info;
+ tmp_rx_done_work_info->ptr = tmp_rx_work_info->ptr;
+ delay_ms = calc_delay_ms(rx_ch_info->rx_done_config.random_delay,
+ rx_ch_info->rx_done_config.delay_ms);
+ queue_delayed_work(glink_lbsrv_wq, &tmp_rx_done_work_info->work,
+ msecs_to_jiffies(delay_ms));
+
+ tmp_tx_work_info = kmalloc(sizeof(struct tx_work_info), GFP_KERNEL);
+ if (!tmp_tx_work_info) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating tx_work_info\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
+ return -ENOMEM;
+ }
+ mutex_lock(&rx_ch_info->ch_info_lock);
+ tmp_tx_work_info->tx_config.random_delay =
+ rx_ch_info->tx_config.random_delay;
+ tmp_tx_work_info->tx_config.delay_ms = rx_ch_info->tx_config.delay_ms;
+ tmp_tx_work_info->tx_config.echo_count =
+ rx_ch_info->tx_config.echo_count;
+ tmp_tx_work_info->tx_config.transform_type =
+ rx_ch_info->tx_config.transform_type;
+ mutex_unlock(&rx_ch_info->ch_info_lock);
+ INIT_DELAYED_WORK(&tmp_tx_work_info->work, glink_lbsrv_tx_worker);
+ tmp_tx_work_info->tx_ch_info = rx_ch_info;
+ tmp_tx_work_info->data = data;
+ tmp_tx_work_info->tracer_pkt = tmp_rx_work_info->tracer_pkt;
+ tmp_tx_work_info->buf_type = tmp_rx_work_info->buf_type;
+ tmp_tx_work_info->size = tmp_rx_work_info->size;
+ if (tmp_tx_work_info->buf_type == VECTOR)
+ tmp_tx_work_info->vbuf_provider = glink_lbsrv_vbuf_provider;
+ else
+ tmp_tx_work_info->vbuf_provider = NULL;
+ tmp_tx_work_info->pbuf_provider = NULL;
+ delay_ms = calc_delay_ms(tmp_tx_work_info->tx_config.random_delay,
+ tmp_tx_work_info->tx_config.delay_ms);
+ queue_delayed_work(glink_lbsrv_wq, &tmp_tx_work_info->work,
+ msecs_to_jiffies(delay_ms));
+ return 0;
+out_handle_data:
+ glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr, false);
+ return ret;
+}
+
+void glink_lpbsrv_notify_rx(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr, size_t size)
+{
+ struct rx_work_info *tmp_work_info;
+ struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+ LBSRV_INFO(
+ "%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
+ rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+ __func__, pkt_priv, (char *)ptr, size);
+ tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+ if (!tmp_work_info) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ return;
+ }
+
+ tmp_work_info->rx_ch_info = rx_ch_info;
+ tmp_work_info->pkt_priv = (void *)pkt_priv;
+ tmp_work_info->ptr = (void *)ptr;
+ tmp_work_info->buf_type = LINEAR;
+ tmp_work_info->size = size;
+ INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+ queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_rxv(void *handle, const void *priv,
+ const void *pkt_priv, void *ptr, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size))
+{
+ struct rx_work_info *tmp_work_info;
+ struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+ LBSRV_INFO("%s:%s:%s %s: priv[%p] data[%p] size[%zu]\n",
+ rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+ __func__, pkt_priv, (char *)ptr, size);
+ tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+ if (!tmp_work_info) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ return;
+ }
+
+ tmp_work_info->rx_ch_info = rx_ch_info;
+ tmp_work_info->pkt_priv = (void *)pkt_priv;
+ tmp_work_info->ptr = (void *)ptr;
+ tmp_work_info->buf_type = VECTOR;
+ tmp_work_info->size = size;
+ tmp_work_info->vbuf_provider = vbuf_provider;
+ tmp_work_info->pbuf_provider = pbuf_provider;
+ INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+ queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_rx_tp(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr, size_t size)
+{
+ struct rx_work_info *tmp_work_info;
+ struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+ LBSRV_INFO(
+ "%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
+ rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+ __func__, pkt_priv, (char *)ptr, size);
+ tracer_pkt_log_event((void *)ptr, LOOPBACK_SRV_RX);
+ tmp_work_info = kmalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+ if (!tmp_work_info) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__);
+ return;
+ }
+
+ tmp_work_info->rx_ch_info = rx_ch_info;
+ tmp_work_info->pkt_priv = (void *)pkt_priv;
+ tmp_work_info->ptr = (void *)ptr;
+ tmp_work_info->tracer_pkt = true;
+ tmp_work_info->buf_type = LINEAR;
+ tmp_work_info->size = size;
+ INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+ queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_tx_done(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr)
+{
+	struct ch_info *tx_done_ch_info = (struct ch_info *)priv;
+
+ LBSRV_INFO("%s:%s:%s %s: end (Success) TX_DONE ptr[%p]\n",
+ tx_done_ch_info->transport, tx_done_ch_info->edge,
+ tx_done_ch_info->name, __func__, ptr);
+
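+	/*
+	 * A pkt_priv of 0xFFFFFFFF marks data that the tx worker is still
+	 * echoing; any other value encodes the buffer type to free.
+	 */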
+ if (pkt_priv != (const void *)0xFFFFFFFF)
+ glink_lbsrv_free_data((void *)ptr,
+ (uint32_t)(uintptr_t)pkt_priv);
+}
+
+void glink_lpbsrv_notify_state(void *handle, const void *priv, unsigned event)
+{
+ int ret;
+ uint32_t delay_ms;
+ struct ch_info *tmp_ch_info = (struct ch_info *)priv;
+ struct queue_rx_intent_work_info *tmp_work_info = NULL;
+
+ LBSRV_INFO("%s:%s:%s %s: event[%d]\n",
+ tmp_ch_info->transport, tmp_ch_info->edge,
+ tmp_ch_info->name, __func__, event);
+ if (tmp_ch_info->type == CTL) {
+ if (event == GLINK_CONNECTED) {
+ ret = glink_queue_rx_intent(handle,
+ priv, sizeof(struct req));
+ LBSRV_INFO(
+ "%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+ tmp_ch_info->transport,
+ tmp_ch_info->edge,
+ tmp_ch_info->name,
+ __func__, sizeof(struct req), ret);
+ } else if (event == GLINK_LOCAL_DISCONNECTED) {
+ queue_delayed_work(glink_lbsrv_wq,
+ &tmp_ch_info->open_work,
+ msecs_to_jiffies(0));
+ } else if (event == GLINK_REMOTE_DISCONNECTED)
+ if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+ queue_delayed_work(glink_lbsrv_wq,
+ &tmp_ch_info->close_work, 0);
+ } else if (tmp_ch_info->type == DATA) {
+ if (event == GLINK_CONNECTED) {
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ tmp_ch_info->fully_opened = true;
+ tmp_work_info = tmp_ch_info->queue_rx_intent_work_info;
+ tmp_ch_info->queue_rx_intent_work_info = NULL;
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+
+ if (tmp_work_info) {
+ delay_ms = calc_delay_ms(
+ tmp_work_info->random_delay,
+ tmp_work_info->delay_ms);
+ queue_delayed_work(glink_lbsrv_wq,
+ &tmp_work_info->work,
+ msecs_to_jiffies(delay_ms));
+ }
+ } else if (event == GLINK_LOCAL_DISCONNECTED ||
+ event == GLINK_REMOTE_DISCONNECTED) {
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ tmp_ch_info->fully_opened = false;
+ /*
+ * If the state has changed to LOCAL_DISCONNECTED,
+ * the channel has been fully closed and can now be
+ * re-opened. If the handle value is -EBUSY, an earlier
+ * open request failed because the channel was in the
+ * process of closing. Requeue the work from the open
+ * request.
+ */
+ if (event == GLINK_LOCAL_DISCONNECTED &&
+ tmp_ch_info->handle == ERR_PTR(-EBUSY)) {
+ queue_delayed_work(glink_lbsrv_wq,
+ &tmp_ch_info->open_work,
+ msecs_to_jiffies(0));
+ }
+ if (event == GLINK_REMOTE_DISCONNECTED)
+ if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+ queue_delayed_work(
+ glink_lbsrv_wq,
+ &tmp_ch_info->close_work, 0);
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ }
+ }
+}
+
+bool glink_lpbsrv_rmt_rx_intent_req_cb(void *handle, const void *priv,
+ size_t sz)
+{
+ struct rmt_rx_intent_req_work_info *tmp_work_info;
+	struct ch_info *tmp_ch_info = (struct ch_info *)priv;
+
+ LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT to receive size[%zu]\n",
+ tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+ __func__, sz);
+
+ tmp_work_info = kmalloc(sizeof(struct rmt_rx_intent_req_work_info),
+ GFP_ATOMIC);
+ if (!tmp_work_info) {
+ LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+ tmp_ch_info->transport, tmp_ch_info->edge,
+ tmp_ch_info->name, __func__);
+ return false;
+ }
+ tmp_work_info->req_intent_size = sz;
+ tmp_work_info->work_ch_info = tmp_ch_info;
+
+ INIT_DELAYED_WORK(&tmp_work_info->work,
+ glink_lbsrv_rmt_rx_intent_req_worker);
+ queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+ return true;
+}
+
+void glink_lpbsrv_notify_rx_sigs(void *handle, const void *priv,
+ uint32_t old_sigs, uint32_t new_sigs)
+{
+	LBSRV_INFO("%s: old_sigs[0x%x] new_sigs[0x%x]\n",
+ __func__, old_sigs, new_sigs);
+ glink_sigs_set(handle, new_sigs);
+}
+
+static void glink_lbsrv_rx_worker(struct work_struct *work)
+{
+ struct delayed_work *rx_work = to_delayed_work(work);
+ struct rx_work_info *tmp_rx_work_info =
+ container_of(rx_work, struct rx_work_info, work);
+ struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+ struct req request_pkt;
+ int ret;
+
+ if (rx_ch_info->type == CTL) {
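+		/*
+		 * Copy the request out and complete the rx so the intent can
+		 * be requeued before the request is handled.
+		 */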
+ request_pkt = *((struct req *)tmp_rx_work_info->ptr);
+ glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr, false);
+ ret = glink_queue_rx_intent(rx_ch_info->handle, rx_ch_info,
+ sizeof(struct req));
+ LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+ rx_ch_info->transport, rx_ch_info->edge,
+ rx_ch_info->name, __func__,
+ sizeof(struct req), ret);
+ glink_lbsrv_handle_req(rx_ch_info, request_pkt);
+ } else {
+ ret = glink_lbsrv_handle_data(tmp_rx_work_info);
+ }
+ kfree(tmp_rx_work_info);
+}
+
+static void glink_lbsrv_open_worker(struct work_struct *work)
+{
+ struct delayed_work *open_work = to_delayed_work(work);
+ struct ch_info *tmp_ch_info =
+ container_of(open_work, struct ch_info, open_work);
+ struct glink_open_config open_cfg;
+
+	LBSRV_INFO("%s: open worker\n", __func__);
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ return;
+ }
+
+ memset(&open_cfg, 0, sizeof(struct glink_open_config));
+ open_cfg.transport = tmp_ch_info->transport;
+ open_cfg.edge = tmp_ch_info->edge;
+ open_cfg.name = tmp_ch_info->name;
+
+ open_cfg.notify_rx = glink_lpbsrv_notify_rx;
+ if (tmp_ch_info->type == DATA)
+ open_cfg.notify_rxv = glink_lpbsrv_notify_rxv;
+ open_cfg.notify_tx_done = glink_lpbsrv_notify_tx_done;
+ open_cfg.notify_state = glink_lpbsrv_notify_state;
+ open_cfg.notify_rx_intent_req = glink_lpbsrv_rmt_rx_intent_req_cb;
+ open_cfg.notify_rx_sigs = glink_lpbsrv_notify_rx_sigs;
+ open_cfg.notify_rx_abort = NULL;
+ open_cfg.notify_tx_abort = NULL;
+ open_cfg.notify_rx_tracer_pkt = glink_lpbsrv_notify_rx_tp;
+ open_cfg.priv = tmp_ch_info;
+
+ tmp_ch_info->handle = glink_open(&open_cfg);
+ if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+ LBSRV_ERR("%s:%s:%s %s: unable to open channel\n",
+ open_cfg.transport, open_cfg.edge, open_cfg.name,
+ __func__);
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ return;
+ }
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ LBSRV_INFO("%s:%s:%s %s: Open complete\n", open_cfg.transport,
+ open_cfg.edge, open_cfg.name, __func__);
+}
+
+static void glink_lbsrv_close_worker(struct work_struct *work)
+{
+ struct delayed_work *close_work = to_delayed_work(work);
+ struct ch_info *tmp_ch_info =
+ container_of(close_work, struct ch_info, close_work);
+
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+ glink_close(tmp_ch_info->handle);
+ tmp_ch_info->handle = NULL;
+ }
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ LBSRV_INFO("%s:%s:%s %s: Close complete\n", tmp_ch_info->transport,
+ tmp_ch_info->edge, tmp_ch_info->name, __func__);
+}
+
+static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work)
+{
+ struct delayed_work *rmt_rx_intent_req_work = to_delayed_work(work);
+ struct rmt_rx_intent_req_work_info *tmp_work_info =
+ container_of(rmt_rx_intent_req_work,
+ struct rmt_rx_intent_req_work_info, work);
+ struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
+ int ret;
+
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ LBSRV_ERR("%s:%s:%s %s: Invalid CH handle\n",
+ tmp_ch_info->transport,
+ tmp_ch_info->edge,
+ tmp_ch_info->name, __func__);
+ kfree(tmp_work_info);
+ return;
+ }
+ ret = glink_queue_rx_intent(tmp_ch_info->handle,
+ (void *)tmp_ch_info, tmp_work_info->req_intent_size);
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+ tmp_ch_info->transport, tmp_ch_info->edge,
+ tmp_ch_info->name, __func__, tmp_work_info->req_intent_size,
+ ret);
+ if (ret < 0) {
+ LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %zu\n",
+ tmp_ch_info->transport, tmp_ch_info->edge,
+ tmp_ch_info->name, __func__, ret,
+ tmp_work_info->req_intent_size);
+ }
+ kfree(tmp_work_info);
+}
+
+static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work)
+{
+ struct delayed_work *queue_rx_intent_work = to_delayed_work(work);
+ struct queue_rx_intent_work_info *tmp_work_info =
+ container_of(queue_rx_intent_work,
+ struct queue_rx_intent_work_info, work);
+ struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
+ int ret;
+ uint32_t delay_ms;
+
+ while (1) {
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ return;
+ }
+
+ ret = glink_queue_rx_intent(tmp_ch_info->handle,
+ (void *)tmp_ch_info, tmp_work_info->intent_size);
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ if (ret < 0) {
+ LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %d\n",
+ tmp_ch_info->transport, tmp_ch_info->edge,
+ tmp_ch_info->name, __func__, ret,
+ tmp_work_info->intent_size);
+ kfree(tmp_work_info);
+ return;
+ }
+ LBSRV_INFO("%s:%s:%s %s: Queued rx intent of size %d\n",
+ tmp_ch_info->transport, tmp_ch_info->edge,
+ tmp_ch_info->name, __func__,
+ tmp_work_info->intent_size);
+ tmp_work_info->num_intents--;
+ if (!tmp_work_info->num_intents)
+ break;
+
+ delay_ms = calc_delay_ms(tmp_work_info->random_delay,
+ tmp_work_info->delay_ms);
+ if (delay_ms) {
+ queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+ msecs_to_jiffies(delay_ms));
+ return;
+ }
+ }
+ LBSRV_INFO("%s:%s:%s %s: Queued all intents. size:%d\n",
+ tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+ __func__, tmp_work_info->intent_size);
+
+ if (!tmp_work_info->deferred && !tmp_work_info->random_delay &&
+ !tmp_work_info->delay_ms)
+ glink_lbsrv_send_response(tmp_work_info->req_ch_info->handle,
+ tmp_work_info->req_id, QUEUE_RX_INTENT_CONFIG,
+ 0);
+ kfree(tmp_work_info);
+}
+
+static void glink_lbsrv_rx_done_worker(struct work_struct *work)
+{
+ struct delayed_work *rx_done_work = to_delayed_work(work);
+ struct rx_done_work_info *tmp_work_info =
+ container_of(rx_done_work, struct rx_done_work_info, work);
+ struct ch_info *tmp_ch_info = tmp_work_info->rx_done_ch_info;
+
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+ glink_rx_done(tmp_ch_info->handle, tmp_work_info->ptr, false);
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ kfree(tmp_work_info);
+}
+
+static void glink_lbsrv_tx_worker(struct work_struct *work)
+{
+ struct delayed_work *tx_work = to_delayed_work(work);
+ struct tx_work_info *tmp_work_info =
+ container_of(tx_work, struct tx_work_info, work);
+ struct ch_info *tmp_ch_info = tmp_work_info->tx_ch_info;
+ int ret;
+ uint32_t delay_ms;
+ uint32_t flags;
+
+ LBSRV_INFO("%s:%s:%s %s: start TX data[%p] size[%zu]\n",
+ tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+ __func__, tmp_work_info->data, tmp_work_info->size);
+ while (1) {
+ mutex_lock(&tmp_ch_info->ch_info_lock);
+ if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ return;
+ }
+
+ flags = 0;
+ if (tmp_work_info->tracer_pkt) {
+ flags |= GLINK_TX_TRACER_PKT;
+ tracer_pkt_log_event(tmp_work_info->data,
+ LOOPBACK_SRV_TX);
+ }
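+		/*
+		 * pkt_priv tells glink_lpbsrv_notify_tx_done() how to free
+		 * the data: the 0xFFFFFFFF sentinel defers freeing while more
+		 * echoes remain, otherwise it carries the buffer type.
+		 */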
+ if (tmp_work_info->buf_type == LINEAR)
+ ret = glink_tx(tmp_ch_info->handle,
+ (tmp_work_info->tx_config.echo_count > 1 ?
+ (void *)0xFFFFFFFF :
+ (void *)(uintptr_t)
+ tmp_work_info->buf_type),
+ (void *)tmp_work_info->data,
+ tmp_work_info->size, flags);
+ else
+ ret = glink_txv(tmp_ch_info->handle,
+ (tmp_work_info->tx_config.echo_count > 1 ?
+ (void *)0xFFFFFFFF :
+ (void *)(uintptr_t)
+ tmp_work_info->buf_type),
+ (void *)tmp_work_info->data,
+ tmp_work_info->size,
+ tmp_work_info->vbuf_provider,
+ tmp_work_info->pbuf_provider,
+ flags);
+ mutex_unlock(&tmp_ch_info->ch_info_lock);
+ if (ret < 0 && ret != -EAGAIN) {
+ LBSRV_ERR("%s:%s:%s %s: TX Error %d\n",
+ tmp_ch_info->transport,
+ tmp_ch_info->edge,
+ tmp_ch_info->name, __func__, ret);
+ glink_lbsrv_free_data(tmp_work_info->data,
+ tmp_work_info->buf_type);
+ kfree(tmp_work_info);
+ return;
+ }
+ if (ret != -EAGAIN)
+ tmp_work_info->tx_config.echo_count--;
+ if (!tmp_work_info->tx_config.echo_count)
+ break;
+
+ delay_ms = calc_delay_ms(tmp_work_info->tx_config.random_delay,
+ tmp_work_info->tx_config.delay_ms);
+ if (delay_ms) {
+ queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+ msecs_to_jiffies(delay_ms));
+ return;
+ }
+ }
+ kfree(tmp_work_info);
+}
+
+/**
+ * glink_lbsrv_link_state_worker() - Function to handle link state updates
+ * work: Pointer to the work item in the link_state_work_info.
+ *
+ * This worker function is scheduled when there is a link state update. Since
+ * the loopback server registers for all transports, it receives all link state
+ * updates about all transports that get registered in the system.
+ */
+static void glink_lbsrv_link_state_worker(struct work_struct *work)
+{
+ struct delayed_work *ls_work = to_delayed_work(work);
+ struct link_state_work_info *ls_info =
+ container_of(ls_work, struct link_state_work_info, work);
+ struct ch_info *tmp_ch_info;
+
+ if (ls_info->link_state == GLINK_LINK_STATE_UP) {
+ LBSRV_INFO("%s: LINK_STATE_UP %s:%s\n",
+ __func__, ls_info->edge, ls_info->transport);
+ mutex_lock(&ctl_ch_list_lock);
+ list_for_each_entry(tmp_ch_info, &ctl_ch_list, list) {
+ if (strcmp(tmp_ch_info->edge, ls_info->edge) ||
+ strcmp(tmp_ch_info->transport, ls_info->transport))
+ continue;
+ queue_delayed_work(glink_lbsrv_wq,
+ &tmp_ch_info->open_work, 0);
+ }
+ mutex_unlock(&ctl_ch_list_lock);
+ } else if (ls_info->link_state == GLINK_LINK_STATE_DOWN) {
+ LBSRV_INFO("%s: LINK_STATE_DOWN %s:%s\n",
+ __func__, ls_info->edge, ls_info->transport);
+
+ }
+ kfree(ls_info);
+}
+
+/**
+ * glink_lbsrv_link_state_cb() - Callback to receive link state updates
+ * cb_info: Information containing link & its state.
+ * priv: Private data passed during the link state registration.
+ *
+ * This function is called by the GLINK core to notify the loopback server
+ * regarding the link state updates. This function is registered with the
+ * GLINK core by the loopback server during glink_register_link_state_cb().
+ */
+static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
+ void *priv)
+{
+ struct link_state_work_info *ls_info;
+
+ if (!cb_info)
+ return;
+
+ LBSRV_INFO("%s: %s:%s\n", __func__, cb_info->edge, cb_info->transport);
+ ls_info = kmalloc(sizeof(*ls_info), GFP_KERNEL);
+ if (!ls_info) {
+ LBSRV_ERR("%s: Error allocating link state info\n", __func__);
+ return;
+ }
+
+ strlcpy(ls_info->edge, cb_info->edge, GLINK_NAME_SIZE);
+ strlcpy(ls_info->transport, cb_info->transport, GLINK_NAME_SIZE);
+ ls_info->link_state = cb_info->link_state;
+ INIT_DELAYED_WORK(&ls_info->work, glink_lbsrv_link_state_worker);
+ queue_delayed_work(glink_lbsrv_wq, &ls_info->work, 0);
+}
+
+static int glink_loopback_server_init(void)
+{
+ int i;
+ int ret;
+ struct ch_info *tmp_ch_info;
+
+ glink_lbsrv_log_ctx = ipc_log_context_create(GLINK_LBSRV_NUM_LOG_PAGES,
+ "glink_lbsrv", 0);
+ if (!glink_lbsrv_log_ctx)
+ pr_err("%s: unable to create log context\n", __func__);
+
+ glink_lbsrv_wq = create_singlethread_workqueue("glink_lbsrv");
+ if (!glink_lbsrv_wq) {
+ LBSRV_ERR("%s: Error creating glink_lbsrv_wq\n", __func__);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctl_ch_tbl); i++) {
+ ret = create_ch_info(ctl_ch_tbl[i].name, ctl_ch_tbl[i].edge,
+ ctl_ch_tbl[i].transport, CTL,
+ &tmp_ch_info);
+ if (ret < 0) {
+ LBSRV_ERR("%s: Error creating ctl ch index %d\n",
+ __func__, i);
+ continue;
+ }
+ }
+ glink_lbsrv_link_state_notif_handle = glink_register_link_state_cb(
+ &glink_lbsrv_link_info, NULL);
+ return 0;
+}
+
+module_init(glink_loopback_server_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) Loopback Server");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
new file mode 100644
index 000000000000..bc7e8bc1fd6f
--- /dev/null
+++ b/drivers/soc/qcom/glink_private.h
@@ -0,0 +1,1009 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_PRIVATE_H_
+#define _SOC_QCOM_GLINK_PRIVATE_H_
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <soc/qcom/glink.h>
+
+struct glink_core_xprt_ctx;
+struct channel_ctx;
+enum transport_state_e;
+enum local_channel_state_e;
+
+/* Logging Macros */
+enum {
+ QCOM_GLINK_INFO = 1U << 0,
+ QCOM_GLINK_DEBUG = 1U << 1,
+ QCOM_GLINK_GPIO = 1U << 2,
+ QCOM_GLINK_PERF = 1U << 3,
+};
+
+enum glink_dbgfs_ss {
+ GLINK_DBGFS_MPSS,
+ GLINK_DBGFS_APSS,
+ GLINK_DBGFS_LPASS,
+ GLINK_DBGFS_DSPS,
+ GLINK_DBGFS_RPM,
+ GLINK_DBGFS_WCNSS,
+ GLINK_DBGFS_LLOOP,
+ GLINK_DBGFS_MOCK,
+ GLINK_DBGFS_MAX_NUM_SUBS
+};
+
+enum glink_dbgfs_xprt {
+ GLINK_DBGFS_SMEM,
+ GLINK_DBGFS_SMD,
+ GLINK_DBGFS_XLLOOP,
+ GLINK_DBGFS_XMOCK,
+ GLINK_DBGFS_XMOCK_LOW,
+ GLINK_DBGFS_XMOCK_HIGH,
+ GLINK_DBGFS_MAX_NUM_XPRTS
+};
+
+struct glink_dbgfs {
+ const char *curr_name;
+ const char *par_name;
+ bool b_dir_create;
+};
+
+struct glink_dbgfs_data {
+ struct list_head flist;
+ struct dentry *dent;
+ void (*o_func)(struct seq_file *s);
+ void *priv_data;
+ bool b_priv_free_req;
+};
+
+struct xprt_ctx_iterator {
+ struct list_head *xprt_list;
+ struct glink_core_xprt_ctx *i_curr;
+ unsigned long xprt_list_flags;
+};
+
+struct ch_ctx_iterator {
+ struct list_head *ch_list;
+ struct channel_ctx *i_curr;
+ unsigned long ch_list_flags;
+};
+
+struct glink_ch_intent_info {
+ spinlock_t *li_lst_lock;
+ struct list_head *li_avail_list;
+ struct list_head *li_used_list;
+ spinlock_t *ri_lst_lock;
+ struct list_head *ri_list;
+};
+
+/* Tracer Packet Event IDs for G-Link */
+enum glink_tracer_pkt_events {
+ GLINK_CORE_TX = 1,
+ GLINK_QUEUE_TO_SCHEDULER = 2,
+ GLINK_SCHEDULER_TX = 3,
+ GLINK_XPRT_TX = 4,
+ GLINK_XPRT_RX = 5,
+ GLINK_CORE_RX = 6,
+};
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum value
+ * @enum_id: enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum value
+ * @enum_id: enum id of a specific transport.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport state based on enum value
+ * @enum_id: enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(enum transport_state_e enum_id);
+
+/**
+ * glink_get_ch_state_string() - get the name of the channel state based on enum value
+ * @enum_id: enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(enum local_channel_state_e enum_id);
+
+#define GLINK_IPC_LOG_STR(x...) do { \
+ if (glink_get_log_ctx()) \
+ ipc_log_string(glink_get_log_ctx(), x); \
+} while (0)
+
+#define GLINK_DBG(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_IPC_LOG_STR(x); \
+} while (0)
+
+#define GLINK_INFO(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_IPC_LOG_STR(x); \
+} while (0)
+
+#define GLINK_INFO_PERF(x...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_IPC_LOG_STR(x); \
+} while (0)
+
+#define GLINK_PERF(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_IPC_LOG_STR("<PERF> " x); \
+} while (0)
+
+#define GLINK_UT_ERR(x...) do { \
+ if (!(glink_get_debug_mask() & QCOM_GLINK_PERF)) \
+ pr_err("<UT> " x); \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_DBG(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_INFO(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_INFO_PERF(x...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_PERF(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_IPC_LOG_STR("<PERF> " x); \
+} while (0)
+
+#define GLINK_PERF_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_IPC_LOG_STR("<PERF> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_PERF_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_IPC_LOG_STR("<PERF> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_IPC_LOG_STR("<PERF> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_PERF_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_INFO_PERF_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_INFO_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_DBG_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_DBG_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_DBG_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_IPC_LOG_STR("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_ERR(x...) do { \
+ pr_err_ratelimited("<CORE> " x); \
+ GLINK_IPC_LOG_STR("<CORE> " x); \
+} while (0)
+
+#define GLINK_ERR_XPRT(xprt, fmt, args...) do { \
+ pr_err_ratelimited("<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+ GLINK_INFO_XPRT(xprt, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH(ctx, fmt, args...) do { \
+ pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+ GLINK_INFO_CH(ctx, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+ GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args); \
+} while (0)
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ *
+ * Return: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+ (((type)~0 - (a)) < (b) ? true : false)
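+
+/*
+ * For example, OVERFLOW_ADD_UNSIGNED(uint16_t, 0xFFF0, 0x20) evaluates to
+ * true, since 0xFFF0 + 0x20 does not fit in 16 bits.
+ */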
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned glink_get_debug_mask(void);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void);
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle);
+
+/**
+ * glink_debugfs_init() - initialize glink debugfs directory
+ *
+ * Return: error code or success.
+ */
+int glink_debugfs_init(void);
+
+/**
+ * glink_debugfs_exit() - removes glink debugfs directory
+ */
+void glink_debugfs_exit(void);
+
+/**
+ * glink_debugfs_create() - create the debugfs file
+ * @name: debugfs file name
+ * @show: pointer to the actual function which will be invoked upon
+ * opening this file.
+ * @dir: pointer to a structure debugfs_dir
+ * @dbgfs_data: pointer to any private data that needs to be associated with debugfs
+ * @b_free_req: boolean value to decide to free the memory associated with
+ * @dbgfs_data during deletion of the file
+ *
+ * Return: pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs file
+ * and calls glink_dfs_create_file. Callers who allocate memory for
+ * @dbgfs_data and need it freed when the file is deleted must set
+ * @b_free_req to true; otherwise, the memory will leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+ void (*show)(struct seq_file *),
+ struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req);
+
+/**
+ * glink_debugfs_remove_recur() - remove the directory & files recursively
+ * @dfs: pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories. This also takes care of
+ * freeing any memory associated with the debugfs file.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs);
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel specific files & folders
+ * in debugfs when channel is fully closed
+ * @ch_ctx: pointer to the channel context
+ * @xprt_ctx: pointer to the transport_context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+ struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_channel() - create channel specific files & folders in
+ * debugfs when channel is added
+ * @ch_ctx: pointer to the channel context
+ * @xprt_ctx: pointer to the transport_context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+ struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_xprt() - create transport specific files & folders in
+ * debugfs when a new transport is registered
+ * @xprt_ctx: pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list iterator
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: None
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: None
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_next() - get the next element in the transport context list
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: pointer to the transport context structure
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+ struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the transport
+ */
+char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ * of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ * of local transport in glink
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: pointer to the glink_core_version
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+ struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter: pointer to the channel context iterator.
+ * @xprt: pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter: pointer to the channel context iterator.
+ * @xprt: pointer to the transport context that holds the channel list
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_next() - get the next element in the channel context list
+ * @ch_iter: pointer to the channel context iterator.
+ *
+ * Return: pointer to the channel context structure
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *ch_iter);
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_edge_name() - get the name of the remote processor/edge
+ * of the channel
+ * @xprt_ctx: pointer to the channel context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lcid() - get the local channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: local channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: true if remote side is opened false otherwise
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ * the channel belongs
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_tx_pkt_count() - get the total number of packets sent
+ * through this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of packets transmitted, -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ * received at this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of packets received, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ * at local side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ * from remote side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of intents queued
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx: pointer to the channel context.
+ * @ch_ctx_i: pointer to a structure that will contain intent details
+ *
+ * This function is used to get all the channel intent details including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+ struct glink_ch_intent_info *ch_ctx_i);
+
+/**
+ * enum ssr_command - G-Link SSR protocol commands
+ */
+enum ssr_command {
+ GLINK_SSR_DO_CLEANUP,
+ GLINK_SSR_CLEANUP_DONE,
+};
+
+/**
+ * struct ssr_notify_data - Contains private data used for client notifications
+ * from G-Link.
+ * tx_done: Indicates whether or not the tx_done notification has
+ * been received.
+ * event: The state notification event received.
+ * responded: Indicates whether or not a cleanup_done response was
+ * received.
+ * edge: The G-Link edge name for the channel associated with
+ * this callback data
+ * do_cleanup_data: Structure containing the G-Link SSR do_cleanup message.
+ */
+struct ssr_notify_data {
+ bool tx_done;
+ unsigned event;
+ bool responded;
+ const char *edge;
+ struct do_cleanup_msg *do_cleanup_data;
+};
+
+/**
+ * struct subsys_info - Subsystem info structure
+ * ssr_name: name of the subsystem recognized by the SSR framework
+ * edge: name of the G-Link edge
+ * xprt: name of the G-Link transport
+ * handle: glink_ssr channel used for this subsystem
+ * link_state_handle: link state handle for this edge, used to unregister
+ * from receiving link state callbacks
+ * link_info: Transport info used in link state callback registration
+ * cb_data: Private callback data structure for notification
+ * functions
+ * subsystem_list_node: used to chain this structure in a list of subsystem
+ * info structures
+ * notify_list: list of subsys_info_leaf structures, containing the
+ * subsystems to notify if this subsystem undergoes SSR
+ * notify_list_len: length of notify_list
+ * link_up: Flag indicating whether transport is up or not
+ * link_up_lock: Lock for protecting the link_up flag
+ */
+struct subsys_info {
+ const char *ssr_name;
+ const char *edge;
+ const char *xprt;
+ void *handle;
+ void *link_state_handle;
+ struct glink_link_info *link_info;
+ struct ssr_notify_data *cb_data;
+ struct list_head subsystem_list_node;
+ struct list_head notify_list;
+ int notify_list_len;
+ bool link_up;
+ spinlock_t link_up_lock;
+};
+
+/**
+ * struct subsys_info_leaf - Subsystem info leaf structure (a subsystem on the
+ * notify list of a subsys_info structure)
+ * ssr_name: Name of the subsystem recognized by the SSR framework
+ * edge: Name of the G-Link edge
+ * xprt: Name of the G-Link transport
+ * restarted: Indicates whether a restart has been triggered for this edge
+ * cb_data: Private callback data structure for notification functions
+ * notify_list_node: used to chain this structure in the notify list
+ */
+struct subsys_info_leaf {
+ const char *ssr_name;
+ const char *edge;
+ const char *xprt;
+ bool restarted;
+ struct ssr_notify_data *cb_data;
+ struct list_head notify_list_node;
+};
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * version: The G-Link SSR protocol version
+ * command: The G-Link SSR command - do_cleanup
+ * seq_num: Sequence number
+ * name_len: Length of the name of the subsystem being restarted
+ * name: G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+ uint32_t version;
+ uint32_t command;
+ uint32_t seq_num;
+ uint32_t name_len;
+ char name[32];
+};
+
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * version: The G-Link SSR protocol version
+ * response: The G-Link SSR response to a do_cleanup command, cleanup_done
+ * seq_num: Sequence number
+ */
+struct cleanup_done_msg {
+ uint32_t version;
+ uint32_t response;
+ uint32_t seq_num;
+};
+
+/**
+ * get_info_for_subsystem() - Retrieve information about a subsystem from the
+ * global subsystem_info_list
+ * @subsystem: The name of the subsystem recognized by the SSR
+ * framework
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ * NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_subsystem(const char *subsystem);
+
+/**
+ * get_info_for_edge() - Retrieve information about a subsystem from the
+ * global subsystem_info_list
+ * @edge: The name of the edge recognized by G-Link
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ * NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_edge(const char *edge);
+
+/**
+ * glink_ssr_get_seq_num() - Get the current SSR sequence number
+ *
+ * Return: The current SSR sequence number
+ */
+uint32_t glink_ssr_get_seq_num(void);
+
+/**
+ * glink_ssr() - SSR cleanup function.
+ * @subsystem: The name of the subsystem being restarted
+ *
+ * Return: Standard error code.
+ */
+int glink_ssr(const char *subsystem);
+
+/**
+ * notify_for_subsystem() - Notify other subsystems that a subsystem is being
+ * restarted
+ * @ss_info: Subsystem info structure for the subsystem being restarted
+ *
+ * This function sends notifications to affected subsystems that the subsystem
+ * in ss_info is being restarted, and waits for the cleanup done response from
+ * all of those subsystems. It also initiates any local cleanup that is
+ * necessary.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+int notify_for_subsystem(struct subsys_info *ss_info);
+
+/**
+ * glink_ssr_wait_cleanup_done() - Wait for the cleanup_done response from all
+ * remote processors
+ * @ssr_timeout_multiplier: timeout multiplier for waiting on all processors
+ *
+ * Return: True if cleanup_done received from all processors, false otherwise
+ */
+bool glink_ssr_wait_cleanup_done(unsigned ssr_timeout_multiplier);
+
+struct channel_lcid {
+ struct list_head list_node;
+ uint32_t lcid;
+};
+
+/**
+ * struct rwref_lock - Read/Write Reference Lock
+ *
+ * kref: reference count
+ * read_count: number of readers that own the lock
+ * write_count: number of writers (max 1) that own the lock
+ * count_zero: used for internal signaling for non-atomic locks
+ *
+ * A Read/Write Reference Lock is a combination of a read/write spinlock and a
+ * reference count. The main difference is that no locks are held in the
+ * critical section and the lifetime of the object is guaranteed.
+ *
+ * Read Locking
+ * Multiple readers may access the lock at any given time and a read lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_read_get()
+ * use resource in "critical section" (no locks are held)
+ * rwref_read_put()
+ *
+ * Write Locking
+ * A single writer may access the lock at any given time and a write lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_write_get()
+ * use resource in "critical section" (no locks are held)
+ * rwref_write_put()
+ *
+ * Reference Lock
+ * To ensure the lifetime of the lock (and not affect the read or write lock),
+ * a simple reference can be done. By default, rwref_lock_init() will set the
+ * reference count to 1.
+ *
+ * rwref_lock_init() Reference count is 1
+ * rwref_get() Reference count is 2
+ * rwref_put() Reference count is 1
+ * rwref_put() Reference count goes to 0 and object is destroyed
+ */
+struct rwref_lock {
+ struct kref kref;
+ unsigned read_count;
+ unsigned write_count;
+ spinlock_t lock;
+ struct completion count_zero;
+
+ void (*release)(struct rwref_lock *);
+};
+
+/**
+ * rwref_lock_release() - kref release callback for an rwref_lock
+ * kref_ptr: pointer to the kref embedded in the lock structure
+ */
+static inline void rwref_lock_release(struct kref *kref_ptr)
+{
+ struct rwref_lock *lock_ptr;
+
+ BUG_ON(kref_ptr == NULL);
+
+ lock_ptr = container_of(kref_ptr, struct rwref_lock, kref);
+ if (lock_ptr->release)
+ lock_ptr->release(lock_ptr);
+}
+
+/**
+ * rwref_lock_init() - Initialize rwref_lock
+ * lock_ptr: pointer to lock structure
+ * release: release function called when reference count goes to 0
+ */
+static inline void rwref_lock_init(struct rwref_lock *lock_ptr,
+ void (*release)(struct rwref_lock *))
+{
+ BUG_ON(lock_ptr == NULL);
+
+ kref_init(&lock_ptr->kref);
+ lock_ptr->read_count = 0;
+ lock_ptr->write_count = 0;
+ spin_lock_init(&lock_ptr->lock);
+ init_completion(&lock_ptr->count_zero);
+ lock_ptr->release = release;
+}
+
+/**
+ * rwref_get() - gains a reference count for the object
+ * lock_ptr: pointer to lock structure
+ */
+static inline void rwref_get(struct rwref_lock *lock_ptr)
+{
+ BUG_ON(lock_ptr == NULL);
+
+ kref_get(&lock_ptr->kref);
+}
+
+/**
+ * rwref_put() - puts a reference count for the object
+ * lock_ptr: pointer to lock structure
+ *
+ * If the reference count goes to zero, the release function is called.
+ */
+static inline void rwref_put(struct rwref_lock *lock_ptr)
+{
+ BUG_ON(lock_ptr == NULL);
+
+ kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_read_get() - gains a reference count for a read operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Multiple readers may acquire the lock as long as the write count is zero.
+ */
+static inline void rwref_read_get(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ kref_get(&lock_ptr->kref);
+ while (1) {
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ if (lock_ptr->write_count == 0) {
+ lock_ptr->read_count++;
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ wait_for_completion(&lock_ptr->count_zero);
+ }
+}
+
+/**
+ * rwref_read_put() - returns a reference count for a read operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_read_get().
+ */
+static inline void rwref_read_put(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ BUG_ON(lock_ptr->read_count == 0);
+ if (--lock_ptr->read_count == 0)
+ complete(&lock_ptr->count_zero);
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_write_get() - gains a reference count for a write operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Only one writer may acquire the lock as long as the reader count is zero.
+ */
+static inline void rwref_write_get(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ kref_get(&lock_ptr->kref);
+ while (1) {
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ if (lock_ptr->read_count == 0 && lock_ptr->write_count == 0) {
+ lock_ptr->write_count++;
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ wait_for_completion(&lock_ptr->count_zero);
+ }
+}
+
+/**
+ * rwref_write_put() - returns a reference count for a write operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_write_get().
+ */
+static inline void rwref_write_put(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ BUG_ON(lock_ptr->write_count != 1);
+ if (--lock_ptr->write_count == 0)
+ complete(&lock_ptr->count_zero);
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ kref_put(&lock_ptr->kref, rwref_lock_release);
+}
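+
+/*
+ * Illustrative usage sketch (hypothetical type, field, and callback names;
+ * not part of this API):
+ *
+ *	struct foo {
+ *		struct rwref_lock rwref;
+ *		int value;
+ *	};
+ *
+ *	static void foo_release(struct rwref_lock *lock)
+ *	{
+ *		kfree(container_of(lock, struct foo, rwref));
+ *	}
+ *
+ *	With struct foo *f allocated via kzalloc():
+ *
+ *	rwref_lock_init(&f->rwref, foo_release);  reference count is 1
+ *	rwref_read_get(&f->rwref);                f is pinned for reading
+ *	...read f->value with no locks held...
+ *	rwref_read_put(&f->rwref);
+ *	rwref_put(&f->rwref);                     count hits 0, f is freed
+ */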
+
+#endif /* _SOC_QCOM_GLINK_PRIVATE_H_ */
diff --git a/drivers/soc/qcom/glink_smd_xprt.c b/drivers/soc/qcom/glink_smd_xprt.c
new file mode 100644
index 000000000000..b94f7f2e2a91
--- /dev/null
+++ b/drivers/soc/qcom/glink_smd_xprt.c
@@ -0,0 +1,1942 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/termios.h>
+#include <linux/workqueue.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define NUM_EDGES 5
+#define XPRT_NAME "smd_trans"
+#define SMD_DTR_SIG BIT(31)
+#define SMD_CTS_SIG BIT(30)
+#define SMD_CD_SIG BIT(29)
+#define SMD_RI_SIG BIT(28)
+
+/**
+ * enum command_types - commands sent/received from the remote system
+ * @CMD_OPEN: Channel open request
+ * @CMD_OPEN_ACK: Response to @CMD_OPEN
+ * @CMD_CLOSE: Channel close request
+ * @CMD_CLOSE_ACK: Response to @CMD_CLOSE
+ */
+enum command_types {
+ CMD_OPEN,
+ CMD_OPEN_ACK,
+ CMD_CLOSE,
+ CMD_CLOSE_ACK,
+};
+
+/*
+ * Max of 64 channels, the 128 offset puts the rcid out of the
+ * range the remote might use
+ */
+#define LEGACY_RCID_CHANNEL_OFFSET 128
+
+#define SMDXPRT_ERR(x...) GLINK_ERR("<SMDXPRT> " x)
+#define SMDXPRT_INFO(x...) GLINK_INFO("<SMDXPRT> " x)
+#define SMDXPRT_DBG(x...) GLINK_DBG("<SMDXPRT> " x)
+
+/**
+ * struct edge_info - local information for managing an edge
+ * @xprt_if: The transport interface registered with the glink code
+ * associated with this edge.
+ * @xprt_cfg: The transport configuration for the glink core
+ * associated with this edge.
+ * @smd_edge: The smd edge value corresponding to this edge.
+ * @channels: A list of all the channels that currently exist on this
+ * edge.
+ * @channels_lock: Protects @channels "reads" from "writes".
+ * @intentless: Flag indicating this edge is intentless.
+ * @irq_disabled: Flag indicating whether interrupt is enabled or
+ * disabled.
+ * @ssr_sync: Synchronizes SSR with any ongoing activity that might
+ * conflict.
+ * @in_ssr: Prevents new activity that might conflict with an active
+ * SSR.
+ * @ssr_work: Ends SSR processing after giving SMD a chance to wrap up
+ * SSR.
+ * @smd_ch: Private SMD channel for channel migration.
+ * @smd_lock: Serializes write access to @smd_ch.
+ * @in_ssr_lock: Lock to protect the @in_ssr.
+ * @smd_ctl_ch_open: Indicates that @smd_ch is fully open.
+ * @work: Work item for processing migration data.
+ *
+ * Each transport registered with the core is represented by a single instance
+ * of this structure which allows for complete management of the transport.
+ */
+struct edge_info {
+ struct glink_transport_if xprt_if;
+ struct glink_core_transport_cfg xprt_cfg;
+ uint32_t smd_edge;
+ struct list_head channels;
+ spinlock_t channels_lock;
+ bool intentless;
+ bool irq_disabled;
+ struct srcu_struct ssr_sync;
+ bool in_ssr;
+ struct delayed_work ssr_work;
+ smd_channel_t *smd_ch;
+ struct mutex smd_lock;
+ struct mutex in_ssr_lock;
+ bool smd_ctl_ch_open;
+ struct work_struct work;
+};
+
+/**
+ * struct channel - local information for managing a channel
+ * @node: For chaining this channel on list for its edge.
+ * @name: The name of this channel.
+ * @lcid: The local channel id the core uses for this channel.
+ * @rcid: The true remote channel id for this channel.
+ * @wait_for_probe: This channel is waiting for a probe from SMD.
+ * @had_probed: This channel probed in the past and may skip probe.
+ * @edge: Handle to the edge_info this channel is associated with.
+ * @smd_ch: Handle to the underlying smd channel.
+ * @intents: List of active intents on this channel.
+ * @used_intents: List of consumed intents on this channel.
+ * @intents_lock: Lock to protect @intents and @used_intents.
+ * @next_intent_id: The next id to use for generated intents.
+ * @wq: Handle for running tasks.
+ * @work: Task to process received data.
+ * @cur_intent: The current intent for received data.
+ * @intent_req: Flag indicating if an intent has been requested for rx.
+ * @is_closing: Flag indicating this channel is currently in the closing
+ * state.
+ * @local_legacy: The local side of the channel is in legacy mode.
+ * @remote_legacy: The remote side of the channel is in legacy mode.
+ * @rx_data_lock: Used to serialize RX data processing.
+ * @streaming_ch: Indicates the underlying SMD channel is streaming type.
+ * @tx_resume_needed: Indicates whether a tx_resume call should be triggered.
+ */
+struct channel {
+ struct list_head node;
+ char name[GLINK_NAME_SIZE];
+ uint32_t lcid;
+ uint32_t rcid;
+ bool wait_for_probe;
+ bool had_probed;
+ struct edge_info *edge;
+ smd_channel_t *smd_ch;
+ struct list_head intents;
+ struct list_head used_intents;
+ spinlock_t intents_lock;
+ uint32_t next_intent_id;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ struct intent_info *cur_intent;
+ bool intent_req;
+ bool is_closing;
+ bool local_legacy;
+ bool remote_legacy;
+ spinlock_t rx_data_lock;
+ bool streaming_ch;
+ bool tx_resume_needed;
+};
+
+/**
+ * struct intent_info - information for managing an intent
+ * @node:	Used for putting this intent in a list for its channel.
+ * @liid:	The local intent id the core uses to identify this intent.
+ * @size: The size of the intent in bytes.
+ */
+struct intent_info {
+ struct list_head node;
+ uint32_t liid;
+ size_t size;
+};
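+
+/*
+ * Intent lifecycle on this transport, summarized from the handlers below:
+ * tx_cmd_local_rx_intent() queues an intent on channel->intents,
+ * process_data_event() claims it as cur_intent, fills it, and moves it to
+ * channel->used_intents, and tx_cmd_local_rx_done() then frees it or, when
+ * the core requests reuse, requeues it on channel->intents.
+ */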
+
+/**
+ * struct channel_work - a task to be processed for a specific channel
+ * @ch: The channel associated with this task.
+ * @iid: Intent id associated with this task, may not always be valid.
+ * @work: The task to be processed.
+ */
+struct channel_work {
+ struct channel *ch;
+ uint32_t iid;
+ struct work_struct work;
+};
+
+/**
+ * struct pdrvs - Tracks a platform driver and its use among channels
+ * @node: For tracking in the pdrv_list.
+ * @pdrv: The platform driver to track.
+ */
+struct pdrvs {
+ struct list_head node;
+ struct platform_driver pdrv;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features);
+
+static struct edge_info edge_infos[NUM_EDGES] = {
+ {
+ .xprt_cfg.edge = "dsps",
+ .smd_edge = SMD_APPS_DSPS,
+ },
+ {
+ .xprt_cfg.edge = "lpass",
+ .smd_edge = SMD_APPS_QDSP,
+ },
+ {
+ .xprt_cfg.edge = "mpss",
+ .smd_edge = SMD_APPS_MODEM,
+ },
+ {
+ .xprt_cfg.edge = "wcnss",
+ .smd_edge = SMD_APPS_WCNSS,
+ },
+ {
+ .xprt_cfg.edge = "rpm",
+ .smd_edge = SMD_APPS_RPM,
+ .intentless = true,
+ },
+};
+
+static struct glink_core_version versions[] = {
+ {1, 0x00, negotiate_features_v1},
+};
+
+static LIST_HEAD(pdrv_list);
+static DEFINE_MUTEX(pdrv_list_mutex);
+
+static void process_data_event(struct work_struct *work);
+static int add_platform_driver(struct channel *ch);
+static void smd_data_ch_close(struct channel *ch);
+
+/**
+ * check_write_avail() - Check if there is space to write on the smd channel,
+ * and enable the read interrupt if there is not.
+ * @check_fn:	The function to use to check if there is space to write
+ * @ch:		The channel to check
+ *
+ * After the read interrupt is enabled, availability is rechecked to close the
+ * race where space frees up between the first check and the interrupt enable.
+ *
+ * Return: The number of bytes available to write, 0 if there is no space, or
+ * standard Linux error codes.
+ */
+static int check_write_avail(int (*check_fn)(smd_channel_t *),
+ struct channel *ch)
+{
+ int rc = check_fn(ch->smd_ch);
+
+ if (rc == 0) {
+ ch->tx_resume_needed = true;
+ smd_enable_read_intr(ch->smd_ch);
+ rc = check_fn(ch->smd_ch);
+ if (rc > 0) {
+ ch->tx_resume_needed = false;
+ smd_disable_read_intr(ch->smd_ch);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * process_ctl_event() - process a control channel event task
+ * @work: The migration task to process.
+ */
+static void process_ctl_event(struct work_struct *work)
+{
+ struct command {
+ uint32_t cmd;
+ uint32_t id;
+ uint32_t priority;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ struct channel *ch;
+ struct channel *temp_ch;
+ int pkt_size;
+ int read_avail;
+ char name[GLINK_NAME_SIZE];
+ bool found;
+ unsigned long flags;
+
+ einfo = container_of(work, struct edge_info, work);
+
+ mutex_lock(&einfo->in_ssr_lock);
+ if (einfo->in_ssr) {
+ einfo->in_ssr = false;
+ einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+ }
+ mutex_unlock(&einfo->in_ssr_lock);
+
+ while (smd_read_avail(einfo->smd_ch)) {
+ found = false;
+ pkt_size = smd_cur_packet_size(einfo->smd_ch);
+ read_avail = smd_read_avail(einfo->smd_ch);
+
+ if (pkt_size != read_avail)
+ continue;
+
+ smd_read(einfo->smd_ch, &cmd, sizeof(cmd));
+ if (cmd.cmd == CMD_OPEN) {
+ smd_read(einfo->smd_ch, name, GLINK_NAME_SIZE);
+ SMDXPRT_INFO("%s RX OPEN '%s'\n", __func__, name);
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (!strcmp(name, ch->name)) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (!found) {
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ SMDXPRT_ERR("%s: ch alloc failed\n",
+ __func__);
+ continue;
+ }
+ strlcpy(ch->name, name, GLINK_NAME_SIZE);
+ ch->edge = einfo;
+ INIT_LIST_HEAD(&ch->intents);
+ INIT_LIST_HEAD(&ch->used_intents);
+ spin_lock_init(&ch->intents_lock);
+ spin_lock_init(&ch->rx_data_lock);
+ INIT_WORK(&ch->work, process_data_event);
+ ch->wq = create_singlethread_workqueue(
+ ch->name);
+ if (!ch->wq) {
+ SMDXPRT_ERR("%s: ch wq create failed\n",
+ __func__);
+ kfree(ch);
+ continue;
+ }
+
+				/*
+				 * The channel could have been added to the
+				 * list by someone else, so scan again.
+				 * Channel creation is non-atomic, so an
+				 * unlock and recheck is necessary.
+				 */
+ temp_ch = ch;
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node)
+ if (!strcmp(name, ch->name)) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ ch = temp_ch;
+ list_add_tail(&ch->node,
+ &einfo->channels);
+ spin_unlock_irqrestore(
+ &einfo->channels_lock, flags);
+ } else {
+ spin_unlock_irqrestore(
+ &einfo->channels_lock, flags);
+ destroy_workqueue(temp_ch->wq);
+ kfree(temp_ch);
+ }
+ }
+
+ if (ch->remote_legacy) {
+ SMDXPRT_DBG("%s SMD Remote Open '%s'\n",
+ __func__, name);
+ cmd.cmd = CMD_OPEN_ACK;
+ cmd.priority = SMD_TRANS_XPRT_ID;
+ mutex_lock(&einfo->smd_lock);
+ while (smd_write_avail(einfo->smd_ch) <
+ sizeof(cmd))
+ msleep(20);
+ smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+ mutex_unlock(&einfo->smd_lock);
+ continue;
+ } else {
+ SMDXPRT_DBG("%s G-Link Remote Open '%s'\n",
+ __func__, name);
+ }
+
+ ch->rcid = cmd.id;
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+ &einfo->xprt_if,
+ cmd.id,
+ name,
+ cmd.priority);
+ } else if (cmd.cmd == CMD_OPEN_ACK) {
+ SMDXPRT_INFO("%s RX OPEN ACK lcid %u; xprt_req %u\n",
+ __func__, cmd.id, cmd.priority);
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node)
+ if (cmd.id == ch->lcid) {
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ if (!found) {
+ GLINK_ERR("%s <SMDXPRT> No channel match %u\n",
+ __func__, cmd.id);
+ continue;
+ }
+
+ add_platform_driver(ch);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+ &einfo->xprt_if,
+ cmd.id,
+ cmd.priority);
+ } else if (cmd.cmd == CMD_CLOSE) {
+ SMDXPRT_INFO("%s RX REMOTE CLOSE rcid %u\n", __func__,
+ cmd.id);
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node)
+ if (cmd.id == ch->rcid) {
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (!found)
+ GLINK_ERR("%s <SMDXPRT> no matching rcid %u\n",
+ __func__, cmd.id);
+
+ if (found && !ch->remote_legacy) {
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_ch_remote_close(
+ &einfo->xprt_if,
+ cmd.id);
+ } else {
+ /* not found or a legacy channel */
+ SMDXPRT_INFO("%s Sim RX CLOSE ACK lcid %u\n",
+ __func__, cmd.id);
+ cmd.cmd = CMD_CLOSE_ACK;
+ mutex_lock(&einfo->smd_lock);
+ while (smd_write_avail(einfo->smd_ch) <
+ sizeof(cmd))
+ msleep(20);
+ smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+ mutex_unlock(&einfo->smd_lock);
+ }
+ } else if (cmd.cmd == CMD_CLOSE_ACK) {
+ int rcu_id;
+
+ SMDXPRT_INFO("%s RX CLOSE ACK lcid %u\n", __func__,
+ cmd.id);
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (cmd.id == ch->lcid) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ if (!found) {
+ GLINK_ERR("%s <SMDXPRT> LCID not found %u\n",
+ __func__, cmd.id);
+ continue;
+ }
+
+ rcu_id = srcu_read_lock(&einfo->ssr_sync);
+ smd_data_ch_close(ch);
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+ &einfo->xprt_if,
+ cmd.id);
+ }
+ }
+}
+
+/**
+ * ctl_ch_notify() - process an event from the smd channel for ch migration
+ * @priv: The edge the event occurred on.
+ * @event: The event to process
+ */
+static void ctl_ch_notify(void *priv, unsigned event)
+{
+ struct edge_info *einfo = priv;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ schedule_work(&einfo->work);
+ break;
+ case SMD_EVENT_OPEN:
+ einfo->smd_ctl_ch_open = true;
+ break;
+ case SMD_EVENT_CLOSE:
+ einfo->smd_ctl_ch_open = false;
+ break;
+ }
+}
+
+static int ctl_ch_probe(struct platform_device *pdev)
+{
+ int i;
+ struct edge_info *einfo;
+ int ret = 0;
+
+	for (i = 0; i < NUM_EDGES; ++i)
+		if (pdev->id == edge_infos[i].smd_edge)
+			break;
+
+	if (i == NUM_EDGES)
+		return -ENODEV;
+
+	einfo = &edge_infos[i];
+ ret = smd_named_open_on_edge("GLINK_CTRL", einfo->smd_edge,
+ &einfo->smd_ch, einfo, ctl_ch_notify);
+ if (ret != 0)
+ SMDXPRT_ERR("%s Opening failed %d for %d:'GLINK_CTRL'\n",
+ __func__, ret, einfo->smd_edge);
+ return ret;
+}
+
+/**
+ * ssr_work_func() - process the end of ssr
+ * @work: The ssr task to finish.
+ */
+static void ssr_work_func(struct work_struct *work)
+{
+ struct delayed_work *w;
+ struct edge_info *einfo;
+
+ w = container_of(work, struct delayed_work, work);
+ einfo = container_of(w, struct edge_info, ssr_work);
+
+ mutex_lock(&einfo->in_ssr_lock);
+ if (einfo->in_ssr) {
+ einfo->in_ssr = false;
+ einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+ }
+ mutex_unlock(&einfo->in_ssr_lock);
+}
+
+/**
+ * process_tx_done() - process a tx done task
+ * @work: The tx done task to process.
+ */
+static void process_tx_done(struct work_struct *work)
+{
+ struct channel_work *ch_work;
+ struct channel *ch;
+ struct edge_info *einfo;
+ uint32_t riid;
+
+ ch_work = container_of(work, struct channel_work, work);
+ ch = ch_work->ch;
+ riid = ch_work->iid;
+ einfo = ch->edge;
+ kfree(ch_work);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(&einfo->xprt_if,
+ ch->rcid,
+ riid,
+ false);
+}
+
+/**
+ * process_open_event() - process an open event task
+ * @work: The open task to process.
+ */
+static void process_open_event(struct work_struct *work)
+{
+ struct channel_work *ch_work;
+ struct channel *ch;
+ struct edge_info *einfo;
+ int ret;
+
+ ch_work = container_of(work, struct channel_work, work);
+ ch = ch_work->ch;
+ einfo = ch->edge;
+ /*
+ * The SMD client is supposed to already know its channel type, but we
+ * are just a translation layer, so we need to dynamically detect the
+ * channel type.
+ */
+ ret = smd_write_segment_avail(ch->smd_ch);
+ if (ret == -ENODEV)
+ ch->streaming_ch = true;
+ if (ch->remote_legacy || !ch->rcid) {
+ ch->remote_legacy = true;
+ ch->rcid = ch->lcid + LEGACY_RCID_CHANNEL_OFFSET;
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+ &einfo->xprt_if,
+ ch->rcid,
+ ch->name,
+ SMD_TRANS_XPRT_ID);
+ }
+ kfree(ch_work);
+}
+
+/**
+ * process_close_event() - process a close event task
+ * @work: The close task to process.
+ */
+static void process_close_event(struct work_struct *work)
+{
+ struct channel_work *ch_work;
+ struct channel *ch;
+ struct edge_info *einfo;
+
+ ch_work = container_of(work, struct channel_work, work);
+ ch = ch_work->ch;
+ einfo = ch->edge;
+ kfree(ch_work);
+ if (ch->remote_legacy)
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_close(
+ &einfo->xprt_if,
+ ch->rcid);
+ ch->rcid = 0;
+}
+
+/**
+ * process_status_event() - process a status event task
+ * @work: The status task to process.
+ */
+static void process_status_event(struct work_struct *work)
+{
+ struct channel_work *ch_work;
+ struct channel *ch;
+ struct edge_info *einfo;
+ uint32_t sigs = 0;
+ int set;
+
+ ch_work = container_of(work, struct channel_work, work);
+ ch = ch_work->ch;
+ einfo = ch->edge;
+ kfree(ch_work);
+
+ set = smd_tiocmget(ch->smd_ch);
+ if (set < 0)
+ return;
+
+ if (set & TIOCM_DTR)
+ sigs |= SMD_DTR_SIG;
+ if (set & TIOCM_RTS)
+ sigs |= SMD_CTS_SIG;
+ if (set & TIOCM_CD)
+ sigs |= SMD_CD_SIG;
+ if (set & TIOCM_RI)
+ sigs |= SMD_RI_SIG;
+
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(&einfo->xprt_if,
+ ch->rcid,
+ sigs);
+}
+
+/**
+ * process_reopen_event() - process a reopen ready event task
+ * @work: The reopen ready task to process.
+ */
+static void process_reopen_event(struct work_struct *work)
+{
+ struct channel_work *ch_work;
+ struct channel *ch;
+ struct edge_info *einfo;
+
+ ch_work = container_of(work, struct channel_work, work);
+ ch = ch_work->ch;
+ einfo = ch->edge;
+ kfree(ch_work);
+ if (ch->remote_legacy)
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_close(
+ &einfo->xprt_if,
+ ch->rcid);
+ if (ch->local_legacy)
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+ &einfo->xprt_if,
+ ch->lcid);
+}
+
+/**
+ * process_data_event() - process a data event task
+ * @work: The data task to process.
+ */
+static void process_data_event(struct work_struct *work)
+{
+ struct channel *ch;
+ struct edge_info *einfo;
+ struct glink_core_rx_intent *intent;
+ int pkt_remaining;
+ int read_avail;
+ struct intent_info *i;
+ uint32_t liid;
+ unsigned long intents_flags;
+ unsigned long rx_data_flags;
+
+ ch = container_of(work, struct channel, work);
+ einfo = ch->edge;
+
+ if (ch->tx_resume_needed && smd_write_avail(ch->smd_ch) > 0) {
+ ch->tx_resume_needed = false;
+ smd_disable_read_intr(ch->smd_ch);
+ einfo->xprt_if.glink_core_if_ptr->tx_resume(&einfo->xprt_if);
+ }
+
+ spin_lock_irqsave(&ch->rx_data_lock, rx_data_flags);
+ while (!ch->is_closing && smd_read_avail(ch->smd_ch)) {
+ if (!ch->streaming_ch)
+ pkt_remaining = smd_cur_packet_size(ch->smd_ch);
+ else
+ pkt_remaining = smd_read_avail(ch->smd_ch);
+ GLINK_DBG("%s <SMDXPRT> Reading packet chunk %u '%s' %u:%u\n",
+ __func__, pkt_remaining, ch->name, ch->lcid,
+ ch->rcid);
+ if (!ch->cur_intent && !einfo->intentless) {
+ spin_lock_irqsave(&ch->intents_lock, intents_flags);
+ list_for_each_entry(i, &ch->intents, node) {
+ if (i->size >= pkt_remaining) {
+ list_del(&i->node);
+ ch->cur_intent = i;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ch->intents_lock,
+ intents_flags);
+ if (!ch->cur_intent) {
+ spin_unlock_irqrestore(&ch->rx_data_lock,
+ rx_data_flags);
+				GLINK_DBG("%s %s Requesting intent '%s' %u:%u\n",
+ __func__, "<SMDXPRT>", ch->name,
+ ch->lcid, ch->rcid);
+ ch->intent_req = true;
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_req(
+ &einfo->xprt_if,
+ ch->rcid,
+ pkt_remaining);
+ return;
+ }
+ }
+
+ liid = einfo->intentless ? 0 : ch->cur_intent->liid;
+ read_avail = smd_read_avail(ch->smd_ch);
+ if (ch->streaming_ch && read_avail > pkt_remaining)
+ read_avail = pkt_remaining;
+ intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+ &einfo->xprt_if,
+ ch->rcid,
+ liid);
+ if (!intent->data && einfo->intentless) {
+ intent->data = kmalloc(pkt_remaining, GFP_ATOMIC);
+ if (!intent->data) {
+ GLINK_DBG("%s %s kmalloc failed '%s' %u:%u\n",
+ __func__, "<SMDXPRT>", ch->name,
+ ch->lcid, ch->rcid);
+ continue;
+ }
+ }
+ smd_read(ch->smd_ch, intent->data + intent->write_offset,
+ read_avail);
+ spin_unlock_irqrestore(&ch->rx_data_lock, rx_data_flags);
+ intent->write_offset += read_avail;
+ intent->pkt_size += read_avail;
+ if (read_avail == pkt_remaining && !einfo->intentless) {
+ spin_lock_irqsave(&ch->intents_lock, intents_flags);
+ list_add_tail(&ch->cur_intent->node, &ch->used_intents);
+ spin_unlock_irqrestore(&ch->intents_lock,
+ intents_flags);
+ ch->cur_intent = NULL;
+ }
+ einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(
+ &einfo->xprt_if,
+ ch->rcid,
+ intent,
+ read_avail == pkt_remaining);
+ spin_lock_irqsave(&ch->rx_data_lock, rx_data_flags);
+ }
+ spin_unlock_irqrestore(&ch->rx_data_lock, rx_data_flags);
+}
+
+/**
+ * smd_data_ch_notify() - process an event from the smd channel
+ * @priv: The channel the event occurred on.
+ * @event: The event to process
+ */
+static void smd_data_ch_notify(void *priv, unsigned event)
+{
+ struct channel *ch = priv;
+ struct channel_work *work;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ queue_work(ch->wq, &ch->work);
+ break;
+ case SMD_EVENT_OPEN:
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ SMDXPRT_ERR("%s: unable to process event %d\n",
+ __func__, SMD_EVENT_OPEN);
+ return;
+ }
+ work->ch = ch;
+ INIT_WORK(&work->work, process_open_event);
+ queue_work(ch->wq, &work->work);
+ break;
+ case SMD_EVENT_CLOSE:
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ SMDXPRT_ERR("%s: unable to process event %d\n",
+ __func__, SMD_EVENT_CLOSE);
+ return;
+ }
+ work->ch = ch;
+ INIT_WORK(&work->work, process_close_event);
+ queue_work(ch->wq, &work->work);
+ break;
+ case SMD_EVENT_STATUS:
+ SMDXPRT_DBG("%s Processing STATUS for '%s' %u:%u\n", __func__,
+ ch->name, ch->lcid, ch->rcid);
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ SMDXPRT_ERR("%s: unable to process event %d\n",
+ __func__, SMD_EVENT_STATUS);
+ return;
+ }
+ work->ch = ch;
+ INIT_WORK(&work->work, process_status_event);
+ queue_work(ch->wq, &work->work);
+ break;
+ case SMD_EVENT_REOPEN_READY:
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ SMDXPRT_ERR("%s: unable to process event %d\n",
+ __func__, SMD_EVENT_REOPEN_READY);
+ return;
+ }
+ work->ch = ch;
+ INIT_WORK(&work->work, process_reopen_event);
+ queue_work(ch->wq, &work->work);
+ break;
+ }
+}
+
+/**
+ * smd_data_ch_close() - close and cleanup SMD data channel
+ * @ch: Channel to cleanup
+ *
+ * Must be called with einfo->ssr_sync SRCU locked.
+ */
+static void smd_data_ch_close(struct channel *ch)
+{
+ struct intent_info *intent;
+ unsigned long flags;
+
+ SMDXPRT_INFO("%s Closing SMD channel lcid %u\n", __func__, ch->lcid);
+
+ ch->is_closing = true;
+ ch->wait_for_probe = false;
+ ch->tx_resume_needed = false;
+ flush_workqueue(ch->wq);
+
+ if (ch->smd_ch) {
+ smd_close(ch->smd_ch);
+ ch->smd_ch = NULL;
+ } else if (ch->local_legacy) {
+ ch->edge->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+ &ch->edge->xprt_if,
+ ch->lcid);
+ }
+
+ ch->local_legacy = false;
+
+ spin_lock_irqsave(&ch->intents_lock, flags);
+ while (!list_empty(&ch->intents)) {
+		intent = list_first_entry(&ch->intents,
+					  struct intent_info, node);
+ list_del(&intent->node);
+ kfree(intent);
+ }
+ while (!list_empty(&ch->used_intents)) {
+ intent = list_first_entry(&ch->used_intents,
+ struct intent_info, node);
+ list_del(&intent->node);
+ kfree(intent);
+ }
+ spin_unlock_irqrestore(&ch->intents_lock, flags);
+ ch->is_closing = false;
+}
+
+static void data_ch_probe_body(struct channel *ch)
+{
+ struct edge_info *einfo;
+ int ret;
+
+ einfo = ch->edge;
+ SMDXPRT_DBG("%s Opening SMD channel %d:'%s'\n", __func__,
+ einfo->smd_edge, ch->name);
+
+ ret = smd_named_open_on_edge(ch->name, einfo->smd_edge, &ch->smd_ch, ch,
+ smd_data_ch_notify);
+ if (ret != 0) {
+ SMDXPRT_ERR("%s Opening failed %d for %d:'%s'\n",
+ __func__, ret, einfo->smd_edge, ch->name);
+ return;
+ }
+ smd_disable_read_intr(ch->smd_ch);
+}
+
+static int channel_probe(struct platform_device *pdev)
+{
+ struct channel *ch;
+ struct edge_info *einfo;
+ int i;
+ bool found = false;
+ unsigned long flags;
+
+ for (i = 0; i < NUM_EDGES; ++i) {
+ if (edge_infos[i].smd_edge == pdev->id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EPROBE_DEFER;
+
+ einfo = &edge_infos[i];
+
+ found = false;
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (!strcmp(pdev->name, ch->name)) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (!found)
+ return -EPROBE_DEFER;
+
+ if (!ch->wait_for_probe)
+ return -EPROBE_DEFER;
+
+ ch->wait_for_probe = false;
+ ch->had_probed = true;
+
+ data_ch_probe_body(ch);
+
+ return 0;
+}
+
+static int dummy_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver dummy_driver = {
+ .probe = dummy_probe,
+ .driver = {
+ .name = "dummydriver12345",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_device dummy_device = {
+ .name = "dummydriver12345",
+};
+
+/**
+ * add_platform_driver() - register the needed platform driver for a channel
+ * @ch: The channel that needs a platform driver registered.
+ *
+ * SMD channels are unique by name/edge tuples, but the platform driver can
+ * only specify the name of the channel, so multiple unique SMD channels can
+ * be covered under one platform driver. Therefore we need to smartly manage
+ * the muxing of channels on platform drivers.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int add_platform_driver(struct channel *ch)
+{
+ struct pdrvs *pdrv;
+ bool found = false;
+ int ret = 0;
+ static bool first = true;
+
+ mutex_lock(&pdrv_list_mutex);
+ ch->wait_for_probe = true;
+ list_for_each_entry(pdrv, &pdrv_list, node) {
+ if (!strcmp(ch->name, pdrv->pdrv.driver.name)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pdrv = kzalloc(sizeof(*pdrv), GFP_KERNEL);
+ if (!pdrv) {
+ ret = -ENOMEM;
+ ch->wait_for_probe = false;
+ goto out;
+ }
+ pdrv->pdrv.driver.name = ch->name;
+ pdrv->pdrv.driver.owner = THIS_MODULE;
+ pdrv->pdrv.probe = channel_probe;
+ list_add_tail(&pdrv->node, &pdrv_list);
+ ret = platform_driver_register(&pdrv->pdrv);
+ if (ret) {
+ list_del(&pdrv->node);
+ kfree(pdrv);
+ ch->wait_for_probe = false;
+ }
+ } else {
+ if (ch->had_probed)
+ data_ch_probe_body(ch);
+
+ /*
+ * channel_probe might have seen the device we want, but
+ * returned EPROBE_DEFER so we need to kick the deferred list
+ */
+ platform_driver_register(&dummy_driver);
+ if (first) {
+ platform_device_register(&dummy_device);
+ first = false;
+ }
+ platform_driver_unregister(&dummy_driver);
+ }
+
+out:
+ mutex_unlock(&pdrv_list_mutex);
+ return ret;
+}
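+
+/*
+ * Note on the deferred-probe kick above: binding the dummy driver to the
+ * dummy device makes the driver core re-run its deferred probe list, so
+ * channel_probe() is retried for the channel just marked wait_for_probe.
+ * A minimal sketch of the pattern (the device is registered on first use
+ * only):
+ *
+ *	platform_driver_register(&dummy_driver);
+ *	platform_device_register(&dummy_device);
+ *	platform_driver_unregister(&dummy_driver);
+ */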
+
+/**
+ * tx_cmd_version() - convert a version cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ *
+ * The remote side doesn't speak G-Link, so we fake the version negotiation.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(&einfo->xprt_if,
+ version,
+ features);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(&einfo->xprt_if,
+ version,
+ features);
+}
+
+/**
+ * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ *
+ * The remote side doesn't speak G-Link. The core is acking a version command
+ * we faked. Do nothing.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features)
+{
+}
+
+/**
+ * set_version() - activate a negotiated version and feature set
+ * @if_ptr: The transport to configure.
+ * @version: The version to use.
+ * @features: The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct edge_info *einfo;
+ uint32_t capabilities = GCAP_SIGNALS | GCAP_AUTO_QUEUE_RX_INT;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ return einfo->intentless ?
+ GCAP_INTENTLESS | capabilities : capabilities;
+}
+
+/**
+ * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @name: The channel name to encode.
+ * @req_xprt: The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+ const char *name, uint16_t req_xprt)
+{
+ struct command {
+ uint32_t cmd;
+ uint32_t id;
+ uint32_t priority;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ struct channel *ch;
+ struct channel *temp_ch;
+ bool found = false;
+ int rcu_id;
+ int ret = 0;
+ int len;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->ssr_sync);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (!strcmp(name, ch->name)) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (!found) {
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ SMDXPRT_ERR("%s: channel struct allocation failed\n",
+ __func__);
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -ENOMEM;
+ }
+ strlcpy(ch->name, name, GLINK_NAME_SIZE);
+ ch->edge = einfo;
+ INIT_LIST_HEAD(&ch->intents);
+ INIT_LIST_HEAD(&ch->used_intents);
+ spin_lock_init(&ch->intents_lock);
+ spin_lock_init(&ch->rx_data_lock);
+ INIT_WORK(&ch->work, process_data_event);
+ ch->wq = create_singlethread_workqueue(ch->name);
+ if (!ch->wq) {
+ SMDXPRT_ERR("%s: channel workqueue create failed\n",
+ __func__);
+ kfree(ch);
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -ENOMEM;
+ }
+
+	/*
+	 * The channel could have been added to the list by someone else,
+	 * so scan again. Channel creation is non-atomic, so an unlock and
+	 * recheck is necessary.
+	 */
+ temp_ch = ch;
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node)
+ if (!strcmp(name, ch->name)) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ ch = temp_ch;
+ list_add_tail(&ch->node, &einfo->channels);
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ destroy_workqueue(temp_ch->wq);
+ kfree(temp_ch);
+ }
+ }
+
+ ch->tx_resume_needed = false;
+ ch->lcid = lcid;
+
+ if (einfo->smd_ctl_ch_open) {
+ SMDXPRT_INFO("%s TX OPEN '%s' lcid %u reqxprt %u\n", __func__,
+ name, lcid, req_xprt);
+ cmd.cmd = CMD_OPEN;
+ cmd.id = lcid;
+ cmd.priority = req_xprt;
+ len = strlen(name) + 1;
+ len += sizeof(cmd);
+ mutex_lock(&einfo->smd_lock);
+ while (smd_write_avail(einfo->smd_ch) < len)
+ msleep(20);
+ smd_write_start(einfo->smd_ch, len);
+ smd_write_segment(einfo->smd_ch, &cmd, sizeof(cmd));
+ smd_write_segment(einfo->smd_ch, name, strlen(name) + 1);
+ smd_write_end(einfo->smd_ch);
+ mutex_unlock(&einfo->smd_lock);
+ } else {
+ SMDXPRT_INFO("%s Legacy Open '%s' lcid %u reqxprt %u\n",
+ __func__, name, lcid, req_xprt);
+ ch->rcid = lcid + LEGACY_RCID_CHANNEL_OFFSET;
+ ch->local_legacy = true;
+ ch->remote_legacy = true;
+ ret = add_platform_driver(ch);
+ if (!ret)
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+ &einfo->xprt_if,
+ ch->lcid, SMD_TRANS_XPRT_ID);
+ }
+
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return ret;
+}
+
+/**
+ * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct command {
+ uint32_t cmd;
+ uint32_t id;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ struct channel *ch;
+ int rcu_id;
+ bool found = false;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->ssr_sync);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node)
+ if (lcid == ch->lcid) {
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (!found) {
+ GLINK_ERR("%s <SMDXPRT> LCID not found %u\n", __func__, lcid);
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -ENODEV;
+ }
+
+ if (!ch->local_legacy) {
+ SMDXPRT_INFO("%s TX CLOSE lcid %u\n", __func__, lcid);
+ cmd.cmd = CMD_CLOSE;
+ cmd.id = lcid;
+ cmd.reserved = 0;
+ mutex_lock(&einfo->smd_lock);
+ while (smd_write_avail(einfo->smd_ch) < sizeof(cmd))
+ msleep(20);
+ smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+ mutex_unlock(&einfo->smd_lock);
+ } else {
+ smd_data_ch_close(ch);
+ }
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ * @xprt_resp: The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint16_t xprt_resp)
+{
+ struct command {
+ uint32_t cmd;
+ uint32_t id;
+ uint32_t priority;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ struct channel *ch;
+ bool found = false;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (!einfo->smd_ctl_ch_open)
+ return;
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node)
+ if (ch->rcid == rcid) {
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (!found) {
+ GLINK_ERR("%s <SMDXPRT> No matching SMD channel for rcid %u\n",
+ __func__, rcid);
+ return;
+ }
+
+ if (ch->remote_legacy) {
+ SMDXPRT_INFO("%s Legacy ch rcid %u xprt_resp %u\n", __func__,
+ rcid, xprt_resp);
+ return;
+ }
+
+ SMDXPRT_INFO("%s TX OPEN ACK rcid %u xprt_resp %u\n", __func__, rcid,
+ xprt_resp);
+
+ cmd.cmd = CMD_OPEN_ACK;
+ cmd.id = ch->rcid;
+ cmd.priority = xprt_resp;
+
+ mutex_lock(&einfo->smd_lock);
+ while (smd_write_avail(einfo->smd_ch) < sizeof(cmd))
+ msleep(20);
+
+ smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+ mutex_unlock(&einfo->smd_lock);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid)
+{
+ struct command {
+ uint32_t cmd;
+ uint32_t id;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ struct channel *ch;
+ bool found = false;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node)
+ if (rcid == ch->rcid) {
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (!found) {
+ GLINK_ERR("%s <SMDXPRT> No matching SMD channel for rcid %u\n",
+ __func__, rcid);
+ return;
+ }
+
+ if (!ch->remote_legacy) {
+ SMDXPRT_INFO("%s TX CLOSE ACK rcid %u\n", __func__, rcid);
+ cmd.cmd = CMD_CLOSE_ACK;
+ cmd.id = rcid;
+ cmd.reserved = 0;
+ mutex_lock(&einfo->smd_lock);
+ while (smd_write_avail(einfo->smd_ch) < sizeof(cmd))
+ msleep(20);
+ smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+ mutex_unlock(&einfo->smd_lock);
+ }
+ ch->remote_legacy = false;
+ ch->rcid = 0;
+}
+
+/**
+ * ssr() - process a subsystem restart notification of a transport
+ * @if_ptr: The transport to restart.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ struct intent_info *intent;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ einfo->in_ssr = true;
+ synchronize_srcu(&einfo->ssr_sync);
+
+ einfo->smd_ctl_ch_open = false;
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (!ch->smd_ch)
+ continue;
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ ch->is_closing = true;
+ ch->wait_for_probe = false;
+ flush_workqueue(ch->wq);
+ smd_close(ch->smd_ch);
+ ch->smd_ch = NULL;
+ ch->local_legacy = false;
+ ch->remote_legacy = false;
+ ch->rcid = 0;
+ ch->tx_resume_needed = false;
+
+ spin_lock_irqsave(&ch->intents_lock, flags);
+ while (!list_empty(&ch->intents)) {
+ intent = list_first_entry(&ch->intents,
+ struct intent_info,
+ node);
+ list_del(&intent->node);
+ kfree(intent);
+ }
+ while (!list_empty(&ch->used_intents)) {
+ intent = list_first_entry(&ch->used_intents,
+ struct intent_info,
+ node);
+ list_del(&intent->node);
+ kfree(intent);
+ }
+ kfree(ch->cur_intent);
+ ch->cur_intent = NULL;
+ spin_unlock_irqrestore(&ch->intents_lock, flags);
+ ch->is_closing = false;
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+ schedule_delayed_work(&einfo->ssr_work, 5 * HZ);
+ return 0;
+}
+
+/**
+ * allocate_rx_intent() - allocate/reserve space for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @size: size of intent.
+ * @intent: Pointer to the intent structure.
+ *
+ * Assign "data" with the buffer created, since the transport creates
+ * a linear buffer and "iovec" with the "intent" itself, so that
+ * the data can be passed to a client that receives only vector buffer.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+ struct glink_core_rx_intent *intent)
+{
+ void *t;
+
+ t = kmalloc(size, GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ intent->data = t;
+ intent->iovec = (void *)intent;
+ intent->vprovider = rx_linear_vbuf_provider;
+ intent->pprovider = NULL;
+ return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ if (!intent || !intent->data)
+ return -EINVAL;
+
+ kfree(intent->data);
+ intent->data = NULL;
+ intent->iovec = NULL;
+ intent->vprovider = NULL;
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The intent size to encode.
+ * @liid: The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ struct intent_info *intent;
+ int rcu_id;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->ssr_sync);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (lcid == ch->lcid)
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ intent = kmalloc(sizeof(*intent), GFP_KERNEL);
+ if (!intent) {
+ SMDXPRT_ERR("%s: no memory for intent\n", __func__);
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -ENOMEM;
+ }
+
+ intent->liid = liid;
+ intent->size = size;
+ spin_lock_irqsave(&ch->intents_lock, flags);
+ list_add_tail(&intent->node, &ch->intents);
+ spin_unlock_irqrestore(&ch->intents_lock, flags);
+
+ if (ch->intent_req) {
+ ch->intent_req = false;
+ queue_work(ch->wq, &ch->work);
+ }
+
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @liid: The local intent id to encode.
+ * @reuse: Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ struct intent_info *i;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (lcid == ch->lcid)
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ spin_lock_irqsave(&ch->intents_lock, flags);
+ list_for_each_entry(i, &ch->used_intents, node) {
+ if (i->liid == liid) {
+ list_del(&i->node);
+ if (reuse)
+ list_add_tail(&i->node, &ch->intents);
+ else
+ kfree(i);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ch->intents_lock, flags);
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ int rc;
+ struct channel_work *tx_done;
+ const void *data_start;
+ size_t tx_size = 0;
+ int rcu_id;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->ssr_sync);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (lcid == ch->lcid)
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+ &tx_size);
+ if (!data_start) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return -EINVAL;
+ }
+
+ if (!ch->streaming_ch) {
+ if (pctx->size == pctx->size_remaining) {
+ rc = check_write_avail(smd_write_avail, ch);
+ if (rc <= 0) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return rc;
+ }
+ rc = smd_write_start(ch->smd_ch, pctx->size);
+ if (rc) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return rc;
+ }
+ }
+
+ rc = check_write_avail(smd_write_segment_avail, ch);
+ if (rc <= 0) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return rc;
+ }
+ if (rc > tx_size)
+ rc = tx_size;
+ rc = smd_write_segment(ch->smd_ch, data_start, rc);
+ if (rc < 0) {
+ SMDXPRT_ERR("%s: write segment failed %d\n", __func__,
+ rc);
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return rc;
+ }
+ } else {
+ rc = check_write_avail(smd_write_avail, ch);
+ if (rc <= 0) {
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return rc;
+ }
+ if (rc > tx_size)
+ rc = tx_size;
+ rc = smd_write(ch->smd_ch, data_start, rc);
+ if (rc < 0) {
+ SMDXPRT_ERR("%s: write failed %d\n", __func__, rc);
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return rc;
+ }
+ }
+
+ pctx->size_remaining -= rc;
+ if (!pctx->size_remaining) {
+ if (!ch->streaming_ch)
+ smd_write_end(ch->smd_ch);
+		tx_done = kmalloc(sizeof(*tx_done), GFP_ATOMIC);
+		if (!tx_done) {
+			SMDXPRT_ERR("%s: tx_done alloc failed\n", __func__);
+		} else {
+			tx_done->ch = ch;
+			tx_done->iid = pctx->riid;
+			INIT_WORK(&tx_done->work, process_tx_done);
+			queue_work(ch->wq, &tx_done->work);
+		}
+ }
+
+ srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+ return rc;
+}
+
+/**
+ * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (lcid == ch->lcid)
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_rx_intent_req_ack(
+ &einfo->xprt_if,
+ ch->rcid,
+ true);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_rx_intent_put(
+ &einfo->xprt_if,
+ ch->rcid,
+ ch->next_intent_id++,
+ size);
+ return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd
+ *				       to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @granted: The request response to encode.
+ *
+ * The remote side doesn't speak G-Link. The core is just acking a request we
+ * faked. Do nothing.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted)
+{
+ return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - convert a signal cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @sigs: The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+ uint32_t sigs)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ uint32_t set = 0;
+ uint32_t clear = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (lcid == ch->lcid)
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+ if (sigs & SMD_DTR_SIG)
+ set |= TIOCM_DTR;
+ else
+ clear |= TIOCM_DTR;
+
+ if (sigs & SMD_CTS_SIG)
+ set |= TIOCM_RTS;
+ else
+ clear |= TIOCM_RTS;
+
+ if (sigs & SMD_CD_SIG)
+ set |= TIOCM_CD;
+ else
+ clear |= TIOCM_CD;
+
+ if (sigs & SMD_RI_SIG)
+ set |= TIOCM_RI;
+ else
+ clear |= TIOCM_RI;
+
+ return smd_tiocmset(ch->smd_ch, set, clear);
+}
+
+/**
+ * poll() - poll for data on a channel
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id for the channel.
+ *
+ * Return: 0 if no data available, 1 if data available, or standard Linux error
+ * code.
+ */
+static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ int rc;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (lcid == ch->lcid)
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ rc = smd_is_pkt_avail(ch->smd_ch);
+ if (rc == 1)
+ process_data_event(&ch->work);
+ return rc;
+}
+
+/**
+ * mask_rx_irq() - mask the receive irq
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id for the channel.
+ * @mask: True to mask the irq, false to unmask.
+ * @pstruct: Platform defined structure for handling the masking.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+ bool mask, void *pstruct)
+{
+ struct edge_info *einfo;
+ struct channel *ch;
+ int ret = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->channels_lock, flags);
+ list_for_each_entry(ch, &einfo->channels, node) {
+ if (lcid == ch->lcid)
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->channels_lock, flags);
+ ret = smd_mask_receive_interrupt(ch->smd_ch, mask, pstruct);
+
+ if (ret == 0)
+ einfo->irq_disabled = mask;
+
+ return ret;
+}
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr: The transport for which features are negotiated for.
+ * @version: The version negotiated.
+ * @features: The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features)
+{
+ return features & version->features;
+}
+
+/**
+ * init_xprt_if() - initialize the xprt_if for an edge
+ * @einfo:	The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+ einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+ einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+ einfo->xprt_if.set_version = set_version;
+ einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+ einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+ einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+ einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+ einfo->xprt_if.ssr = ssr;
+ einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+ einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+ einfo->xprt_if.tx = tx;
+ einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+ einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+ tx_cmd_remote_rx_intent_req_ack;
+ einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+ einfo->xprt_if.poll = poll;
+ einfo->xprt_if.mask_rx_irq = mask_rx_irq;
+}
+
+/**
+ * init_xprt_cfg() - initialize the xprt_cfg for an edge
+ * @einfo: The edge to initialize.
+ */
+static void init_xprt_cfg(struct edge_info *einfo)
+{
+ einfo->xprt_cfg.name = XPRT_NAME;
+ einfo->xprt_cfg.versions = versions;
+ einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+ einfo->xprt_cfg.max_cid = SZ_64;
+ einfo->xprt_cfg.max_iid = SZ_128;
+}
+
+static struct platform_driver migration_driver = {
+ .probe = ctl_ch_probe,
+ .driver = {
+ .name = "GLINK_CTRL",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init glink_smd_xprt_init(void)
+{
+ int i;
+ int rc;
+ struct edge_info *einfo;
+
+ for (i = 0; i < NUM_EDGES; ++i) {
+ einfo = &edge_infos[i];
+ init_xprt_cfg(einfo);
+ init_xprt_if(einfo);
+ INIT_LIST_HEAD(&einfo->channels);
+ spin_lock_init(&einfo->channels_lock);
+ init_srcu_struct(&einfo->ssr_sync);
+ mutex_init(&einfo->smd_lock);
+ mutex_init(&einfo->in_ssr_lock);
+ INIT_DELAYED_WORK(&einfo->ssr_work, ssr_work_func);
+ INIT_WORK(&einfo->work, process_ctl_event);
+ rc = glink_core_register_transport(&einfo->xprt_if,
+ &einfo->xprt_cfg);
+ if (rc)
+ SMDXPRT_ERR("%s: %s glink register xprt failed %d\n",
+ __func__,
+ einfo->xprt_cfg.edge,
+ rc);
+ else
+ einfo->xprt_if.glink_core_if_ptr->link_up(
+ &einfo->xprt_if);
+ }
+
+ platform_driver_register(&migration_driver);
+
+ return 0;
+}
+arch_initcall(glink_smd_xprt_init);
+
+MODULE_DESCRIPTION("MSM G-Link SMD Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
new file mode 100644
index 000000000000..b3f19121abfe
--- /dev/null
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -0,0 +1,3028 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipc_logging.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define XPRT_NAME "smem"
+#define FIFO_FULL_RESERVE 8
+#define FIFO_ALIGNMENT 8
+#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
+#define SMEM_CH_DESC_SIZE 32
+#define RPM_TOC_ID 0x67727430
+#define RPM_TX_FIFO_ID 0x61703272
+#define RPM_RX_FIFO_ID 0x72326170
+#define RPM_TOC_SIZE 256
+#define RPM_MAX_TOC_ENTRIES 20
+#define RPM_FIFO_ADDR_ALIGN_BYTES 3
+#define TRACER_PKT_FEATURE BIT(2)
+
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD: Version and feature set supported
+ * @VERSION_ACK_CMD: Response for @VERSION_CMD
+ * @OPEN_CMD: Open a channel
+ * @CLOSE_CMD: Close a channel
+ * @OPEN_ACK_CMD: Response to @OPEN_CMD
+ * @RX_INTENT_CMD: RX intent for a channel was queued
+ * @RX_DONE_CMD: Use of RX intent for a channel is complete
+ * @RX_INTENT_REQ_CMD: Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD: Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD: Start of a data transfer
+ * @ZERO_COPY_TX_DATA_CMD: Start of a data transfer with zero copy
+ * @CLOSE_ACK_CMD: Response for @CLOSE_CMD
+ * @TX_DATA_CONT_CMD: Continuation or end of a data transfer
+ * @READ_NOTIF_CMD: Request for a notification when this cmd is read
+ * @RX_DONE_W_REUSE_CMD:	Same as @RX_DONE_CMD but also reuse the used intent
+ * @SIGNALS_CMD: Sideband signals
+ * @TRACER_PKT_CMD: Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD: Continuation or end of a Tracer Packet Command
+ */
+enum command_types {
+ VERSION_CMD,
+ VERSION_ACK_CMD,
+ OPEN_CMD,
+ CLOSE_CMD,
+ OPEN_ACK_CMD,
+ RX_INTENT_CMD,
+ RX_DONE_CMD,
+ RX_INTENT_REQ_CMD,
+ RX_INTENT_REQ_ACK_CMD,
+ TX_DATA_CMD,
+ ZERO_COPY_TX_DATA_CMD,
+ CLOSE_ACK_CMD,
+ TX_DATA_CONT_CMD,
+ READ_NOTIF_CMD,
+ RX_DONE_W_REUSE_CMD,
+ SIGNALS_CMD,
+ TRACER_PKT_CMD,
+ TRACER_PKT_CONT_CMD,
+};
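+
+/*
+ * Commands are normally decoded straight out of the rx fifo; any command
+ * that cannot be completed in that context is captured as a struct
+ * deferred_cmd (defined below) and replayed from process context via
+ * edge_info->deferred_cmds.
+ */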
+
+/**
+ * struct channel_desc - description of a channel fifo with a remote entity
+ * @read_index: The read index for the fifo where data should be
+ * consumed from.
+ * @write_index:	The write index for the fifo where data should be
+ *			produced to.
+ *
+ * This structure resides in SMEM and contains the control information for the
+ * fifo data pipes of the channel. There is one physical channel between us
+ * and a remote entity.
+ */
+struct channel_desc {
+ uint32_t read_index;
+ uint32_t write_index;
+};
+
+/**
+ * struct mailbox_config_info - description of a mailbox transport channel
+ * @tx_read_index: Offset into the tx fifo where data should be read from.
+ * @tx_write_index: Offset into the tx fifo where new data will be placed.
+ * @tx_size: Size of the transmit fifo in bytes.
+ * @rx_read_index: Offset into the rx fifo where data should be read from.
+ * @rx_write_index: Offset into the rx fifo where new data will be placed.
+ * @rx_size: Size of the receive fifo in bytes.
+ * @fifo: The fifos for the channel.
+ */
+struct mailbox_config_info {
+ uint32_t tx_read_index;
+ uint32_t tx_write_index;
+ uint32_t tx_size;
+ uint32_t rx_read_index;
+ uint32_t rx_write_index;
+ uint32_t rx_size;
+ char fifo[]; /* tx fifo, then rx fifo */
+};
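+
+/*
+ * Illustrative layout of @fifo, derived from the field comments above:
+ *
+ *	fifo[0]       .. fifo[tx_size - 1]            tx fifo
+ *	fifo[tx_size] .. fifo[tx_size + rx_size - 1]  rx fifo
+ */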
+
+/**
+ * struct edge_info - local information for managing a single complete edge
+ * @xprt_if: The transport interface registered with the
+ * glink core associated with this edge.
+ * @xprt_cfg: The transport configuration for the glink core
+ *				associated with this edge.
+ * @intentless: True if this edge runs in intentless mode.
+ * @irq_disabled:		Flag indicating whether the interrupt is
+ *				enabled or disabled.
+ * @remote_proc_id: The SMEM processor id for the remote side.
+ * @rx_reset_reg: Reference to the register to reset the rx irq
+ * line, if applicable.
+ * @out_irq_reg: Reference to the register to send an irq to the
+ * remote side.
+ * @out_irq_mask: Mask written to @out_irq_reg to trigger the
+ * correct irq.
+ * @irq_line: The incoming interrupt line.
+ * @tx_irq_count: Number of interrupts triggered.
+ * @rx_irq_count: Number of interrupts received.
+ * @tx_ch_desc: Reference to the channel description structure
+ * for tx in SMEM for this edge.
+ * @rx_ch_desc: Reference to the channel description structure
+ * for rx in SMEM for this edge.
+ * @tx_fifo: Reference to the transmit fifo in SMEM.
+ * @rx_fifo: Reference to the receive fifo in SMEM.
+ * @tx_fifo_size: Total size of @tx_fifo.
+ * @rx_fifo_size: Total size of @rx_fifo.
+ * @read_from_fifo:		Memcpy variant used to read this edge's fifos.
+ * @write_to_fifo:		Memcpy variant used to write this edge's fifos.
+ * @write_lock: Lock to serialize access to @tx_fifo.
+ * @tx_blocked_queue: Queue of entities waiting for the remote side to
+ * signal @tx_fifo has flushed and is now empty.
+ * @tx_resume_needed: A tx resume signal needs to be sent to the glink
+ * core once the remote side indicates @tx_fifo has
+ * flushed.
+ * @tx_blocked_signal_sent: Flag to indicate the flush signal has already
+ * been sent, and a response is pending from the
+ * remote side. Protected by @write_lock.
+ * @kwork: Work to be executed when an irq is received.
+ * @kworker: Handle to the entity processing @kwork.
+ * @task: Handle to the task context used to run @kworker.
+ * @use_ref: Active uses of this transport use this to grab
+ * a reference. Used for ssr synchronization.
+ * @in_ssr: Signals if this transport is in ssr.
+ * @rx_lock: Used to serialize concurrent instances of rx
+ * processing.
+ * @deferred_cmds: List of deferred commands that need to be
+ * processed in process context.
+ * @num_pw_states: Size of @ramp_time_us.
+ * @ramp_time_us: Array of ramp times in microseconds where array
+ * index position represents a power state.
+ * @mailbox: Mailbox transport channel description reference.
+ */
+struct edge_info {
+ struct glink_transport_if xprt_if;
+ struct glink_core_transport_cfg xprt_cfg;
+ bool intentless;
+ bool irq_disabled;
+ uint32_t remote_proc_id;
+ void __iomem *rx_reset_reg;
+ void __iomem *out_irq_reg;
+ uint32_t out_irq_mask;
+ uint32_t irq_line;
+ uint32_t tx_irq_count;
+ uint32_t rx_irq_count;
+ struct channel_desc *tx_ch_desc;
+ struct channel_desc *rx_ch_desc;
+ void __iomem *tx_fifo;
+ void __iomem *rx_fifo;
+ uint32_t tx_fifo_size;
+ uint32_t rx_fifo_size;
+ void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
+ void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
+ spinlock_t write_lock;
+ wait_queue_head_t tx_blocked_queue;
+ bool tx_resume_needed;
+ bool tx_blocked_signal_sent;
+ struct kthread_work kwork;
+ struct kthread_worker kworker;
+ struct task_struct *task;
+ struct srcu_struct use_ref;
+ bool in_ssr;
+ spinlock_t rx_lock;
+ struct list_head deferred_cmds;
+ uint32_t num_pw_states;
+ unsigned long *ramp_time_us;
+ struct mailbox_config_info *mailbox;
+};
+
+/**
+ * struct deferred_cmd - description of a command to be processed later
+ * @list_node: Used to put this command on a list in the edge.
+ * @id: ID of the command.
+ * @param1: Parameter one of the command.
+ * @param2: Parameter two of the command.
+ * @data: Extra data associated with the command, if applicable.
+ *
+ * This structure stores the relevant information of a command that was removed
+ * from the fifo but needs to be processed at a later time.
+ */
+struct deferred_cmd {
+ struct list_head list_node;
+ uint16_t id;
+ uint16_t param1;
+ uint32_t param2;
+ void *data;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features);
+static void register_debugfs_info(struct edge_info *einfo);
+
+static struct edge_info *edge_infos[NUM_SMEM_SUBSYSTEMS];
+static DEFINE_MUTEX(probe_lock);
+static struct glink_core_version versions[] = {
+ {1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * send_irq() - send an irq to a remote entity as an event signal
+ * @einfo:	The edge whose remote entity should receive the irq.
+ */
+static void send_irq(struct edge_info *einfo)
+{
+	/*
+	 * Any data associated with this event must be visible to the remote
+	 * side before the interrupt is triggered.
+	 */
+ wmb();
+ writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
+ einfo->tx_irq_count++;
+}
+
+/**
+ * read_from_fifo() - memcpy from fifo memory
+ * @dest: Destination address.
+ * @src: Source address.
+ * @num_bytes: Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *read_from_fifo(void *dest, const void *src, size_t num_bytes)
+{
+ memcpy_fromio(dest, src, num_bytes);
+ return dest;
+}
+
+/**
+ * write_to_fifo() - memcpy to fifo memory
+ * @dest: Destination address.
+ * @src: Source address.
+ * @num_bytes: Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *write_to_fifo(void *dest, const void *src, size_t num_bytes)
+{
+ memcpy_toio(dest, src, num_bytes);
+ return dest;
+}
+
+/**
+ * memcpy32_toio() - memcpy to word access only memory
+ * @dest: Destination address.
+ * @src: Source address.
+ * @num_bytes: Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
+{
+ uint32_t *dest_local = (uint32_t *)dest;
+ uint32_t *src_local = (uint32_t *)src;
+
+ BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES);
+ BUG_ON(!dest_local ||
+ ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+ BUG_ON(!src_local ||
+ ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+ num_bytes /= sizeof(uint32_t);
+
+ while (num_bytes--)
+ __raw_writel_no_log(*src_local++, dest_local++);
+
+ return dest;
+}
+
+/**
+ * memcpy32_fromio() - memcpy from word access only memory
+ * @dest: Destination address.
+ * @src: Source address.
+ * @num_bytes: Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
+{
+ uint32_t *dest_local = (uint32_t *)dest;
+ uint32_t *src_local = (uint32_t *)src;
+
+ BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES);
+ BUG_ON(!dest_local ||
+ ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+ BUG_ON(!src_local ||
+ ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+ num_bytes /= sizeof(uint32_t);
+
+ while (num_bytes--)
+ *dest_local++ = __raw_readl_no_log(src_local++);
+
+ return dest;
+}
+
+/**
+ * fifo_read_avail() - how many bytes are available to be read from an edge
+ * @einfo: The concerned edge to query.
+ *
+ * Return: The number of bytes available to be read from the edge.
+ */
+static uint32_t fifo_read_avail(struct edge_info *einfo)
+{
+ uint32_t read_index = einfo->rx_ch_desc->read_index;
+ uint32_t write_index = einfo->rx_ch_desc->write_index;
+ uint32_t fifo_size = einfo->rx_fifo_size;
+ uint32_t bytes_avail;
+
+ bytes_avail = write_index - read_index;
+ if (write_index < read_index)
+ /*
+ * Case: W < R - Write has wrapped
+ * --------------------------------
+ * In this case, the write operation has wrapped past the end
+ * of the FIFO which means that now calculating the amount of
+ * data in the FIFO results in a negative number. This can be
+ * easily fixed by adding the fifo_size to the value. Even
+ * though the values are unsigned, subtraction is always done
+ * using 2's complement which means that the result will still
+ * be correct once the FIFO size has been added to the negative
+ * result.
+ *
+ * Example:
+ * '-' = data in fifo
+ * '.' = empty
+ *
+ * 0 1
+ * 0123456789012345
+ * |-----w.....r----|
+ * 0 N
+ *
+ * write = 5 = 101b
+ * read = 11 = 1011b
+ * Data in FIFO
+ * (write - read) + fifo_size = (101b - 1011b) + 10000b
+ * = 11111010b + 10000b = 1010b = 10
+ */
+ bytes_avail += fifo_size;
+
+ return bytes_avail;
+}
+
+/**
+ * fifo_write_avail() - how many bytes can be written to the edge
+ * @einfo: The concerned edge to query.
+ *
+ * Calculates the number of bytes that can be transmitted at this time.
+ * Automatically reserves some space to maintain alignment when the fifo is
+ * completely full, and reserves space so that the flush command can always be
+ * transmitted when needed.
+ *
+ * Return: The number of bytes that can be written to the edge.
+ */
+static uint32_t fifo_write_avail(struct edge_info *einfo)
+{
+ uint32_t read_index = einfo->tx_ch_desc->read_index;
+ uint32_t write_index = einfo->tx_ch_desc->write_index;
+ uint32_t fifo_size = einfo->tx_fifo_size;
+ uint32_t bytes_avail = read_index - write_index;
+
+ if (read_index <= write_index)
+ bytes_avail += fifo_size;
+ if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
+ bytes_avail = 0;
+ else
+ bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+
+ return bytes_avail;
+}
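+
+/*
+ * Example with illustrative values: for a 16 KB tx fifo with
+ * read_index == write_index, the fifo is empty and bytes_avail starts at
+ * 16384; FIFO_FULL_RESERVE and TX_BLOCKED_CMD_RESERVE are then
+ * subtracted, so the held-back space maintains alignment when the fifo
+ * fills completely and guarantees the READ_NOTIF flush command can
+ * always be sent.
+ */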
+
+/**
+ * fifo_read() - read data from an edge
+ * @einfo: The concerned edge to read from.
+ * @_data: Buffer to copy the read data into.
+ * @len:	The amount of data to read in bytes.
+ *
+ * Return: The number of bytes read.
+ */
+static int fifo_read(struct edge_info *einfo, void *_data, int len)
+{
+ void *ptr;
+ void *data = _data;
+ int orig_len = len;
+ uint32_t read_index = einfo->rx_ch_desc->read_index;
+ uint32_t write_index = einfo->rx_ch_desc->write_index;
+ uint32_t fifo_size = einfo->rx_fifo_size;
+ uint32_t n;
+
+ while (len) {
+ ptr = einfo->rx_fifo + read_index;
+ if (read_index <= write_index)
+ n = write_index - read_index;
+ else
+ n = fifo_size - read_index;
+
+ if (n == 0)
+ break;
+ if (n > len)
+ n = len;
+
+ einfo->read_from_fifo(data, ptr, n);
+
+ data += n;
+ len -= n;
+ read_index += n;
+ if (read_index >= fifo_size)
+ read_index -= fifo_size;
+ }
+ einfo->rx_ch_desc->read_index = read_index;
+
+ return orig_len - len;
+}
+
+/**
+ * fifo_write_body() - Copy transmit data into an edge
+ * @einfo: The concerned edge to copy into.
+ * @_data: Buffer of data to copy from.
+ * @len: Size of data to copy in bytes.
+ * @write_index: Index into the channel where the data should be copied.
+ *
+ * Return: Number of bytes remaining to be copied into the edge.
+ */
+static uint32_t fifo_write_body(struct edge_info *einfo, const void *_data,
+ int len, uint32_t *write_index)
+{
+ void *ptr;
+ const void *data = _data;
+ uint32_t read_index = einfo->tx_ch_desc->read_index;
+ uint32_t fifo_size = einfo->tx_fifo_size;
+ uint32_t n;
+
+ while (len) {
+ ptr = einfo->tx_fifo + *write_index;
+ if (*write_index < read_index) {
+ n = read_index - *write_index - FIFO_FULL_RESERVE;
+ } else {
+ if (read_index < FIFO_FULL_RESERVE)
+ n = fifo_size + read_index - *write_index -
+ FIFO_FULL_RESERVE;
+ else
+ n = fifo_size - *write_index;
+ }
+
+ if (n == 0)
+ break;
+ if (n > len)
+ n = len;
+
+ einfo->write_to_fifo(ptr, data, n);
+
+ data += n;
+ len -= n;
+ *write_index += n;
+ if (*write_index >= fifo_size)
+ *write_index -= fifo_size;
+ }
+ return len;
+}
+
+/**
+ * fifo_write() - Write data into an edge
+ * @einfo: The concerned edge to write to.
+ * @data: Buffer of data to write.
+ * @len: Length of data to write, in bytes.
+ *
+ * Wrapper around fifo_write_body() to manage additional details that are
+ * necessary for a complete write event. Does not manage concurrency. Clients
+ * should use fifo_write_avail() to check if there is sufficient space before
+ * calling fifo_write().
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write(struct edge_info *einfo, const void *data, int len)
+{
+ int orig_len = len;
+ uint32_t write_index = einfo->tx_ch_desc->write_index;
+
+ len = fifo_write_body(einfo, data, len, &write_index);
+ einfo->tx_ch_desc->write_index = write_index;
+ send_irq(einfo);
+
+ return orig_len - len;
+}
+
+/**
+ * fifo_write_complex() - writes a transaction of multiple buffers to an edge
+ * @einfo: The concerned edge to write to.
+ * @data1: The first buffer of data to write.
+ * @len1: The length of the first buffer in bytes.
+ * @data2: The second buffer of data to write.
+ * @len2: The length of the second buffer in bytes.
+ * @data3:	The third buffer of data to write.
+ * @len3: The length of the third buffer in bytes.
+ *
+ * A variant of fifo_write() which optimizes the use case found in tx(). The
+ * remote side expects all or none of the transmitted data to be available.
+ * This prevents the tx() use case from calling fifo_write() multiple times. The
+ * alternative would be an allocation and additional memcpy to create a buffer
+ * to copy all the data segments into one location before calling fifo_write().
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write_complex(struct edge_info *einfo,
+ const void *data1, int len1,
+ const void *data2, int len2,
+ const void *data3, int len3)
+{
+ int orig_len = len1 + len2 + len3;
+ uint32_t write_index = einfo->tx_ch_desc->write_index;
+
+ len1 = fifo_write_body(einfo, data1, len1, &write_index);
+ len2 = fifo_write_body(einfo, data2, len2, &write_index);
+ len3 = fifo_write_body(einfo, data3, len3, &write_index);
+ einfo->tx_ch_desc->write_index = write_index;
+ send_irq(einfo);
+
+ return orig_len - len1 - len2 - len3;
+}
+
+/**
+ * send_tx_blocked_signal() - send the flush command as we are blocked from tx
+ * @einfo: The concerned edge which is blocked.
+ *
+ * Used to send a signal to the remote side that we have no more space to
+ * transmit data and therefore need the remote side to signal us when they have
+ * cleared some space by reading some data. This function relies upon the
+ * assumption that fifo_write_avail() will reserve some space so that the flush
+ * signal command can always be put into the transmit fifo, even when "everyone"
+ * else thinks that the transmit fifo is truly full. This function assumes
+ * that it is called with the write_lock already locked.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+ struct read_notif_request {
+ uint16_t cmd;
+ uint16_t reserved;
+ uint32_t reserved2;
+ };
+ struct read_notif_request read_notif_req;
+
+ read_notif_req.cmd = READ_NOTIF_CMD;
+ read_notif_req.reserved = 0;
+ read_notif_req.reserved2 = 0;
+
+ if (!einfo->tx_blocked_signal_sent) {
+ einfo->tx_blocked_signal_sent = true;
+ fifo_write(einfo, &read_notif_req, sizeof(read_notif_req));
+ }
+}
+
+/**
+ * fifo_tx() - transmit data on an edge
+ * @einfo: The concerned edge to transmit on.
+ * @data: Buffer of data to transmit.
+ * @len: Length of data to transmit in bytes.
+ *
+ * This helper function is the preferred interface to fifo_write() and should
+ * be used in the normal case for transmitting entities. fifo_tx() will block
+ * until there is sufficient room to transmit the requested amount of data.
+ * fifo_tx() will manage any concurrency between multiple transmitters on a
+ * channel.
+ *
+ * Return: Number of bytes transmitted.
+ */
+static int fifo_tx(struct edge_info *einfo, const void *data, int len)
+{
+ unsigned long flags;
+ int ret;
+
+ DEFINE_WAIT(wait);
+
+ spin_lock_irqsave(&einfo->write_lock, flags);
+ while (fifo_write_avail(einfo) < len) {
+ send_tx_blocked_signal(einfo);
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (fifo_write_avail(einfo) < len && !einfo->in_ssr)
+ schedule();
+ finish_wait(&einfo->tx_blocked_queue, &wait);
+ spin_lock_irqsave(&einfo->write_lock, flags);
+ if (einfo->in_ssr) {
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ return -EFAULT;
+ }
+ }
+ ret = fifo_write(einfo, data, len);
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+ return ret;
+}
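+
+/*
+ * Typical usage, as in the tx_cmd_*() helpers below: build a fixed-size
+ * wire command on the stack and hand it to fifo_tx(), which blocks until
+ * the fifo has room, e.g.:
+ *
+ *	struct command cmd = { .id = CLOSE_CMD, .lcid = lcid };
+ *
+ *	fifo_tx(einfo, &cmd, sizeof(cmd));
+ */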
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo: The edge the data was received on.
+ * @cmd_id: ID to specify the type of data.
+ * @rcid: The remote channel id associated with the data.
+ * @intent_id:	The intent the data should be put in.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+ uint32_t rcid, uint32_t intent_id)
+{
+ struct command {
+ uint32_t frag_size;
+ uint32_t size_remaining;
+ };
+ struct command cmd;
+ struct glink_core_rx_intent *intent;
+ char trash[FIFO_ALIGNMENT];
+ int alignment;
+ bool err = false;
+
+ fifo_read(einfo, &cmd, sizeof(cmd));
+
+ intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+ &einfo->xprt_if, rcid, intent_id);
+ if (intent == NULL) {
+ GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+ intent_id);
+ err = true;
+ } else if (intent->data == NULL) {
+ if (einfo->intentless) {
+ intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
+ if (!intent->data)
+ err = true;
+ else
+ intent->intent_size = cmd.frag_size;
+ } else {
+ GLINK_ERR(
+ "%s: intent for ch %d liid %d has no data buff\n",
+ __func__, rcid, intent_id);
+ err = true;
+ }
+ }
+
+ if (!err &&
+ (intent->intent_size - intent->write_offset < cmd.frag_size ||
+ intent->write_offset + cmd.size_remaining > intent->intent_size)) {
+ GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+ __func__,
+ cmd.frag_size,
+ cmd.size_remaining,
+ "will overflow ch",
+ rcid,
+ "intent",
+ intent_id);
+ err = true;
+ }
+
+ if (err) {
+ alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+ alignment -= cmd.frag_size;
+ while (cmd.frag_size) {
+ if (cmd.frag_size > FIFO_ALIGNMENT) {
+ fifo_read(einfo, trash, FIFO_ALIGNMENT);
+ cmd.frag_size -= FIFO_ALIGNMENT;
+ } else {
+ fifo_read(einfo, trash, cmd.frag_size);
+ cmd.frag_size = 0;
+ }
+ }
+ if (alignment)
+ fifo_read(einfo, trash, alignment);
+ return;
+ }
+ fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
+ intent->write_offset += cmd.frag_size;
+ intent->pkt_size += cmd.frag_size;
+
+ alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+ alignment -= cmd.frag_size;
+ if (alignment)
+ fifo_read(einfo, trash, alignment);
+
+ if (unlikely((cmd_id == TRACER_PKT_CMD ||
+ cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
+ tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+ intent->tracer_pkt = true;
+ }
+
+ einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+ rcid,
+ intent,
+ cmd.size_remaining ?
+ false : true);
+}
+
+/**
+ * queue_cmd() - queue a deferred command for later processing
+ * @einfo: Edge to queue commands on.
+ * @cmd: Command to queue.
+ * @data: Command specific data to queue with the command.
+ *
+ * Return: True if queuing was successful, false otherwise.
+ */
+static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
+{
+ struct command {
+ uint16_t id;
+ uint16_t param1;
+ uint32_t param2;
+ };
+ struct command *_cmd = cmd;
+ struct deferred_cmd *d_cmd;
+
+ d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC);
+ if (!d_cmd) {
+ GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id);
+ return false;
+ }
+ d_cmd->id = _cmd->id;
+ d_cmd->param1 = _cmd->param1;
+ d_cmd->param2 = _cmd->param2;
+ d_cmd->data = data;
+ list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
+ queue_kthread_work(&einfo->kworker, &einfo->kwork);
+ return true;
+}
+
+/**
+ * get_rx_fifo() - Find the rx fifo for an edge
+ * @einfo: Edge to find the fifo for.
+ *
+ * Return: True if fifo was found, false otherwise.
+ */
+static bool get_rx_fifo(struct edge_info *einfo)
+{
+ if (einfo->mailbox) {
+ einfo->rx_fifo = &einfo->mailbox->fifo[einfo->mailbox->tx_size];
+ einfo->rx_fifo_size = einfo->mailbox->rx_size;
+ } else {
+ einfo->rx_fifo = smem_get_entry(SMEM_GLINK_NATIVE_XPRT_FIFO_1,
+ &einfo->rx_fifo_size,
+ einfo->remote_proc_id,
+ SMEM_ITEM_CACHED_FLAG);
+ if (!einfo->rx_fifo)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * __rx_worker() - process received commands on a specific edge
+ * @einfo: Edge to process commands on.
+ * @atomic_ctx: Indicates if the caller is in atomic context and requires any
+ * non-atomic operations to be deferred.
+ */
+static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
+{
+ struct command {
+ uint16_t id;
+ uint16_t param1;
+ uint32_t param2;
+ };
+ struct intent_desc {
+ uint32_t size;
+ uint32_t id;
+ };
+ struct command cmd;
+ struct intent_desc intent;
+ struct intent_desc *intents;
+ int i;
+ bool granted;
+ unsigned long flags;
+ bool trigger_wakeup = false;
+ int rcu_id;
+ uint16_t rcid;
+ uint32_t name_len;
+ uint32_t len;
+ char *name;
+ char trash[FIFO_ALIGNMENT];
+ struct deferred_cmd *d_cmd;
+ void *cmd_data;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ if (unlikely(!einfo->rx_fifo)) {
+ if (!get_rx_fifo(einfo)) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+ einfo->in_ssr = false;
+ einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+ }
+
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+ if (!atomic_ctx) {
+ if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+ einfo->tx_resume_needed = false;
+ einfo->xprt_if.glink_core_if_ptr->tx_resume(
+ &einfo->xprt_if);
+ }
+ spin_lock_irqsave(&einfo->write_lock, flags);
+ if (waitqueue_active(&einfo->tx_blocked_queue)) {
+ einfo->tx_blocked_signal_sent = false;
+ trigger_wakeup = true;
+ }
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ if (trigger_wakeup)
+ wake_up_all(&einfo->tx_blocked_queue);
+ }
+
+
+ /*
+ * Access to the fifo needs to be synchronized, however only the calls
+ * into the core from process_rx_data() are compatible with an atomic
+ * processing context. For everything else, we need to do all the fifo
+ * processing, then unlock the lock for the call into the core. Data
+ * in the fifo is allowed to be processed immediately instead of being
+ * ordered with the commands because the channel open process prevents
+ * intents from being queued (which prevents data from being sent) until
+ * all the channel open commands are processed by the core, thus
+ * eliminating a race.
+ */
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ while (fifo_read_avail(einfo) ||
+ (!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
+ if (einfo->in_ssr)
+ break;
+
+ if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
+ d_cmd = list_first_entry(&einfo->deferred_cmds,
+ struct deferred_cmd, list_node);
+ list_del(&d_cmd->list_node);
+ cmd.id = d_cmd->id;
+ cmd.param1 = d_cmd->param1;
+ cmd.param2 = d_cmd->param2;
+ cmd_data = d_cmd->data;
+ kfree(d_cmd);
+ } else {
+ fifo_read(einfo, &cmd, sizeof(cmd));
+ cmd_data = NULL;
+ }
+
+ switch (cmd.id) {
+ case VERSION_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+ &einfo->xprt_if,
+ cmd.param1,
+ cmd.param2);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case VERSION_ACK_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+ &einfo->xprt_if,
+ cmd.param1,
+ cmd.param2);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case OPEN_CMD:
+ rcid = cmd.param1;
+ name_len = cmd.param2;
+
+ if (cmd_data) {
+ name = cmd_data;
+ } else {
+ len = ALIGN(name_len, FIFO_ALIGNMENT);
+ name = kmalloc(len, GFP_ATOMIC);
+ if (!name) {
+ pr_err("No memory available to rx ch open cmd name. Discarding cmd.\n");
+ while (len) {
+ fifo_read(einfo, trash,
+ FIFO_ALIGNMENT);
+ len -= FIFO_ALIGNMENT;
+ }
+ break;
+ }
+ fifo_read(einfo, name, len);
+ }
+ if (atomic_ctx) {
+ if (!queue_cmd(einfo, &cmd, name))
+ kfree(name);
+ break;
+ }
+
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+ &einfo->xprt_if,
+ rcid,
+ name,
+ SMEM_XPRT_ID);
+ kfree(name);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case CLOSE_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_ch_remote_close(
+ &einfo->xprt_if,
+ cmd.param1);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case OPEN_ACK_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+ &einfo->xprt_if,
+ cmd.param1,
+ SMEM_XPRT_ID);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case RX_INTENT_CMD:
+ /*
+ * One intent listed with this command. This is the
+ * expected case and can be optimized over the general
+ * case of an array of intents.
+ */
+ if (cmd.param2 == 1) {
+ if (cmd_data) {
+ intent.id = ((struct intent_desc *)
+ cmd_data)->id;
+ intent.size = ((struct intent_desc *)
+ cmd_data)->size;
+ kfree(cmd_data);
+ } else {
+ fifo_read(einfo, &intent,
+ sizeof(intent));
+ }
+ if (atomic_ctx) {
+ cmd_data = kmalloc(sizeof(intent),
+ GFP_ATOMIC);
+ if (!cmd_data) {
+ pr_err("%s: dropping cmd %d\n",
+ __func__,
+ cmd.id);
+ break;
+ }
+ ((struct intent_desc *)cmd_data)->id =
+ intent.id;
+ ((struct intent_desc *)cmd_data)->size =
+ intent.size;
+ if (!queue_cmd(einfo, &cmd, cmd_data))
+ kfree(cmd_data);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_put(
+ &einfo->xprt_if,
+ cmd.param1,
+ intent.id,
+ intent.size);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ }
+
+ /* Array of intents to process */
+ if (cmd_data) {
+ intents = cmd_data;
+ } else {
+ intents = kmalloc(sizeof(*intents) * cmd.param2,
+ GFP_ATOMIC);
+ if (!intents) {
+ for (i = 0; i < cmd.param2; ++i)
+ fifo_read(einfo, &intent,
+ sizeof(intent));
+ break;
+ }
+ fifo_read(einfo, intents,
+ sizeof(*intents) * cmd.param2);
+ }
+ if (atomic_ctx) {
+ if (!queue_cmd(einfo, &cmd, intents))
+ kfree(intents);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ for (i = 0; i < cmd.param2; ++i) {
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_put(
+ &einfo->xprt_if,
+ cmd.param1,
+ intents[i].id,
+ intents[i].size);
+ }
+ kfree(intents);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case RX_DONE_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+ &einfo->xprt_if,
+ cmd.param1,
+ cmd.param2,
+ false);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case RX_INTENT_REQ_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_req(
+ &einfo->xprt_if,
+ cmd.param1,
+ cmd.param2);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case RX_INTENT_REQ_ACK_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ granted = false;
+ if (cmd.param2 == 1)
+ granted = true;
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_rx_intent_req_ack(
+ &einfo->xprt_if,
+ cmd.param1,
+ granted);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case TX_DATA_CMD:
+ case TX_DATA_CONT_CMD:
+ case TRACER_PKT_CMD:
+ case TRACER_PKT_CONT_CMD:
+ process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
+ break;
+ case CLOSE_ACK_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+ &einfo->xprt_if,
+ cmd.param1);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case READ_NOTIF_CMD:
+ send_irq(einfo);
+ break;
+ case SIGNALS_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+ &einfo->xprt_if,
+ cmd.param1,
+ cmd.param2);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ case RX_DONE_W_REUSE_CMD:
+ if (atomic_ctx) {
+ queue_cmd(einfo, &cmd, NULL);
+ break;
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+ &einfo->xprt_if,
+ cmd.param1,
+ cmd.param2,
+ true);
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ break;
+ default:
+ pr_err("Unrecognized command: %d\n", cmd.id);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * rx_worker() - worker function to process received commands
+ * @work: kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(work, struct edge_info, kwork);
+ __rx_worker(einfo, false);
+}
+
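+/**
+ * irq_handler() - handle an incoming interrupt from the remote side
+ * @irq:	The irq line that fired.
+ * @priv:	The edge_info for the edge that received the interrupt.
+ *
+ * Resets the incoming interrupt, if a reset register is configured, and
+ * queues the rx worker so the fifo is processed in process context.
+ *
+ * Return: IRQ_HANDLED.
+ */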
+irqreturn_t irq_handler(int irq, void *priv)
+{
+ struct edge_info *einfo = (struct edge_info *)priv;
+
+ if (einfo->rx_reset_reg)
+ writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);
+
+ queue_kthread_work(&einfo->kworker, &einfo->kwork);
+ einfo->rx_irq_count++;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tx_cmd_version() - convert a version cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct command {
+ uint16_t id;
+ uint16_t version;
+ uint32_t features;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = VERSION_CMD;
+ cmd.version = version;
+ cmd.features = features;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features)
+{
+ struct command {
+ uint16_t id;
+ uint16_t version;
+ uint32_t features;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = VERSION_ACK_CMD;
+ cmd.version = version;
+ cmd.features = features;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - activate a negotiated version and feature set
+ * @if_ptr: The transport to configure.
+ * @version: The version to use.
+ * @features: The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct edge_info *einfo;
+ uint32_t ret;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+ }
+
+ ret = einfo->intentless ?
+ GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;
+
+ if (features & TRACER_PKT_FEATURE)
+ ret |= GCAP_TRACER_PKT;
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @name: The channel name to encode.
+ * @req_xprt: The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+ const char *name, uint16_t req_xprt)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t length;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t buf_size;
+ void *buf;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = OPEN_CMD;
+ cmd.lcid = lcid;
+ cmd.length = strlen(name) + 1;
+
+ buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("%s: malloc fail for %d size buf\n", __func__, buf_size);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENOMEM;
+ }
+
+ memcpy(buf, &cmd, sizeof(cmd));
+ memcpy(buf + sizeof(cmd), name, cmd.length);
+
+ fifo_tx(einfo, buf, buf_size);
+
+ kfree(buf);
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = CLOSE_CMD;
+ cmd.lcid = lcid;
+ cmd.reserved = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ * @xprt_resp: The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint16_t xprt_resp)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = OPEN_ACK_CMD;
+ cmd.rcid = rcid;
+ cmd.reserved = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = CLOSE_ACK_CMD;
+ cmd.rcid = rcid;
+ cmd.reserved = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * ssr() - process a subsystem restart notification of a transport
+ * @if_ptr: The transport to restart
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct deferred_cmd *cmd;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ BUG_ON(einfo->remote_proc_id == SMEM_RPM);
+
+ einfo->in_ssr = true;
+ wake_up_all(&einfo->tx_blocked_queue);
+
+ synchronize_srcu(&einfo->use_ref);
+
+ while (!list_empty(&einfo->deferred_cmds)) {
+ cmd = list_first_entry(&einfo->deferred_cmds,
+ struct deferred_cmd, list_node);
+ list_del(&cmd->list_node);
+ kfree(cmd->data);
+ kfree(cmd);
+ }
+
+ einfo->tx_resume_needed = false;
+ einfo->tx_blocked_signal_sent = false;
+ einfo->rx_fifo = NULL;
+ einfo->rx_fifo_size = 0;
+ einfo->tx_ch_desc->write_index = 0;
+ einfo->rx_ch_desc->read_index = 0;
+ einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+
+ return 0;
+}
+
+/**
+ * wait_link_down() - Check status of read/write indices
+ * @if_ptr: The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise
+ */
+int wait_link_down(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->tx_ch_desc->write_index == 0 &&
+ einfo->tx_ch_desc->read_index == 0 &&
+ einfo->rx_ch_desc->write_index == 0 &&
+ einfo->rx_ch_desc->read_index == 0)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * allocate_rx_intent() - allocate/reserve space for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @size: size of intent.
+ * @intent: Pointer to the intent structure.
+ *
+ * Assigns "data" to the created buffer, since the transport creates a linear
+ * buffer, and assigns "iovec" to the "intent" itself so that the data can be
+ * passed to a client that only receives vector buffers.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+ struct glink_core_rx_intent *intent)
+{
+ void *t;
+
+ t = kmalloc(size, GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ intent->data = t;
+ intent->iovec = (void *)intent;
+ intent->vprovider = rx_linear_vbuf_provider;
+ intent->pprovider = NULL;
+ return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ if (!intent || !intent->data)
+ return -EINVAL;
+
+ kfree(intent->data);
+ intent->data = NULL;
+ intent->iovec = NULL;
+ intent->vprovider = NULL;
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The intent size to encode.
+ * @liid: The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t count;
+ uint32_t size;
+ uint32_t liid;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return -EOPNOTSUPP;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = RX_INTENT_CMD;
+ cmd.lcid = lcid;
+ cmd.count = 1;
+ cmd.size = size;
+ cmd.liid = liid;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @liid: The local intent id to encode.
+ * @reuse: Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t liid;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+ cmd.lcid = lcid;
+ cmd.liid = liid;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t size;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return -EOPNOTSUPP;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+	cmd.id = RX_INTENT_REQ_CMD;
+ cmd.lcid = lcid;
+ cmd.size = size;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd
+ *			to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @granted: The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t response;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return -EOPNOTSUPP;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+	cmd.id = RX_INTENT_REQ_ACK_CMD;
+ cmd.lcid = lcid;
+ if (granted)
+ cmd.response = 1;
+ else
+ cmd.response = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - convert a signals cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @sigs: The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+ uint32_t sigs)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t sigs;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+	cmd.id = SIGNALS_CMD;
+ cmd.lcid = lcid;
+ cmd.sigs = sigs;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * poll() - poll for data on a channel
+ * @if_ptr: The transport the channel exists on.
+ * @lcid: The local channel id.
+ *
+ * Return: 0 if no data available, 1 if data available, or standard Linux
+ * error code on failure.
+ */
+static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ if (fifo_read_avail(einfo)) {
+ __rx_worker(einfo, true);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 1;
+ }
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * mask_rx_irq() - mask the receive irq for a channel
+ * @if_ptr: The transport the channel exists on.
+ * @lcid: The local channel id for the channel.
+ * @mask: True to mask the irq, false to unmask.
+ * @pstruct: Platform defined structure for handling the masking.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+ bool mask, void *pstruct)
+{
+ struct edge_info *einfo;
+ struct irq_chip *irq_chip;
+ struct irq_data *irq_data;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ irq_chip = irq_get_chip(einfo->irq_line);
+ if (!irq_chip) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENODEV;
+ }
+
+ irq_data = irq_get_irq_data(einfo->irq_line);
+ if (!irq_data) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENODEV;
+ }
+
+ if (mask) {
+ irq_chip->irq_mask(irq_data);
+ einfo->irq_disabled = true;
+ if (pstruct)
+ irq_set_affinity(einfo->irq_line, pstruct);
+ } else {
+ irq_chip->irq_unmask(irq_data);
+ einfo->irq_disabled = false;
+ }
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @cmd_id: The command ID to transmit.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t riid;
+ uint32_t size;
+ uint32_t size_left;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t size;
+ uint32_t zeros_size;
+ const void *data_start;
+ char zeros[FIFO_ALIGNMENT] = { 0 };
+ unsigned long flags;
+ size_t tx_size = 0;
+ int rcu_id;
+
+ if (pctx->size < pctx->size_remaining) {
+ GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
+ __func__);
+ pctx->size_remaining = pctx->size;
+ }
+ if (!pctx->size_remaining)
+ return 0;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ if (einfo->intentless &&
+ (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
+
+ if (cmd_id == TX_DATA_CMD) {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TX_DATA_CMD;
+ else
+ cmd.id = TX_DATA_CONT_CMD;
+ } else {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TRACER_PKT_CMD;
+ else
+ cmd.id = TRACER_PKT_CONT_CMD;
+ }
+ cmd.lcid = lcid;
+ cmd.riid = pctx->riid;
+ data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+ &tx_size);
+ if (!data_start) {
+ GLINK_ERR("%s: invalid data_start\n", __func__);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&einfo->write_lock, flags);
+ size = fifo_write_avail(einfo);
+
+ /* Intentless clients expect a complete commit or instant failure */
+ if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENOSPC;
+ }
+
+ /* Need enough space to write the command and some data */
+ if (size <= sizeof(cmd)) {
+ einfo->tx_resume_needed = true;
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EAGAIN;
+ }
+ size -= sizeof(cmd);
+ if (size > tx_size)
+ size = tx_size;
+
+ cmd.size = size;
+ pctx->size_remaining -= size;
+ cmd.size_left = pctx->size_remaining;
+ zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
+ if (cmd.id == TRACER_PKT_CMD)
+ tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+ fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size, zeros,
+ zeros_size);
+ GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+ "<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+ cmd.size_left);
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+	/* Fake tx_done for intentless since it's not supported over the wire */
+ if (einfo->intentless) {
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ cmd.id = RX_DONE_CMD;
+ cmd.lcid = pctx->rcid;
+ queue_cmd(einfo, &cmd, NULL);
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ }
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return cmd.size;
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ * votes to be applied
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+ struct glink_transport_if *if_ptr,
+ uint32_t state)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
+ return (unsigned long)ERR_PTR(-EINVAL);
+
+ return einfo->ramp_time_us[state];
+}
+
+/**
+ * power_vote() - Update the power votes to meet qos requirement
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+ return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr: The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+ return 0;
+}
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr:	The transport for which features are negotiated.
+ * @version: The version negotiated.
+ * @features: The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features)
+{
+ return features & version->features;
+}
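+
+/*
+ * Example: with versions[] advertising TRACER_PKT_FEATURE, a remote
+ * request for (TRACER_PKT_FEATURE | <unknown bits>) negotiates down to
+ * just TRACER_PKT_FEATURE; feature bits this version does not support
+ * are masked off.
+ */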
+
+/**
+ * init_xprt_if() - initialize the xprt_if for an edge
+ * @einfo: The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+ einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+ einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+ einfo->xprt_if.set_version = set_version;
+ einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+ einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+ einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+ einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+ einfo->xprt_if.ssr = ssr;
+ einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+ einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+ einfo->xprt_if.tx = tx;
+ einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+ einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+ tx_cmd_remote_rx_intent_req_ack;
+ einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+ einfo->xprt_if.poll = poll;
+ einfo->xprt_if.mask_rx_irq = mask_rx_irq;
+ einfo->xprt_if.wait_link_down = wait_link_down;
+ einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+ einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+ einfo->xprt_if.power_vote = power_vote;
+ einfo->xprt_if.power_unvote = power_unvote;
+}
+
+/**
+ * init_xprt_cfg() - initialize the xprt_cfg for an edge
+ * @einfo: The edge to initialize.
+ * @name: The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+ einfo->xprt_cfg.name = XPRT_NAME;
+ einfo->xprt_cfg.edge = name;
+ einfo->xprt_cfg.versions = versions;
+ einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+ einfo->xprt_cfg.max_cid = SZ_64K;
+ einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
+/**
+ * parse_qos_dt_params() - Parse the power states from DT
+ * @node:	Reference to the device tree node for a specific edge.
+ * @einfo:	Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_qos_dt_params(struct device_node *node,
+ struct edge_info *einfo)
+{
+ int rc;
+ int i;
+ char *key;
+ uint32_t *arr32;
+ uint32_t num_states;
+
+ key = "qcom,ramp-time";
+ if (!of_find_property(node, key, &num_states))
+ return -ENODEV;
+
+ num_states /= sizeof(uint32_t);
+
+ einfo->num_pw_states = num_states;
+
+ arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
+ if (!arr32)
+ return -ENOMEM;
+
+ einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!einfo->ramp_time_us) {
+ rc = -ENOMEM;
+ goto mem_alloc_fail;
+ }
+
+ rc = of_property_read_u32_array(node, key, arr32, num_states);
+ if (rc) {
+ rc = -ENODEV;
+ goto invalid_key;
+ }
+ for (i = 0; i < num_states; i++)
+ einfo->ramp_time_us[i] = arr32[i];
+
+ rc = 0;
+ return rc;
+
+invalid_key:
+ kfree(einfo->ramp_time_us);
+mem_alloc_fail:
+ kfree(arr32);
+ return rc;
+}
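+
+/*
+ * Illustrative device tree fragment for the qos parameters parsed above
+ * (placeholder values):
+ *
+ *	qcom,ramp-time = <10 50 100>;
+ *
+ * This describes three power states, indexed by state, whose votes take
+ * 10, 50 and 100 microseconds respectively to ramp.
+ */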
+
+/**
+ * subsys_name_to_id() - translate a subsystem name to a processor id
+ * @name: The subsystem name to look up.
+ *
+ * Return: The processor id corresponding to @name or standard Linux error code.
+ */
+static int subsys_name_to_id(const char *name)
+{
+ if (!name)
+ return -ENODEV;
+
+ if (!strcmp(name, "apss"))
+ return SMEM_APPS;
+ if (!strcmp(name, "dsps"))
+ return SMEM_DSPS;
+ if (!strcmp(name, "lpass"))
+ return SMEM_Q6;
+ if (!strcmp(name, "mpss"))
+ return SMEM_MODEM;
+ if (!strcmp(name, "rpm"))
+ return SMEM_RPM;
+ if (!strcmp(name, "wcnss"))
+ return SMEM_WCNSS;
+ if (!strcmp(name, "spss"))
+ return SMEM_SPSS;
+ return -ENODEV;
+}
+
+static int glink_smem_native_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct device_node *phandle_node;
+ struct edge_info *einfo;
+ int rc;
+ char *key;
+ const char *subsys_name;
+ uint32_t irq_line;
+ uint32_t irq_mask;
+ struct resource *r;
+
+ node = pdev->dev.of_node;
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ pr_err("%s: edge_info allocation failed\n", __func__);
+ rc = -ENOMEM;
+ goto edge_info_alloc_fail;
+ }
+
+ key = "label";
+ subsys_name = of_get_property(node, key, NULL);
+ if (!subsys_name) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "interrupts";
+ irq_line = irq_of_parse_and_map(node, 0);
+ if (!irq_line) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "qcom,irq-mask";
+ rc = of_property_read_u32(node, key, &irq_mask);
+ if (rc) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "irq-reg-base";
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!r) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ if (subsys_name_to_id(subsys_name) == -ENODEV) {
+ pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+ rc = -ENODEV;
+ goto invalid_key;
+ }
+ einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+ init_xprt_cfg(einfo, subsys_name);
+ init_xprt_if(einfo);
+ spin_lock_init(&einfo->write_lock);
+ init_waitqueue_head(&einfo->tx_blocked_queue);
+ init_kthread_work(&einfo->kwork, rx_worker);
+ init_kthread_worker(&einfo->kworker);
+ einfo->read_from_fifo = read_from_fifo;
+ einfo->write_to_fifo = write_to_fifo;
+ init_srcu_struct(&einfo->use_ref);
+ spin_lock_init(&einfo->rx_lock);
+ INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+ mutex_lock(&probe_lock);
+ if (edge_infos[einfo->remote_proc_id]) {
+ pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+ subsys_name);
+ rc = -ENODEV;
+ mutex_unlock(&probe_lock);
+ goto invalid_key;
+ }
+ edge_infos[einfo->remote_proc_id] = einfo;
+ mutex_unlock(&probe_lock);
+
+ einfo->out_irq_mask = irq_mask;
+ einfo->out_irq_reg = ioremap_nocache(r->start, resource_size(r));
+ if (!einfo->out_irq_reg) {
+ pr_err("%s: unable to map irq reg\n", __func__);
+ rc = -ENOMEM;
+ goto ioremap_fail;
+ }
+
+ einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+ "smem_native_%s", subsys_name);
+ if (IS_ERR(einfo->task)) {
+ rc = PTR_ERR(einfo->task);
+ pr_err("%s: kthread_run failed %d\n", __func__, rc);
+ goto kthread_fail;
+ }
+
+ einfo->tx_ch_desc = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
+ SMEM_CH_DESC_SIZE,
+ einfo->remote_proc_id,
+ 0);
+ if (PTR_ERR(einfo->tx_ch_desc) == -EPROBE_DEFER) {
+ rc = -EPROBE_DEFER;
+ goto smem_alloc_fail;
+ }
+ if (!einfo->tx_ch_desc) {
+ pr_err("%s: smem alloc of ch descriptor failed\n", __func__);
+ rc = -ENOMEM;
+ goto smem_alloc_fail;
+ }
+ einfo->rx_ch_desc = einfo->tx_ch_desc + 1;
+
+ einfo->tx_fifo_size = SZ_16K;
+ einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
+ einfo->tx_fifo_size,
+ einfo->remote_proc_id,
+ SMEM_ITEM_CACHED_FLAG);
+ if (!einfo->tx_fifo) {
+ pr_err("%s: smem alloc of tx fifo failed\n", __func__);
+ rc = -ENOMEM;
+ goto smem_alloc_fail;
+ }
+
+ key = "qcom,qos-config";
+ phandle_node = of_parse_phandle(node, key, 0);
+ if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
+ &einfo->xprt_cfg)))
+ parse_qos_dt_params(node, einfo);
+
+ rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+ if (rc == -EPROBE_DEFER)
+ goto reg_xprt_fail;
+ if (rc) {
+ pr_err("%s: glink core register transport failed: %d\n",
+ __func__, rc);
+ goto reg_xprt_fail;
+ }
+
+ einfo->irq_line = irq_line;
+ rc = request_irq(irq_line, irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+ node->name, einfo);
+ if (rc < 0) {
+ pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+ rc);
+ goto request_irq_fail;
+ }
+ rc = enable_irq_wake(irq_line);
+ if (rc < 0)
+ pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+ irq_line);
+
+ register_debugfs_info(einfo);
+ /* fake an interrupt on this edge to see if the remote side is up */
+ irq_handler(0, einfo);
+ return 0;
+
+request_irq_fail:
+ glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+smem_alloc_fail:
+ flush_kthread_worker(&einfo->kworker);
+ kthread_stop(einfo->task);
+ einfo->task = NULL;
+kthread_fail:
+ iounmap(einfo->out_irq_reg);
+ioremap_fail:
+ mutex_lock(&probe_lock);
+ edge_infos[einfo->remote_proc_id] = NULL;
+ mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+ kfree(einfo);
+edge_info_alloc_fail:
+ return rc;
+}
+
+static int glink_rpm_native_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct edge_info *einfo;
+ int rc;
+ char *key;
+ const char *subsys_name;
+ uint32_t irq_line;
+ uint32_t irq_mask;
+ struct resource *irq_r;
+ struct resource *msgram_r;
+ void __iomem *msgram;
+ char toc[RPM_TOC_SIZE];
+ uint32_t *tocp;
+ uint32_t num_toc_entries;
+
+ node = pdev->dev.of_node;
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ pr_err("%s: edge_info allocation failed\n", __func__);
+ rc = -ENOMEM;
+ goto edge_info_alloc_fail;
+ }
+
+ subsys_name = "rpm";
+
+ key = "interrupts";
+ irq_line = irq_of_parse_and_map(node, 0);
+ if (!irq_line) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "qcom,irq-mask";
+ rc = of_property_read_u32(node, key, &irq_mask);
+ if (rc) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "irq-reg-base";
+ irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!irq_r) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "msgram";
+ msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!msgram_r) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ if (subsys_name_to_id(subsys_name) == -ENODEV) {
+ pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+ rc = -ENODEV;
+ goto invalid_key;
+ }
+ einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+ init_xprt_cfg(einfo, subsys_name);
+ init_xprt_if(einfo);
+ spin_lock_init(&einfo->write_lock);
+ init_waitqueue_head(&einfo->tx_blocked_queue);
+ init_kthread_work(&einfo->kwork, rx_worker);
+ init_kthread_worker(&einfo->kworker);
+ einfo->intentless = true;
+ einfo->read_from_fifo = memcpy32_fromio;
+ einfo->write_to_fifo = memcpy32_toio;
+ init_srcu_struct(&einfo->use_ref);
+ spin_lock_init(&einfo->rx_lock);
+ INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+ mutex_lock(&probe_lock);
+ if (edge_infos[einfo->remote_proc_id]) {
+ pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+ subsys_name);
+ rc = -ENODEV;
+ mutex_unlock(&probe_lock);
+ goto invalid_key;
+ }
+ edge_infos[einfo->remote_proc_id] = einfo;
+ mutex_unlock(&probe_lock);
+
+ einfo->out_irq_mask = irq_mask;
+ einfo->out_irq_reg = ioremap_nocache(irq_r->start,
+ resource_size(irq_r));
+ if (!einfo->out_irq_reg) {
+ pr_err("%s: unable to map irq reg\n", __func__);
+ rc = -ENOMEM;
+ goto irq_ioremap_fail;
+ }
+
+ msgram = ioremap_nocache(msgram_r->start, resource_size(msgram_r));
+ if (!msgram) {
+ pr_err("%s: unable to map msgram\n", __func__);
+ rc = -ENOMEM;
+ goto msgram_ioremap_fail;
+ }
+
+ einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+ "smem_native_%s", subsys_name);
+ if (IS_ERR(einfo->task)) {
+ rc = PTR_ERR(einfo->task);
+ pr_err("%s: kthread_run failed %d\n", __func__, rc);
+ goto kthread_fail;
+ }
+
+ memcpy32_fromio(toc, msgram + resource_size(msgram_r) - RPM_TOC_SIZE,
+ RPM_TOC_SIZE);
+ tocp = (uint32_t *)toc;
+ if (*tocp != RPM_TOC_ID) {
+ rc = -ENODEV;
+ pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
+ goto toc_init_fail;
+ }
+ ++tocp;
+ num_toc_entries = *tocp;
+ if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
+ rc = -ENODEV;
+ pr_err("%s: %d is too many toc entries\n", __func__,
+ num_toc_entries);
+ goto toc_init_fail;
+ }
+ ++tocp;
+
+ for (rc = 0; rc < num_toc_entries; ++rc) {
+ if (*tocp != RPM_TX_FIFO_ID) {
+ tocp += 3;
+ continue;
+ }
+ ++tocp;
+ einfo->tx_ch_desc = msgram + *tocp;
+ einfo->tx_fifo = einfo->tx_ch_desc + 1;
+ if ((uintptr_t)einfo->tx_fifo >
+ (uintptr_t)(msgram + resource_size(msgram_r))) {
+ pr_err("%s: invalid tx fifo address\n", __func__);
+ einfo->tx_fifo = NULL;
+ break;
+ }
+ ++tocp;
+ einfo->tx_fifo_size = *tocp;
+ if (einfo->tx_fifo_size > resource_size(msgram_r) ||
+ (uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
+ (uintptr_t)(msgram + resource_size(msgram_r))) {
+ pr_err("%s: invalid tx fifo size\n", __func__);
+ einfo->tx_fifo = NULL;
+ break;
+ }
+ break;
+ }
+ if (!einfo->tx_fifo) {
+ rc = -ENODEV;
+ pr_err("%s: tx fifo not found\n", __func__);
+ goto toc_init_fail;
+ }
+
+ tocp = (uint32_t *)toc;
+ tocp += 2;
+ for (rc = 0; rc < num_toc_entries; ++rc) {
+ if (*tocp != RPM_RX_FIFO_ID) {
+ tocp += 3;
+ continue;
+ }
+ ++tocp;
+ einfo->rx_ch_desc = msgram + *tocp;
+ einfo->rx_fifo = einfo->rx_ch_desc + 1;
+ if ((uintptr_t)einfo->rx_fifo >
+ (uintptr_t)(msgram + resource_size(msgram_r))) {
+ pr_err("%s: invalid rx fifo address\n", __func__);
+ einfo->rx_fifo = NULL;
+ break;
+ }
+ ++tocp;
+ einfo->rx_fifo_size = *tocp;
+ if (einfo->rx_fifo_size > resource_size(msgram_r) ||
+ (uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
+ (uintptr_t)(msgram + resource_size(msgram_r))) {
+ pr_err("%s: invalid rx fifo size\n", __func__);
+ einfo->rx_fifo = NULL;
+ break;
+ }
+ break;
+ }
+ if (!einfo->rx_fifo) {
+ rc = -ENODEV;
+ pr_err("%s: rx fifo not found\n", __func__);
+ goto toc_init_fail;
+ }
+
+ einfo->tx_ch_desc->write_index = 0;
+ einfo->rx_ch_desc->read_index = 0;
+
+ rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+ if (rc == -EPROBE_DEFER)
+ goto reg_xprt_fail;
+ if (rc) {
+ pr_err("%s: glink core register transport failed: %d\n",
+ __func__, rc);
+ goto reg_xprt_fail;
+ }
+
+ einfo->irq_line = irq_line;
+ rc = request_irq(irq_line, irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+ node->name, einfo);
+ if (rc < 0) {
+ pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+ rc);
+ goto request_irq_fail;
+ }
+ rc = enable_irq_wake(irq_line);
+ if (rc < 0)
+ pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+ irq_line);
+
+ register_debugfs_info(einfo);
+ einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+ return 0;
+
+request_irq_fail:
+ glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+toc_init_fail:
+ flush_kthread_worker(&einfo->kworker);
+ kthread_stop(einfo->task);
+ einfo->task = NULL;
+kthread_fail:
+ iounmap(msgram);
+msgram_ioremap_fail:
+ iounmap(einfo->out_irq_reg);
+irq_ioremap_fail:
+ mutex_lock(&probe_lock);
+ edge_infos[einfo->remote_proc_id] = NULL;
+ mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+ kfree(einfo);
+edge_info_alloc_fail:
+ return rc;
+}
+
+static int glink_mailbox_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct edge_info *einfo;
+ int rc;
+ char *key;
+ const char *subsys_name;
+ uint32_t irq_line;
+ uint32_t irq_mask;
+ struct resource *irq_r;
+ struct resource *mbox_loc_r;
+ struct resource *mbox_size_r;
+ struct resource *rx_reset_r;
+	void __iomem *mbox_loc;
+	void __iomem *mbox_size;
+ struct mailbox_config_info *mbox_cfg;
+ uint32_t mbox_cfg_size;
+ phys_addr_t cfg_p_addr;
+
+ node = pdev->dev.of_node;
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ rc = -ENOMEM;
+ goto edge_info_alloc_fail;
+ }
+
+ key = "label";
+ subsys_name = of_get_property(node, key, NULL);
+ if (!subsys_name) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "interrupts";
+ irq_line = irq_of_parse_and_map(node, 0);
+ if (!irq_line) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "qcom,irq-mask";
+ rc = of_property_read_u32(node, key, &irq_mask);
+ if (rc) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "irq-reg-base";
+ irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!irq_r) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "mbox-loc-addr";
+ mbox_loc_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!mbox_loc_r) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "mbox-loc-size";
+ mbox_size_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!mbox_size_r) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "irq-rx-reset";
+ rx_reset_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!rx_reset_r) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "qcom,tx-ring-size";
+ rc = of_property_read_u32(node, key, &einfo->tx_fifo_size);
+ if (rc) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ key = "qcom,rx-ring-size";
+ rc = of_property_read_u32(node, key, &einfo->rx_fifo_size);
+ if (rc) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+
+ if (subsys_name_to_id(subsys_name) == -ENODEV) {
+ pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+ rc = -ENODEV;
+ goto invalid_key;
+ }
+ einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+ init_xprt_cfg(einfo, subsys_name);
+ einfo->xprt_cfg.name = "mailbox";
+ init_xprt_if(einfo);
+ spin_lock_init(&einfo->write_lock);
+ init_waitqueue_head(&einfo->tx_blocked_queue);
+ init_kthread_work(&einfo->kwork, rx_worker);
+ init_kthread_worker(&einfo->kworker);
+ einfo->read_from_fifo = read_from_fifo;
+ einfo->write_to_fifo = write_to_fifo;
+ init_srcu_struct(&einfo->use_ref);
+ spin_lock_init(&einfo->rx_lock);
+ INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+ mutex_lock(&probe_lock);
+ if (edge_infos[einfo->remote_proc_id]) {
+ pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+ subsys_name);
+ rc = -ENODEV;
+ mutex_unlock(&probe_lock);
+ goto invalid_key;
+ }
+ edge_infos[einfo->remote_proc_id] = einfo;
+ mutex_unlock(&probe_lock);
+
+ einfo->out_irq_mask = irq_mask;
+ einfo->out_irq_reg = ioremap_nocache(irq_r->start,
+ resource_size(irq_r));
+ if (!einfo->out_irq_reg) {
+ pr_err("%s: unable to map irq reg\n", __func__);
+ rc = -ENOMEM;
+ goto irq_ioremap_fail;
+ }
+
+ mbox_loc = ioremap_nocache(mbox_loc_r->start,
+ resource_size(mbox_loc_r));
+ if (!mbox_loc) {
+ pr_err("%s: unable to map mailbox location reg\n", __func__);
+ rc = -ENOMEM;
+ goto mbox_loc_ioremap_fail;
+ }
+
+ mbox_size = ioremap_nocache(mbox_size_r->start,
+ resource_size(mbox_size_r));
+ if (!mbox_size) {
+ pr_err("%s: unable to map mailbox size reg\n", __func__);
+ rc = -ENOMEM;
+ goto mbox_size_ioremap_fail;
+ }
+
+ einfo->rx_reset_reg = ioremap_nocache(rx_reset_r->start,
+ resource_size(rx_reset_r));
+ if (!einfo->rx_reset_reg) {
+ pr_err("%s: unable to map rx reset reg\n", __func__);
+ rc = -ENOMEM;
+ goto rx_reset_ioremap_fail;
+ }
+
+ einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+ "smem_native_%s", subsys_name);
+ if (IS_ERR(einfo->task)) {
+ rc = PTR_ERR(einfo->task);
+ pr_err("%s: kthread_run failed %d\n", __func__, rc);
+ goto kthread_fail;
+ }
+
+ mbox_cfg_size = sizeof(*mbox_cfg) + einfo->tx_fifo_size +
+ einfo->rx_fifo_size;
+ mbox_cfg = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
+ mbox_cfg_size,
+ einfo->remote_proc_id,
+ 0);
+ if (PTR_ERR(mbox_cfg) == -EPROBE_DEFER) {
+ rc = -EPROBE_DEFER;
+ goto smem_alloc_fail;
+ }
+ if (!mbox_cfg) {
+ pr_err("%s: smem alloc of mailbox struct failed\n", __func__);
+ rc = -ENOMEM;
+ goto smem_alloc_fail;
+ }
+ einfo->mailbox = mbox_cfg;
+ einfo->tx_ch_desc = (struct channel_desc *)(&mbox_cfg->tx_read_index);
+ einfo->rx_ch_desc = (struct channel_desc *)(&mbox_cfg->rx_read_index);
+ mbox_cfg->tx_size = einfo->tx_fifo_size;
+ mbox_cfg->rx_size = einfo->rx_fifo_size;
+ einfo->tx_fifo = &mbox_cfg->fifo[0];
+
+ rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+ if (rc == -EPROBE_DEFER)
+ goto reg_xprt_fail;
+ if (rc) {
+ pr_err("%s: glink core register transport failed: %d\n",
+ __func__, rc);
+ goto reg_xprt_fail;
+ }
+
+ einfo->irq_line = irq_line;
+ rc = request_irq(irq_line, irq_handler,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | IRQF_SHARED,
+ node->name, einfo);
+ if (rc < 0) {
+ pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+ rc);
+ goto request_irq_fail;
+ }
+ rc = enable_irq_wake(irq_line);
+ if (rc < 0)
+ pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+ irq_line);
+
+ register_debugfs_info(einfo);
+
+ writel_relaxed(mbox_cfg_size, mbox_size);
+ cfg_p_addr = smem_virt_to_phys(mbox_cfg);
+ writel_relaxed(lower_32_bits(cfg_p_addr), mbox_loc);
+ writel_relaxed(upper_32_bits(cfg_p_addr), mbox_loc + 4);
+ send_irq(einfo);
+ iounmap(mbox_size);
+ iounmap(mbox_loc);
+ return 0;
+
+request_irq_fail:
+ glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+smem_alloc_fail:
+ flush_kthread_worker(&einfo->kworker);
+ kthread_stop(einfo->task);
+ einfo->task = NULL;
+kthread_fail:
+ iounmap(einfo->rx_reset_reg);
+rx_reset_ioremap_fail:
+ iounmap(mbox_size);
+mbox_size_ioremap_fail:
+ iounmap(mbox_loc);
+mbox_loc_ioremap_fail:
+ iounmap(einfo->out_irq_reg);
+irq_ioremap_fail:
+ mutex_lock(&probe_lock);
+ edge_infos[einfo->remote_proc_id] = NULL;
+ mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+ kfree(einfo);
+edge_info_alloc_fail:
+ return rc;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * debug_edge() - generates formatted text output displaying current edge state
+ * @s: File to send the output to.
+ */
+static void debug_edge(struct seq_file *s)
+{
+ struct edge_info *einfo;
+ struct glink_dbgfs_data *dfs_d;
+
+ dfs_d = s->private;
+ einfo = dfs_d->priv_data;
+
+/*
+ * formatted, human readable edge state output, ie:
+ * TX/RX fifo information:
+ID|EDGE |TX READ |TX WRITE |TX SIZE |RX READ |RX WRITE |RX SIZE
+-------------------------------------------------------------------------------
+01|mpss |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
+ *
+ * Interrupt information:
+ * EDGE |TX INT |RX INT
+ * --------------------------------
+ * mpss |0x00000006|0x00000008
+ */
+ seq_puts(s, "TX/RX fifo information:\n");
+ seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
+ "ID",
+ "EDGE",
+ "TX READ",
+ "TX WRITE",
+ "TX SIZE",
+ "RX READ",
+ "RX WRITE",
+ "RX SIZE");
+ seq_puts(s,
+ "-------------------------------------------------------------------------------\n");
+ if (!einfo)
+ return;
+
+ seq_printf(s, "%02i|%-10s|", einfo->remote_proc_id,
+ einfo->xprt_cfg.edge);
+ if (!einfo->rx_fifo)
+ seq_puts(s, "Link Not Up\n");
+ else
+ seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
+ einfo->tx_ch_desc->read_index,
+ einfo->tx_ch_desc->write_index,
+ einfo->tx_fifo_size,
+ einfo->rx_ch_desc->read_index,
+ einfo->rx_ch_desc->write_index,
+ einfo->rx_fifo_size);
+
+ seq_puts(s, "\nInterrupt information:\n");
+ seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
+ seq_puts(s, "--------------------------------\n");
+ seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
+ einfo->tx_irq_count,
+ einfo->rx_irq_count);
+}
+
+/**
+ * register_debugfs_info() - initialize debugfs device entries
+ * @einfo: Pointer to specific edge_info for which register is called.
+ */
+static void register_debugfs_info(struct edge_info *einfo)
+{
+ struct glink_dbgfs dfs;
+ char *curr_dir_name;
+ int dir_name_len;
+
+ dir_name_len = strlen(einfo->xprt_cfg.edge) +
+ strlen(einfo->xprt_cfg.name) + 2;
+ curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
+ if (!curr_dir_name) {
+ GLINK_ERR("%s: Memory allocation failed\n", __func__);
+ return;
+ }
+
+ snprintf(curr_dir_name, dir_name_len, "%s_%s",
+ einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
+ dfs.curr_name = curr_dir_name;
+ dfs.par_name = "xprt";
+ dfs.b_dir_create = false;
+ glink_debugfs_create("XPRT_INFO", debug_edge,
+ &dfs, einfo, false);
+ kfree(curr_dir_name);
+}
+
+#else
+static void register_debugfs_info(struct edge_info *einfo)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static struct of_device_id smem_match_table[] = {
+ { .compatible = "qcom,glink-smem-native-xprt" },
+ {},
+};
+
+static struct platform_driver glink_smem_native_driver = {
+ .probe = glink_smem_native_probe,
+ .driver = {
+ .name = "msm_glink_smem_native_xprt",
+ .owner = THIS_MODULE,
+ .of_match_table = smem_match_table,
+ },
+};
+
+static struct of_device_id rpm_match_table[] = {
+ { .compatible = "qcom,glink-rpm-native-xprt" },
+ {},
+};
+
+static struct platform_driver glink_rpm_native_driver = {
+ .probe = glink_rpm_native_probe,
+ .driver = {
+ .name = "msm_glink_rpm_native_xprt",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_match_table,
+ },
+};
+
+static struct of_device_id mailbox_match_table[] = {
+ { .compatible = "qcom,glink-mailbox-xprt" },
+ {},
+};
+
+static struct platform_driver glink_mailbox_driver = {
+ .probe = glink_mailbox_probe,
+ .driver = {
+ .name = "msm_glink_mailbox_xprt",
+ .owner = THIS_MODULE,
+ .of_match_table = mailbox_match_table,
+ },
+};
+
+static int __init glink_smem_native_xprt_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&glink_smem_native_driver);
+ if (rc) {
+ pr_err("%s: glink_smem_native_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = platform_driver_register(&glink_rpm_native_driver);
+ if (rc) {
+ pr_err("%s: glink_rpm_native_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = platform_driver_register(&glink_mailbox_driver);
+ if (rc) {
+ pr_err("%s: glink_mailbox_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
+arch_initcall(glink_smem_native_xprt_init);
+
+MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
new file mode 100644
index 000000000000..9f56f4b3135f
--- /dev/null
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -0,0 +1,975 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/random.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include "glink_private.h"
+
+#define GLINK_SSR_REPLY_TIMEOUT HZ
+#define GLINK_SSR_EVENT_INIT ~0
+#define NUM_LOG_PAGES 3
+
+#define GLINK_SSR_LOG(x...) do { \
+ if (glink_ssr_log_ctx) \
+ ipc_log_string(glink_ssr_log_ctx, x); \
+} while (0)
+
+#define GLINK_SSR_ERR(x...) do { \
+ pr_err(x); \
+ GLINK_SSR_LOG(x); \
+} while (0)
+
+static void *glink_ssr_log_ctx;
+
+/* Global restart counter */
+static uint32_t sequence_number;
+
+/* Flag indicating if responses were received for all SSR notifications */
+static bool notifications_successful;
+
+/* Completion for setting notifications_successful */
+struct completion notifications_successful_complete;
+
+/**
+ * struct restart_notifier_block - restart notifier wrapper structure
+ * @subsystem: the name of the subsystem as recognized by the SSR framework
+ * @nb: notifier block structure used by the SSR framework
+ */
+struct restart_notifier_block {
+ const char *subsystem;
+ struct notifier_block nb;
+};
+
+/**
+ * struct configure_and_open_ch_work - Work structure used for opening
+ *				glink_ssr channels
+ * @edge: The G-Link edge obtained from the link state callback
+ * @transport: The G-Link transport obtained from the link state callback
+ * @link_state: The link state obtained from the link state callback
+ * @ss_info: Subsystem information structure containing the info for this
+ *	     callback
+ * @work: Work structure
+ */
+struct configure_and_open_ch_work {
+ char edge[GLINK_NAME_SIZE];
+ char transport[GLINK_NAME_SIZE];
+ enum glink_link_state link_state;
+ struct subsys_info *ss_info;
+ struct work_struct work;
+};
+
+/**
+ * struct close_ch_work - Work structure used for closing glink_ssr channels
+ * @edge: The G-Link edge name for the channel being closed
+ * @handle: G-Link channel handle to be closed
+ * @work: Work structure
+ */
+struct close_ch_work {
+ char edge[GLINK_NAME_SIZE];
+ void *handle;
+ struct work_struct work;
+};
+
+static int glink_ssr_restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+static void delete_ss_info_notify_list(struct subsys_info *ss_info);
+static int configure_and_open_channel(struct subsys_info *ss_info);
+static struct workqueue_struct *glink_ssr_wq;
+
+static LIST_HEAD(subsystem_list);
+static atomic_t responses_remaining = ATOMIC_INIT(0);
+static wait_queue_head_t waitqueue;
+
+static void link_state_cb_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct configure_and_open_ch_work *ch_open_work =
+ container_of(work, struct configure_and_open_ch_work, work);
+ struct subsys_info *ss_info = ch_open_work->ss_info;
+
+ GLINK_SSR_LOG("<SSR> %s: LINK STATE[%d] %s:%s\n", __func__,
+ ch_open_work->link_state, ch_open_work->edge,
+ ch_open_work->transport);
+
+ if (ss_info && ch_open_work->link_state == GLINK_LINK_STATE_UP) {
+ spin_lock_irqsave(&ss_info->link_up_lock, flags);
+ if (!ss_info->link_up) {
+ ss_info->link_up = true;
+ spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+ if (!configure_and_open_channel(ss_info)) {
+ glink_unregister_link_state_cb(
+ ss_info->link_state_handle);
+ ss_info->link_state_handle = NULL;
+ }
+ kfree(ch_open_work);
+ return;
+ }
+ spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+ } else {
+ if (ss_info) {
+ spin_lock_irqsave(&ss_info->link_up_lock, flags);
+ ss_info->link_up = false;
+ spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+ ss_info->handle = NULL;
+ } else {
+ GLINK_SSR_ERR("<SSR> %s: ss_info is NULL\n", __func__);
+ }
+ }
+
+ kfree(ch_open_work);
+}
+
+/**
+ * glink_ssr_link_state_cb() - Callback to receive link state updates
+ * @cb_info:	Information containing link & its state.
+ * @priv:	Private data passed during the link state registration.
+ *
+ * This function is called by the G-Link core to notify the glink_ssr module
+ * of link state updates. It is registered with the G-Link core by the
+ * glink_ssr module through glink_register_link_state_cb().
+ */
+static void glink_ssr_link_state_cb(struct glink_link_state_cb_info *cb_info,
+ void *priv)
+{
+ struct subsys_info *ss_info;
+ struct configure_and_open_ch_work *open_ch_work;
+
+ if (!cb_info) {
+ GLINK_SSR_ERR("<SSR> %s: Missing cb_data\n", __func__);
+ return;
+ }
+
+ ss_info = get_info_for_edge(cb_info->edge);
+
+ open_ch_work = kmalloc(sizeof(*open_ch_work), GFP_KERNEL);
+ if (!open_ch_work) {
+ GLINK_SSR_ERR("<SSR> %s: Could not allocate open_ch_work\n",
+ __func__);
+ return;
+ }
+
+ strlcpy(open_ch_work->edge, cb_info->edge, GLINK_NAME_SIZE);
+ strlcpy(open_ch_work->transport, cb_info->transport, GLINK_NAME_SIZE);
+ open_ch_work->link_state = cb_info->link_state;
+ open_ch_work->ss_info = ss_info;
+
+ INIT_WORK(&open_ch_work->work, link_state_cb_worker);
+ queue_work(glink_ssr_wq, &open_ch_work->work);
+}
+
+/**
+ * glink_ssr_notify_rx() - RX Notification callback
+ * @handle: G-Link channel handle
+ * @priv: Private callback data
+ * @pkt_priv: Private packet data
+ * @ptr: Pointer to the data received
+ * @size: Size of the data received
+ *
+ * This function is a notification callback from the G-Link core that data
+ * has been received from the remote side. This data is validated to make
+ * sure it is a cleanup_done message and is processed accordingly if it is.
+ */
+void glink_ssr_notify_rx(void *handle, const void *priv, const void *pkt_priv,
+ const void *ptr, size_t size)
+{
+ struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+ struct cleanup_done_msg *resp = (struct cleanup_done_msg *)ptr;
+
+ if (unlikely(!cb_data))
+ goto missing_cb_data;
+ if (unlikely(!cb_data->do_cleanup_data))
+ goto missing_do_cleanup_data;
+ if (unlikely(!resp))
+ goto missing_response;
+ if (unlikely(resp->version != cb_data->do_cleanup_data->version))
+ goto version_mismatch;
+ if (unlikely(resp->seq_num != cb_data->do_cleanup_data->seq_num))
+ goto invalid_seq_number;
+ if (unlikely(resp->response != GLINK_SSR_CLEANUP_DONE))
+ goto wrong_response;
+
+ cb_data->responded = true;
+ atomic_dec(&responses_remaining);
+
+ GLINK_SSR_LOG(
+ "<SSR> %s: Response from %s resp[%d] version[%d] seq_num[%d] restarted[%s]\n",
+ __func__, cb_data->edge, resp->response,
+ resp->version, resp->seq_num,
+ cb_data->do_cleanup_data->name);
+
+ kfree(cb_data->do_cleanup_data);
+ cb_data->do_cleanup_data = NULL;
+ wake_up(&waitqueue);
+ return;
+
+missing_cb_data:
+ panic("%s: Missing cb_data!\n", __func__);
+ return;
+missing_do_cleanup_data:
+ panic("%s: Missing do_cleanup data!\n", __func__);
+ return;
+missing_response:
+ GLINK_SSR_ERR("<SSR> %s: Missing response data\n", __func__);
+ return;
+version_mismatch:
+ GLINK_SSR_ERR("<SSR> %s: Version mismatch. %s[%d], %s[%d]\n", __func__,
+ "do_cleanup version", cb_data->do_cleanup_data->version,
+ "cleanup_done version", resp->version);
+ return;
+invalid_seq_number:
+ GLINK_SSR_ERR("<SSR> %s: Invalid seq. number. %s[%d], %s[%d]\n",
+ __func__, "do_cleanup seq num",
+ cb_data->do_cleanup_data->seq_num,
+ "cleanup_done seq_num", resp->seq_num);
+ return;
+wrong_response:
+ GLINK_SSR_ERR("<SSR> %s: Not a cleaup_done message. %s[%d]\n", __func__,
+ "cleanup_done response", resp->response);
+ return;
+}
+
+/**
+ * glink_ssr_notify_tx_done() - Transmit finished notification callback
+ * @handle: G-Link channel handle
+ * @priv: Private callback data
+ * @pkt_priv: Private packet data
+ * @ptr:	Pointer to the data that was transmitted
+ *
+ * This function is a notification callback from the G-Link core that data
+ * we sent has finished transmitting.
+ */
+void glink_ssr_notify_tx_done(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr)
+{
+ struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+
+ if (unlikely(!cb_data)) {
+ panic("%s: cb_data is NULL!\n", __func__);
+ return;
+ }
+
+ GLINK_SSR_LOG("<SSR> %s: Notified %s of restart\n",
+ __func__, cb_data->edge);
+
+ cb_data->tx_done = true;
+}
+
+void close_ch_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ void *link_state_handle;
+ struct subsys_info *ss_info;
+ struct close_ch_work *close_work =
+ container_of(work, struct close_ch_work, work);
+
+ glink_close(close_work->handle);
+
+ ss_info = get_info_for_edge(close_work->edge);
+ BUG_ON(!ss_info);
+
+ spin_lock_irqsave(&ss_info->link_up_lock, flags);
+ ss_info->link_up = false;
+ spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+
+ BUG_ON(ss_info->link_state_handle != NULL);
+ link_state_handle = glink_register_link_state_cb(ss_info->link_info,
+ NULL);
+
+ if (IS_ERR_OR_NULL(link_state_handle))
+ GLINK_SSR_ERR("<SSR> %s: %s, ret[%d]\n", __func__,
+ "Couldn't register link state cb",
+ (int)PTR_ERR(link_state_handle));
+ else
+ ss_info->link_state_handle = link_state_handle;
+
+ BUG_ON(!ss_info->cb_data);
+ kfree(ss_info->cb_data);
+ kfree(close_work);
+}
+
+/**
+ * glink_ssr_notify_state() - Channel state notification callback
+ * @handle: G-Link channel handle
+ * @priv: Private callback data
+ * @event: The state that has been transitioned to
+ *
+ * This function is a notification callback from the G-Link core that the
+ * channel state has changed.
+ */
+void glink_ssr_notify_state(void *handle, const void *priv, unsigned event)
+{
+ struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+ struct close_ch_work *close_work;
+
+ if (!cb_data) {
+ GLINK_SSR_ERR("<SSR> %s: Could not allocate data for cb_data\n",
+ __func__);
+ } else {
+ GLINK_SSR_LOG("<SSR> %s: event[%d]\n",
+ __func__, event);
+ cb_data->event = event;
+ if (event == GLINK_REMOTE_DISCONNECTED) {
+ close_work =
+ kmalloc(sizeof(struct close_ch_work),
+ GFP_KERNEL);
+ if (!close_work) {
+ GLINK_SSR_ERR(
+ "<SSR> %s: Could not allocate %s\n",
+ __func__, "close work");
+ return;
+ }
+
+ strlcpy(close_work->edge, cb_data->edge,
+ sizeof(close_work->edge));
+ close_work->handle = handle;
+ INIT_WORK(&close_work->work, close_ch_worker);
+ queue_work(glink_ssr_wq, &close_work->work);
+ }
+ }
+}
+
+/**
+ * glink_ssr_notify_rx_intent_req() - RX intent request notification callback
+ * @handle: G-Link channel handle
+ * @priv: Private callback data
+ * @req_size: The size of the requested intent
+ *
+ * This function is a notification callback from the G-Link core of the remote
+ * side's request for an RX intent to be queued.
+ *
+ * Return: Boolean indicating whether or not the request was successfully
+ * received
+ */
+bool glink_ssr_notify_rx_intent_req(void *handle, const void *priv,
+ size_t req_size)
+{
+ struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+
+ if (!cb_data) {
+ GLINK_SSR_ERR("<SSR> %s: Could not allocate data for cb_data\n",
+ __func__);
+ return false;
+ } else {
+ GLINK_SSR_LOG("<SSR> %s: rx_intent_req of size %zu\n",
+ __func__, req_size);
+ return true;
+ }
+}
+
+/**
+ * glink_ssr_restart_notifier_cb() - SSR restart notifier callback function
+ * @this: Notifier block used by the SSR framework
+ * @code: The SSR code for which stage of restart is occurring
+ * @data: Structure containing private data - not used here.
+ *
+ * This function is a callback for the SSR framework. From here we initiate
+ * our handling of SSR.
+ *
+ * Return: Status of SSR handling
+ */
+static int glink_ssr_restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ int ret = 0;
+ struct subsys_info *ss_info = NULL;
+ struct restart_notifier_block *notifier =
+ container_of(this, struct restart_notifier_block, nb);
+
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ GLINK_SSR_LOG("<SSR> %s: %s: subsystem restart for %s\n",
+ __func__, "SUBSYS_AFTER_SHUTDOWN",
+ notifier->subsystem);
+ ss_info = get_info_for_subsystem(notifier->subsystem);
+ if (ss_info == NULL) {
+ GLINK_SSR_ERR("<SSR> %s: ss_info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ glink_ssr(ss_info->edge);
+ ret = notify_for_subsystem(ss_info);
+
+ if (ret) {
+ GLINK_SSR_ERR("<SSR>: %s: %s, ret[%d]\n", __func__,
+ "Subsystem notification failed", ret);
+ return ret;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * notify_for_subsystem() - Notify other subsystems that a subsystem is being
+ *			    restarted
+ * @ss_info: Subsystem info structure for the subsystem being restarted
+ *
+ * This function sends notifications to affected subsystems that the subsystem
+ * in ss_info is being restarted, and waits for the cleanup done response from
+ * all of those subsystems. It also initiates any local cleanup that is
+ * necessary.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+int notify_for_subsystem(struct subsys_info *ss_info)
+{
+ struct subsys_info *ss_info_channel;
+ struct subsys_info_leaf *ss_leaf_entry;
+ struct do_cleanup_msg *do_cleanup_data;
+ void *handle;
+ int wait_ret;
+ int ret;
+ unsigned long flags;
+
+ if (!ss_info) {
+ GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * No locking is needed here because ss_info->notify_list_len is
+ * only modified during setup.
+ */
+ atomic_set(&responses_remaining, ss_info->notify_list_len);
+ init_waitqueue_head(&waitqueue);
+ notifications_successful = true;
+
+ list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
+ notify_list_node) {
+ GLINK_SSR_LOG(
+ "<SSR> %s: Notifying: %s:%s of %s restart, seq_num[%d]\n",
+ __func__, ss_leaf_entry->edge,
+ ss_leaf_entry->xprt, ss_info->edge,
+ sequence_number);
+
+ ss_info_channel =
+ get_info_for_subsystem(ss_leaf_entry->ssr_name);
+ if (ss_info_channel == NULL) {
+ GLINK_SSR_ERR(
+ "<SSR> %s: unable to find subsystem name\n",
+ __func__);
+ return -ENODEV;
+ }
+ handle = ss_info_channel->handle;
+ ss_leaf_entry->cb_data = ss_info_channel->cb_data;
+
+ spin_lock_irqsave(&ss_info->link_up_lock, flags);
+ if (IS_ERR_OR_NULL(ss_info_channel->handle) ||
+ !ss_info_channel->cb_data ||
+ !ss_info_channel->link_up ||
+ ss_info_channel->cb_data->event
+ != GLINK_CONNECTED) {
+
+ GLINK_SSR_LOG(
+ "<SSR> %s: %s:%s %s[%d], %s[%p], %s[%d]\n",
+ __func__, ss_leaf_entry->edge, "Not connected",
+ "resp. remaining",
+ atomic_read(&responses_remaining), "handle",
+ ss_info_channel->handle, "link_up",
+ ss_info_channel->link_up);
+
+ spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+ atomic_dec(&responses_remaining);
+ continue;
+ }
+ spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+
+ do_cleanup_data = kmalloc(sizeof(struct do_cleanup_msg),
+ GFP_KERNEL);
+ if (!do_cleanup_data) {
+ GLINK_SSR_ERR(
+ "%s %s: Could not allocate do_cleanup_msg\n",
+ "<SSR>", __func__);
+ return -ENOMEM;
+ }
+
+ do_cleanup_data->version = 0;
+ do_cleanup_data->command = GLINK_SSR_DO_CLEANUP;
+ do_cleanup_data->seq_num = sequence_number;
+ do_cleanup_data->name_len = strlen(ss_info->edge);
+ strlcpy(do_cleanup_data->name, ss_info->edge,
+ do_cleanup_data->name_len + 1);
+ ss_leaf_entry->cb_data->do_cleanup_data = do_cleanup_data;
+
+ ret = glink_queue_rx_intent(handle,
+ (void *)ss_leaf_entry->cb_data,
+ sizeof(struct cleanup_done_msg));
+ if (ret) {
+ GLINK_SSR_ERR(
+ "%s %s: %s, ret[%d], resp. remaining[%d]\n",
+ "<SSR>", __func__,
+ "queue_rx_intent failed", ret,
+ atomic_read(&responses_remaining));
+ kfree(do_cleanup_data);
+ ss_leaf_entry->cb_data->do_cleanup_data = NULL;
+
+ if (strcmp(ss_leaf_entry->ssr_name, "rpm")) {
+ subsystem_restart(ss_leaf_entry->ssr_name);
+ ss_leaf_entry->restarted = true;
+ } else {
+ panic("%s: Could not queue intent for RPM!\n",
+ __func__);
+ }
+ atomic_dec(&responses_remaining);
+ continue;
+ }
+
+ if (strcmp(ss_leaf_entry->ssr_name, "rpm"))
+ ret = glink_tx(handle, ss_leaf_entry->cb_data,
+ do_cleanup_data,
+ sizeof(*do_cleanup_data),
+ GLINK_TX_REQ_INTENT);
+ else
+ ret = glink_tx(handle, ss_leaf_entry->cb_data,
+ do_cleanup_data,
+ sizeof(*do_cleanup_data),
+ GLINK_TX_SINGLE_THREADED);
+
+ if (ret) {
+ GLINK_SSR_ERR("<SSR> %s: tx failed, ret[%d], %s[%d]\n",
+ __func__, ret, "resp. remaining",
+ atomic_read(&responses_remaining));
+ kfree(do_cleanup_data);
+ ss_leaf_entry->cb_data->do_cleanup_data = NULL;
+
+ if (strcmp(ss_leaf_entry->ssr_name, "rpm")) {
+ subsystem_restart(ss_leaf_entry->ssr_name);
+ ss_leaf_entry->restarted = true;
+ } else {
+ panic("%s: glink_tx() to RPM failed!\n",
+ __func__);
+ }
+ atomic_dec(&responses_remaining);
+ continue;
+ }
+
+ sequence_number++;
+ }
+
+ wait_ret = wait_event_timeout(waitqueue,
+ atomic_read(&responses_remaining) == 0,
+ GLINK_SSR_REPLY_TIMEOUT);
+
+ list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
+ notify_list_node) {
+ if (!wait_ret && !IS_ERR_OR_NULL(ss_leaf_entry->cb_data)
+ && !ss_leaf_entry->cb_data->responded) {
+ GLINK_SSR_ERR("%s %s: Subsystem %s %s\n",
+ "<SSR>", __func__, ss_leaf_entry->edge,
+ "failed to respond. Restarting.");
+
+ notifications_successful = false;
+
+ /* Check for RPM, as it can't be restarted */
+ if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
+ panic("%s: RPM failed to respond!\n", __func__);
+ else if (!ss_leaf_entry->restarted)
+ subsystem_restart(ss_leaf_entry->ssr_name);
+ }
+ ss_leaf_entry->restarted = false;
+
+ if (!IS_ERR_OR_NULL(ss_leaf_entry->cb_data))
+ ss_leaf_entry->cb_data->responded = false;
+ }
+ complete(&notifications_successful_complete);
+ return 0;
+}
+EXPORT_SYMBOL(notify_for_subsystem);
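+
+/*
+ * Illustrative handshake for the notification above (sketch; the edge and
+ * subsystem names are hypothetical). For each entry in the notify list, a
+ * do_cleanup message is transmitted and a matching cleanup_done reply is
+ * expected:
+ *
+ *	apps (glink_ssr)		notified edge (e.g. lpass)
+ *	----------------		--------------------------
+ *	do_cleanup{version=0,
+ *		   seq_num=N,
+ *		   name="modem"}  --->
+ *				  <---	cleanup_done{version=0, seq_num=N,
+ *					response=GLINK_SSR_CLEANUP_DONE}
+ *
+ * Edges that do not reply within GLINK_SSR_REPLY_TIMEOUT are restarted
+ * through subsystem_restart(); an unresponsive RPM triggers a panic.
+ */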
+
+/**
+ * configure_and_open_channel() - configure and open a G-Link channel for
+ * the given subsystem
+ * @ss_info: The subsys_info structure where the channel will be stored
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+static int configure_and_open_channel(struct subsys_info *ss_info)
+{
+ struct glink_open_config open_cfg;
+ struct ssr_notify_data *cb_data = NULL;
+ void *handle = NULL;
+
+ if (!ss_info) {
+ GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cb_data = kmalloc(sizeof(struct ssr_notify_data), GFP_KERNEL);
+ if (!cb_data) {
+ GLINK_SSR_ERR("<SSR> %s: Could not allocate cb_data\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cb_data->responded = false;
+ cb_data->event = GLINK_SSR_EVENT_INIT;
+ cb_data->edge = ss_info->edge;
+ ss_info->cb_data = cb_data;
+
+ memset(&open_cfg, 0, sizeof(struct glink_open_config));
+
+ if (ss_info->xprt) {
+ open_cfg.transport = ss_info->xprt;
+ } else {
+ open_cfg.transport = NULL;
+ open_cfg.options = GLINK_OPT_INITIAL_XPORT;
+ }
+ open_cfg.edge = ss_info->edge;
+ open_cfg.name = "glink_ssr";
+ open_cfg.notify_rx = glink_ssr_notify_rx;
+ open_cfg.notify_tx_done = glink_ssr_notify_tx_done;
+ open_cfg.notify_state = glink_ssr_notify_state;
+ open_cfg.notify_rx_intent_req = glink_ssr_notify_rx_intent_req;
+ open_cfg.priv = ss_info->cb_data;
+
+ handle = glink_open(&open_cfg);
+ if (IS_ERR_OR_NULL(handle)) {
+ GLINK_SSR_ERR(
+ "<SSR> %s:%s %s: unable to open channel, ret[%d]\n",
+ open_cfg.edge, open_cfg.name, __func__,
+ (int)PTR_ERR(handle));
+ kfree(cb_data);
+ cb_data = NULL;
+ ss_info->cb_data = NULL;
+ return PTR_ERR(handle);
+ }
+ ss_info->handle = handle;
+ return 0;
+}
+
+/**
+ * get_info_for_subsystem() - Retrieve information about a subsystem from the
+ * global subsystem_info_list
+ * @subsystem: The name of the subsystem recognized by the SSR
+ * framework
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ * NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_subsystem(const char *subsystem)
+{
+ struct subsys_info *ss_info_entry;
+
+ list_for_each_entry(ss_info_entry, &subsystem_list,
+ subsystem_list_node) {
+ if (!strcmp(subsystem, ss_info_entry->ssr_name))
+ return ss_info_entry;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(get_info_for_subsystem);
+
+/**
+ * get_info_for_edge() - Retrieve information about a subsystem from the
+ * global subsystem_info_list
+ * @edge: The name of the edge recognized by G-Link
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ * NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_edge(const char *edge)
+{
+ struct subsys_info *ss_info_entry;
+
+ list_for_each_entry(ss_info_entry, &subsystem_list,
+ subsystem_list_node) {
+ if (!strcmp(edge, ss_info_entry->edge))
+ return ss_info_entry;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(get_info_for_edge);
+
+/**
+ * glink_ssr_get_seq_num() - Get the current SSR sequence number
+ *
+ * Return: The current SSR sequence number
+ */
+uint32_t glink_ssr_get_seq_num(void)
+{
+ return sequence_number;
+}
+EXPORT_SYMBOL(glink_ssr_get_seq_num);
+
+/**
+ * delete_ss_info_notify_list() - Delete the notify list for a subsystem
+ * @ss_info: The subsystem info structure
+ */
+static void delete_ss_info_notify_list(struct subsys_info *ss_info)
+{
+ struct subsys_info_leaf *leaf, *temp;
+
+ list_for_each_entry_safe(leaf, temp, &ss_info->notify_list,
+ notify_list_node) {
+ list_del(&leaf->notify_list_node);
+ kfree(leaf);
+ }
+}
+
+/**
+ * glink_ssr_wait_cleanup_done() - Wait for the cleanup_done response from
+ *				    all processors
+ * @ssr_timeout_multiplier: timeout multiplier for waiting on all processors
+ *
+ * Return: True if cleanup_done received from all processors, false otherwise
+ */
+bool glink_ssr_wait_cleanup_done(unsigned ssr_timeout_multiplier)
+{
+ int wait_ret =
+ wait_for_completion_timeout(&notifications_successful_complete,
+ ssr_timeout_multiplier * GLINK_SSR_REPLY_TIMEOUT);
+ reinit_completion(&notifications_successful_complete);
+
+ if (!notifications_successful || !wait_ret)
+ return false;
+ else
+ return true;
+}
+EXPORT_SYMBOL(glink_ssr_wait_cleanup_done);
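+
+/*
+ * Minimal usage sketch (hypothetical caller and multiplier): a subsystem
+ * restart driver can block on the cleanup handshake before powering the
+ * subsystem back up, scaling the wait for slower processors:
+ *
+ *	if (!glink_ssr_wait_cleanup_done(2))
+ *		pr_err("G-Link SSR cleanup did not complete\n");
+ */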
+
+/**
+ * glink_ssr_probe() - G-Link SSR platform device probe function
+ * @pdev: Pointer to the platform device structure
+ *
+ * This function parses DT for information on which subsystems should be
+ * notified when each subsystem undergoes SSR. The global subsystem information
+ * list is built from this information. In addition, SSR notifier callback
+ * functions are registered here for the necessary subsystems.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+static int glink_ssr_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct device_node *phandle_node;
+ struct restart_notifier_block *nb;
+ struct subsys_info *ss_info;
+ struct subsys_info_leaf *ss_info_leaf;
+ struct glink_link_info *link_info;
+ char *key;
+ const char *edge;
+ const char *subsys_name;
+ const char *xprt;
+ void *handle;
+ void *link_state_handle;
+ int phandle_index = 0;
+ int ret = 0;
+
+ if (!pdev) {
+ GLINK_SSR_ERR("<SSR> %s: pdev is NULL\n", __func__);
+ ret = -EINVAL;
+ goto pdev_null_or_ss_info_alloc_failed;
+ }
+
+ node = pdev->dev.of_node;
+
+ ss_info = kmalloc(sizeof(*ss_info), GFP_KERNEL);
+ if (!ss_info) {
+ GLINK_SSR_ERR("<SSR> %s: %s\n", __func__,
+ "Could not allocate subsystem info structure\n");
+ ret = -ENOMEM;
+ goto pdev_null_or_ss_info_alloc_failed;
+ }
+ INIT_LIST_HEAD(&ss_info->notify_list);
+
+ link_info = kmalloc(sizeof(struct glink_link_info),
+ GFP_KERNEL);
+ if (!link_info) {
+ GLINK_SSR_ERR("<SSR> %s: %s\n", __func__,
+ "Could not allocate link info structure\n");
+ ret = -ENOMEM;
+ goto link_info_alloc_failed;
+ }
+ ss_info->link_info = link_info;
+
+ key = "label";
+ subsys_name = of_get_property(node, key, NULL);
+ if (!subsys_name) {
+ GLINK_SSR_ERR("<SSR> %s: missing key %s\n", __func__, key);
+ ret = -ENODEV;
+ goto label_or_edge_missing;
+ }
+
+ key = "qcom,edge";
+ edge = of_get_property(node, key, NULL);
+ if (!edge) {
+ GLINK_SSR_ERR("<SSR> %s: missing key %s\n", __func__, key);
+ ret = -ENODEV;
+ goto label_or_edge_missing;
+ }
+
+ key = "qcom,xprt";
+ xprt = of_get_property(node, key, NULL);
+ if (!xprt)
+ GLINK_SSR_LOG(
+ "%s %s: no transport present for subys/edge %s/%s\n",
+ "<SSR>", __func__, subsys_name, edge);
+
+ ss_info->ssr_name = subsys_name;
+ ss_info->edge = edge;
+ ss_info->xprt = xprt;
+ ss_info->notify_list_len = 0;
+ ss_info->link_info->transport = xprt;
+ ss_info->link_info->edge = edge;
+ ss_info->link_info->glink_link_state_notif_cb = glink_ssr_link_state_cb;
+ ss_info->link_up = false;
+ ss_info->handle = NULL;
+ ss_info->link_state_handle = NULL;
+ ss_info->cb_data = NULL;
+ spin_lock_init(&ss_info->link_up_lock);
+
+ nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
+ if (!nb) {
+ GLINK_SSR_ERR("<SSR> %s: Could not allocate notifier block\n",
+ __func__);
+ ret = -ENOMEM;
+ goto label_or_edge_missing;
+ }
+
+ nb->subsystem = subsys_name;
+ nb->nb.notifier_call = glink_ssr_restart_notifier_cb;
+
+ handle = subsys_notif_register_notifier(nb->subsystem, &nb->nb);
+ if (IS_ERR_OR_NULL(handle)) {
+ GLINK_SSR_ERR("<SSR> %s: Could not register SSR notifier cb\n",
+ __func__);
+ ret = -EINVAL;
+ goto nb_registration_fail;
+ }
+
+ key = "qcom,notify-edges";
+ while (true) {
+ phandle_node = of_parse_phandle(node, key, phandle_index++);
+		if (!phandle_node && phandle_index == 1) {
+ GLINK_SSR_ERR(
+ "<SSR> %s: qcom,notify-edges is not present",
+ __func__);
+ ret = -ENODEV;
+ goto notify_edges_not_present;
+ }
+
+ if (!phandle_node)
+ break;
+
+ ss_info_leaf = kmalloc(sizeof(struct subsys_info_leaf),
+ GFP_KERNEL);
+ if (!ss_info_leaf) {
+ GLINK_SSR_ERR(
+ "<SSR> %s: Could not allocate subsys_info_leaf\n",
+ __func__);
+ ret = -ENOMEM;
+ goto notify_edges_not_present;
+ }
+
+ subsys_name = of_get_property(phandle_node, "label", NULL);
+ edge = of_get_property(phandle_node, "qcom,edge", NULL);
+ xprt = of_get_property(phandle_node, "qcom,xprt", NULL);
+
+ of_node_put(phandle_node);
+
+ if (!subsys_name || !edge) {
+ GLINK_SSR_ERR(
+ "%s, %s: Found DT node with invalid data!\n",
+ "<SSR>", __func__);
+ ret = -EINVAL;
+ goto invalid_dt_node;
+ }
+
+ ss_info_leaf->ssr_name = subsys_name;
+ ss_info_leaf->edge = edge;
+ ss_info_leaf->xprt = xprt;
+ ss_info_leaf->restarted = false;
+ list_add_tail(&ss_info_leaf->notify_list_node,
+ &ss_info->notify_list);
+ ss_info->notify_list_len++;
+ }
+
+ list_add_tail(&ss_info->subsystem_list_node, &subsystem_list);
+
+ link_state_handle = glink_register_link_state_cb(ss_info->link_info,
+ NULL);
+ if (IS_ERR_OR_NULL(link_state_handle)) {
+ GLINK_SSR_ERR("<SSR> %s: Could not register link state cb\n",
+ __func__);
+ ret = PTR_ERR(link_state_handle);
+ goto link_state_register_fail;
+ }
+ ss_info->link_state_handle = link_state_handle;
+
+ return 0;
+
+link_state_register_fail:
+ list_del(&ss_info->subsystem_list_node);
+invalid_dt_node:
+ kfree(ss_info_leaf);
+notify_edges_not_present:
+ subsys_notif_unregister_notifier(handle, &nb->nb);
+ delete_ss_info_notify_list(ss_info);
+nb_registration_fail:
+ kfree(nb);
+label_or_edge_missing:
+ kfree(link_info);
+link_info_alloc_failed:
+ kfree(ss_info);
+pdev_null_or_ss_info_alloc_failed:
+ return ret;
+}
+
+static struct of_device_id match_table[] = {
+ { .compatible = "qcom,glink_ssr" },
+ {},
+};
+
+static struct platform_driver glink_ssr_driver = {
+ .probe = glink_ssr_probe,
+ .driver = {
+ .name = "msm_glink_ssr",
+ .owner = THIS_MODULE,
+ .of_match_table = match_table,
+ },
+};
+
+static int glink_ssr_init(void)
+{
+ int ret;
+
+ glink_ssr_log_ctx =
+ ipc_log_context_create(NUM_LOG_PAGES, "glink_ssr", 0);
+ glink_ssr_wq = create_singlethread_workqueue("glink_ssr_wq");
+ ret = platform_driver_register(&glink_ssr_driver);
+ if (ret)
+ GLINK_SSR_ERR("<SSR> %s: %s ret: %d\n", __func__,
+ "glink_ssr driver registration failed", ret);
+
+ notifications_successful = false;
+ init_completion(&notifications_successful_complete);
+	return ret;
+}
+
+module_init(glink_ssr_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) SSR Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
new file mode 100644
index 000000000000..6242e867fe72
--- /dev/null
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -0,0 +1,201 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_XPRT_IF_H_
+#define _SOC_QCOM_GLINK_XPRT_IF_H_
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct glink_core_xprt_ctx;
+struct glink_core_if;
+struct channel_ctx;
+struct glink_core_rx_intent;
+
+enum buf_type {
+ LINEAR = 0,
+ VECTOR,
+};
+
+enum xprt_ids {
+ SMEM_XPRT_ID = 100,
+ SMD_TRANS_XPRT_ID = 200,
+ LLOOP_XPRT_ID = 300,
+ MOCK_XPRT_HIGH_ID = 390,
+ MOCK_XPRT_ID = 400,
+ MOCK_XPRT_LOW_ID = 410,
+};
+
+#define GCAP_SIGNALS BIT(0)
+#define GCAP_INTENTLESS BIT(1)
+#define GCAP_TRACER_PKT BIT(2)
+#define GCAP_AUTO_QUEUE_RX_INT BIT(3)
+
+/**
+ * struct glink_core_tx_pkt - Transmit Packet information
+ * @list_node:	Node in the channel's transmit queue.
+ * @list_done:	Node in the channel's acknowledgment queue.
+ * @pkt_priv: Private information specific to the packet.
+ * @data: Pointer to the buffer containing the data.
+ * @riid: Remote receive intent used to transmit the packet.
+ * @rcid: Remote channel receiving the packet.
+ * @size: Total size of the data in the packet.
+ * @tx_len: Data length to transmit in the current transmit slot.
+ * @size_remaining: Remaining size of the data in the packet.
+ * @intent_size: Receive intent size queued by the remote side.
+ * @tracer_pkt: Flag to indicate if the packet is a tracer packet.
+ * @iovec: Pointer to the vector buffer packet.
+ * @vprovider: Packet-specific virtual buffer provider function.
+ * @pprovider: Packet-specific physical buffer provider function.
+ * @pkt_ref: Active references to the packet.
+ */
+struct glink_core_tx_pkt {
+ struct list_head list_node;
+ struct list_head list_done;
+ const void *pkt_priv;
+ const void *data;
+ uint32_t riid;
+ uint32_t rcid;
+ uint32_t size;
+ uint32_t tx_len;
+ uint32_t size_remaining;
+ size_t intent_size;
+ bool tracer_pkt;
+ void *iovec;
+ void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+ void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+ struct rwref_lock pkt_ref;
+};
+
+/**
+ * struct glink_transport_if - transport interface registered with the
+ *			       G-Link core
+ *
+ * Note - each call to register the interface must pass a unique
+ * instance of this data.
+ */
+struct glink_transport_if {
+ /* Negotiation */
+ void (*tx_cmd_version)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+ void (*tx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+ uint32_t (*set_version)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+
+ /* channel state */
+ int (*tx_cmd_ch_open)(struct glink_transport_if *if_ptr, uint32_t lcid,
+ const char *name, uint16_t req_xprt);
+ int (*tx_cmd_ch_close)(struct glink_transport_if *if_ptr,
+ uint32_t lcid);
+ void (*tx_cmd_ch_remote_open_ack)(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint16_t xprt_resp);
+ void (*tx_cmd_ch_remote_close_ack)(struct glink_transport_if *if_ptr,
+ uint32_t rcid);
+ int (*ssr)(struct glink_transport_if *if_ptr);
+
+ /* channel data */
+ int (*allocate_rx_intent)(struct glink_transport_if *if_ptr,
+ size_t size,
+ struct glink_core_rx_intent *intent);
+ int (*deallocate_rx_intent)(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent);
+ /* Optional */
+ int (*reuse_rx_intent)(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent);
+
+ int (*tx_cmd_local_rx_intent)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid);
+ void (*tx_cmd_local_rx_done)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse);
+ int (*tx)(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx);
+ int (*tx_cmd_rx_intent_req)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size);
+ int (*tx_cmd_remote_rx_intent_req_ack)(
+ struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted);
+ int (*tx_cmd_set_sigs)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t sigs);
+
+ /* Optional. If NULL at xprt registration, dummies will be used */
+ int (*poll)(struct glink_transport_if *if_ptr, uint32_t lcid);
+ int (*mask_rx_irq)(struct glink_transport_if *if_ptr, uint32_t lcid,
+ bool mask, void *pstruct);
+ int (*wait_link_down)(struct glink_transport_if *if_ptr);
+ int (*tx_cmd_tracer_pkt)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx);
+ unsigned long (*get_power_vote_ramp_time)(
+ struct glink_transport_if *if_ptr, uint32_t state);
+ int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
+ int (*power_unvote)(struct glink_transport_if *if_ptr);
+ /*
+ * Keep data pointers at the end of the structure after all function
+	 * pointers to allow for in-place initialization.
+ */
+
+ /* private pointer for core */
+ struct glink_core_xprt_ctx *glink_core_priv;
+
+ /* core pointer (set during transport registration) */
+ struct glink_core_if *glink_core_if_ptr;
+};
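+
+/*
+ * Registration sketch (assumes the interface is embedded in a
+ * transport-private structure, as in the smem-native transport above;
+ * the my_*() callbacks are hypothetical):
+ *
+ *	einfo->xprt_if.tx_cmd_version = my_tx_cmd_version;
+ *	einfo->xprt_if.tx = my_tx;
+ *	...
+ *	rc = glink_core_register_transport(&einfo->xprt_if,
+ *					   &einfo->xprt_cfg);
+ */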
+
+#ifdef CONFIG_MSM_GLINK
+
+/**
+ * get_tx_vaddr() - Get the virtual address from which the tx has to be done
+ * @pctx: transmit packet context.
+ * @offset: offset into the packet.
+ * @tx_size: pointer to hold the length of the contiguous buffer
+ * space.
+ *
+ * Return: Address from which the tx has to be done.
+ */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+ size_t *tx_size)
+{
+ void *pdata;
+
+ if (pctx->vprovider) {
+ return pctx->vprovider((void *)pctx->iovec, offset, tx_size);
+ } else if (pctx->pprovider) {
+ pdata = pctx->pprovider((void *)pctx->iovec, offset, tx_size);
+ return phys_to_virt((unsigned long)pdata);
+ }
+ return NULL;
+}
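+
+/*
+ * Illustrative transmit loop built on get_tx_vaddr() (sketch only;
+ * fifo_write() is a hypothetical transport helper):
+ *
+ *	size_t offset = 0, tx_size;
+ *	void *vaddr;
+ *
+ *	while (offset < pctx->size &&
+ *	       (vaddr = get_tx_vaddr(pctx, offset, &tx_size))) {
+ *		fifo_write(vaddr, tx_size);
+ *		offset += tx_size;
+ *	}
+ */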
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name: Name of the transport.
+ * @id: Assigned id.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id);
+
+
+#else /* CONFIG_MSM_GLINK */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+ size_t *tx_size)
+{
+ return NULL;
+}
+
+static inline int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+#endif /* _SOC_QCOM_GLINK_XPRT_IF_H_ */
diff --git a/drivers/soc/qcom/tracer_pkt.c b/drivers/soc/qcom/tracer_pkt.c
new file mode 100644
index 000000000000..49c9d27cbbcb
--- /dev/null
+++ b/drivers/soc/qcom/tracer_pkt.c
@@ -0,0 +1,255 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/arch_timer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <soc/qcom/tracer_pkt.h>
+#define CREATE_TRACE_POINTS
+#include "tracer_pkt_private.h"
+
+static unsigned qdss_tracing;
+module_param_named(qdss_tracing_enable, qdss_tracing,
+ uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define TRACER_PKT_VERSION 1
+#define MAX_CC_WLEN 3
+#define HEX_DUMP_HDR "Tracer Packet:"
+
+/**
+ * struct tracer_pkt_hdr - data structure defining the tracer packet header
+ * @version:	Tracer Packet version.
+ * @reserved:	Reserved fields in the tracer packet.
+ * @id_valid:	Indicates the presence of a subsystem & transport ID.
+ * @qdss_tracing: Enable the event logging to QDSS.
+ * @ccl: Client cookie/private information length in words.
+ * @pkt_len: Length of the tracer packet in words.
+ * @pkt_offset: Offset into the packet to log events, in words.
+ * @clnt_event_cfg: Client-specific event configuration bit mask.
+ * @glink_event_cfg: G-Link-specific event configuration bit mask.
+ * @base_ts: Base timestamp when the tracer packet is initialized.
+ * @cc: Client cookie/private information.
+ */
+struct tracer_pkt_hdr {
+ uint16_t version:4;
+ uint16_t reserved:8;
+ uint16_t id_valid:1;
+ uint16_t qdss_tracing:1;
+ uint16_t ccl:2;
+ uint16_t pkt_len;
+ uint16_t pkt_offset;
+ uint16_t clnt_event_cfg;
+ uint32_t glink_event_cfg;
+ u64 base_ts;
+ uint32_t cc[MAX_CC_WLEN];
+} __attribute__((__packed__));
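+
+/*
+ * Worked size check: the packed header above occupies 32 bytes (2 bytes of
+ * bitfields, 2 + 2 + 2 for pkt_len/pkt_offset/clnt_event_cfg, 4 for
+ * glink_event_cfg, 8 for base_ts and 12 for cc[]), so tracer_pkt_init()
+ * starts event logging at pkt_offset = 32 / sizeof(uint32_t) = 8 words.
+ */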
+
+/**
+ * struct tracer_pkt_event - data structure defining the tracer packet event
+ * @event_id: Event ID.
+ * @event_ts:	Timestamp at which the event occurred.
+ */
+struct tracer_pkt_event {
+ uint32_t event_id;
+ uint32_t event_ts;
+};
+
+/**
+ * tracer_pkt_init() - initialize the tracer packet
+ * @data: Pointer to the buffer to be initialized with a tracer
+ * packet.
+ * @data_len: Length of the buffer.
+ * @client_event_cfg: Client-specific event configuration mask.
+ * @glink_event_cfg: G-Link-specific event configuration mask.
+ * @pkt_priv: Private/Cookie information to be added to the tracer
+ * packet.
+ * @pkt_priv_len: Length of the private data.
+ *
+ * This function is used to initialize a buffer with the tracer packet header.
+ * The tracer packet header includes the data as passed by the elements in the
+ * parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_init(void *data, size_t data_len,
+ uint16_t client_event_cfg, uint32_t glink_event_cfg,
+ void *pkt_priv, size_t pkt_priv_len)
+{
+ struct tracer_pkt_hdr *pkt_hdr;
+
+ if (!data || !data_len)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(data_len, sizeof(uint32_t)))
+ return -EINVAL;
+
+ if (data_len < sizeof(*pkt_hdr))
+ return -ETOOSMALL;
+
+ pkt_hdr = (struct tracer_pkt_hdr *)data;
+ pkt_hdr->version = TRACER_PKT_VERSION;
+ pkt_hdr->reserved = 0;
+ pkt_hdr->id_valid = 0;
+ pkt_hdr->qdss_tracing = qdss_tracing ? true : false;
+ if (pkt_priv_len > MAX_CC_WLEN * sizeof(uint32_t))
+ pkt_hdr->ccl = MAX_CC_WLEN;
+ else
+ pkt_hdr->ccl = pkt_priv_len/sizeof(uint32_t) +
+ (pkt_priv_len & (sizeof(uint32_t) - 1) ? 1 : 0);
+ pkt_hdr->pkt_len = data_len / sizeof(uint32_t);
+ pkt_hdr->pkt_offset = sizeof(*pkt_hdr) / sizeof(uint32_t);
+ pkt_hdr->clnt_event_cfg = client_event_cfg;
+ pkt_hdr->glink_event_cfg = glink_event_cfg;
+ pkt_hdr->base_ts = arch_counter_get_cntpct();
+	/* Cap the copy at the cc[] capacity to avoid over-reading pkt_priv */
+	memset(pkt_hdr->cc, 0, sizeof(pkt_hdr->cc));
+	memcpy(pkt_hdr->cc, pkt_priv,
+	       min(pkt_priv_len, sizeof(pkt_hdr->cc)));
+ return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_init);
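+
+/*
+ * Usage sketch (buffer contents and cookie are hypothetical): a 6-byte
+ * cookie rounds up to ccl = 2 words, and the words past the header hold
+ * the logged events:
+ *
+ *	uint32_t pkt[32];
+ *	uint8_t cookie[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
+ *
+ *	if (!tracer_pkt_init(pkt, sizeof(pkt), 0x1, 0x0, cookie,
+ *			     sizeof(cookie)))
+ *		tracer_pkt_log_event(pkt, 1);
+ */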
+
+/**
+ * tracer_pkt_set_event_cfg() - set the event configuration mask in the tracer
+ * packet
+ * @data: Pointer to the buffer to be initialized with event
+ * configuration mask.
+ * @client_event_cfg: Client-specific event configuration mask.
+ * @glink_event_cfg: G-Link-specific event configuration mask.
+ *
+ * This function is used to initialize a buffer with the event configuration
+ * mask as passed by the elements in the parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+ uint32_t glink_event_cfg)
+{
+ struct tracer_pkt_hdr *pkt_hdr;
+
+ if (!data)
+ return -EINVAL;
+
+ pkt_hdr = (struct tracer_pkt_hdr *)data;
+ if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+ return -EINVAL;
+
+ pkt_hdr->clnt_event_cfg = client_event_cfg;
+ pkt_hdr->glink_event_cfg = glink_event_cfg;
+ return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_set_event_cfg);
+
+/**
+ * tracer_pkt_log_event() - log an event specific to the tracer packet
+ * @data: Pointer to the buffer containing tracer packet.
+ * @event_id: Event ID to be logged.
+ *
+ * This function is used to log an event specific to the tracer packet.
+ * The event is logged either into the tracer packet itself or a different
+ * tracing mechanism as configured.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_log_event(void *data, uint32_t event_id)
+{
+ struct tracer_pkt_hdr *pkt_hdr;
+ struct tracer_pkt_event event;
+
+ if (!data)
+ return -EINVAL;
+
+ pkt_hdr = (struct tracer_pkt_hdr *)data;
+ if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+ return -EINVAL;
+
+ if (qdss_tracing) {
+ trace_tracer_pkt_event(event_id, pkt_hdr->cc);
+ return 0;
+ }
+
+ if (unlikely((pkt_hdr->pkt_len - pkt_hdr->pkt_offset) *
+ sizeof(uint32_t) < sizeof(event)))
+ return -ETOOSMALL;
+
+ event.event_id = event_id;
+ event.event_ts = (uint32_t)arch_counter_get_cntpct();
+ memcpy(data + (pkt_hdr->pkt_offset * sizeof(uint32_t)),
+ &event, sizeof(event));
+ pkt_hdr->pkt_offset += sizeof(event)/sizeof(uint32_t);
+ return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_log_event);
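+
+/*
+ * Usage sketch (the event IDs are illustrative, client-defined values):
+ * each layer that handles the packet can stamp its own event, which lands
+ * either in the packet itself or in the QDSS tracepoint depending on the
+ * qdss_tracing mode.
+ *
+ *	tracer_pkt_log_event(buf, 100);		(e.g. queued to transport)
+ *	tracer_pkt_log_event(buf, 101);		(e.g. picked up by remote)
+ */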
+
+/**
+ * tracer_pkt_calc_hex_dump_size() - calculate the hex dump size of a tracer
+ * packet
+ * @data: Pointer to the buffer containing tracer packet.
+ * @data_len: Length of the tracer packet buffer.
+ *
+ * This function is used to calculate the length of the buffer required to
+ * hold the hex dump of the tracer packet.
+ *
+ * Return: Size of the buffer required to hold the hex dump on success,
+ *	   standard Linux error codes on failure.
+ */
+size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+{
+ size_t hex_dump_size;
+ struct tracer_pkt_hdr *pkt_hdr;
+
+	if (!data || !data_len)
+ return -EINVAL;
+
+ pkt_hdr = (struct tracer_pkt_hdr *)data;
+ if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+ return -EINVAL;
+
+	/*
+	 * One header line plus newline, then one line per 32-bit word in
+	 * the packet ("0x" + 8 hex digits + newline = 11 characters per
+	 * word), plus a null-termination character.
+	 */
+	hex_dump_size = strlen(HEX_DUMP_HDR) + 1 + (pkt_hdr->pkt_len * 11) + 1;
+ return hex_dump_size;
+}
+EXPORT_SYMBOL(tracer_pkt_calc_hex_dump_size);
+
+/**
+ * tracer_pkt_hex_dump() - hex dump the tracer packet into a buffer
+ * @buf: Buffer to contain the hex dump of the tracer packet.
+ * @buf_len: Length of the hex dump buffer.
+ * @data: Buffer containing the tracer packet.
+ * @data_len: Length of the buffer containing the tracer packet.
+ *
+ * This function is used to dump the contents of the tracer packet into
+ * a buffer in a specific hexadecimal format. The hex dump buffer can then
+ * be dumped through debugfs.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data, size_t data_len)
+{
+ int i, j = 0;
+ char *dst = (char *)buf;
+
+	if (!buf || !buf_len || !data || !data_len)
+ return -EINVAL;
+
+ if (buf_len < tracer_pkt_calc_hex_dump_size(data, data_len))
+ return -EINVAL;
+
+ j = scnprintf(dst, buf_len, "%s\n", HEX_DUMP_HDR);
+ for (i = 0; i < data_len/sizeof(uint32_t); i++)
+ j += scnprintf(dst + j, buf_len - j, "0x%08x\n",
+ *((uint32_t *)data + i));
+ dst[j] = '\0';
+ return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_hex_dump);
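+
+/*
+ * Usage sketch (assuming pkt already holds a valid tracer packet, e.g. in
+ * a debugfs show function with a struct seq_file *s): size the output
+ * buffer with tracer_pkt_calc_hex_dump_size(), then render into it.
+ *
+ *	size_t len = tracer_pkt_calc_hex_dump_size(pkt, pkt_len);
+ *	char *out = kmalloc(len, GFP_KERNEL);
+ *
+ *	if (out && !tracer_pkt_hex_dump(out, len, pkt, pkt_len))
+ *		seq_puts(s, out);
+ *	kfree(out);
+ */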
diff --git a/drivers/soc/qcom/tracer_pkt_private.h b/drivers/soc/qcom/tracer_pkt_private.h
new file mode 100644
index 000000000000..fc760e6b68d1
--- /dev/null
+++ b/drivers/soc/qcom/tracer_pkt_private.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_TRACER_PKT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACER_PKT_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tracer_pkt
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE tracer_pkt_private
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(tracer_pkt_event,
+
+ TP_PROTO(uint32_t id, uint32_t *cc),
+
+ TP_ARGS(id, cc),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, id)
+ __field(uint32_t, cc1)
+ __field(uint32_t, cc2)
+ __field(uint32_t, cc3)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->cc1 = cc[0];
+ __entry->cc2 = cc[1];
+ __entry->cc3 = cc[2];
+ ),
+
+	TP_printk("CC - 0x%08x:0x%08x:0x%08x, ID - %u",
+ __entry->cc1, __entry->cc2, __entry->cc3, __entry->id)
+);
+#endif /* _TRACER_PKT_TRACE_H */
+
+#include <trace/define_trace.h>
+