summaryrefslogtreecommitdiff
path: root/drivers/soc/qcom/glink.c
diff options
context:
space:
mode:
authorDhoat Harpal <hdhoat@codeaurora.org>2016-05-10 19:03:25 +0530
committerKyle Yan <kyan@codeaurora.org>2016-06-08 15:20:06 -0700
commit05360211f893484705d1dfe1e4c402af647f2ae0 (patch)
treed1500715c9edd6e251543548c5aa6698f4bdcaa2 /drivers/soc/qcom/glink.c
parenta55a0d7fc77fd7d8780c7358fc30b04ec66213e6 (diff)
soc: qcom: glink: Fix parallel migration issue
During parallel migration, race conditions are seen in the remote open and local open ack functions. An edge-based lock is introduced to avoid any race condition during simultaneous migration. The edge lock is shared across multiple transports of the same edge and is stored in a global list. CRs-Fixed: 1010920 Change-Id: I2b988d2a6112add06fa433c4b1deeec0b6e6bb58 Signed-off-by: Dhoat Harpal <hdhoat@codeaurora.org>
Diffstat (limited to 'drivers/soc/qcom/glink.c')
-rw-r--r--drivers/soc/qcom/glink.c105
1 files changed, 104 insertions, 1 deletions
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 4d5e4b781853..464fe17158cf 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -143,12 +143,29 @@ struct glink_core_xprt_ctx {
bool qos_req_active;
bool tx_path_activity;
struct delayed_work pm_qos_work;
+ struct glink_core_edge_ctx *edge_ctx;
struct mutex xprt_dbgfs_lock_lhb4;
void *log_ctx;
};
/**
+ * Edge Context - per-edge state shared by all transports of one edge
+ * @list_node:			node linking this ctx into the global edge_list
+ * @name:			name of the edge
+ * @edge_migration_lock_lhd2:	mutex serializing channel migration over this
+ *				edge (shared across the edge's transports)
+ * @edge_ref_lock_lhd1:		reference-count lock controlling ctx lifetime
+ */
+struct glink_core_edge_ctx {
+ struct list_head list_node;
+ char name[GLINK_NAME_SIZE];
+ struct mutex edge_migration_lock_lhd2;
+ struct rwref_lock edge_ref_lock_lhd1;
+};
+
+static LIST_HEAD(edge_list);
+static DEFINE_MUTEX(edge_list_lock_lhd0);
+/**
* Channel Context
* @xprt_state_lhb0: controls read/write access to channel state
* @port_list_node: channel list node used by transport "channels" list
@@ -955,6 +972,83 @@ err:
}
/**
+ * glink_core_migration_edge_lock() - gain a reference on the edge and take
+ *				its migration mutex lock
+ * @xprt_ctx:	transport of the edge
+ *
+ * Must be paired with glink_core_migration_edge_unlock().
+ */
+static void glink_core_migration_edge_lock(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;
+
+ rwref_get(&edge_ctx->edge_ref_lock_lhd1);
+ mutex_lock(&edge_ctx->edge_migration_lock_lhd2);
+}
+
+/**
+ * glink_core_migration_edge_unlock() - release the edge's migration mutex
+ *				lock and drop the reference taken by
+ *				glink_core_migration_edge_lock()
+ * @xprt_ctx:	transport of the edge
+ *
+ * Dropping the final reference triggers glink_edge_ctx_release().
+ */
+static void glink_core_migration_edge_unlock(
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;
+
+ mutex_unlock(&edge_ctx->edge_migration_lock_lhd2);
+ rwref_put(&edge_ctx->edge_ref_lock_lhd1);
+}
+
+/**
+ * glink_edge_ctx_release - Free the edge context
+ * @ch_st_lock: handle to the rwref_lock associated with the edge
+ *
+ * This should only be called when the reference count associated with the
+ * edge goes to zero (installed as the rwref_lock release callback).
+ *
+ * NOTE(review): a concurrent edge_name_to_ctx_create() that wins
+ * edge_list_lock_lhd0 before this runs could rwref_get() a ctx whose
+ * refcount already reached zero — confirm the rwref_lock semantics make
+ * that get/release ordering safe.
+ */
+static void glink_edge_ctx_release(struct rwref_lock *ch_st_lock)
+{
+ struct glink_core_edge_ctx *ctx = container_of(ch_st_lock,
+ struct glink_core_edge_ctx,
+ edge_ref_lock_lhd1);
+
+ /* removal from the global list is serialized with lookups */
+ mutex_lock(&edge_list_lock_lhd0);
+ list_del(&ctx->list_node);
+ mutex_unlock(&edge_list_lock_lhd0);
+ kfree(ctx);
+}
+
+
+/**
+ * edge_name_to_ctx_create() - look up an edge by name, creating the edge ctx
+ *				if it is not found.
+ * @xprt_ctx:	Transport whose edge name is searched for.
+ *
+ * The global edge_list is walked under edge_list_lock_lhd0; a hit takes an
+ * extra reference on the existing ctx, a miss allocates and registers a new
+ * ctx whose initial reference is held by the caller.
+ *
+ * Return: The edge ctx corresponding to the edge of @xprt_ctx, or NULL on
+ *	   allocation failure.
+ */
+static struct glink_core_edge_ctx *edge_name_to_ctx_create(
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ struct glink_core_edge_ctx *edge_ctx;
+
+ mutex_lock(&edge_list_lock_lhd0);
+ list_for_each_entry(edge_ctx, &edge_list, list_node) {
+ if (!strcmp(edge_ctx->name, xprt_ctx->edge)) {
+ rwref_get(&edge_ctx->edge_ref_lock_lhd1);
+ mutex_unlock(&edge_list_lock_lhd0);
+ return edge_ctx;
+ }
+ }
+ edge_ctx = kzalloc(sizeof(*edge_ctx), GFP_KERNEL);
+ if (!edge_ctx) {
+ /* don't deref NULL in strlcpy() or leave the list lock held */
+ mutex_unlock(&edge_list_lock_lhd0);
+ return NULL;
+ }
+ strlcpy(edge_ctx->name, xprt_ctx->edge, GLINK_NAME_SIZE);
+ rwref_lock_init(&edge_ctx->edge_ref_lock_lhd1, glink_edge_ctx_release);
+ mutex_init(&edge_ctx->edge_migration_lock_lhd2);
+ INIT_LIST_HEAD(&edge_ctx->list_node);
+ list_add_tail(&edge_ctx->list_node, &edge_list);
+ mutex_unlock(&edge_list_lock_lhd0);
+ return edge_ctx;
+}
+
+/**
* xprt_lcid_to_ch_ctx_get() - lookup a channel by local id
* @xprt_ctx: Transport to search for a matching channel.
* @lcid: Local channel identifier corresponding to the desired channel.
@@ -3441,6 +3535,7 @@ void glink_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
xprt_rm_dbgfs.par_name = "xprt";
glink_debugfs_remove_recur(&xprt_rm_dbgfs);
GLINK_INFO("%s: xprt debugfs removec\n", __func__);
+ rwref_put(&xprt_ctx->edge_ctx->edge_ref_lock_lhd1);
kthread_stop(xprt_ctx->tx_task);
xprt_ctx->tx_task = NULL;
glink_core_deinit_xprt_qos_cfg(xprt_ctx);
@@ -3710,6 +3805,7 @@ int glink_core_register_transport(struct glink_transport_if *if_ptr,
xprt_ptr->versions_entries = cfg->versions_entries;
xprt_ptr->local_version_idx = cfg->versions_entries - 1;
xprt_ptr->remote_version_idx = cfg->versions_entries - 1;
+ xprt_ptr->edge_ctx = edge_name_to_ctx_create(xprt_ptr);
xprt_ptr->l_features =
cfg->versions[cfg->versions_entries - 1].features;
if (!if_ptr->poll)
@@ -4556,11 +4652,13 @@ static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
uint16_t xprt_resp;
bool do_migrate;
+ glink_core_migration_edge_lock(if_ptr->glink_core_priv);
ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name);
if (ctx == NULL) {
GLINK_ERR_XPRT(if_ptr->glink_core_priv,
"%s: invalid rcid %u received, name '%s'\n",
__func__, rcid, name);
+ glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
return;
}
@@ -4569,6 +4667,7 @@ static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
GLINK_ERR_CH(ctx,
"%s: Duplicate remote open for rcid %u, name '%s'\n",
__func__, rcid, name);
+ glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
return;
}
@@ -4590,6 +4689,7 @@ static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
if (do_migrate)
ch_migrate(NULL, ctx);
+ glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
}
/**
@@ -4603,13 +4703,14 @@ static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
uint32_t lcid, uint16_t xprt_resp)
{
struct channel_ctx *ctx;
-
+ glink_core_migration_edge_lock(if_ptr->glink_core_priv);
ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
if (!ctx) {
/* unknown LCID received - this shouldn't happen */
GLINK_ERR_XPRT(if_ptr->glink_core_priv,
"%s: invalid lcid %u received\n", __func__,
(unsigned)lcid);
+ glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
return;
}
@@ -4618,6 +4719,7 @@ static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
"%s: unexpected open ack receive for lcid. Current state: %u. Thread: %u\n",
__func__, ctx->local_open_state, current->pid);
rwref_put(&ctx->ch_state_lhb2);
+ glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
return;
}
@@ -4636,6 +4738,7 @@ static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
}
}
rwref_put(&ctx->ch_state_lhb2);
+ glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
}
/**