From 9026b491f35f901d640fecdbd4abb983f4996762 Mon Sep 17 00:00:00 2001
From: Dhoat Harpal
Date: Fri, 9 Sep 2016 16:51:14 +0530
Subject: soc: qcom: glink: Fix list corruption for tx_info

Inside glink_scheduler_tx(), tx_info is not revalidated after the tx
operation completes and the spin lock is reacquired. Two functions,
ch_purge_intent_lists() and ch_remove_tx_pending_remote_done(), can
release the reference to tx_info while the glink_scheduler_tx thread
is preempted, leaving tx_info pointing at freed memory.

Validate that tx_info is still on the tx_active list after the tx
operation, once the spin lock has been taken again.

CRs-Fixed: 1061565
Change-Id: I80c64d66625b9fe9205e8ffaa7cfc851e06fcb94
Signed-off-by: Dhoat Harpal
---
 drivers/soc/qcom/glink.c | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index f00570aa5fe8..5612075ba60c 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -5376,7 +5376,7 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
 			struct glink_core_xprt_ctx *xprt_ctx)
 {
 	unsigned long flags;
-	struct glink_core_tx_pkt *tx_info;
+	struct glink_core_tx_pkt *tx_info, *temp_tx_info;
 	size_t txd_len = 0;
 	size_t tx_len = 0;
 	uint32_t num_pkts = 0;
@@ -5411,6 +5411,20 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
 					ctx->lcid, tx_info);
 		}
 		spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+		if (!list_empty(&ctx->tx_active)) {
+			/*
+			 * Verify that the same tx_info still exists in the
+			 * tx_active list and was not removed during the tx
+			 * operation. That can happen if SSR and tx done both
+			 * occur before tx_lists_lock_lhc3 is taken.
+			 */
+			temp_tx_info = list_first_entry(&ctx->tx_active,
+					struct glink_core_tx_pkt, list_node);
+			if (temp_tx_info != tx_info)
+				continue;
+		} else {
+			break;
+		}
 		if (ret == -EAGAIN) {
 			/*
 			 * transport unable to send at the moment and will call
@@ -5437,6 +5451,7 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
 			 * Break out of the loop so that the scheduler can
 			 * continue with the next channel.
 			 */
+			rwref_put(&tx_info->pkt_ref);
 			break;
 		} else {
 			txd_len += tx_len;
@@ -5445,8 +5460,8 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
 		if (!tx_info->size_remaining) {
 			num_pkts++;
 			list_del_init(&tx_info->list_node);
-			rwref_put(&tx_info->pkt_ref);
 		}
+		rwref_put(&tx_info->pkt_ref);
 	}
 
 	ctx->txd_len += txd_len;
@@ -5495,6 +5510,7 @@ static void tx_func(struct kthread_work *work)
 		glink_pm_qos_vote(xprt_ptr);
 		ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
 				struct channel_ctx, tx_ready_list_node);
+		rwref_get(&ch_ptr->ch_state_lhb2);
 		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
 
 		if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
@@ -5506,6 +5522,7 @@ static void tx_func(struct kthread_work *work)
 			GLINK_ERR_XPRT(xprt_ptr,
 				"%s: Unable to send data on this transport.\n",
 				__func__);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		}
 		transmitted_successfully = false;
@@ -5516,6 +5533,7 @@ static void tx_func(struct kthread_work *work)
 			 * transport unable to send at the moment and will call
 			 * tx_resume() when it can send again.
 			 */
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		} else if (ret < 0) {
 			/*
@@ -5528,6 +5546,7 @@ static void tx_func(struct kthread_work *work)
 			GLINK_ERR_XPRT(xprt_ptr,
 				"%s: unrecoverable xprt failure %d\n",
 				__func__, ret);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		} else if (!ret) {
 			/*
@@ -5539,6 +5558,7 @@ static void tx_func(struct kthread_work *work)
 			list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
 			spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3,
 						flags);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			continue;
 		}
 
@@ -5556,6 +5576,7 @@ static void tx_func(struct kthread_work *work)
 
 		tx_ready_head = NULL;
 		transmitted_successfully = true;
+		rwref_put(&ch_ptr->ch_state_lhb2);
 	}
 	glink_pm_qos_unvote(xprt_ptr);
 	GLINK_PERF("%s: worker exiting\n", __func__);
-- 
cgit v1.2.3
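
For readers who want the pattern in isolation: below is a minimal,
self-contained user-space sketch of the relock-and-revalidate scheme this
patch applies inside glink_scheduler_tx(). It is illustrative only, not the
driver's code: tx_pkt, scheduler_tx() and do_send() are invented names, a
pthread mutex stands in for the kernel spinlock, and a plain integer
refcount stands in for rwref_get()/rwref_put().

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct tx_pkt {
		int id;
		int refcount;		/* stand-in for rwref_get()/rwref_put() */
		struct tx_pkt *next;
	};

	static struct tx_pkt *tx_active;	/* head of the active-tx list */
	static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

	static void pkt_put(struct tx_pkt *pkt)
	{
		if (--pkt->refcount == 0)
			free(pkt);
	}

	/* Simulated transport send; the real driver drops the lock here. */
	static void do_send(struct tx_pkt *pkt)
	{
		printf("sending pkt %d\n", pkt->id);
	}

	static void scheduler_tx(void)
	{
		struct tx_pkt *pkt;

		pthread_mutex_lock(&tx_lock);
		while (tx_active) {
			pkt = tx_active;
			pkt->refcount++;	/* hold a ref across the unlock */
			pthread_mutex_unlock(&tx_lock);

			do_send(pkt);		/* lock dropped: racers may run */

			pthread_mutex_lock(&tx_lock);
			/*
			 * Revalidate, as the patch does: if the head of the
			 * list is no longer the packet we sent, a concurrent
			 * path removed it while the lock was dropped. Drop
			 * only our reference and reassess the list instead
			 * of touching a stale pointer.
			 */
			if (tx_active != pkt) {
				pkt_put(pkt);
				continue;
			}
			tx_active = pkt->next;	/* send complete: dequeue */
			pkt_put(pkt);		/* the list's reference */
			pkt_put(pkt);		/* our reference; frees it */
		}
		pthread_mutex_unlock(&tx_lock);
	}

	int main(void)
	{
		/* Build a two-packet active list: 1 -> 2, one ref each. */
		struct tx_pkt *p2 = calloc(1, sizeof(*p2));
		struct tx_pkt *p1 = calloc(1, sizeof(*p1));

		p2->id = 2; p2->refcount = 1;
		p1->id = 1; p1->refcount = 1; p1->next = p2;
		tx_active = p1;

		scheduler_tx();
		return 0;
	}

The key point mirrors the patch: the reference taken before dropping the
lock keeps the packet's memory valid across the send, and the head-of-list
comparison after relocking detects whether a concurrent path (in the
driver's case, SSR purge or a remote tx-done) already consumed the packet.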