summaryrefslogtreecommitdiff
path: root/block
diff options
context:
space:
mode:
authorGilad Broner <gbroner@codeaurora.org>2015-03-31 16:17:25 +0300
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-22 11:01:59 -0700
commita27d5a2f83bba21a78f45abe63ada896e7b4fc31 (patch)
tree433943f06979992521980b70287c55037b7aa28a /block
parentb5bec249a9b43ebbd5a9df1bd31bc3f94c2e77b3 (diff)
block: test-iosched: fix spinlock recursion
spin_lock_irq() / spin_unlock_irq() are used, so interrupts are enabled after unlocking the spinlock. However, it is not guaranteed they were enabled before. This change uses the proper irqsave / irqrestore variants instead. Without it, a spinlock recursion on the scsi request completion path is possible if a completion interrupt occurs when used for UFS testing. Change-Id: I25a9bf6faaa2bbfedc807111fbcb32276cccea2f Signed-off-by: Gilad Broner <gbroner@codeaurora.org>
Diffstat (limited to 'block')
-rw-r--r--block/test-iosched.c | 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/block/test-iosched.c b/block/test-iosched.c
index cf50436d2d17..2a0be42dd930 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -167,6 +167,7 @@ int test_iosched_add_unique_test_req(struct test_iosched *tios,
struct request *rq;
int rw_flags;
struct test_request *test_rq;
+ unsigned long flags;
if (!tios)
return -ENODEV;
@@ -230,10 +231,10 @@ int test_iosched_add_unique_test_req(struct test_iosched *tios,
"%s: added request %d to the test requests list, type = %d",
__func__, test_rq->req_id, req_unique);
- spin_lock_irq(tios->req_q->queue_lock);
+ spin_lock_irqsave(tios->req_q->queue_lock, flags);
list_add_tail(&test_rq->queuelist, &tios->test_queue);
tios->test_count++;
- spin_unlock_irq(tios->req_q->queue_lock);
+ spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
return 0;
}
@@ -408,14 +409,15 @@ int test_iosched_add_wr_rd_test_req(struct test_iosched *tios,
int pattern, rq_end_io_fn *end_req_io)
{
struct test_request *test_rq = NULL;
+ unsigned long flags;
test_rq = test_iosched_create_test_req(tios, is_err_expcted, direction,
start_sec, num_bios, pattern, end_req_io);
if (test_rq) {
- spin_lock_irq(tios->req_q->queue_lock);
+ spin_lock_irqsave(tios->req_q->queue_lock, flags);
list_add_tail(&test_rq->queuelist, &tios->test_queue);
tios->test_count++;
- spin_unlock_irq(tios->req_q->queue_lock);
+ spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
return 0;
}
return -ENODEV;
@@ -1106,6 +1108,7 @@ static int test_init_queue(struct request_queue *q, struct elevator_type *e)
const char *blk_dev_name;
int ret;
bool found = false;
+ unsigned long flags;
eq = elevator_alloc(q, e);
if (!eq)
@@ -1168,9 +1171,9 @@ static int test_init_queue(struct request_queue *q, struct elevator_type *e)
}
}
- spin_lock_irq(q->queue_lock);
+ spin_lock_irqsave(q->queue_lock, flags);
q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
@@ -1209,14 +1212,16 @@ static void test_exit_queue(struct elevator_queue *e)
void test_iosched_add_urgent_req(struct test_iosched *tios,
struct test_request *test_rq)
{
+ unsigned long flags;
+
if (!tios)
return;
- spin_lock_irq(&tios->lock);
+ spin_lock_irqsave(&tios->lock, flags);
test_rq->rq->cmd_flags |= REQ_URGENT;
list_add_tail(&test_rq->queuelist, &tios->urgent_queue);
tios->urgent_count++;
- spin_unlock_irq(&tios->lock);
+ spin_unlock_irqrestore(&tios->lock, flags);
}
EXPORT_SYMBOL(test_iosched_add_urgent_req);