Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--  drivers/block/xen-blkfront.c  138
1 file changed, 98 insertions, 40 deletions
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e1f71debdbba..1e44b7880200 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -64,6 +64,7 @@ enum blkif_state {
BLKIF_STATE_DISCONNECTED,
BLKIF_STATE_CONNECTED,
BLKIF_STATE_SUSPENDED,
+ BLKIF_STATE_ERROR,
};
struct grant {
@@ -79,6 +80,7 @@ struct blk_shadow {
struct grant **indirect_grants;
struct scatterlist *sg;
unsigned int num_sg;
+ bool inflight;
};
struct split_bio {
@@ -456,16 +458,31 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
}
+static unsigned long blkif_ring_get_request(struct blkfront_info *info,
+ struct request *req,
+ struct blkif_request **ring_req)
+{
+ unsigned long id;
+
+ *ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ info->ring.req_prod_pvt++;
+
+ id = get_id_from_freelist(info);
+ info->shadow[id].request = req;
+ info->shadow[id].req.u.rw.id = id;
+
+ return id;
+}
+
static int blkif_queue_discard_req(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
- struct blkif_request *ring_req;
+ struct blkif_request *ring_req, *final_ring_req;
unsigned long id;
/* Fill out a communications ring structure. */
- ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
- id = get_id_from_freelist(info);
- info->shadow[id].request = req;
+ id = blkif_ring_get_request(info, req, &final_ring_req);
+ ring_req = &info->shadow[id].req;
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -478,8 +495,9 @@ static int blkif_queue_discard_req(struct request *req)
- info->ring.req_prod_pvt++;
- /* Keep a private copy so we can reissue requests when recovering. */
- info->shadow[id].req = *ring_req;
+ /* Copy the request to the ring page. */
+ *final_ring_req = *ring_req;
+ info->shadow[id].inflight = true;
return 0;
}
@@ -569,7 +587,7 @@ static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
static int blkif_queue_rw_req(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
- struct blkif_request *ring_req;
+ struct blkif_request *ring_req, *final_ring_req;
unsigned long id;
int i;
struct setup_rw_req setup = {
@@ -613,9 +631,8 @@ static int blkif_queue_rw_req(struct request *req)
new_persistent_gnts = 0;
/* Fill out a communications ring structure. */
- ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
- id = get_id_from_freelist(info);
- info->shadow[id].request = req;
+ id = blkif_ring_get_request(info, req, &final_ring_req);
+ ring_req = &info->shadow[id].req;
BUG_ON(info->max_indirect_segments == 0 &&
GREFS(req->nr_phys_segments) > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -696,8 +713,9 @@ static int blkif_queue_rw_req(struct request *req)
- info->ring.req_prod_pvt++;
- /* Keep a private copy so we can reissue requests when recovering. */
- info->shadow[id].req = *ring_req;
+ /* Copy request(s) to the ring page. */
+ *final_ring_req = *ring_req;
+ info->shadow[id].inflight = true;
if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
@@ -1296,58 +1314,84 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
struct request *req;
- struct blkif_response *bret;
+ struct blkif_response bret;
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
int error;
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
spin_lock_irqsave(&info->io_lock, flags);
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
spin_unlock_irqrestore(&info->io_lock, flags);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
return IRQ_HANDLED;
}
again:
- rp = info->ring.sring->rsp_prod;
+ rp = READ_ONCE(info->ring.sring->rsp_prod);
rmb(); /* Ensure we see queued responses up to 'rp'. */
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
+ pr_alert("%s: illegal number of responses %u\n",
+ info->gd->disk_name, rp - info->ring.rsp_cons);
+ goto err;
+ }
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
+ unsigned int op;
+
+ eoiflag = 0;
+
+ RING_COPY_RESPONSE(&info->ring, i, &bret);
+ id = bret.id;
- bret = RING_GET_RESPONSE(&info->ring, i);
- id = bret->id;
/*
* The backend has messed up and given us an id that we would
* never have given to it (we stamp it up to BLK_RING_SIZE -
* look in get_id_from_freelist.
*/
if (id >= BLK_RING_SIZE(info)) {
- WARN(1, "%s: response to %s has incorrect id (%ld)\n",
- info->gd->disk_name, op_name(bret->operation), id);
- /* We can't safely get the 'struct request' as
- * the id is busted. */
- continue;
+ pr_alert("%s: response has incorrect id (%ld)\n",
+ info->gd->disk_name, id);
+ goto err;
}
+ if (!info->shadow[id].inflight) {
+ pr_alert("%s: response references no pending request\n",
+ info->gd->disk_name);
+ goto err;
+ }
+
+ info->shadow[id].inflight = false;
req = info->shadow[id].request;
- if (bret->operation != BLKIF_OP_DISCARD)
- blkif_completion(&info->shadow[id], info, bret);
+ op = info->shadow[id].req.operation;
+ if (op == BLKIF_OP_INDIRECT)
+ op = info->shadow[id].req.u.indirect.indirect_op;
+ if (bret.operation != op) {
+ pr_alert("%s: response has wrong operation (%u instead of %u)\n",
+ info->gd->disk_name, bret.operation, op);
+ goto err;
+ }
+
+ if (bret.operation != BLKIF_OP_DISCARD)
+ blkif_completion(&info->shadow[id], info, &bret);
if (add_id_to_freelist(info, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
- info->gd->disk_name, op_name(bret->operation), id);
+ info->gd->disk_name, op_name(bret.operation), id);
continue;
}
- error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
- switch (bret->operation) {
+ error = (bret.status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ switch (bret.operation) {
case BLKIF_OP_DISCARD:
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+
+ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -1358,15 +1402,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
+ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
}
- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
- printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+ pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
+ info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {
@@ -1378,9 +1422,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
/* fall through */
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
- if (unlikely(bret->status != BLKIF_RSP_OKAY))
- dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
- "request: %x\n", bret->status);
+ if (unlikely(bret.status != BLKIF_RSP_OKAY))
+ dev_dbg_ratelimited(&info->xbdev->dev,
+ "Bad return from blkdev data request: %x\n",
+ bret.status);
blk_mq_complete_request(req, error);
break;
@@ -1403,6 +1448,18 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&info->io_lock, flags);
+ xen_irq_lateeoi(irq, eoiflag);
+
+ return IRQ_HANDLED;
+
+ err:
+ info->connected = BLKIF_STATE_ERROR;
+
+ spin_unlock_irqrestore(&info->io_lock, flags);
+
+ /* No EOI in order to avoid further interrupts. */
+
+ pr_alert("%s disabled for further use\n", info->gd->disk_name);
return IRQ_HANDLED;
}
@@ -1440,8 +1497,8 @@ static int setup_blkring(struct xenbus_device *dev,
if (err)
goto fail;
- err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
- "blkif", info);
+ err = bind_evtchn_to_irqhandler_lateeoi(info->evtchn, blkif_interrupt,
+ 0, "blkif", info);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");
@@ -1913,6 +1970,7 @@ out_of_memory:
info->shadow[i].sg = NULL;
kfree(info->shadow[i].indirect_grants);
info->shadow[i].indirect_grants = NULL;
+ info->shadow[i].inflight = false;
}
if (!list_empty(&info->indirect_pages)) {
struct page *indirect_page, *n;
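
For reference, the central hardening pattern in this patch (copy each backend response out of the shared ring page before trusting any field, then validate it against frontend-private bookkeeping) can be sketched in isolation. The following is a minimal, self-contained C model, not the driver's actual code: the demo_* names, DEMO_RING_SIZE and the plain memcpy() are hypothetical stand-ins for the kernel's shadow array, BLK_RING_SIZE(info) and the RING_COPY_RESPONSE() macro.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_RING_SIZE 32		/* stand-in for BLK_RING_SIZE(info) */

struct demo_response {
	uint64_t id;
	uint8_t operation;
	int16_t status;
};

/* Frontend-private bookkeeping, analogous to struct blk_shadow. */
struct demo_shadow {
	bool inflight;			/* set when the request was queued */
	uint8_t operation;		/* operation stamped at queue time */
};

/* Returns 0 on success, -1 if the (possibly hostile) backend misbehaved. */
static int demo_handle_response(const struct demo_response *shared_slot,
				struct demo_shadow *shadow)
{
	struct demo_response bret;

	/*
	 * Copy the response into private memory before looking at any field
	 * and validate only the copy: the backend shares the ring page and
	 * could rewrite the slot between a check and a later use (TOCTOU).
	 */
	memcpy(&bret, shared_slot, sizeof(bret));

	if (bret.id >= DEMO_RING_SIZE) {	/* id out of range */
		fprintf(stderr, "response has incorrect id (%llu)\n",
			(unsigned long long)bret.id);
		return -1;
	}
	if (!shadow[bret.id].inflight) {	/* duplicate or spurious id */
		fprintf(stderr, "response references no pending request\n");
		return -1;
	}
	if (bret.operation != shadow[bret.id].operation) {
		fprintf(stderr, "response has wrong operation (%u)\n",
			bret.operation);
		return -1;
	}

	shadow[bret.id].inflight = false;
	/* ... complete the request using only the private copy ... */
	return 0;
}

int main(void)
{
	struct demo_shadow shadow[DEMO_RING_SIZE] = { 0 };
	struct demo_response good = { .id = 3, .operation = 1, .status = 0 };

	shadow[3].inflight = true;
	shadow[3].operation = 1;

	/* First delivery is accepted; a replay of the same id is rejected. */
	printf("first:  %d\n", demo_handle_response(&good, shadow));
	printf("replay: %d\n", demo_handle_response(&good, shadow));
	return 0;
}

As in the patch itself, a validation failure would not attempt recovery: blkif_interrupt() moves the device to BLKIF_STATE_ERROR, skips the interrupt EOI, and marks the disk as disabled for further use.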