NVMe: Make iod bio timeout a parameter
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 025dd4cad4a608fe09f520479def2847cf7afc16..1a911067061c20f10a95cc72e82fdfef8173832e 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT  (60 * HZ)
-#define IOD_TIMEOUT    (4 * NVME_IO_TIMEOUT)
+#define IOD_TIMEOUT    (retry_time * HZ)
 
 unsigned char io_timeout = 30;
 module_param(io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
+static unsigned char retry_time = 30;
+module_param(retry_time, byte, 0644);
+MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
+
 static int nvme_major;
 module_param(nvme_major, int, 0);
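
The retry window is now user-tunable: loading with "modprobe nvme retry_time=60"
widens it, and since the parameter is created with mode 0644 it can also be
changed at runtime via /sys/module/nvme/parameters/retry_time. For context, a
minimal sketch of how a cutoff like IOD_TIMEOUT is typically consumed on the
retry path; the start_time field and the helper name here are illustrative
assumptions, not part of this patch:

	/* Sketch: give up on an iod once it has been pending longer than
	 * the user-configurable retry window. */
	static bool nvme_iod_expired(struct nvme_iod *iod)
	{
		/* start_time assumed recorded in jiffies at first submission */
		return time_after(jiffies, iod->start_time + IOD_TIMEOUT);
	}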
 
@@ -197,16 +201,13 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 #define CMD_CTX_CANCELLED      (0x30C + CMD_CTX_BASE)
 #define CMD_CTX_COMPLETED      (0x310 + CMD_CTX_BASE)
 #define CMD_CTX_INVALID                (0x314 + CMD_CTX_BASE)
-#define CMD_CTX_FLUSH          (0x318 + CMD_CTX_BASE)
-#define CMD_CTX_ABORT          (0x31C + CMD_CTX_BASE)
+#define CMD_CTX_ABORT          (0x318 + CMD_CTX_BASE)
 
 static void special_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
        if (ctx == CMD_CTX_CANCELLED)
                return;
-       if (ctx == CMD_CTX_FLUSH)
-               return;
        if (ctx == CMD_CTX_ABORT) {
                ++nvmeq->dev->abort_limit;
                return;
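
The CMD_CTX_* values are sentinel contexts: tagged constants that alloc_cmdid()
stores in place of a real per-command context and that special_completion()
recognizes on the completion side. With the flush sentinel gone, CMD_CTX_ABORT
slides down into its slot. A sketch of how such a sentinel is allocated,
mirroring the nvme_submit_flush_data() call removed below (the ADMIN_TIMEOUT
choice here is an assumption):

	/* Sketch: track an abort with a sentinel context instead of an iod */
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_ABORT,
				special_completion, ADMIN_TIMEOUT);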
@@ -629,16 +630,6 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        return 0;
 }
 
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
-{
-       int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
-                                       special_completion, NVME_IO_TIMEOUT);
-       if (unlikely(cmdid < 0))
-               return cmdid;
-
-       return nvme_submit_flush(nvmeq, ns, cmdid);
-}
-
 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 {
        struct bio *bio = iod->private;
@@ -654,7 +645,7 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 
        if (bio->bi_rw & REQ_DISCARD)
                return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
-       if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
+       if (bio->bi_rw & REQ_FLUSH)
                return nvme_submit_flush(nvmeq, ns, cmdid);
 
        control = 0;
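
Both the discard and flush branches above hand the freshly allocated cmdid to a
type-specific submit helper. For reference, a reconstruction of
nvme_submit_flush() from this era of the driver (approximate; treat field and
doorbell details as assumptions rather than quoted source):

	static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
	{
		struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

		memset(cmnd, 0, sizeof(*cmnd));
		cmnd->common.opcode = nvme_cmd_flush;
		cmnd->common.command_id = cmdid;
		cmnd->common.nsid = cpu_to_le32(ns->ns_id);

		/* advance the tail and ring the submission-queue doorbell */
		if (++nvmeq->sq_tail == nvmeq->q_depth)
			nvmeq->sq_tail = 0;
		writel(nvmeq->sq_tail, nvmeq->q_db);

		return 0;
	}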
@@ -688,6 +679,26 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
        return 0;
 }
 
+static int nvme_split_flush_data(struct nvme_queue *nvmeq, struct bio *bio)
+{
+       struct bio *split = bio_clone(bio, GFP_ATOMIC);
+       if (!split)
+               return -ENOMEM;
+
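+       /* The zero-length clone carries REQ_FLUSH; the original keeps the data. */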
+       split->bi_iter.bi_size = 0;
+       split->bi_phys_segments = 0;
+       bio->bi_rw &= ~REQ_FLUSH;
+       bio_chain(split, bio);
+
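+       /*
+        * Queue the flush ahead of the data and kick the nvme thread to
+        * resubmit both; bio_chain() above keeps the parent bio from
+        * completing before the split flush does.
+        */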
+       if (!waitqueue_active(&nvmeq->sq_full))
+               add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+       bio_list_add(&nvmeq->sq_cong, split);
+       bio_list_add(&nvmeq->sq_cong, bio);
+       wake_up_process(nvme_thread);
+
+       return 0;
+}
+
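The split closes an ordering hole: there is no composite flush+write NVMe
command, and a device may execute a separately submitted flush and write in
either order, so completing the bio on the write alone could report success
while the flush was still outstanding. Chaining moves the ordering guarantee
into the block layer. Roughly what bio_chain() arranges in this kernel
generation (a reconstruction; treat the internals as approximate):

	static void bio_chain_endio(struct bio *bio, int error)
	{
		bio_endio(bio->bi_private, error);	/* completes the parent */
		bio_put(bio);
	}

	void bio_chain(struct bio *bio, struct bio *parent)
	{
		bio->bi_private = parent;
		bio->bi_end_io = bio_chain_endio;
		atomic_inc(&parent->bi_remaining);	/* parent waits for child */
	}
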
 /*
  * Called with local interrupts disabled and the q_lock held.  May not sleep.
  */
@@ -698,11 +709,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        int psegs = bio_phys_segments(ns->queue, bio);
        int result;
 
-       if ((bio->bi_rw & REQ_FLUSH) && psegs) {
-               result = nvme_submit_flush_data(nvmeq, ns);
-               if (result)
-                       return result;
-       }
+       if ((bio->bi_rw & REQ_FLUSH) && psegs)
+               return nvme_split_flush_data(nvmeq, bio);
 
        iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
        if (!iod)
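
After the split, neither half re-enters this path with both the flush flag and
a payload: the clone has no segments and the original has lost REQ_FLUSH. Both
wait on sq_cong until the nvme kernel thread resubmits them, along the lines of
the driver's congestion handler (sketched from the same era; details are
approximate):

	static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
	{
		struct bio *bio;

		while ((bio = bio_list_pop(&nvmeq->sq_cong))) {
			struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;

			if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
				/* still congested: requeue and retry later */
				bio_list_add_head(&nvmeq->sq_cong, bio);
				break;
			}
		}
	}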
@@ -1484,7 +1492,11 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
                goto put_pages;
        }
 
+       err = -ENOMEM;
        iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+       if (!iod)
+               goto put_pages;
+
        sg = iod->sg;
        sg_init_table(sg, count);
        for (i = 0; i < count; i++) {
@@ -1497,7 +1509,6 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
        sg_mark_end(&sg[i - 1]);
        iod->nents = count;
 
-       err = -ENOMEM;
        nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
                                write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!nents)
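
The two hunks above are one fix: err was previously set to -ENOMEM only after
the allocation, so a failed nvme_alloc_iod() left iod NULL to be dereferenced
at iod->sg. Setting err first and bailing out through the unwind path returns
an error pointer instead. The epilogue is not shown in the hunk; its assumed
shape (reconstructed, approximate):

	 put_pages:
		for (i = 0; i < count; i++)
			put_page(pages[i]);
		kfree(pages);
		return ERR_PTR(err);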
@@ -1897,6 +1908,8 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        if (dev->max_hw_sectors)
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+       if (dev->vwc & NVME_CTRL_VWC_PRESENT)
+               blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
 
        disk->major = nvme_major;
        disk->first_minor = 0;
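
REQ_FLUSH and REQ_FUA are now advertised only when the controller reports a
volatile write cache, so empty flushes are no longer sent to cacheless devices
(the driver already maps REQ_FUA onto the command's FUA control bit). The flag
tested is bit 0 of the Identify Controller VWC byte; its definition, as
expected in the NVMe header of this period (verify against
include/linux/nvme.h):

	enum {
		NVME_CTRL_VWC_PRESENT = 1 << 0,	/* volatile write cache present */
	};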
@@ -2201,6 +2214,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
        nn = le32_to_cpup(&ctrl->nn);
        dev->oncs = le16_to_cpup(&ctrl->oncs);
        dev->abort_limit = ctrl->acl + 1;
+       dev->vwc = ctrl->vwc;
        memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
        memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
        memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2905,6 +2919,7 @@ static void __exit nvme_exit(void)
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
+       _nvme_check_size();
 }
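
_nvme_check_size() is a body of compile-time layout assertions; giving it a
call site in nvme_exit() ensures the compiler evaluates the BUILD_BUG_ON()s
while emitting no runtime work. Its assumed shape (a representative subset):

	static inline void _nvme_check_size(void)
	{
		BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
		BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
		BUILD_BUG_ON(sizeof(struct nvme_completion) != 16);
		BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	}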
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");