drivers/block/nvme-core.c
1 /*
2 * NVM Express device driver
3 * Copyright (c) 2011, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #include <linux/nvme.h>
20 #include <linux/bio.h>
21 #include <linux/bitops.h>
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/errno.h>
25 #include <linux/fs.h>
26 #include <linux/genhd.h>
27 #include <linux/idr.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/io.h>
31 #include <linux/kdev_t.h>
32 #include <linux/kthread.h>
33 #include <linux/kernel.h>
34 #include <linux/mm.h>
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/pci.h>
38 #include <linux/poison.h>
39 #include <linux/ptrace.h>
40 #include <linux/sched.h>
41 #include <linux/slab.h>
42 #include <linux/types.h>
43 #include <scsi/sg.h>
44 #include <asm-generic/io-64-nonatomic-lo-hi.h>
45
46 #define NVME_Q_DEPTH 1024
47 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
48 #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
49 #define ADMIN_TIMEOUT (60 * HZ)
50
51 static int nvme_major;
52 module_param(nvme_major, int, 0);
53
54 static int use_threaded_interrupts;
55 module_param(use_threaded_interrupts, int, 0);
56
57 static DEFINE_SPINLOCK(dev_list_lock);
58 static LIST_HEAD(dev_list);
59 static struct task_struct *nvme_thread;
60 static struct workqueue_struct *nvme_workq;
61
62 static void nvme_reset_failed_dev(struct work_struct *ws);
63
64 struct async_cmd_info {
65 struct kthread_work work;
66 struct kthread_worker *worker;
67 u32 result;
68 int status;
69 void *ctx;
70 };
71
72 /*
73 * An NVM Express queue. Each device has at least two (one for admin
74 * commands and one for I/O commands).
75 */
76 struct nvme_queue {
77 struct device *q_dmadev;
78 struct nvme_dev *dev;
79 char irqname[24]; /* nvme4294967295-65535\0 */
80 spinlock_t q_lock;
81 struct nvme_command *sq_cmds;
82 volatile struct nvme_completion *cqes;
83 dma_addr_t sq_dma_addr;
84 dma_addr_t cq_dma_addr;
85 wait_queue_head_t sq_full;
86 wait_queue_t sq_cong_wait;
87 struct bio_list sq_cong;
88 u32 __iomem *q_db;
89 u16 q_depth;
90 u16 cq_vector;
91 u16 sq_head;
92 u16 sq_tail;
93 u16 cq_head;
94 u16 qid;
95 u8 cq_phase;
96 u8 cqe_seen;
97 u8 q_suspended;
98 struct async_cmd_info cmdinfo;
99 unsigned long cmdid_data[];
100 };
101
102 /*
 103  * Check we didn't inadvertently grow the command struct
104 */
105 static inline void _nvme_check_size(void)
106 {
107 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
108 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
109 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
110 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
111 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
112 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
113 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
114 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
115 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
116 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
117 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
118 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
119 }
120
121 typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
122 struct nvme_completion *);
123
124 struct nvme_cmd_info {
125 nvme_completion_fn fn;
126 void *ctx;
127 unsigned long timeout;
128 int aborted;
129 };
130
131 static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
132 {
133 return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
134 }
135
136 static unsigned nvme_queue_extra(int depth)
137 {
138 return DIV_ROUND_UP(depth, 8) + (depth * sizeof(struct nvme_cmd_info));
139 }
140
141 /**
142 * alloc_cmdid() - Allocate a Command ID
143 * @nvmeq: The queue that will be used for this command
144 * @ctx: A pointer that will be passed to the handler
145 * @handler: The function to call on completion
146 *
147 * Allocate a Command ID for a queue. The data passed in will
148 * be passed to the completion handler. This is implemented by using
149 * the bottom two bits of the ctx pointer to store the handler ID.
150 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
151 * We can change this if it becomes a problem.
152 *
153 * May be called with local interrupts disabled and the q_lock held,
154 * or with interrupts enabled and no locks held.
155 */
156 static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
157 nvme_completion_fn handler, unsigned timeout)
158 {
159 int depth = nvmeq->q_depth - 1;
160 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
161 int cmdid;
162
163 do {
164 cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
165 if (cmdid >= depth)
166 return -EBUSY;
167 } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
168
169 info[cmdid].fn = handler;
170 info[cmdid].ctx = ctx;
171 info[cmdid].timeout = jiffies + timeout;
172 info[cmdid].aborted = 0;
173 return cmdid;
174 }
175
176 static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
177 nvme_completion_fn handler, unsigned timeout)
178 {
179 int cmdid;
180 wait_event_killable(nvmeq->sq_full,
181 (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
182 return (cmdid < 0) ? -EINTR : cmdid;
183 }
184
185 /* Special values must be less than 0x1000 */
186 #define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
187 #define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
188 #define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
189 #define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
190 #define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
191 #define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
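
/*
 * These sentinels are offset by POISON_POINTER_DELTA so they fall in a
 * deliberately poisoned, non-dereferenceable address range: a real ctx
 * pointer handed to alloc_cmdid() can never equal one of them, and
 * special_completion() below can safely key off these values instead of
 * treating ctx as a live command context.
 */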
192
193 static void special_completion(struct nvme_dev *dev, void *ctx,
194 struct nvme_completion *cqe)
195 {
196 if (ctx == CMD_CTX_CANCELLED)
197 return;
198 if (ctx == CMD_CTX_FLUSH)
199 return;
200 if (ctx == CMD_CTX_ABORT) {
201 ++dev->abort_limit;
202 return;
203 }
204 if (ctx == CMD_CTX_COMPLETED) {
205 dev_warn(&dev->pci_dev->dev,
206 "completed id %d twice on queue %d\n",
207 cqe->command_id, le16_to_cpup(&cqe->sq_id));
208 return;
209 }
210 if (ctx == CMD_CTX_INVALID) {
211 dev_warn(&dev->pci_dev->dev,
212 "invalid id %d completed on queue %d\n",
213 cqe->command_id, le16_to_cpup(&cqe->sq_id));
214 return;
215 }
216
217 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
218 }
219
220 static void async_completion(struct nvme_dev *dev, void *ctx,
221 struct nvme_completion *cqe)
222 {
223 struct async_cmd_info *cmdinfo = ctx;
224 cmdinfo->result = le32_to_cpup(&cqe->result);
225 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
226 queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
227 }
228
229 /*
230 * Called with local interrupts disabled and the q_lock held. May not sleep.
231 */
232 static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
233 nvme_completion_fn *fn)
234 {
235 void *ctx;
236 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
237
238 if (cmdid >= nvmeq->q_depth) {
239 *fn = special_completion;
240 return CMD_CTX_INVALID;
241 }
242 if (fn)
243 *fn = info[cmdid].fn;
244 ctx = info[cmdid].ctx;
245 info[cmdid].fn = special_completion;
246 info[cmdid].ctx = CMD_CTX_COMPLETED;
247 clear_bit(cmdid, nvmeq->cmdid_data);
248 wake_up(&nvmeq->sq_full);
249 return ctx;
250 }
251
252 static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
253 nvme_completion_fn *fn)
254 {
255 void *ctx;
256 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
257 if (fn)
258 *fn = info[cmdid].fn;
259 ctx = info[cmdid].ctx;
260 info[cmdid].fn = special_completion;
261 info[cmdid].ctx = CMD_CTX_CANCELLED;
262 return ctx;
263 }
264
265 struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
266 {
267 return dev->queues[get_cpu() + 1];
268 }
269
270 void put_nvmeq(struct nvme_queue *nvmeq)
271 {
272 put_cpu();
273 }
274
275 /**
276 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
277 * @nvmeq: The queue to use
278 * @cmd: The command to send
279 *
280 * Safe to use from interrupt context
281 */
282 static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
283 {
284 unsigned long flags;
285 u16 tail;
286 spin_lock_irqsave(&nvmeq->q_lock, flags);
287 tail = nvmeq->sq_tail;
288 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
289 if (++tail == nvmeq->q_depth)
290 tail = 0;
291 writel(tail, nvmeq->q_db);
292 nvmeq->sq_tail = tail;
293 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
294
295 return 0;
296 }
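
/*
 * Illustrative sketch only (not part of the driver): the minimal
 * pattern for issuing a command on a queue. A command ID is reserved
 * with alloc_cmdid(), recorded in the command, and the submission
 * doorbell is rung by nvme_submit_cmd(); the handler runs when the
 * matching completion arrives. The function name is hypothetical.
 */
static int __maybe_unused example_submit(struct nvme_queue *nvmeq, void *ctx,
			nvme_completion_fn handler, struct nvme_command *cmd)
{
	int cmdid = alloc_cmdid(nvmeq, ctx, handler, NVME_IO_TIMEOUT);

	if (cmdid < 0)
		return cmdid;		/* -EBUSY: all command IDs in use */
	cmd->common.command_id = cmdid;
	return nvme_submit_cmd(nvmeq, cmd);
}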
297
298 static __le64 **iod_list(struct nvme_iod *iod)
299 {
300 return ((void *)iod) + iod->offset;
301 }
302
303 /*
304 * Will slightly overestimate the number of pages needed. This is OK
305 * as it only leads to a small amount of wasted memory for the lifetime of
306 * the I/O.
307 */
308 static int nvme_npages(unsigned size)
309 {
310 unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
311 return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
312 }
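
/*
 * Worked example, assuming 4 KiB pages: a 260 KiB transfer yields
 * nprps = DIV_ROUND_UP(260 KiB + 4 KiB, 4 KiB) = 66 PRP entries (the
 * extra PAGE_SIZE term covers a transfer starting mid-page). Each list
 * page holds PAGE_SIZE / 8 - 1 = 511 entries plus a chain pointer, so
 * DIV_ROUND_UP(8 * 66, 4096 - 8) = 1 PRP list page suffices.
 */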
313
314 static struct nvme_iod *
315 nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
316 {
317 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
318 sizeof(__le64 *) * nvme_npages(nbytes) +
319 sizeof(struct scatterlist) * nseg, gfp);
320
321 if (iod) {
322 iod->offset = offsetof(struct nvme_iod, sg[nseg]);
323 iod->npages = -1;
324 iod->length = nbytes;
325 iod->nents = 0;
326 iod->start_time = jiffies;
327 }
328
329 return iod;
330 }
331
332 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
333 {
334 const int last_prp = PAGE_SIZE / 8 - 1;
335 int i;
336 __le64 **list = iod_list(iod);
337 dma_addr_t prp_dma = iod->first_dma;
338
339 if (iod->npages == 0)
340 dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
341 for (i = 0; i < iod->npages; i++) {
342 __le64 *prp_list = list[i];
343 dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
344 dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
345 prp_dma = next_prp_dma;
346 }
347 kfree(iod);
348 }
349
350 static void nvme_start_io_acct(struct bio *bio)
351 {
352 struct gendisk *disk = bio->bi_bdev->bd_disk;
353 const int rw = bio_data_dir(bio);
354 int cpu = part_stat_lock();
355 part_round_stats(cpu, &disk->part0);
356 part_stat_inc(cpu, &disk->part0, ios[rw]);
357 part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
358 part_inc_in_flight(&disk->part0, rw);
359 part_stat_unlock();
360 }
361
362 static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
363 {
364 struct gendisk *disk = bio->bi_bdev->bd_disk;
365 const int rw = bio_data_dir(bio);
366 unsigned long duration = jiffies - start_time;
367 int cpu = part_stat_lock();
368 part_stat_add(cpu, &disk->part0, ticks[rw], duration);
369 part_round_stats(cpu, &disk->part0);
370 part_dec_in_flight(&disk->part0, rw);
371 part_stat_unlock();
372 }
373
374 static void bio_completion(struct nvme_dev *dev, void *ctx,
375 struct nvme_completion *cqe)
376 {
377 struct nvme_iod *iod = ctx;
378 struct bio *bio = iod->private;
379 u16 status = le16_to_cpup(&cqe->status) >> 1;
380
381 if (iod->nents) {
382 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
383 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
384 nvme_end_io_acct(bio, iod->start_time);
385 }
386 nvme_free_iod(dev, iod);
387 if (status)
388 bio_endio(bio, -EIO);
389 else
390 bio_endio(bio, 0);
391 }
392
 393 /* length is in bytes. gfp flags indicate whether we may sleep. */
394 int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
395 struct nvme_iod *iod, int total_len, gfp_t gfp)
396 {
397 struct dma_pool *pool;
398 int length = total_len;
399 struct scatterlist *sg = iod->sg;
400 int dma_len = sg_dma_len(sg);
401 u64 dma_addr = sg_dma_address(sg);
402 int offset = offset_in_page(dma_addr);
403 __le64 *prp_list;
404 __le64 **list = iod_list(iod);
405 dma_addr_t prp_dma;
406 int nprps, i;
407
408 cmd->prp1 = cpu_to_le64(dma_addr);
409 length -= (PAGE_SIZE - offset);
410 if (length <= 0)
411 return total_len;
412
413 dma_len -= (PAGE_SIZE - offset);
414 if (dma_len) {
415 dma_addr += (PAGE_SIZE - offset);
416 } else {
417 sg = sg_next(sg);
418 dma_addr = sg_dma_address(sg);
419 dma_len = sg_dma_len(sg);
420 }
421
422 if (length <= PAGE_SIZE) {
423 cmd->prp2 = cpu_to_le64(dma_addr);
424 return total_len;
425 }
426
427 nprps = DIV_ROUND_UP(length, PAGE_SIZE);
428 if (nprps <= (256 / 8)) {
429 pool = dev->prp_small_pool;
430 iod->npages = 0;
431 } else {
432 pool = dev->prp_page_pool;
433 iod->npages = 1;
434 }
435
436 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
437 if (!prp_list) {
438 cmd->prp2 = cpu_to_le64(dma_addr);
439 iod->npages = -1;
440 return (total_len - length) + PAGE_SIZE;
441 }
442 list[0] = prp_list;
443 iod->first_dma = prp_dma;
444 cmd->prp2 = cpu_to_le64(prp_dma);
445 i = 0;
446 for (;;) {
447 if (i == PAGE_SIZE / 8) {
448 __le64 *old_prp_list = prp_list;
449 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
450 if (!prp_list)
451 return total_len - length;
452 list[iod->npages++] = prp_list;
453 prp_list[0] = old_prp_list[i - 1];
454 old_prp_list[i - 1] = cpu_to_le64(prp_dma);
455 i = 1;
456 }
457 prp_list[i++] = cpu_to_le64(dma_addr);
458 dma_len -= PAGE_SIZE;
459 dma_addr += PAGE_SIZE;
460 length -= PAGE_SIZE;
461 if (length <= 0)
462 break;
463 if (dma_len > 0)
464 continue;
465 BUG_ON(dma_len < 0);
466 sg = sg_next(sg);
467 dma_addr = sg_dma_address(sg);
468 dma_len = sg_dma_len(sg);
469 }
470
471 return total_len;
472 }
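
/*
 * PRP layout recap for the function above: PRP1 always points at the
 * first (possibly unaligned) chunk of data. If the remainder fits in a
 * single page, PRP2 points at it directly; otherwise PRP2 points at a
 * PRP list. When a list page fills, its final slot is rewritten as a
 * chain pointer to the next list page, which is why the loop copies the
 * last entry forward before storing the chain address.
 */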
473
474 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
475 int len)
476 {
477 struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
478 if (!split)
479 return -ENOMEM;
480
481 bio_chain(split, bio);
482
483 if (bio_list_empty(&nvmeq->sq_cong))
484 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
485 bio_list_add(&nvmeq->sq_cong, split);
486 bio_list_add(&nvmeq->sq_cong, bio);
487
488 return 0;
489 }
490
491 /* NVMe scatterlists require no holes in the virtual address */
492 #define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
493 (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
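
/*
 * Example: a bvec that ends 512 bytes short of a page boundary is not
 * virtually mergeable with its successor even if the two are physically
 * contiguous; beyond the first page, the PRP format can only describe
 * whole page-aligned segments, so such a bio has to be split.
 */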
494
495 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
496 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
497 {
498 struct bio_vec bvec, bvprv;
499 struct bvec_iter iter;
500 struct scatterlist *sg = NULL;
501 int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
502 int first = 1;
503
504 if (nvmeq->dev->stripe_size)
505 split_len = nvmeq->dev->stripe_size -
506 ((bio->bi_iter.bi_sector << 9) &
507 (nvmeq->dev->stripe_size - 1));
508
509 sg_init_table(iod->sg, psegs);
510 bio_for_each_segment(bvec, bio, iter) {
511 if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
512 sg->length += bvec.bv_len;
513 } else {
514 if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
515 return nvme_split_and_submit(bio, nvmeq,
516 length);
517
518 sg = sg ? sg + 1 : iod->sg;
519 sg_set_page(sg, bvec.bv_page,
520 bvec.bv_len, bvec.bv_offset);
521 nsegs++;
522 }
523
524 if (split_len - length < bvec.bv_len)
525 return nvme_split_and_submit(bio, nvmeq, split_len);
526 length += bvec.bv_len;
527 bvprv = bvec;
528 first = 0;
529 }
530 iod->nents = nsegs;
531 sg_mark_end(sg);
532 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
533 return -ENOMEM;
534
535 BUG_ON(length != bio->bi_iter.bi_size);
536 return length;
537 }
538
539 /*
540 * We reuse the small pool to allocate the 16-byte range here as it is not
541 * worth having a special pool for these or additional cases to handle freeing
542 * the iod.
543 */
544 static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
545 struct bio *bio, struct nvme_iod *iod, int cmdid)
546 {
547 struct nvme_dsm_range *range;
548 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
549
550 range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
551 &iod->first_dma);
552 if (!range)
553 return -ENOMEM;
554
555 iod_list(iod)[0] = (__le64 *)range;
556 iod->npages = 0;
557
558 range->cattr = cpu_to_le32(0);
559 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
560 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
561
562 memset(cmnd, 0, sizeof(*cmnd));
563 cmnd->dsm.opcode = nvme_cmd_dsm;
564 cmnd->dsm.command_id = cmdid;
565 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
566 cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
567 cmnd->dsm.nr = 0;
568 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
569
570 if (++nvmeq->sq_tail == nvmeq->q_depth)
571 nvmeq->sq_tail = 0;
572 writel(nvmeq->sq_tail, nvmeq->q_db);
573
574 return 0;
575 }
576
577 static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
578 int cmdid)
579 {
580 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
581
582 memset(cmnd, 0, sizeof(*cmnd));
583 cmnd->common.opcode = nvme_cmd_flush;
584 cmnd->common.command_id = cmdid;
585 cmnd->common.nsid = cpu_to_le32(ns->ns_id);
586
587 if (++nvmeq->sq_tail == nvmeq->q_depth)
588 nvmeq->sq_tail = 0;
589 writel(nvmeq->sq_tail, nvmeq->q_db);
590
591 return 0;
592 }
593
594 int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
595 {
596 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
597 special_completion, NVME_IO_TIMEOUT);
598 if (unlikely(cmdid < 0))
599 return cmdid;
600
601 return nvme_submit_flush(nvmeq, ns, cmdid);
602 }
603
604 /*
605 * Called with local interrupts disabled and the q_lock held. May not sleep.
606 */
607 static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
608 struct bio *bio)
609 {
610 struct nvme_command *cmnd;
611 struct nvme_iod *iod;
612 enum dma_data_direction dma_dir;
613 int cmdid, length, result;
614 u16 control;
615 u32 dsmgmt;
616 int psegs = bio_phys_segments(ns->queue, bio);
617
618 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
619 result = nvme_submit_flush_data(nvmeq, ns);
620 if (result)
621 return result;
622 }
623
624 result = -ENOMEM;
625 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
626 if (!iod)
627 goto nomem;
628 iod->private = bio;
629
630 result = -EBUSY;
631 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
632 if (unlikely(cmdid < 0))
633 goto free_iod;
634
635 if (bio->bi_rw & REQ_DISCARD) {
636 result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
637 if (result)
638 goto free_cmdid;
639 return result;
640 }
641 if ((bio->bi_rw & REQ_FLUSH) && !psegs)
642 return nvme_submit_flush(nvmeq, ns, cmdid);
643
644 control = 0;
645 if (bio->bi_rw & REQ_FUA)
646 control |= NVME_RW_FUA;
647 if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
648 control |= NVME_RW_LR;
649
650 dsmgmt = 0;
651 if (bio->bi_rw & REQ_RAHEAD)
652 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
653
654 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
655
656 memset(cmnd, 0, sizeof(*cmnd));
657 if (bio_data_dir(bio)) {
658 cmnd->rw.opcode = nvme_cmd_write;
659 dma_dir = DMA_TO_DEVICE;
660 } else {
661 cmnd->rw.opcode = nvme_cmd_read;
662 dma_dir = DMA_FROM_DEVICE;
663 }
664
665 result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
666 if (result <= 0)
667 goto free_cmdid;
668 length = result;
669
670 cmnd->rw.command_id = cmdid;
671 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
672 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
673 GFP_ATOMIC);
674 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
675 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
676 cmnd->rw.control = cpu_to_le16(control);
677 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
678
679 nvme_start_io_acct(bio);
680 if (++nvmeq->sq_tail == nvmeq->q_depth)
681 nvmeq->sq_tail = 0;
682 writel(nvmeq->sq_tail, nvmeq->q_db);
683
684 return 0;
685
686 free_cmdid:
687 free_cmdid(nvmeq, cmdid, NULL);
688 free_iod:
689 nvme_free_iod(nvmeq->dev, iod);
690 nomem:
691 return result;
692 }
693
694 static int nvme_process_cq(struct nvme_queue *nvmeq)
695 {
696 u16 head, phase;
697
698 head = nvmeq->cq_head;
699 phase = nvmeq->cq_phase;
700
701 for (;;) {
702 void *ctx;
703 nvme_completion_fn fn;
704 struct nvme_completion cqe = nvmeq->cqes[head];
705 if ((le16_to_cpu(cqe.status) & 1) != phase)
706 break;
707 nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
708 if (++head == nvmeq->q_depth) {
709 head = 0;
710 phase = !phase;
711 }
712
713 ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
714 fn(nvmeq->dev, ctx, &cqe);
715 }
716
717 /* If the controller ignores the cq head doorbell and continuously
718 * writes to the queue, it is theoretically possible to wrap around
719 * the queue twice and mistakenly return IRQ_NONE. Linux only
720 * requires that 0.1% of your interrupts are handled, so this isn't
721 * a big problem.
722 */
723 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
724 return 0;
725
726 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
727 nvmeq->cq_head = head;
728 nvmeq->cq_phase = phase;
729
730 nvmeq->cqe_seen = 1;
731 return 1;
732 }
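
/*
 * Phase-tag recap: cq_phase starts at 1 and the controller toggles bit
 * 0 of the status field each time it wraps the completion queue. A CQE
 * whose phase bit matches cq_phase is new; once the scan itself wraps,
 * the expected phase flips, so entries left over from the previous pass
 * are never consumed twice.
 */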
733
734 static void nvme_make_request(struct request_queue *q, struct bio *bio)
735 {
736 struct nvme_ns *ns = q->queuedata;
737 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
738 int result = -EBUSY;
739
740 if (!nvmeq) {
741 put_nvmeq(NULL);
742 bio_endio(bio, -EIO);
743 return;
744 }
745
746 spin_lock_irq(&nvmeq->q_lock);
747 if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
748 result = nvme_submit_bio_queue(nvmeq, ns, bio);
749 if (unlikely(result)) {
750 if (bio_list_empty(&nvmeq->sq_cong))
751 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
752 bio_list_add(&nvmeq->sq_cong, bio);
753 }
754
755 nvme_process_cq(nvmeq);
756 spin_unlock_irq(&nvmeq->q_lock);
757 put_nvmeq(nvmeq);
758 }
759
760 static irqreturn_t nvme_irq(int irq, void *data)
761 {
762 irqreturn_t result;
763 struct nvme_queue *nvmeq = data;
764 spin_lock(&nvmeq->q_lock);
765 nvme_process_cq(nvmeq);
766 result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
767 nvmeq->cqe_seen = 0;
768 spin_unlock(&nvmeq->q_lock);
769 return result;
770 }
771
772 static irqreturn_t nvme_irq_check(int irq, void *data)
773 {
774 struct nvme_queue *nvmeq = data;
775 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
776 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
777 return IRQ_NONE;
778 return IRQ_WAKE_THREAD;
779 }
780
781 static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
782 {
783 spin_lock_irq(&nvmeq->q_lock);
784 cancel_cmdid(nvmeq, cmdid, NULL);
785 spin_unlock_irq(&nvmeq->q_lock);
786 }
787
788 struct sync_cmd_info {
789 struct task_struct *task;
790 u32 result;
791 int status;
792 };
793
794 static void sync_completion(struct nvme_dev *dev, void *ctx,
795 struct nvme_completion *cqe)
796 {
797 struct sync_cmd_info *cmdinfo = ctx;
798 cmdinfo->result = le32_to_cpup(&cqe->result);
799 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
800 wake_up_process(cmdinfo->task);
801 }
802
803 /*
 804  * Returns 0 on success. If the return value is negative, it's a Linux
 805  * error code; if positive, it's an NVM Express status code.
806 */
807 int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
808 u32 *result, unsigned timeout)
809 {
810 int cmdid;
811 struct sync_cmd_info cmdinfo;
812
813 cmdinfo.task = current;
814 cmdinfo.status = -EINTR;
815
816 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
817 timeout);
818 if (cmdid < 0)
819 return cmdid;
820 cmd->common.command_id = cmdid;
821
822 set_current_state(TASK_KILLABLE);
823 nvme_submit_cmd(nvmeq, cmd);
824 schedule_timeout(timeout);
825
826 if (cmdinfo.status == -EINTR) {
827 nvme_abort_command(nvmeq, cmdid);
828 return -EINTR;
829 }
830
831 if (result)
832 *result = cmdinfo.result;
833
834 return cmdinfo.status;
835 }
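
/*
 * Hypothetical usage sketch for the return convention documented above:
 * a synchronous flush whose caller maps any positive NVMe status code
 * to -EIO. The function name is illustrative only.
 */
static int __maybe_unused example_sync_flush(struct nvme_queue *nvmeq,
						struct nvme_ns *ns)
{
	struct nvme_command c;
	int status;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_cmd_flush;
	c.common.nsid = cpu_to_le32(ns->ns_id);

	status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
	if (status < 0)
		return status;		/* Linux errno, e.g. -EINTR */
	return status ? -EIO : 0;	/* positive: NVMe status code */
}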
836
837 static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
838 struct nvme_command *cmd,
839 struct async_cmd_info *cmdinfo, unsigned timeout)
840 {
841 int cmdid;
842
843 cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
844 if (cmdid < 0)
845 return cmdid;
846 cmdinfo->status = -EINTR;
847 cmd->common.command_id = cmdid;
848 nvme_submit_cmd(nvmeq, cmd);
849 return 0;
850 }
851
852 int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
853 u32 *result)
854 {
855 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
856 }
857
858 static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
859 struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
860 {
861 return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
862 ADMIN_TIMEOUT);
863 }
864
865 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
866 {
867 int status;
868 struct nvme_command c;
869
870 memset(&c, 0, sizeof(c));
871 c.delete_queue.opcode = opcode;
872 c.delete_queue.qid = cpu_to_le16(id);
873
874 status = nvme_submit_admin_cmd(dev, &c, NULL);
875 if (status)
876 return -EIO;
877 return 0;
878 }
879
880 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
881 struct nvme_queue *nvmeq)
882 {
883 int status;
884 struct nvme_command c;
885 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
886
887 memset(&c, 0, sizeof(c));
888 c.create_cq.opcode = nvme_admin_create_cq;
889 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
890 c.create_cq.cqid = cpu_to_le16(qid);
891 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
892 c.create_cq.cq_flags = cpu_to_le16(flags);
893 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
894
895 status = nvme_submit_admin_cmd(dev, &c, NULL);
896 if (status)
897 return -EIO;
898 return 0;
899 }
900
901 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
902 struct nvme_queue *nvmeq)
903 {
904 int status;
905 struct nvme_command c;
906 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
907
908 memset(&c, 0, sizeof(c));
909 c.create_sq.opcode = nvme_admin_create_sq;
910 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
911 c.create_sq.sqid = cpu_to_le16(qid);
912 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
913 c.create_sq.sq_flags = cpu_to_le16(flags);
914 c.create_sq.cqid = cpu_to_le16(qid);
915
916 status = nvme_submit_admin_cmd(dev, &c, NULL);
917 if (status)
918 return -EIO;
919 return 0;
920 }
921
922 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
923 {
924 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
925 }
926
927 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
928 {
929 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
930 }
931
932 int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
933 dma_addr_t dma_addr)
934 {
935 struct nvme_command c;
936
937 memset(&c, 0, sizeof(c));
938 c.identify.opcode = nvme_admin_identify;
939 c.identify.nsid = cpu_to_le32(nsid);
940 c.identify.prp1 = cpu_to_le64(dma_addr);
941 c.identify.cns = cpu_to_le32(cns);
942
943 return nvme_submit_admin_cmd(dev, &c, NULL);
944 }
945
946 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
947 dma_addr_t dma_addr, u32 *result)
948 {
949 struct nvme_command c;
950
951 memset(&c, 0, sizeof(c));
952 c.features.opcode = nvme_admin_get_features;
953 c.features.nsid = cpu_to_le32(nsid);
954 c.features.prp1 = cpu_to_le64(dma_addr);
955 c.features.fid = cpu_to_le32(fid);
956
957 return nvme_submit_admin_cmd(dev, &c, result);
958 }
959
960 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
961 dma_addr_t dma_addr, u32 *result)
962 {
963 struct nvme_command c;
964
965 memset(&c, 0, sizeof(c));
966 c.features.opcode = nvme_admin_set_features;
967 c.features.prp1 = cpu_to_le64(dma_addr);
968 c.features.fid = cpu_to_le32(fid);
969 c.features.dword11 = cpu_to_le32(dword11);
970
971 return nvme_submit_admin_cmd(dev, &c, result);
972 }
973
974 /**
975 * nvme_abort_cmd - Attempt aborting a command
976 * @cmdid: Command id of a timed out IO
 977  * @nvmeq: The queue with timed out IO
978 *
979 * Schedule controller reset if the command was already aborted once before and
980 * still hasn't been returned to the driver, or if this is the admin queue.
981 */
982 static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
983 {
984 int a_cmdid;
985 struct nvme_command cmd;
986 struct nvme_dev *dev = nvmeq->dev;
987 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
988
989 if (!nvmeq->qid || info[cmdid].aborted) {
990 if (work_busy(&dev->reset_work))
991 return;
992 list_del_init(&dev->node);
993 dev_warn(&dev->pci_dev->dev,
994 "I/O %d QID %d timeout, reset controller\n", cmdid,
995 nvmeq->qid);
996 dev->reset_workfn = nvme_reset_failed_dev;
997 queue_work(nvme_workq, &dev->reset_work);
998 return;
999 }
1000
1001 if (!dev->abort_limit)
1002 return;
1003
1004 a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
1005 ADMIN_TIMEOUT);
1006 if (a_cmdid < 0)
1007 return;
1008
1009 memset(&cmd, 0, sizeof(cmd));
1010 cmd.abort.opcode = nvme_admin_abort_cmd;
1011 cmd.abort.cid = cmdid;
1012 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1013 cmd.abort.command_id = a_cmdid;
1014
1015 --dev->abort_limit;
1016 info[cmdid].aborted = 1;
1017 info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
1018
1019 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
1020 nvmeq->qid);
1021 nvme_submit_cmd(dev->queues[0], &cmd);
1022 }
1023
1024 /**
1025 * nvme_cancel_ios - Cancel outstanding I/Os
1026  * @nvmeq: The queue to cancel I/Os on
1027 * @timeout: True to only cancel I/Os which have timed out
1028 */
1029 static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
1030 {
1031 int depth = nvmeq->q_depth - 1;
1032 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1033 unsigned long now = jiffies;
1034 int cmdid;
1035
1036 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
1037 void *ctx;
1038 nvme_completion_fn fn;
1039 static struct nvme_completion cqe = {
1040 .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
1041 };
1042
1043 if (timeout && !time_after(now, info[cmdid].timeout))
1044 continue;
1045 if (info[cmdid].ctx == CMD_CTX_CANCELLED)
1046 continue;
1047 if (timeout && nvmeq->dev->initialized) {
1048 nvme_abort_cmd(cmdid, nvmeq);
1049 continue;
1050 }
1051 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
1052 nvmeq->qid);
1053 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
1054 fn(nvmeq->dev, ctx, &cqe);
1055 }
1056 }
1057
1058 static void nvme_free_queue(struct nvme_queue *nvmeq)
1059 {
1060 spin_lock_irq(&nvmeq->q_lock);
1061 while (bio_list_peek(&nvmeq->sq_cong)) {
1062 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1063 bio_endio(bio, -EIO);
1064 }
1065 spin_unlock_irq(&nvmeq->q_lock);
1066
1067 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
1068 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1069 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
1070 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1071 kfree(nvmeq);
1072 }
1073
1074 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1075 {
1076 int i;
1077
1078 for (i = dev->queue_count - 1; i >= lowest; i--) {
1079 nvme_free_queue(dev->queues[i]);
1080 dev->queue_count--;
1081 dev->queues[i] = NULL;
1082 }
1083 }
1084
1085 /**
1086 * nvme_suspend_queue - put queue into suspended state
1087  * @nvmeq: queue to suspend
1088 *
1089 * Returns 1 if already suspended, 0 otherwise.
1090 */
1091 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1092 {
1093 int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
1094
1095 spin_lock_irq(&nvmeq->q_lock);
1096 if (nvmeq->q_suspended) {
1097 spin_unlock_irq(&nvmeq->q_lock);
1098 return 1;
1099 }
1100 nvmeq->q_suspended = 1;
1101 spin_unlock_irq(&nvmeq->q_lock);
1102
1103 irq_set_affinity_hint(vector, NULL);
1104 free_irq(vector, nvmeq);
1105
1106 return 0;
1107 }
1108
1109 static void nvme_clear_queue(struct nvme_queue *nvmeq)
1110 {
1111 spin_lock_irq(&nvmeq->q_lock);
1112 nvme_process_cq(nvmeq);
1113 nvme_cancel_ios(nvmeq, false);
1114 spin_unlock_irq(&nvmeq->q_lock);
1115 }
1116
1117 static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1118 {
1119 struct nvme_queue *nvmeq = dev->queues[qid];
1120
1121 if (!nvmeq)
1122 return;
1123 if (nvme_suspend_queue(nvmeq))
1124 return;
1125
1126 /* Don't tell the adapter to delete the admin queue.
1127 * Don't tell a removed adapter to delete IO queues. */
1128 if (qid && readl(&dev->bar->csts) != -1) {
1129 adapter_delete_sq(dev, qid);
1130 adapter_delete_cq(dev, qid);
1131 }
1132 nvme_clear_queue(nvmeq);
1133 }
1134
1135 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1136 int depth, int vector)
1137 {
1138 struct device *dmadev = &dev->pci_dev->dev;
1139 unsigned extra = nvme_queue_extra(depth);
1140 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
1141 if (!nvmeq)
1142 return NULL;
1143
1144 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
1145 &nvmeq->cq_dma_addr, GFP_KERNEL);
1146 if (!nvmeq->cqes)
1147 goto free_nvmeq;
1148 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
1149
1150 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
1151 &nvmeq->sq_dma_addr, GFP_KERNEL);
1152 if (!nvmeq->sq_cmds)
1153 goto free_cqdma;
1154
1155 nvmeq->q_dmadev = dmadev;
1156 nvmeq->dev = dev;
1157 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
1158 dev->instance, qid);
1159 spin_lock_init(&nvmeq->q_lock);
1160 nvmeq->cq_head = 0;
1161 nvmeq->cq_phase = 1;
1162 init_waitqueue_head(&nvmeq->sq_full);
1163 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
1164 bio_list_init(&nvmeq->sq_cong);
1165 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1166 nvmeq->q_depth = depth;
1167 nvmeq->cq_vector = vector;
1168 nvmeq->qid = qid;
1169 nvmeq->q_suspended = 1;
1170 dev->queue_count++;
1171
1172 return nvmeq;
1173
1174 free_cqdma:
1175 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
1176 nvmeq->cq_dma_addr);
1177 free_nvmeq:
1178 kfree(nvmeq);
1179 return NULL;
1180 }
1181
1182 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1183 const char *name)
1184 {
1185 if (use_threaded_interrupts)
1186 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
1187 nvme_irq_check, nvme_irq, IRQF_SHARED,
1188 name, nvmeq);
1189 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
1190 IRQF_SHARED, name, nvmeq);
1191 }
1192
1193 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1194 {
1195 struct nvme_dev *dev = nvmeq->dev;
1196 unsigned extra = nvme_queue_extra(nvmeq->q_depth);
1197
1198 nvmeq->sq_tail = 0;
1199 nvmeq->cq_head = 0;
1200 nvmeq->cq_phase = 1;
1201 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1202 memset(nvmeq->cmdid_data, 0, extra);
1203 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1204 nvme_cancel_ios(nvmeq, false);
1205 nvmeq->q_suspended = 0;
1206 }
1207
1208 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1209 {
1210 struct nvme_dev *dev = nvmeq->dev;
1211 int result;
1212
1213 result = adapter_alloc_cq(dev, qid, nvmeq);
1214 if (result < 0)
1215 return result;
1216
1217 result = adapter_alloc_sq(dev, qid, nvmeq);
1218 if (result < 0)
1219 goto release_cq;
1220
1221 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1222 if (result < 0)
1223 goto release_sq;
1224
1225 spin_lock_irq(&nvmeq->q_lock);
1226 nvme_init_queue(nvmeq, qid);
1227 spin_unlock_irq(&nvmeq->q_lock);
1228
1229 return result;
1230
1231 release_sq:
1232 adapter_delete_sq(dev, qid);
1233 release_cq:
1234 adapter_delete_cq(dev, qid);
1235 return result;
1236 }
1237
1238 static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
1239 {
1240 unsigned long timeout;
1241 u32 bit = enabled ? NVME_CSTS_RDY : 0;
1242
1243 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1244
1245 while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
1246 msleep(100);
1247 if (fatal_signal_pending(current))
1248 return -EINTR;
1249 if (time_after(jiffies, timeout)) {
1250 dev_err(&dev->pci_dev->dev,
1251 "Device not ready; aborting initialisation\n");
1252 return -ENODEV;
1253 }
1254 }
1255
1256 return 0;
1257 }
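
/*
 * CAP.TO is expressed in 500 ms units, hence the
 * ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) jiffies budget above: a
 * controller reporting TO = 3 gets (3 + 1) * 0.5 s = 2 seconds to move
 * CSTS.RDY to the requested state.
 */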
1258
1259 /*
1260 * If the device has been passed off to us in an enabled state, just clear
1261 * the enabled bit. The spec says we should set the 'shutdown notification
1262 * bits', but doing so may cause the device to complete commands to the
1263 * admin queue ... and we don't know what memory that might be pointing at!
1264 */
1265 static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
1266 {
1267 u32 cc = readl(&dev->bar->cc);
1268
1269 if (cc & NVME_CC_ENABLE)
1270 writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
1271 return nvme_wait_ready(dev, cap, false);
1272 }
1273
1274 static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
1275 {
1276 return nvme_wait_ready(dev, cap, true);
1277 }
1278
1279 static int nvme_shutdown_ctrl(struct nvme_dev *dev)
1280 {
1281 unsigned long timeout;
1282 u32 cc;
1283
1284 cc = (readl(&dev->bar->cc) & ~NVME_CC_SHN_MASK) | NVME_CC_SHN_NORMAL;
1285 writel(cc, &dev->bar->cc);
1286
1287 timeout = 2 * HZ + jiffies;
1288 while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
1289 NVME_CSTS_SHST_CMPLT) {
1290 msleep(100);
1291 if (fatal_signal_pending(current))
1292 return -EINTR;
1293 if (time_after(jiffies, timeout)) {
1294 dev_err(&dev->pci_dev->dev,
1295 "Device shutdown incomplete; abort shutdown\n");
1296 return -ENODEV;
1297 }
1298 }
1299
1300 return 0;
1301 }
1302
1303 static int nvme_configure_admin_queue(struct nvme_dev *dev)
1304 {
1305 int result;
1306 u32 aqa;
1307 u64 cap = readq(&dev->bar->cap);
1308 struct nvme_queue *nvmeq;
1309
1310 result = nvme_disable_ctrl(dev, cap);
1311 if (result < 0)
1312 return result;
1313
1314 nvmeq = dev->queues[0];
1315 if (!nvmeq) {
1316 nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
1317 if (!nvmeq)
1318 return -ENOMEM;
1319 dev->queues[0] = nvmeq;
1320 }
1321
1322 aqa = nvmeq->q_depth - 1;
1323 aqa |= aqa << 16;
1324
1325 dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
1326 dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
1327 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
1328 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
1329
1330 writel(aqa, &dev->bar->aqa);
1331 writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
1332 writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
1333 writel(dev->ctrl_config, &dev->bar->cc);
1334
1335 result = nvme_enable_ctrl(dev, cap);
1336 if (result)
1337 return result;
1338
1339 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1340 if (result)
1341 return result;
1342
1343 spin_lock_irq(&nvmeq->q_lock);
1344 nvme_init_queue(nvmeq, 0);
1345 spin_unlock_irq(&nvmeq->q_lock);
1346 return result;
1347 }
1348
1349 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1350 unsigned long addr, unsigned length)
1351 {
1352 int i, err, count, nents, offset;
1353 struct scatterlist *sg;
1354 struct page **pages;
1355 struct nvme_iod *iod;
1356
1357 if (addr & 3)
1358 return ERR_PTR(-EINVAL);
1359 if (!length || length > INT_MAX - PAGE_SIZE)
1360 return ERR_PTR(-EINVAL);
1361
1362 offset = offset_in_page(addr);
1363 count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1364 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
1365 if (!pages)
1366 return ERR_PTR(-ENOMEM);
1367
1368 err = get_user_pages_fast(addr, count, 1, pages);
1369 if (err < count) {
1370 count = err;
1371 err = -EFAULT;
1372 goto put_pages;
1373 }
1374
1375 iod = nvme_alloc_iod(count, length, GFP_KERNEL);
1376 sg = iod->sg;
1377 sg_init_table(sg, count);
1378 for (i = 0; i < count; i++) {
1379 sg_set_page(&sg[i], pages[i],
1380 min_t(unsigned, length, PAGE_SIZE - offset),
1381 offset);
1382 length -= (PAGE_SIZE - offset);
1383 offset = 0;
1384 }
1385 sg_mark_end(&sg[i - 1]);
1386 iod->nents = count;
1387
1388 err = -ENOMEM;
1389 nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
1390 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1391 if (!nents)
1392 goto free_iod;
1393
1394 kfree(pages);
1395 return iod;
1396
1397 free_iod:
1398 kfree(iod);
1399 put_pages:
1400 for (i = 0; i < count; i++)
1401 put_page(pages[i]);
1402 kfree(pages);
1403 return ERR_PTR(err);
1404 }
1405
1406 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1407 struct nvme_iod *iod)
1408 {
1409 int i;
1410
1411 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
1412 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1413
1414 for (i = 0; i < iod->nents; i++)
1415 put_page(sg_page(&iod->sg[i]));
1416 }
1417
1418 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1419 {
1420 struct nvme_dev *dev = ns->dev;
1421 struct nvme_queue *nvmeq;
1422 struct nvme_user_io io;
1423 struct nvme_command c;
1424 unsigned length, meta_len;
1425 int status, i;
1426 struct nvme_iod *iod, *meta_iod = NULL;
1427 dma_addr_t meta_dma_addr;
1428 void *meta, *uninitialized_var(meta_mem);
1429
1430 if (copy_from_user(&io, uio, sizeof(io)))
1431 return -EFAULT;
1432 length = (io.nblocks + 1) << ns->lba_shift;
1433 meta_len = (io.nblocks + 1) * ns->ms;
1434
1435 if (meta_len && ((io.metadata & 3) || !io.metadata))
1436 return -EINVAL;
1437
1438 switch (io.opcode) {
1439 case nvme_cmd_write:
1440 case nvme_cmd_read:
1441 case nvme_cmd_compare:
1442 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
1443 break;
1444 default:
1445 return -EINVAL;
1446 }
1447
1448 if (IS_ERR(iod))
1449 return PTR_ERR(iod);
1450
1451 memset(&c, 0, sizeof(c));
1452 c.rw.opcode = io.opcode;
1453 c.rw.flags = io.flags;
1454 c.rw.nsid = cpu_to_le32(ns->ns_id);
1455 c.rw.slba = cpu_to_le64(io.slba);
1456 c.rw.length = cpu_to_le16(io.nblocks);
1457 c.rw.control = cpu_to_le16(io.control);
1458 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1459 c.rw.reftag = cpu_to_le32(io.reftag);
1460 c.rw.apptag = cpu_to_le16(io.apptag);
1461 c.rw.appmask = cpu_to_le16(io.appmask);
1462
1463 if (meta_len) {
1464 meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
1465 meta_len);
1466 if (IS_ERR(meta_iod)) {
1467 status = PTR_ERR(meta_iod);
1468 meta_iod = NULL;
1469 goto unmap;
1470 }
1471
1472 meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
1473 &meta_dma_addr, GFP_KERNEL);
1474 if (!meta_mem) {
1475 status = -ENOMEM;
1476 goto unmap;
1477 }
1478
1479 if (io.opcode & 1) {
1480 int meta_offset = 0;
1481
1482 for (i = 0; i < meta_iod->nents; i++) {
1483 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1484 meta_iod->sg[i].offset;
1485 memcpy(meta_mem + meta_offset, meta,
1486 meta_iod->sg[i].length);
1487 kunmap_atomic(meta);
1488 meta_offset += meta_iod->sg[i].length;
1489 }
1490 }
1491
1492 c.rw.metadata = cpu_to_le64(meta_dma_addr);
1493 }
1494
1495 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
1496
1497 nvmeq = get_nvmeq(dev);
1498 /*
1499 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
1500 * disabled. We may be preempted at any point, and be rescheduled
1501 * to a different CPU. That will cause cacheline bouncing, but no
1502 * additional races since q_lock already protects against other CPUs.
1503 */
1504 put_nvmeq(nvmeq);
1505 if (length != (io.nblocks + 1) << ns->lba_shift)
1506 status = -ENOMEM;
1507 else if (!nvmeq || nvmeq->q_suspended)
1508 status = -EBUSY;
1509 else
1510 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
1511
1512 if (meta_len) {
1513 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
1514 int meta_offset = 0;
1515
1516 for (i = 0; i < meta_iod->nents; i++) {
1517 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1518 meta_iod->sg[i].offset;
1519 memcpy(meta, meta_mem + meta_offset,
1520 meta_iod->sg[i].length);
1521 kunmap_atomic(meta);
1522 meta_offset += meta_iod->sg[i].length;
1523 }
1524 }
1525
1526 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
1527 meta_dma_addr);
1528 }
1529
1530 unmap:
1531 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
1532 nvme_free_iod(dev, iod);
1533
1534 if (meta_iod) {
1535 nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
1536 nvme_free_iod(dev, meta_iod);
1537 }
1538
1539 return status;
1540 }
1541
1542 static int nvme_user_admin_cmd(struct nvme_dev *dev,
1543 struct nvme_admin_cmd __user *ucmd)
1544 {
1545 struct nvme_admin_cmd cmd;
1546 struct nvme_command c;
1547 int status, length;
1548 struct nvme_iod *uninitialized_var(iod);
1549 unsigned timeout;
1550
1551 if (!capable(CAP_SYS_ADMIN))
1552 return -EACCES;
1553 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1554 return -EFAULT;
1555
1556 memset(&c, 0, sizeof(c));
1557 c.common.opcode = cmd.opcode;
1558 c.common.flags = cmd.flags;
1559 c.common.nsid = cpu_to_le32(cmd.nsid);
1560 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1561 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1562 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
1563 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
1564 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
1565 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
1566 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
1567 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
1568
1569 length = cmd.data_len;
1570 if (cmd.data_len) {
1571 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
1572 length);
1573 if (IS_ERR(iod))
1574 return PTR_ERR(iod);
1575 length = nvme_setup_prps(dev, &c.common, iod, length,
1576 GFP_KERNEL);
1577 }
1578
1579 timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
1580 ADMIN_TIMEOUT;
1581 if (length != cmd.data_len)
1582 status = -ENOMEM;
1583 else
1584 status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
1585 timeout);
1586
1587 if (cmd.data_len) {
1588 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
1589 nvme_free_iod(dev, iod);
1590 }
1591
1592 if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
1593 sizeof(cmd.result)))
1594 status = -EFAULT;
1595
1596 return status;
1597 }
1598
1599 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1600 unsigned long arg)
1601 {
1602 struct nvme_ns *ns = bdev->bd_disk->private_data;
1603
1604 switch (cmd) {
1605 case NVME_IOCTL_ID:
1606 force_successful_syscall_return();
1607 return ns->ns_id;
1608 case NVME_IOCTL_ADMIN_CMD:
1609 return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
1610 case NVME_IOCTL_SUBMIT_IO:
1611 return nvme_submit_io(ns, (void __user *)arg);
1612 case SG_GET_VERSION_NUM:
1613 return nvme_sg_get_version_num((void __user *)arg);
1614 case SG_IO:
1615 return nvme_sg_io(ns, (void __user *)arg);
1616 default:
1617 return -ENOTTY;
1618 }
1619 }
1620
1621 #ifdef CONFIG_COMPAT
1622 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
1623 unsigned int cmd, unsigned long arg)
1624 {
1625 struct nvme_ns *ns = bdev->bd_disk->private_data;
1626
1627 switch (cmd) {
1628 case SG_IO:
1629 return nvme_sg_io32(ns, arg);
1630 }
1631 return nvme_ioctl(bdev, mode, cmd, arg);
1632 }
1633 #else
1634 #define nvme_compat_ioctl NULL
1635 #endif
1636
1637 static int nvme_open(struct block_device *bdev, fmode_t mode)
1638 {
1639 struct nvme_ns *ns = bdev->bd_disk->private_data;
1640 struct nvme_dev *dev = ns->dev;
1641
1642 kref_get(&dev->kref);
1643 return 0;
1644 }
1645
1646 static void nvme_free_dev(struct kref *kref);
1647
1648 static void nvme_release(struct gendisk *disk, fmode_t mode)
1649 {
1650 struct nvme_ns *ns = disk->private_data;
1651 struct nvme_dev *dev = ns->dev;
1652
1653 kref_put(&dev->kref, nvme_free_dev);
1654 }
1655
1656 static const struct block_device_operations nvme_fops = {
1657 .owner = THIS_MODULE,
1658 .ioctl = nvme_ioctl,
1659 .compat_ioctl = nvme_compat_ioctl,
1660 .open = nvme_open,
1661 .release = nvme_release,
1662 };
1663
1664 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1665 {
1666 while (bio_list_peek(&nvmeq->sq_cong)) {
1667 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1668 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
1669
1670 if (bio_list_empty(&nvmeq->sq_cong))
1671 remove_wait_queue(&nvmeq->sq_full,
1672 &nvmeq->sq_cong_wait);
1673 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1674 if (bio_list_empty(&nvmeq->sq_cong))
1675 add_wait_queue(&nvmeq->sq_full,
1676 &nvmeq->sq_cong_wait);
1677 bio_list_add_head(&nvmeq->sq_cong, bio);
1678 break;
1679 }
1680 }
1681 }
1682
1683 static int nvme_kthread(void *data)
1684 {
1685 struct nvme_dev *dev, *next;
1686
1687 while (!kthread_should_stop()) {
1688 set_current_state(TASK_INTERRUPTIBLE);
1689 spin_lock(&dev_list_lock);
1690 list_for_each_entry_safe(dev, next, &dev_list, node) {
1691 int i;
1692 if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
1693 dev->initialized) {
1694 if (work_busy(&dev->reset_work))
1695 continue;
1696 list_del_init(&dev->node);
1697 dev_warn(&dev->pci_dev->dev,
1698 "Failed status, reset controller\n");
1699 dev->reset_workfn = nvme_reset_failed_dev;
1700 queue_work(nvme_workq, &dev->reset_work);
1701 continue;
1702 }
1703 for (i = 0; i < dev->queue_count; i++) {
1704 struct nvme_queue *nvmeq = dev->queues[i];
1705 if (!nvmeq)
1706 continue;
1707 spin_lock_irq(&nvmeq->q_lock);
1708 if (nvmeq->q_suspended)
1709 goto unlock;
1710 nvme_process_cq(nvmeq);
1711 nvme_cancel_ios(nvmeq, true);
1712 nvme_resubmit_bios(nvmeq);
1713 unlock:
1714 spin_unlock_irq(&nvmeq->q_lock);
1715 }
1716 }
1717 spin_unlock(&dev_list_lock);
1718 schedule_timeout(round_jiffies_relative(HZ));
1719 }
1720 return 0;
1721 }
1722
1723 static void nvme_config_discard(struct nvme_ns *ns)
1724 {
1725 u32 logical_block_size = queue_logical_block_size(ns->queue);
1726 ns->queue->limits.discard_zeroes_data = 0;
1727 ns->queue->limits.discard_alignment = logical_block_size;
1728 ns->queue->limits.discard_granularity = logical_block_size;
1729 ns->queue->limits.max_discard_sectors = 0xffffffff;
1730 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
1731 }
1732
1733 static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1734 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1735 {
1736 struct nvme_ns *ns;
1737 struct gendisk *disk;
1738 int lbaf;
1739
1740 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1741 return NULL;
1742
1743 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1744 if (!ns)
1745 return NULL;
1746 ns->queue = blk_alloc_queue(GFP_KERNEL);
1747 if (!ns->queue)
1748 goto out_free_ns;
1749 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
1750 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1751 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1752 blk_queue_make_request(ns->queue, nvme_make_request);
1753 ns->dev = dev;
1754 ns->queue->queuedata = ns;
1755
1756 disk = alloc_disk(0);
1757 if (!disk)
1758 goto out_free_queue;
1759 ns->ns_id = nsid;
1760 ns->disk = disk;
1761 lbaf = id->flbas & 0xf;
1762 ns->lba_shift = id->lbaf[lbaf].ds;
1763 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1764 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1765 if (dev->max_hw_sectors)
1766 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
1767
1768 disk->major = nvme_major;
1769 disk->first_minor = 0;
1770 disk->fops = &nvme_fops;
1771 disk->private_data = ns;
1772 disk->queue = ns->queue;
1773 disk->driverfs_dev = &dev->pci_dev->dev;
1774 disk->flags = GENHD_FL_EXT_DEVT;
1775 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
1776 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1777
1778 if (dev->oncs & NVME_CTRL_ONCS_DSM)
1779 nvme_config_discard(ns);
1780
1781 return ns;
1782
1783 out_free_queue:
1784 blk_cleanup_queue(ns->queue);
1785 out_free_ns:
1786 kfree(ns);
1787 return NULL;
1788 }
1789
1790 static int set_queue_count(struct nvme_dev *dev, int count)
1791 {
1792 int status;
1793 u32 result;
1794 u32 q_count = (count - 1) | ((count - 1) << 16);
1795
1796 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
1797 &result);
1798 if (status)
1799 return status < 0 ? -EIO : -EBUSY;
1800 return min(result & 0xffff, result >> 16) + 1;
1801 }
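
/*
 * Example of the Number of Queues feature encoding above: a request for
 * 8 I/O queues sends q_count = 0x00070007 (zero-based SQ and CQ counts
 * in the two halves). A controller granting only 4 of each returns
 * result = 0x00030003, and min(result & 0xffff, result >> 16) + 1
 * evaluates to 4.
 */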
1802
1803 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1804 {
1805 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
1806 }
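
/*
 * Doorbell BAR arithmetic: the doorbell registers begin at byte offset
 * 4096. Every queue, including the admin queue, owns a submission tail
 * doorbell and a completion head doorbell, each 4 bytes wide and spaced
 * by the controller's doorbell stride, giving the
 * (nr_io_queues + 1) * 8 * db_stride term.
 */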
1807
1808 static int nvme_setup_io_queues(struct nvme_dev *dev)
1809 {
1810 struct nvme_queue *adminq = dev->queues[0];
1811 struct pci_dev *pdev = dev->pci_dev;
1812 int result, cpu, i, vecs, nr_io_queues, size, q_depth;
1813
1814 nr_io_queues = num_online_cpus();
1815 result = set_queue_count(dev, nr_io_queues);
1816 if (result < 0)
1817 return result;
1818 if (result < nr_io_queues)
1819 nr_io_queues = result;
1820
1821 size = db_bar_size(dev, nr_io_queues);
1822 if (size > 8192) {
1823 iounmap(dev->bar);
1824 do {
1825 dev->bar = ioremap(pci_resource_start(pdev, 0), size);
1826 if (dev->bar)
1827 break;
1828 if (!--nr_io_queues)
1829 return -ENOMEM;
1830 size = db_bar_size(dev, nr_io_queues);
1831 } while (1);
1832 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1833 dev->queues[0]->q_db = dev->dbs;
1834 }
1835
1836 /* Deregister the admin queue's interrupt */
1837 free_irq(dev->entry[0].vector, adminq);
1838
1839 vecs = nr_io_queues;
1840 for (i = 0; i < vecs; i++)
1841 dev->entry[i].entry = i;
1842 for (;;) {
1843 result = pci_enable_msix(pdev, dev->entry, vecs);
1844 if (result <= 0)
1845 break;
1846 vecs = result;
1847 }
1848
1849 if (result < 0) {
1850 vecs = nr_io_queues;
1851 if (vecs > 32)
1852 vecs = 32;
1853 for (;;) {
1854 result = pci_enable_msi_block(pdev, vecs);
1855 if (result == 0) {
1856 for (i = 0; i < vecs; i++)
1857 dev->entry[i].vector = i + pdev->irq;
1858 break;
1859 } else if (result < 0) {
1860 vecs = 1;
1861 break;
1862 }
1863 vecs = result;
1864 }
1865 }
1866
1867 /*
1868 * Should investigate if there's a performance win from allocating
1869 * more queues than interrupt vectors; it might allow the submission
1870 * path to scale better, even if the receive path is limited by the
1871 * number of interrupts.
1872 */
1873 nr_io_queues = vecs;
1874
1875 result = queue_request_irq(dev, adminq, adminq->irqname);
1876 if (result) {
1877 adminq->q_suspended = 1;
1878 goto free_queues;
1879 }
1880
1881 /* Free previously allocated queues that are no longer usable */
1882 spin_lock(&dev_list_lock);
1883 for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
1884 struct nvme_queue *nvmeq = dev->queues[i];
1885
1886 spin_lock_irq(&nvmeq->q_lock);
1887 nvme_cancel_ios(nvmeq, false);
1888 spin_unlock_irq(&nvmeq->q_lock);
1889
1890 nvme_free_queue(nvmeq);
1891 dev->queue_count--;
1892 dev->queues[i] = NULL;
1893 }
1894 spin_unlock(&dev_list_lock);
1895
1896 cpu = cpumask_first(cpu_online_mask);
1897 for (i = 0; i < nr_io_queues; i++) {
1898 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1899 cpu = cpumask_next(cpu, cpu_online_mask);
1900 }
1901
1902 q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
1903 NVME_Q_DEPTH);
1904 for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
1905 dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
1906 if (!dev->queues[i + 1]) {
1907 result = -ENOMEM;
1908 goto free_queues;
1909 }
1910 }
1911
1912 for (; i < num_possible_cpus(); i++) {
1913 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1914 dev->queues[i + 1] = dev->queues[target + 1];
1915 }
1916
1917 for (i = 1; i < dev->queue_count; i++) {
1918 result = nvme_create_queue(dev->queues[i], i);
1919 if (result) {
1920 for (--i; i > 0; i--)
1921 nvme_disable_queue(dev, i);
1922 goto free_queues;
1923 }
1924 }
1925
1926 return 0;
1927
1928 free_queues:
1929 nvme_free_queues(dev, 1);
1930 return result;
1931 }
1932
1933 /*
1934 * Return: error value if an error occurred setting up the queues or calling
1935 * Identify Device. 0 if these succeeded, even if adding some of the
1936 * namespaces failed. At the moment, these failures are silent. TBD which
1937 * failures should be reported.
1938 */
1939 static int nvme_dev_add(struct nvme_dev *dev)
1940 {
1941 struct pci_dev *pdev = dev->pci_dev;
1942 int res;
1943 unsigned nn, i;
1944 struct nvme_ns *ns;
1945 struct nvme_id_ctrl *ctrl;
1946 struct nvme_id_ns *id_ns;
1947 void *mem;
1948 dma_addr_t dma_addr;
1949 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
1950
1951 mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
1952 if (!mem)
1953 return -ENOMEM;
1954
1955 res = nvme_identify(dev, 0, 1, dma_addr);
1956 if (res) {
1957 res = -EIO;
1958 goto out;
1959 }
1960
1961 ctrl = mem;
1962 nn = le32_to_cpup(&ctrl->nn);
1963 dev->oncs = le16_to_cpup(&ctrl->oncs);
1964 dev->abort_limit = ctrl->acl + 1;
1965 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1966 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1967 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
1968 if (ctrl->mdts)
1969 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
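/*
 * Vendor quirk: Intel device 0x0953 reports its stripe size in
 * vendor-specific Identify byte vs[3].
 */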
1970 if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
1971 (pdev->device == 0x0953) && ctrl->vs[3])
1972 dev->stripe_size = 1 << (ctrl->vs[3] + shift);
1973
1974 id_ns = mem;
1975 for (i = 1; i <= nn; i++) {
1976 res = nvme_identify(dev, i, 0, dma_addr);
1977 if (res)
1978 continue;
1979
1980 if (id_ns->ncap == 0)
1981 continue;
1982
1983 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
1984 dma_addr + 4096, NULL);
1985 if (res)
1986 memset(mem + 4096, 0, 4096);
1987
1988 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
1989 if (ns)
1990 list_add_tail(&ns->list, &dev->namespaces);
1991 }
1992 list_for_each_entry(ns, &dev->namespaces, list)
1993 add_disk(ns->disk);
1994 res = 0;
1995
1996 out:
1997 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
1998 return res;
1999 }
2000
2001 static int nvme_dev_map(struct nvme_dev *dev)
2002 {
2003 int bars, result = -ENOMEM;
2004 struct pci_dev *pdev = dev->pci_dev;
2005
2006 if (pci_enable_device_mem(pdev))
2007 return result;
2008
2009 dev->entry[0].vector = pdev->irq;
2010 pci_set_master(pdev);
2011 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2012 if (pci_request_selected_regions(pdev, bars, "nvme"))
2013 goto disable_pci;
2014
2015 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
2016 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
2017 goto disable;
2018
2019 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
2020 if (!dev->bar)
2021 goto disable;
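/* An all-ones read of CSTS means the device has dropped off the bus. */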
2022 if (readl(&dev->bar->csts) == -1) {
2023 result = -ENODEV;
2024 goto unmap;
2025 }
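/* Doorbell registers begin at BAR offset 4096, spaced by CAP.DSTRD. */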
2026 dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
2027 dev->dbs = ((void __iomem *)dev->bar) + 4096;
2028
2029 return 0;
2030
2031 unmap:
2032 iounmap(dev->bar);
2033 dev->bar = NULL;
2034 disable:
2035 pci_release_regions(pdev);
2036 disable_pci:
2037 pci_disable_device(pdev);
2038 return result;
2039 }
2040
2041 static void nvme_dev_unmap(struct nvme_dev *dev)
2042 {
2043 if (dev->pci_dev->msi_enabled)
2044 pci_disable_msi(dev->pci_dev);
2045 else if (dev->pci_dev->msix_enabled)
2046 pci_disable_msix(dev->pci_dev);
2047
2048 if (dev->bar) {
2049 iounmap(dev->bar);
2050 dev->bar = NULL;
2051 pci_release_regions(dev->pci_dev);
2052 }
2053
2054 if (pci_is_enabled(dev->pci_dev))
2055 pci_disable_device(dev->pci_dev);
2056 }
2057
2058 struct nvme_delq_ctx {
2059 struct task_struct *waiter;
2060 struct kthread_worker *worker;
2061 atomic_t refcount;
2062 };
2063
2064 static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
2065 {
2066 dq->waiter = current;
2067 mb();
2068
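/*
 * Sleep until every outstanding delete-queue command completes.  On
 * timeout or a fatal signal, abandon the graceful path: disable the
 * controller and admin queue, then kill the deletion worker.
 */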
2069 for (;;) {
2070 set_current_state(TASK_KILLABLE);
2071 if (!atomic_read(&dq->refcount))
2072 break;
2073 if (!schedule_timeout(ADMIN_TIMEOUT) ||
2074 fatal_signal_pending(current)) {
2075 set_current_state(TASK_RUNNING);
2076
2077 nvme_disable_ctrl(dev, readq(&dev->bar->cap));
2078 nvme_disable_queue(dev, 0);
2079
2080 send_sig(SIGKILL, dq->worker->task, 1);
2081 flush_kthread_worker(dq->worker);
2082 return;
2083 }
2084 }
2085 set_current_state(TASK_RUNNING);
2086 }
2087
2088 static void nvme_put_dq(struct nvme_delq_ctx *dq)
2089 {
2090 atomic_dec(&dq->refcount);
2091 if (dq->waiter)
2092 wake_up_process(dq->waiter);
2093 }
2094
2095 static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
2096 {
2097 atomic_inc(&dq->refcount);
2098 return dq;
2099 }
2100
2101 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2102 {
2103 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2104
2105 nvme_clear_queue(nvmeq);
2106 nvme_put_dq(dq);
2107 }
2108
2109 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
2110 kthread_work_func_t fn)
2111 {
2112 struct nvme_command c;
2113
2114 memset(&c, 0, sizeof(c));
2115 c.delete_queue.opcode = opcode;
2116 c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2117
2118 init_kthread_work(&nvmeq->cmdinfo.work, fn);
2119 return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
2120 }
2121
2122 static void nvme_del_cq_work_handler(struct kthread_work *work)
2123 {
2124 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2125 cmdinfo.work);
2126 nvme_del_queue_end(nvmeq);
2127 }
2128
2129 static int nvme_delete_cq(struct nvme_queue *nvmeq)
2130 {
2131 return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
2132 nvme_del_cq_work_handler);
2133 }
2134
2135 static void nvme_del_sq_work_handler(struct kthread_work *work)
2136 {
2137 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2138 cmdinfo.work);
2139 int status = nvmeq->cmdinfo.status;
2140
2141 if (!status)
2142 status = nvme_delete_cq(nvmeq);
2143 if (status)
2144 nvme_del_queue_end(nvmeq);
2145 }
2146
2147 static int nvme_delete_sq(struct nvme_queue *nvmeq)
2148 {
2149 return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
2150 nvme_del_sq_work_handler);
2151 }
2152
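/*
 * Queue deletion is a chain of async admin commands: delete the
 * submission queue first, then its completion queue from the SQ
 * completion handler, dropping the refcount once both are gone.
 */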
2153 static void nvme_del_queue_start(struct kthread_work *work)
2154 {
2155 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2156 cmdinfo.work);
2157 allow_signal(SIGKILL);
2158 if (nvme_delete_sq(nvmeq))
2159 nvme_del_queue_end(nvmeq);
2160 }
2161
2162 static void nvme_disable_io_queues(struct nvme_dev *dev)
2163 {
2164 int i;
2165 DEFINE_KTHREAD_WORKER_ONSTACK(worker);
2166 struct nvme_delq_ctx dq;
2167 struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
2168 &worker, "nvme%d", dev->instance);
2169
2170 if (IS_ERR(kworker_task)) {
2171 dev_err(&dev->pci_dev->dev,
2172 "Failed to create queue del task\n");
2173 for (i = dev->queue_count - 1; i > 0; i--)
2174 nvme_disable_queue(dev, i);
2175 return;
2176 }
2177
2178 dq.waiter = NULL;
2179 atomic_set(&dq.refcount, 0);
2180 dq.worker = &worker;
2181 for (i = dev->queue_count - 1; i > 0; i--) {
2182 struct nvme_queue *nvmeq = dev->queues[i];
2183
2184 if (nvme_suspend_queue(nvmeq))
2185 continue;
2186 nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
2187 nvmeq->cmdinfo.worker = dq.worker;
2188 init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
2189 queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
2190 }
2191 nvme_wait_dq(&dq, dev);
2192 kthread_stop(kworker_task);
2193 }
2194
2195 static void nvme_dev_shutdown(struct nvme_dev *dev)
2196 {
2197 int i;
2198
2199 dev->initialized = 0;
2200
2201 spin_lock(&dev_list_lock);
2202 list_del_init(&dev->node);
2203 spin_unlock(&dev_list_lock);
2204
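/*
 * If the register window is gone or the controller no longer
 * responds, just reclaim the queues locally; otherwise delete the
 * I/O queues on the device and perform an orderly shutdown.
 */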
2205 if (!dev->bar || readl(&dev->bar->csts) == -1) {
2206 for (i = dev->queue_count - 1; i >= 0; i--) {
2207 struct nvme_queue *nvmeq = dev->queues[i];
2208 nvme_suspend_queue(nvmeq);
2209 nvme_clear_queue(nvmeq);
2210 }
2211 } else {
2212 nvme_disable_io_queues(dev);
2213 nvme_shutdown_ctrl(dev);
2214 nvme_disable_queue(dev, 0);
2215 }
2216 nvme_dev_unmap(dev);
2217 }
2218
2219 static void nvme_dev_remove(struct nvme_dev *dev)
2220 {
2221 struct nvme_ns *ns;
2222
2223 list_for_each_entry(ns, &dev->namespaces, list) {
2224 if (ns->disk->flags & GENHD_FL_UP)
2225 del_gendisk(ns->disk);
2226 if (!blk_queue_dying(ns->queue))
2227 blk_cleanup_queue(ns->queue);
2228 }
2229 }
2230
2231 static int nvme_setup_prp_pools(struct nvme_dev *dev)
2232 {
2233 struct device *dmadev = &dev->pci_dev->dev;
2234 dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
2235 PAGE_SIZE, PAGE_SIZE, 0);
2236 if (!dev->prp_page_pool)
2237 return -ENOMEM;
2238
2239 /* Optimisation for I/Os between 4k and 128k */
2240 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
2241 256, 256, 0);
2242 if (!dev->prp_small_pool) {
2243 dma_pool_destroy(dev->prp_page_pool);
2244 return -ENOMEM;
2245 }
2246 return 0;
2247 }
2248
2249 static void nvme_release_prp_pools(struct nvme_dev *dev)
2250 {
2251 dma_pool_destroy(dev->prp_page_pool);
2252 dma_pool_destroy(dev->prp_small_pool);
2253 }
2254
2255 static DEFINE_IDA(nvme_instance_ida);
2256
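/*
 * Allocate a unique instance number with the ida_pre_get()/
 * ida_get_new() retry idiom: preload outside the lock and retry on
 * -EAGAIN if another thread consumed the preallocation.
 */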
2257 static int nvme_set_instance(struct nvme_dev *dev)
2258 {
2259 int instance, error;
2260
2261 do {
2262 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
2263 return -ENODEV;
2264
2265 spin_lock(&dev_list_lock);
2266 error = ida_get_new(&nvme_instance_ida, &instance);
2267 spin_unlock(&dev_list_lock);
2268 } while (error == -EAGAIN);
2269
2270 if (error)
2271 return -ENODEV;
2272
2273 dev->instance = instance;
2274 return 0;
2275 }
2276
2277 static void nvme_release_instance(struct nvme_dev *dev)
2278 {
2279 spin_lock(&dev_list_lock);
2280 ida_remove(&nvme_instance_ida, dev->instance);
2281 spin_unlock(&dev_list_lock);
2282 }
2283
2284 static void nvme_free_namespaces(struct nvme_dev *dev)
2285 {
2286 struct nvme_ns *ns, *next;
2287
2288 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
2289 list_del(&ns->list);
2290 put_disk(ns->disk);
2291 kfree(ns);
2292 }
2293 }
2294
2295 static void nvme_free_dev(struct kref *kref)
2296 {
2297 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
2298
2299 nvme_free_namespaces(dev);
2300 kfree(dev->queues);
2301 kfree(dev->entry);
2302 kfree(dev);
2303 }
2304
2305 static int nvme_dev_open(struct inode *inode, struct file *f)
2306 {
2307 struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
2308 miscdev);
2309 kref_get(&dev->kref);
2310 f->private_data = dev;
2311 return 0;
2312 }
2313
2314 static int nvme_dev_release(struct inode *inode, struct file *f)
2315 {
2316 struct nvme_dev *dev = f->private_data;
2317 kref_put(&dev->kref, nvme_free_dev);
2318 return 0;
2319 }
2320
2321 static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
2322 {
2323 struct nvme_dev *dev = f->private_data;
2324 switch (cmd) {
2325 case NVME_IOCTL_ADMIN_CMD:
2326 return nvme_user_admin_cmd(dev, (void __user *)arg);
2327 default:
2328 return -ENOTTY;
2329 }
2330 }
2331
2332 static const struct file_operations nvme_dev_fops = {
2333 .owner = THIS_MODULE,
2334 .open = nvme_dev_open,
2335 .release = nvme_dev_release,
2336 .unlocked_ioctl = nvme_dev_ioctl,
2337 .compat_ioctl = nvme_dev_ioctl,
2338 };
2339
2340 static int nvme_dev_start(struct nvme_dev *dev)
2341 {
2342 int result;
2343
2344 result = nvme_dev_map(dev);
2345 if (result)
2346 return result;
2347
2348 result = nvme_configure_admin_queue(dev);
2349 if (result)
2350 goto unmap;
2351
2352 spin_lock(&dev_list_lock);
2353 list_add(&dev->node, &dev_list);
2354 spin_unlock(&dev_list_lock);
2355
2356 result = nvme_setup_io_queues(dev);
2357 if (result && result != -EBUSY)
2358 goto disable;
2359
2360 return result;
2361
2362 disable:
2363 nvme_disable_queue(dev, 0);
2364 spin_lock(&dev_list_lock);
2365 list_del_init(&dev->node);
2366 spin_unlock(&dev_list_lock);
2367 unmap:
2368 nvme_dev_unmap(dev);
2369 return result;
2370 }
2371
2372 static int nvme_remove_dead_ctrl(void *arg)
2373 {
2374 struct nvme_dev *dev = (struct nvme_dev *)arg;
2375 struct pci_dev *pdev = dev->pci_dev;
2376
2377 if (pci_get_drvdata(pdev))
2378 pci_stop_and_remove_bus_device(pdev);
2379 kref_put(&dev->kref, nvme_free_dev);
2380 return 0;
2381 }
2382
2383 static void nvme_remove_disks(struct work_struct *ws)
2384 {
2385 int i;
2386 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
2387
2388 nvme_dev_remove(dev);
2389 spin_lock(&dev_list_lock);
2390 for (i = dev->queue_count - 1; i > 0; i--) {
2391 BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
2392 nvme_free_queue(dev->queues[i]);
2393 dev->queue_count--;
2394 dev->queues[i] = NULL;
2395 }
2396 spin_unlock(&dev_list_lock);
2397 }
2398
2399 static int nvme_dev_resume(struct nvme_dev *dev)
2400 {
2401 int ret;
2402
2403 ret = nvme_dev_start(dev);
2404 if (ret && ret != -EBUSY)
2405 return ret;
2406 if (ret == -EBUSY) {
2407 spin_lock(&dev_list_lock);
2408 dev->reset_workfn = nvme_remove_disks;
2409 queue_work(nvme_workq, &dev->reset_work);
2410 spin_unlock(&dev_list_lock);
2411 }
2412 dev->initialized = 1;
2413 return 0;
2414 }
2415
2416 static void nvme_dev_reset(struct nvme_dev *dev)
2417 {
2418 nvme_dev_shutdown(dev);
2419 if (nvme_dev_resume(dev)) {
2420 dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
2421 kref_get(&dev->kref);
2422 if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
2423 dev->instance))) {
2424 dev_err(&dev->pci_dev->dev,
2425 "Failed to start controller remove task\n");
2426 kref_put(&dev->kref, nvme_free_dev);
2427 }
2428 }
2429 }
2430
2431 static void nvme_reset_failed_dev(struct work_struct *ws)
2432 {
2433 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
2434 nvme_dev_reset(dev);
2435 }
2436
2437 static void nvme_reset_workfn(struct work_struct *work)
2438 {
2439 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
2440 dev->reset_workfn(work);
2441 }
2442
2443 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2444 {
2445 int result = -ENOMEM;
2446 struct nvme_dev *dev;
2447
2448 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2449 if (!dev)
2450 return -ENOMEM;
2451 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
2452 GFP_KERNEL);
2453 if (!dev->entry)
2454 goto free;
2455 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
2456 GFP_KERNEL);
2457 if (!dev->queues)
2458 goto free;
2459
2460 INIT_LIST_HEAD(&dev->namespaces);
2461 dev->reset_workfn = nvme_reset_failed_dev;
2462 INIT_WORK(&dev->reset_work, nvme_reset_workfn);
2463 dev->pci_dev = pdev;
2464 pci_set_drvdata(pdev, dev);
2465 result = nvme_set_instance(dev);
2466 if (result)
2467 goto free;
2468
2469 result = nvme_setup_prp_pools(dev);
2470 if (result)
2471 goto release;
2472
2473 kref_init(&dev->kref);
2474 result = nvme_dev_start(dev);
2475 if (result) {
2476 if (result == -EBUSY)
2477 goto create_cdev;
2478 goto release_pools;
2479 }
2480
2481 result = nvme_dev_add(dev);
2482 if (result)
2483 goto shutdown;
2484
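/*
 * The management character device is registered even when the
 * controller reported busy (-EBUSY) and no namespaces were added,
 * so admin ioctls remain available.  This is why the device
 * reference count must already be initialized by this point.
 */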
2485 create_cdev:
2486 scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
2487 dev->miscdev.minor = MISC_DYNAMIC_MINOR;
2488 dev->miscdev.parent = &pdev->dev;
2489 dev->miscdev.name = dev->name;
2490 dev->miscdev.fops = &nvme_dev_fops;
2491 result = misc_register(&dev->miscdev);
2492 if (result)
2493 goto remove;
2494
2495 dev->initialized = 1;
2496 return 0;
2497
2498 remove:
2499 nvme_dev_remove(dev);
2500 nvme_free_namespaces(dev);
2501 shutdown:
2502 nvme_dev_shutdown(dev);
2503 release_pools:
2504 nvme_free_queues(dev, 0);
2505 nvme_release_prp_pools(dev);
2506 release:
2507 nvme_release_instance(dev);
2508 free:
2509 kfree(dev->queues);
2510 kfree(dev->entry);
2511 kfree(dev);
2512 return result;
2513 }
2514
2515 static void nvme_shutdown(struct pci_dev *pdev)
2516 {
2517 struct nvme_dev *dev = pci_get_drvdata(pdev);
2518 nvme_dev_shutdown(dev);
2519 }
2520
2521 static void nvme_remove(struct pci_dev *pdev)
2522 {
2523 struct nvme_dev *dev = pci_get_drvdata(pdev);
2524
2525 spin_lock(&dev_list_lock);
2526 list_del_init(&dev->node);
2527 spin_unlock(&dev_list_lock);
2528
2529 pci_set_drvdata(pdev, NULL);
2530 flush_work(&dev->reset_work);
2531 misc_deregister(&dev->miscdev);
2532 nvme_dev_remove(dev);
2533 nvme_dev_shutdown(dev);
2534 nvme_free_queues(dev, 0);
2535 nvme_release_instance(dev);
2536 nvme_release_prp_pools(dev);
2537 kref_put(&dev->kref, nvme_free_dev);
2538 }
2539
2540 /* These functions are yet to be implemented */
2541 #define nvme_error_detected NULL
2542 #define nvme_dump_registers NULL
2543 #define nvme_link_reset NULL
2544 #define nvme_slot_reset NULL
2545 #define nvme_error_resume NULL
2546
2547 static int nvme_suspend(struct device *dev)
2548 {
2549 struct pci_dev *pdev = to_pci_dev(dev);
2550 struct nvme_dev *ndev = pci_get_drvdata(pdev);
2551
2552 nvme_dev_shutdown(ndev);
2553 return 0;
2554 }
2555
2556 static int nvme_resume(struct device *dev)
2557 {
2558 struct pci_dev *pdev = to_pci_dev(dev);
2559 struct nvme_dev *ndev = pci_get_drvdata(pdev);
2560
2561 if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
2562 ndev->reset_workfn = nvme_reset_failed_dev;
2563 queue_work(nvme_workq, &ndev->reset_work);
2564 }
2565 return 0;
2566 }
2567
2568 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
2569
2570 static const struct pci_error_handlers nvme_err_handler = {
2571 .error_detected = nvme_error_detected,
2572 .mmio_enabled = nvme_dump_registers,
2573 .link_reset = nvme_link_reset,
2574 .slot_reset = nvme_slot_reset,
2575 .resume = nvme_error_resume,
2576 };
2577
2578 /* Move to pci_ids.h later */
2579 #define PCI_CLASS_STORAGE_EXPRESS 0x010802
2580
2581 static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
2582 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2583 { 0, }
2584 };
2585 MODULE_DEVICE_TABLE(pci, nvme_id_table);
2586
2587 static struct pci_driver nvme_driver = {
2588 .name = "nvme",
2589 .id_table = nvme_id_table,
2590 .probe = nvme_probe,
2591 .remove = nvme_remove,
2592 .shutdown = nvme_shutdown,
2593 .driver = {
2594 .pm = &nvme_dev_pm_ops,
2595 },
2596 .err_handler = &nvme_err_handler,
2597 };
2598
2599 static int __init nvme_init(void)
2600 {
2601 int result;
2602
2603 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
2604 if (IS_ERR(nvme_thread))
2605 return PTR_ERR(nvme_thread);
2606
2607 result = -ENOMEM;
2608 nvme_workq = create_singlethread_workqueue("nvme");
2609 if (!nvme_workq)
2610 goto kill_kthread;
2611
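/* A zero nvme_major requests a dynamically allocated major number. */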
2612 result = register_blkdev(nvme_major, "nvme");
2613 if (result < 0)
2614 goto kill_workq;
2615 else if (result > 0)
2616 nvme_major = result;
2617
2618 result = pci_register_driver(&nvme_driver);
2619 if (result)
2620 goto unregister_blkdev;
2621 return 0;
2622
2623 unregister_blkdev:
2624 unregister_blkdev(nvme_major, "nvme");
2625 kill_workq:
2626 destroy_workqueue(nvme_workq);
2627 kill_kthread:
2628 kthread_stop(nvme_thread);
2629 return result;
2630 }
2631
2632 static void __exit nvme_exit(void)
2633 {
2634 pci_unregister_driver(&nvme_driver);
2635 unregister_blkdev(nvme_major, "nvme");
2636 destroy_workqueue(nvme_workq);
2637 kthread_stop(nvme_thread);
2638 }
2639
2640 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
2641 MODULE_LICENSE("GPL");
2642 MODULE_VERSION("0.8");
2643 module_init(nvme_init);
2644 module_exit(nvme_exit);