/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

unsigned int nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, uint, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
EXPORT_SYMBOL_GPL(nvme_max_retries);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	blk_mq_complete_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
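
/*
 * Controller state machine: transitions such as NEW/RESETTING/RECONNECTING
 * -> LIVE or LIVE -> DELETING are validated under ctrl->lock below, so
 * concurrent callers cannot race the controller into an invalid state.
 */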
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state = ctrl->state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}
	spin_unlock_irq(&ctrl->lock);

	if (changed)
		ctrl->state = new_state;

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}
static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}
void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}
static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_dsm_range *range;
	struct page *page;
	int offset;
	unsigned int nr_bytes = blk_rq_bytes(req);

	range = kmalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->completion_data = range;
	page = virt_to_page(range);
	offset = offset_in_page(range);
	blk_add_request_payload(req, page, offset, sizeof(*range));

	/*
	 * we set __data_len back to the size of the area to be discarded
	 * on disk. This allows us to report completion on the full amount
	 * of blocks described by the request.
	 */
	req->__data_len = nr_bytes;

	return BLK_MQ_RQ_QUEUE_OK;
}
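
/*
 * Build an NVMe read/write command from a block layer request.  The NVMe
 * "length" field is 0's based (a value of 0 means one logical block), hence
 * the "- 1" below, and end-to-end protection (PI) settings are derived from
 * the namespace's metadata/PI configuration.
 */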
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = BLK_MQ_RQ_QUEUE_OK;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		memcpy(cmd, req->cmd, sizeof(*cmd));
	else if (req_op(req) == REQ_OP_FLUSH)
		nvme_setup_flush(ns, cmd);
	else if (req_op(req) == REQ_OP_DISCARD)
		ret = nvme_setup_discard(ns, req, cmd);
	else
		nvme_setup_rw(ns, req, cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = cqe;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_completion cqe;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = &cqe;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(cqe.result);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (disk && bio->bi_bdev)
		bdput(bio->bi_bdev);
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
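
/*
 * Keep-alive machinery (used by fabrics transports): a Keep Alive admin
 * command is submitted on a reserved tag every KATO seconds; each completion
 * re-arms the delayed work, and a submission failure resets the controller.
 */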
static void nvme_keep_alive_end_io(struct request *rq, int error)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (error) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n", error);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		ctrl->ops->reset_ctrl(ctrl);
		return;
	}
}
void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid,
		__le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(2);
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			 NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}
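
/*
 * The Number of Queues feature is 0's based: dword11 carries (count - 1)
 * for submission queues in the low 16 bits and completion queues in the
 * high 16 bits, and the controller returns the allocated counts the same
 * way in the command result.
 */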
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
*bdev
, fmode_t mode
,
779 unsigned int cmd
, unsigned long arg
)
781 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
785 force_successful_syscall_return();
787 case NVME_IOCTL_ADMIN_CMD
:
788 return nvme_user_cmd(ns
->ctrl
, NULL
, (void __user
*)arg
);
789 case NVME_IOCTL_IO_CMD
:
790 return nvme_user_cmd(ns
->ctrl
, ns
, (void __user
*)arg
);
791 case NVME_IOCTL_SUBMIT_IO
:
792 return nvme_submit_io(ns
, (void __user
*)arg
);
793 #ifdef CONFIG_BLK_DEV_NVME_SCSI
794 case SG_GET_VERSION_NUM
:
795 return nvme_sg_get_version_num((void __user
*)arg
);
797 return nvme_sg_io(ns
, (void __user
*)arg
);
#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
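
/*
 * T10 Protection Information: Types 1 and 2 check the guard and reference
 * tags, Type 3 checks only the guard tag; the blk_integrity profile below
 * is registered accordingly when the namespace is formatted with PI.
 */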
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
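
/*
 * (Re)read the Identify Namespace data and propagate the current block
 * size, capacity, PI settings, and discard capability to the block layer.
 */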
static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
				__func__);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(disk_to_dev(ns->disk),
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}
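
/*
 * Map the block layer persistent-reservation API onto NVMe reservation
 * commands: the reservation type/action is packed into cdw10 and the
 * current/new keys travel in a 16-byte data payload.
 */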
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}
*bdev
, u64 old
, u64
new,
1033 enum pr_type type
, bool abort
)
1035 u32 cdw10
= nvme_pr_type(type
) << 8 | abort
? 2 : 1;
1036 return nvme_pr_command(bdev
, cdw10
, old
, new, nvme_cmd_resv_acquire
);
1039 static int nvme_pr_clear(struct block_device
*bdev
, u64 key
)
1041 u32 cdw10
= 1 | (key
? 1 << 3 : 0);
1042 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_register
);
1045 static int nvme_pr_release(struct block_device
*bdev
, u64 key
, enum pr_type type
)
1047 u32 cdw10
= nvme_pr_type(type
) << 8 | key
? 1 << 3 : 0;
1048 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_release
);
static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};
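
/*
 * Poll CSTS.RDY until it matches the expected state.  CAP.TO is in units
 * of 500 ms, hence the (TO + 1) * HZ / 2 timeout calculation below.
 */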
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	/* Checking for ctrl->tagset is a trick to avoid sleeping on module
	 * load, since we only need the quirk on reset_controller. Notice
	 * that the HGST device needs this delay only in firmware activation
	 * procedure; unfortunately we have no (easy) way to verify this.
	 */
	if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
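
/*
 * Apply controller-wide limits (transfer size, segment count, stripe
 * boundary, virt boundary for PRP alignment) to a request queue.
 */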
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	if (ctrl->ops->is_fabrics) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
			ret = -EINVAL;

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
	}

	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}
*inode
, struct file
*file
)
1328 nvme_put_ctrl(file
->private_data
);
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
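
/*
 * ioctls on the controller character device (/dev/nvmeX), as opposed to
 * the per-namespace block devices.
 */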
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}
= {
1390 .owner
= THIS_MODULE
,
1391 .open
= nvme_dev_open
,
1392 .release
= nvme_dev_release
,
1393 .unlocked_ioctl
= nvme_dev_ioctl
,
1394 .compat_ioctl
= nvme_dev_ioctl
,
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
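
/*
 * WWID fallback order: prefer the namespace NGUID, then the EUI-64, and
 * finally synthesize an identifier from VID, serial, model, and NSID.
 */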
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
		return sprintf(buf, "eui.%16phN\n", ns->uuid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};
static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}
= {
1496 .attrs
= nvme_ns_attrs
,
1497 .is_visible
= nvme_ns_attrs_are_visible
,
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			ctrl->ops->get_subsysnqn(ctrl));
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	NULL
};
#define CHECK_ATTR(ctrl, a, name)		\
	if ((a) == &dev_attr_##name.attr &&	\
	    !(ctrl)->ops->get_##name)		\
		return 0

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr) {
		if (!ctrl->ops->delete_ctrl)
			return 0;
	}

	CHECK_ATTR(ctrl, a, subsysnqn);
	CHECK_ATTR(ctrl, a, address);

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}
static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_disk:
	kfree(disk);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
 done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);

	if (ctrl->ops->post_scan)
		ctrl->ops->post_scan(ctrl);
}
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		schedule_work(&ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The NVME_CTRL_DEAD state indicates the controller was not
	 * gracefully disconnected. In that case, we won't be able to flush
	 * any data while removing the namespaces' disks; fail all the queues
	 * now to avoid potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe)
{
	u16 status = le16_to_cpu(cqe->status) >> 1;
	u32 result = le32_to_cpu(cqe->result);

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
		++ctrl->event_limit;
		schedule_work(&ctrl->async_event_work);
	}

	if (status != NVME_SC_SUCCESS)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	schedule_work(&ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	nvme_remove_namespaces(ctrl);

	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			revalidate_disk(ns->disk);

		blk_set_queue_dying(ns->queue);
		blk_mq_abort_requeue_list(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
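
/*
 * Module setup: register the nvme character device major and the "nvme"
 * class used to create the per-controller /dev/nvmeX nodes.
 */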
int __init nvme_core_init(void)
{
	int result;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

 unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	return result;
}
void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);