/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};
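
/*
 * Usage sketch (illustrative, not part of this header): because all
 * hardware status codes are unsigned, a completion path can test for
 * driver-side cancellation with a plain comparison:
 *
 *	if (status == NVME_SC_CANCELLED)
 *		...	// cancelled by the driver, not a device error
 */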
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
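
/*
 * Usage sketch: the module parameters above are in seconds; multiplying by
 * HZ converts them to jiffies, the unit the block layer expects, e.g. when
 * sizing a tag set's request timeout (assuming a struct blk_mq_tag_set *set):
 *
 *	set->timeout = NVME_IO_TIMEOUT;	// jiffies, not seconds
 */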
/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),
};
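
/*
 * Usage sketch: quirks are a bitmask in nvme_ctrl.quirks, so core code can
 * branch on them with a simple test, e.g.:
 *
 *	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
 *		...	// honour the vendor-specific stripe size
 */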
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	atomic_t abort_limit;
	unsigned long quirks;
	struct work_struct scan_work;
	struct work_struct async_event_work;
};
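
/*
 * Usage sketch: the namespaces list is protected by namespaces_mutex, so a
 * walk over a controller's namespaces looks like:
 *
 *	mutex_lock(&ctrl->namespaces_mutex);
 *	list_for_each_entry(ns, &ctrl->namespaces, list)
 *		...	// ns is a struct nvme_ns, linked via ns->list
 *	mutex_unlock(&ctrl->namespaces_mutex);
 */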
/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;

	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};
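
/*
 * Usage sketch: NVME_NS_REMOVING and NVME_NS_DEAD are bit numbers for the
 * flags word above, manipulated with the atomic bitops, e.g.:
 *
 *	if (test_bit(NVME_NS_DEAD, &ns->flags))
 *		return;		// namespace is being torn down
 */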
struct nvme_ctrl_ops {
	struct module *module;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*post_scan)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
};
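
/*
 * Implementation sketch (hypothetical transport, not from this header): a
 * memory-mapped transport such as PCIe can back reg_read32() with a plain
 * MMIO read; foo/to_foo(), with the mapped BAR in ->bar, are assumed:
 *
 *	static int foo_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 *	{
 *		*val = readl(to_foo(ctrl)->bar + off);
 *		return 0;
 *	}
 */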
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;

	return val & NVME_CSTS_RDY;
}
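
/*
 * Usage sketch: a transport can poll nvme_ctrl_ready() while waiting for
 * CSTS.RDY to flip after enabling the controller, e.g.:
 *
 *	while (!nvme_ctrl_ready(ctrl))
 *		msleep(100);	// plus a timeout bound in real code
 */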
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;

	/* NSSR expects the ASCII string "NVMe" to trigger the reset */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
/* Convert a 512-byte block-layer sector into a namespace LBA. */
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
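
/*
 * Worked example: for a namespace formatted with 4096-byte LBAs,
 * lba_shift is 12, so nvme_block_nr() shifts 512-byte sectors right by
 * 12 - 9 = 3, i.e. LBA = sector / 8.
 */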
/*
 * The data payload of a discard is the single nvme_dsm_range descriptor
 * built at setup time, not the bytes covered by the request.
 */
static inline unsigned nvme_map_len(struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		return sizeof(struct nvme_dsm_range);
	else
		return blk_rq_bytes(rq);
}
static inline void nvme_cleanup_cmd(struct request *req)
{
	/* free the nvme_dsm_range descriptor allocated for a discard */
	if (req->cmd_flags & REQ_DISCARD)
		kfree(req->completion_data);
}
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}
/* Retry unless DNR is set, the request forbids retries, or it timed out. */
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}
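
/*
 * Usage sketch (approximates what the core completion path does, not a
 * verbatim excerpt): combine the two helpers above when finishing a request:
 *
 *	if (nvme_req_needs_retry(req, status))
 *		nvme_requeue_req(req);	// run it again
 *	else
 *		blk_mq_end_request(req, nvme_error_status(status));
 */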
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);
#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}
static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */
int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */