null_blk: set a separate timer for each command
drivers/block/null_blk.c

#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

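/*
 * Example usage (illustrative, not from the source): with the
 * parameters above, a blk-mq device whose commands complete from a
 * per-command timer 10,000ns after submission can be created with:
 *
 *	modprobe null_blk queue_mode=2 irqmode=2 completion_nsec=10000 \
 *		nr_devices=1
 */
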
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

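/*
 * Tag allocation is lockless: find_first_zero_bit() only suggests a
 * candidate, so the claim is confirmed with test_and_set_bit_lock()
 * and the scan retries if another CPU won the race. Returns -1U when
 * every tag up to queue_depth is in use.
 */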
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

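/*
 * Allocate a command, optionally waiting for a free tag. The bio path
 * passes can_wait == 1 and may sleep in io_schedule() until put_tag()
 * wakes the queue; the request-mode prep function passes 0 and defers
 * the request instead.
 */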
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

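/*
 * hrtimer callback for NULL_IRQ_TIMER mode; runs once per command. In
 * the legacy request mode the queue may have been stopped by
 * null_rq_prep_fn() when it ran out of tags, so restart it (under the
 * queue lock) before completing the command.
 */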
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	if (q && !q->mq_ops && blk_queue_stopped(q)) {
		spin_lock(q->queue_lock);
		if (blk_queue_stopped(q))
			blk_start_queue(q);
		spin_unlock(q->queue_lock);
	}
	end_cmd(cmd);

	return HRTIMER_NORESTART;
}

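/*
 * Arm this command's private timer: the command completes from
 * null_cmd_timer_expired() once completion_nsec has elapsed, which is
 * how the driver simulates hardware completion latency.
 */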
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = ktime_set(0, completion_nsec);

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

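/*
 * Dispatch completion according to irqmode: inline in the submitter's
 * context (NONE), via the block layer's softirq completion path
 * (SOFTIRQ), or from the per-command hrtimer (TIMER).
 */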
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

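/*
 * Map the submitting CPU to a queue by splitting the CPU id space into
 * nr_queues equal chunks: e.g. with nr_cpu_ids == 8 and two queues,
 * CPUs 0-3 use queue 0 and CPUs 4-7 use queue 1.
 */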
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

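/*
 * blk-mq submission path. The nullb_cmd lives in the request's driver
 * payload (tag_set.cmd_size, set up in null_add_dev()) and is recycled
 * along with the tag, so in timer mode the per-command hrtimer must be
 * re-initialized for every request.
 */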
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	dev->mt->end_io(rqd, error);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x3;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	do_div(size, bs); /* convert size to pages */
	do_div(size, 256); /* convert size to pages per block */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	do_div(size, (1 << 16));
	grp->num_lun = size + 1;
	do_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

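/*
 * Worked example of the reported geometry (illustrative): with the
 * default gb = 250 and the 4k block size LightNVM requires, size is
 * first 250 GiB / 4096 = 65,536,000 pages, then 256,000 blocks of 256
 * pages each. 256,000 / 65,536 truncates to 3, so num_lun = 4 and each
 * LUN is left with num_blk = 256,000 / 4 = 64,000 blocks.
 */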
static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
							dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

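/*
 * setup_queues() only sizes the per-device queue array (submit_queues
 * entries); init_driver_queues() then fills it in for the bio and
 * legacy request modes, while blk-mq initializes its queues lazily via
 * the .init_hctx callback instead.
 */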
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm) {
		rv = nvm_register(nullb->q, nullb->disk_name,
							&null_lnvm_dev_ops);
		if (rv)
			goto out_cleanup_blk_queue;
		goto done;
	}

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_lightnvm;
	}
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
done:
	return 0;

out_cleanup_lightnvm:
	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			goto err_ppa;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
err_ppa:
	kmem_cache_destroy(ppa_cache);
	return -EINVAL;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");