/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <uapi/linux/lightnvm.h>

static LIST_HEAD(nvm_targets);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

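/*
 * Look up a registered target type by name. Callers serialize access to the
 * nvm_targets list by holding nvm_lock.
 */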
static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

int nvm_register_target(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_targets);
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_target);

void nvm_unregister_target(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_lock);
	list_del(&tt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_target);

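/*
 * Thin wrappers around the device driver's DMA pool hooks; used by targets
 * to allocate and free the PPA lists handed to the device.
 */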
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
							dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}

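/*
 * Offer the device to each registered media manager in turn. A manager
 * returns > 0 from ->register_mgr() to claim the device, 0 to pass, and
 * < 0 on an initialization error, which aborts the search.
 */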
struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register media mgr if any device has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);

/* Assumes that all valid pages have already been moved to the bm on release */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

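/*
 * Convert the PPAs carried by a request between the device's native address
 * format and the generic representation used by the core and media managers.
 */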
void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

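/*
 * Erase the block addressed by @ppa. For devices in dual or quad plane mode,
 * a DMA-able PPA list with one entry per plane is built so the erase covers
 * the block in every plane with a single command.
 */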
int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
{
	int plane_cnt = 0, pl_idx, ret;
	struct nvm_rq rqd;

	if (!dev->ops->erase_block)
		return 0;

	if (dev->plane_mode == NVM_PLANE_SINGLE) {
		rqd.nr_pages = 1;
		rqd.ppa_addr = ppa;
	} else {
		plane_cnt = (1 << dev->plane_mode);
		rqd.nr_pages = plane_cnt;

		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list) {
			pr_err("nvm: failed to allocate dma memory\n");
			return -ENOMEM;
		}

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa.g.pl = pl_idx;
			rqd.ppa_list[pl_idx] = ppa;
		}
	}

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	if (plane_cnt)
		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

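/*
 * Derive the device geometry (channels, LUNs, blocks, pages, sector sizes)
 * from the first group of the identify data and precompute the totals used
 * throughout the core. Only media type 0 and flash types 0/1 are accepted.
 */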
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	if (grp->fmtype != 0 && grp->fmtype != 1) {
		pr_err("nvm: flash type not supported\n");
		return -EINVAL;
	}

	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_blocks = dev->nr_planes *
				dev->blks_per_lun *
				dev->luns_per_chnl *
				dev->nr_chnls;
	dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
	INIT_LIST_HEAD(&dev->online_targets);

	return 0;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->mt)
		dev->mt->unregister_mgr(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

static void nvm_exit(struct nvm_dev *dev)
{
	if (dev->ppalist_pool)
		dev->ops->destroy_dma_pool(dev->ppalist_pool);
	nvm_free(dev);

	pr_info("nvm: successfully unloaded\n");
}

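/*
 * Register a device with the lightnvm subsystem: identify it, set up the
 * PPA list DMA pool when more than one physical sector per command is
 * supported, and attach the first media manager willing to claim it.
 */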
int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	struct nvm_dev *dev;
	int ret;

	if (!ops->identity)
		return -EINVAL;

	dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	dev->ops = ops;
	strncpy(dev->name, disk_name, DISK_NAME_LEN);

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->ppalist_pool) {
			pr_err("nvm: could not create ppa pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(char *disk_name)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(disk_name);
	if (!dev) {
		pr_err("nvm: could not find device %s to unregister\n",
								disk_name);
		up_write(&nvm_lock);
		return;
	}

	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
	kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

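/*
 * Instantiate a target on a device: allocate a bio-based request queue and
 * an extended-devt gendisk, let the target type initialize its private data
 * for the requested LUN range, and publish the disk on dev->online_targets.
 */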
static int nvm_create_target(struct nvm_dev *dev,
						struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	void *targetdata;

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	down_write(&nvm_lock);
	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		up_write(&nvm_lock);
		return -EINVAL;
	}

	list_for_each_entry(t, &dev->online_targets, list) {
		if (!strcmp(create->tgtname, t->disk->disk_name)) {
			pr_err("nvm: target name already exists.\n");
			up_write(&nvm_lock);
			return -EINVAL;
		}
	}
	up_write(&nvm_lock);

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_t;
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	t->type = tt;
	t->disk = tdisk;

	down_write(&nvm_lock);
	list_add_tail(&t->list, &dev->online_targets);
	up_write(&nvm_lock);

	return 0;
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_t:
	kfree(t);
	return -ENOMEM;
}

static void nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	lockdep_assert_held(&nvm_lock);

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return nvm_create_target(dev, create);
}

static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;
	int ret = -1;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices)
		list_for_each_entry(t, &dev->online_targets, list) {
			if (!strcmp(remove->tgtname, t->disk->disk_name)) {
				nvm_remove_target(t);
				ret = 0;
				break;
			}
		}
	up_write(&nvm_lock);

	if (ret) {
		pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	ret = sscanf(val, "%c %32s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}

static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	char opcode;
	int ret;

	ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	return __nvm_configure_remove(&remove);
}

static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}
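
/*
 * Example usage of the debug configure strings parsed above (illustrative
 * only; the device and target names are placeholders and "rrpc" stands in
 * for whichever target type is registered on the system):
 *
 *   # create target "tgt0" of type rrpc on nvme0n1, LUNs 0 through 3
 *   echo "a nvme0n1 tgt0 rrpc 0:3" > /sys/module/lnvm/parameters/configure_debug
 *
 *   # show LUN info for nvme0n1, then remove the target again
 *   echo "s nvme0n1" > /sys/module/lnvm/parameters/configure_debug
 *   echo "d tgt0" > /sys/module/lnvm/parameters/configure_debug
 */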
/*
 * Exposes the administrative interface through
 * /sys/module/lnvm/parameters/configure_debug
 */
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}

static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz = 0;
	char *buf_start = buf;
	struct nvm_dev *dev;

	buf += sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		/* stop before a name could overflow the sysfs PAGE_SIZE buffer */
		sz = buf - buf_start;
		if (sz > 4095 - DISK_NAME_LEN)
			break;
		buf += sprintf(buf, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return buf - buf_start - 1;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set	= nvm_configure_by_str_event,
	.get	= nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);

#endif /* CONFIG_NVM_DEBUG */

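/*
 * NVM_INFO ioctl: report the subsystem version and the list of registered
 * target types to userspace.
 */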
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_targets, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_remove(&remove);
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};

MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

static int __init nvm_mod_init(void)
{
	int ret;

	ret = misc_register(&_nvm_misc);
	if (ret)
		pr_err("nvm: misc_register failed for control device\n");

	return ret;
}

static void __exit nvm_mod_exit(void)
{
	misc_deregister(&_nvm_misc);
}

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
module_init(nvm_mod_init);
module_exit(nvm_mod_exit);