/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
21 #include <linux/blkdev.h>
22 #include <linux/blk-mq.h>
23 #include <linux/list.h>
24 #include <linux/types.h>
25 #include <linux/sem.h>
26 #include <linux/bitmap.h>
27 #include <linux/module.h>
28 #include <linux/miscdevice.h>
29 #include <linux/lightnvm.h>
30 #include <linux/sched/sysctl.h>
31 #include <uapi/linux/lightnvm.h>
/* Global registries of targets, media managers and devices; all are
 * protected by nvm_lock. */
static LIST_HEAD(nvm_targets);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
38 static struct nvm_tgt_type
*nvm_find_target_type(const char *name
)
40 struct nvm_tgt_type
*tt
;
42 list_for_each_entry(tt
, &nvm_targets
, list
)
43 if (!strcmp(name
, tt
->name
))
49 int nvm_register_target(struct nvm_tgt_type
*tt
)
53 down_write(&nvm_lock
);
54 if (nvm_find_target_type(tt
->name
))
57 list_add(&tt
->list
, &nvm_targets
);
62 EXPORT_SYMBOL(nvm_register_target
);
64 void nvm_unregister_target(struct nvm_tgt_type
*tt
)
69 down_write(&nvm_lock
);
73 EXPORT_SYMBOL(nvm_unregister_target
);
75 void *nvm_dev_dma_alloc(struct nvm_dev
*dev
, gfp_t mem_flags
,
76 dma_addr_t
*dma_handler
)
78 return dev
->ops
->dev_dma_alloc(dev
, dev
->ppalist_pool
, mem_flags
,
81 EXPORT_SYMBOL(nvm_dev_dma_alloc
);
83 void nvm_dev_dma_free(struct nvm_dev
*dev
, void *ppa_list
,
84 dma_addr_t dma_handler
)
86 dev
->ops
->dev_dma_free(dev
->ppalist_pool
, ppa_list
, dma_handler
);
88 EXPORT_SYMBOL(nvm_dev_dma_free
);
90 static struct nvmm_type
*nvm_find_mgr_type(const char *name
)
94 list_for_each_entry(mt
, &nvm_mgrs
, list
)
95 if (!strcmp(name
, mt
->name
))
101 struct nvmm_type
*nvm_init_mgr(struct nvm_dev
*dev
)
103 struct nvmm_type
*mt
;
106 lockdep_assert_held(&nvm_lock
);
108 list_for_each_entry(mt
, &nvm_mgrs
, list
) {
109 ret
= mt
->register_mgr(dev
);
111 pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
113 return NULL
; /* initialization failed */
121 int nvm_register_mgr(struct nvmm_type
*mt
)
126 down_write(&nvm_lock
);
127 if (nvm_find_mgr_type(mt
->name
)) {
131 list_add(&mt
->list
, &nvm_mgrs
);
134 /* try to register media mgr if any device have none configured */
135 list_for_each_entry(dev
, &nvm_devices
, devices
) {
139 dev
->mt
= nvm_init_mgr(dev
);
146 EXPORT_SYMBOL(nvm_register_mgr
);
148 void nvm_unregister_mgr(struct nvmm_type
*mt
)
153 down_write(&nvm_lock
);
157 EXPORT_SYMBOL(nvm_unregister_mgr
);
159 static struct nvm_dev
*nvm_find_nvm_dev(const char *name
)
163 list_for_each_entry(dev
, &nvm_devices
, devices
)
164 if (!strcmp(name
, dev
->name
))
170 struct nvm_block
*nvm_get_blk_unlocked(struct nvm_dev
*dev
, struct nvm_lun
*lun
,
173 return dev
->mt
->get_blk_unlocked(dev
, lun
, flags
);
175 EXPORT_SYMBOL(nvm_get_blk_unlocked
);
177 /* Assumes that all valid pages have already been moved on release to bm */
178 void nvm_put_blk_unlocked(struct nvm_dev
*dev
, struct nvm_block
*blk
)
180 return dev
->mt
->put_blk_unlocked(dev
, blk
);
182 EXPORT_SYMBOL(nvm_put_blk_unlocked
);
184 struct nvm_block
*nvm_get_blk(struct nvm_dev
*dev
, struct nvm_lun
*lun
,
187 return dev
->mt
->get_blk(dev
, lun
, flags
);
189 EXPORT_SYMBOL(nvm_get_blk
);
191 /* Assumes that all valid pages have already been moved on release to bm */
192 void nvm_put_blk(struct nvm_dev
*dev
, struct nvm_block
*blk
)
194 return dev
->mt
->put_blk(dev
, blk
);
196 EXPORT_SYMBOL(nvm_put_blk
);
198 int nvm_submit_io(struct nvm_dev
*dev
, struct nvm_rq
*rqd
)
200 return dev
->mt
->submit_io(dev
, rqd
);
202 EXPORT_SYMBOL(nvm_submit_io
);
204 int nvm_erase_blk(struct nvm_dev
*dev
, struct nvm_block
*blk
)
206 return dev
->mt
->erase_blk(dev
, blk
, 0);
208 EXPORT_SYMBOL(nvm_erase_blk
);
210 void nvm_addr_to_generic_mode(struct nvm_dev
*dev
, struct nvm_rq
*rqd
)
214 if (rqd
->nr_pages
> 1) {
215 for (i
= 0; i
< rqd
->nr_pages
; i
++)
216 rqd
->ppa_list
[i
] = dev_to_generic_addr(dev
,
219 rqd
->ppa_addr
= dev_to_generic_addr(dev
, rqd
->ppa_addr
);
222 EXPORT_SYMBOL(nvm_addr_to_generic_mode
);
224 void nvm_generic_to_addr_mode(struct nvm_dev
*dev
, struct nvm_rq
*rqd
)
228 if (rqd
->nr_pages
> 1) {
229 for (i
= 0; i
< rqd
->nr_pages
; i
++)
230 rqd
->ppa_list
[i
] = generic_to_dev_addr(dev
,
233 rqd
->ppa_addr
= generic_to_dev_addr(dev
, rqd
->ppa_addr
);
236 EXPORT_SYMBOL(nvm_generic_to_addr_mode
);
238 int nvm_set_rqd_ppalist(struct nvm_dev
*dev
, struct nvm_rq
*rqd
,
239 struct ppa_addr
*ppas
, int nr_ppas
)
241 int i
, plane_cnt
, pl_idx
;
243 if (dev
->plane_mode
== NVM_PLANE_SINGLE
&& nr_ppas
== 1) {
245 rqd
->ppa_addr
= ppas
[0];
250 plane_cnt
= (1 << dev
->plane_mode
);
251 rqd
->nr_pages
= plane_cnt
* nr_ppas
;
253 if (dev
->ops
->max_phys_sect
< rqd
->nr_pages
)
256 rqd
->ppa_list
= nvm_dev_dma_alloc(dev
, GFP_KERNEL
, &rqd
->dma_ppa_list
);
257 if (!rqd
->ppa_list
) {
258 pr_err("nvm: failed to allocate dma memory\n");
262 for (pl_idx
= 0; pl_idx
< plane_cnt
; pl_idx
++) {
263 for (i
= 0; i
< nr_ppas
; i
++) {
264 ppas
[i
].g
.pl
= pl_idx
;
265 rqd
->ppa_list
[(pl_idx
* nr_ppas
) + i
] = ppas
[i
];
271 EXPORT_SYMBOL(nvm_set_rqd_ppalist
);
273 void nvm_free_rqd_ppalist(struct nvm_dev
*dev
, struct nvm_rq
*rqd
)
278 nvm_dev_dma_free(dev
, rqd
->ppa_list
, rqd
->dma_ppa_list
);
280 EXPORT_SYMBOL(nvm_free_rqd_ppalist
);
282 int nvm_erase_ppa(struct nvm_dev
*dev
, struct ppa_addr
*ppas
, int nr_ppas
)
287 if (!dev
->ops
->erase_block
)
290 memset(&rqd
, 0, sizeof(struct nvm_rq
));
292 ret
= nvm_set_rqd_ppalist(dev
, &rqd
, ppas
, nr_ppas
);
296 nvm_generic_to_addr_mode(dev
, &rqd
);
298 ret
= dev
->ops
->erase_block(dev
, &rqd
);
300 nvm_free_rqd_ppalist(dev
, &rqd
);
304 EXPORT_SYMBOL(nvm_erase_ppa
);
306 void nvm_end_io(struct nvm_rq
*rqd
, int error
)
311 EXPORT_SYMBOL(nvm_end_io
);
313 static void nvm_end_io_sync(struct nvm_rq
*rqd
)
315 struct completion
*waiting
= rqd
->wait
;
322 int nvm_submit_ppa(struct nvm_dev
*dev
, struct ppa_addr
*ppa
, int nr_ppas
,
323 int opcode
, int flags
, void *buf
, int len
)
325 DECLARE_COMPLETION_ONSTACK(wait
);
329 unsigned long hang_check
;
331 bio
= bio_map_kern(dev
->q
, buf
, len
, GFP_KERNEL
);
332 if (IS_ERR_OR_NULL(bio
))
335 memset(&rqd
, 0, sizeof(struct nvm_rq
));
336 ret
= nvm_set_rqd_ppalist(dev
, &rqd
, ppa
, nr_ppas
);
346 rqd
.end_io
= nvm_end_io_sync
;
348 nvm_generic_to_addr_mode(dev
, &rqd
);
350 ret
= dev
->ops
->submit_io(dev
, &rqd
);
352 /* Prevent hang_check timer from firing at us during very long I/O */
353 hang_check
= sysctl_hung_task_timeout_secs
;
355 while (!wait_for_completion_io_timeout(&wait
, hang_check
* (HZ
/2)));
357 wait_for_completion_io(&wait
);
359 nvm_free_rqd_ppalist(dev
, &rqd
);
363 EXPORT_SYMBOL(nvm_submit_ppa
);
365 static int nvm_init_slc_tbl(struct nvm_dev
*dev
, struct nvm_id_group
*grp
)
369 dev
->lps_per_blk
= dev
->pgs_per_blk
;
370 dev
->lptbl
= kcalloc(dev
->lps_per_blk
, sizeof(int), GFP_KERNEL
);
374 /* Just a linear array */
375 for (i
= 0; i
< dev
->lps_per_blk
; i
++)
381 static int nvm_init_mlc_tbl(struct nvm_dev
*dev
, struct nvm_id_group
*grp
)
384 struct nvm_id_lp_mlc
*mlc
= &grp
->lptbl
.mlc
;
389 dev
->lps_per_blk
= mlc
->num_pairs
;
390 dev
->lptbl
= kcalloc(dev
->lps_per_blk
, sizeof(int), GFP_KERNEL
);
394 /* The lower page table encoding consists of a list of bytes, where each
395 * has a lower and an upper half. The first half byte maintains the
396 * increment value and every value after is an offset added to the
397 * previous incrementation value */
398 dev
->lptbl
[0] = mlc
->pairs
[0] & 0xF;
399 for (i
= 1; i
< dev
->lps_per_blk
; i
++) {
400 p
= mlc
->pairs
[i
>> 1];
401 if (i
& 0x1) /* upper */
402 dev
->lptbl
[i
] = dev
->lptbl
[i
- 1] + ((p
& 0xF0) >> 4);
404 dev
->lptbl
[i
] = dev
->lptbl
[i
- 1] + (p
& 0xF);
410 static int nvm_core_init(struct nvm_dev
*dev
)
412 struct nvm_id
*id
= &dev
->identity
;
413 struct nvm_id_group
*grp
= &id
->groups
[0];
416 dev
->nr_chnls
= grp
->num_ch
;
417 dev
->luns_per_chnl
= grp
->num_lun
;
418 dev
->pgs_per_blk
= grp
->num_pg
;
419 dev
->blks_per_lun
= grp
->num_blk
;
420 dev
->nr_planes
= grp
->num_pln
;
421 dev
->sec_size
= grp
->csecs
;
422 dev
->oob_size
= grp
->sos
;
423 dev
->sec_per_pg
= grp
->fpg_sz
/ grp
->csecs
;
424 dev
->mccap
= grp
->mccap
;
425 memcpy(&dev
->ppaf
, &id
->ppaf
, sizeof(struct nvm_addr_format
));
427 dev
->plane_mode
= NVM_PLANE_SINGLE
;
428 dev
->max_rq_size
= dev
->ops
->max_phys_sect
* dev
->sec_size
;
430 if (grp
->mtype
!= 0) {
431 pr_err("nvm: memory type not supported\n");
435 switch (grp
->fmtype
) {
436 case NVM_ID_FMTYPE_SLC
:
437 if (nvm_init_slc_tbl(dev
, grp
))
440 case NVM_ID_FMTYPE_MLC
:
441 if (nvm_init_mlc_tbl(dev
, grp
))
445 pr_err("nvm: flash type not supported\n");
449 if (!dev
->lps_per_blk
)
450 pr_info("nvm: lower page programming table missing\n");
452 if (grp
->mpos
& 0x020202)
453 dev
->plane_mode
= NVM_PLANE_DOUBLE
;
454 if (grp
->mpos
& 0x040404)
455 dev
->plane_mode
= NVM_PLANE_QUAD
;
457 /* calculated values */
458 dev
->sec_per_pl
= dev
->sec_per_pg
* dev
->nr_planes
;
459 dev
->sec_per_blk
= dev
->sec_per_pl
* dev
->pgs_per_blk
;
460 dev
->sec_per_lun
= dev
->sec_per_blk
* dev
->blks_per_lun
;
461 dev
->nr_luns
= dev
->luns_per_chnl
* dev
->nr_chnls
;
463 dev
->total_blocks
= dev
->nr_planes
*
467 dev
->total_pages
= dev
->total_blocks
* dev
->pgs_per_blk
;
468 INIT_LIST_HEAD(&dev
->online_targets
);
469 mutex_init(&dev
->mlock
);
474 static void nvm_free(struct nvm_dev
*dev
)
480 dev
->mt
->unregister_mgr(dev
);
485 static int nvm_init(struct nvm_dev
*dev
)
489 if (!dev
->q
|| !dev
->ops
)
492 if (dev
->ops
->identity(dev
, &dev
->identity
)) {
493 pr_err("nvm: device could not be identified\n");
497 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
498 dev
->identity
.ver_id
, dev
->identity
.vmnt
,
499 dev
->identity
.cgrps
);
501 if (dev
->identity
.ver_id
!= 1) {
502 pr_err("nvm: device not supported by kernel.");
506 if (dev
->identity
.cgrps
!= 1) {
507 pr_err("nvm: only one group configuration supported.");
511 ret
= nvm_core_init(dev
);
513 pr_err("nvm: could not initialize core structures.\n");
517 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
518 dev
->name
, dev
->sec_per_pg
, dev
->nr_planes
,
519 dev
->pgs_per_blk
, dev
->blks_per_lun
, dev
->nr_luns
,
523 pr_err("nvm: failed to initialize nvm\n");
527 static void nvm_exit(struct nvm_dev
*dev
)
529 if (dev
->ppalist_pool
)
530 dev
->ops
->destroy_dma_pool(dev
->ppalist_pool
);
533 pr_info("nvm: successfully unloaded\n");
536 int nvm_register(struct request_queue
*q
, char *disk_name
,
537 struct nvm_dev_ops
*ops
)
545 dev
= kzalloc(sizeof(struct nvm_dev
), GFP_KERNEL
);
551 strncpy(dev
->name
, disk_name
, DISK_NAME_LEN
);
557 if (dev
->ops
->max_phys_sect
> 256) {
558 pr_info("nvm: max sectors supported is 256.\n");
563 if (dev
->ops
->max_phys_sect
> 1) {
564 dev
->ppalist_pool
= dev
->ops
->create_dma_pool(dev
, "ppalist");
565 if (!dev
->ppalist_pool
) {
566 pr_err("nvm: could not create ppa pool\n");
572 /* register device with a supported media manager */
573 down_write(&nvm_lock
);
574 dev
->mt
= nvm_init_mgr(dev
);
575 list_add(&dev
->devices
, &nvm_devices
);
583 EXPORT_SYMBOL(nvm_register
);
585 void nvm_unregister(char *disk_name
)
589 down_write(&nvm_lock
);
590 dev
= nvm_find_nvm_dev(disk_name
);
592 pr_err("nvm: could not find device %s to unregister\n",
598 list_del(&dev
->devices
);
604 EXPORT_SYMBOL(nvm_unregister
);
606 static const struct block_device_operations nvm_fops
= {
607 .owner
= THIS_MODULE
,
610 static int nvm_create_target(struct nvm_dev
*dev
,
611 struct nvm_ioctl_create
*create
)
613 struct nvm_ioctl_create_simple
*s
= &create
->conf
.s
;
614 struct request_queue
*tqueue
;
615 struct gendisk
*tdisk
;
616 struct nvm_tgt_type
*tt
;
617 struct nvm_target
*t
;
621 pr_info("nvm: device has no media manager registered.\n");
625 down_write(&nvm_lock
);
626 tt
= nvm_find_target_type(create
->tgttype
);
628 pr_err("nvm: target type %s not found\n", create
->tgttype
);
633 list_for_each_entry(t
, &dev
->online_targets
, list
) {
634 if (!strcmp(create
->tgtname
, t
->disk
->disk_name
)) {
635 pr_err("nvm: target name already exists.\n");
642 t
= kmalloc(sizeof(struct nvm_target
), GFP_KERNEL
);
646 tqueue
= blk_alloc_queue_node(GFP_KERNEL
, dev
->q
->node
);
649 blk_queue_make_request(tqueue
, tt
->make_rq
);
651 tdisk
= alloc_disk(0);
655 sprintf(tdisk
->disk_name
, "%s", create
->tgtname
);
656 tdisk
->flags
= GENHD_FL_EXT_DEVT
;
658 tdisk
->first_minor
= 0;
659 tdisk
->fops
= &nvm_fops
;
660 tdisk
->queue
= tqueue
;
662 targetdata
= tt
->init(dev
, tdisk
, s
->lun_begin
, s
->lun_end
);
663 if (IS_ERR(targetdata
))
666 tdisk
->private_data
= targetdata
;
667 tqueue
->queuedata
= targetdata
;
669 blk_queue_max_hw_sectors(tqueue
, 8 * dev
->ops
->max_phys_sect
);
671 set_capacity(tdisk
, tt
->capacity(targetdata
));
677 down_write(&nvm_lock
);
678 list_add_tail(&t
->list
, &dev
->online_targets
);
685 blk_cleanup_queue(tqueue
);
691 static void nvm_remove_target(struct nvm_target
*t
)
693 struct nvm_tgt_type
*tt
= t
->type
;
694 struct gendisk
*tdisk
= t
->disk
;
695 struct request_queue
*q
= tdisk
->queue
;
697 lockdep_assert_held(&nvm_lock
);
700 blk_cleanup_queue(q
);
703 tt
->exit(tdisk
->private_data
);
711 static int __nvm_configure_create(struct nvm_ioctl_create
*create
)
714 struct nvm_ioctl_create_simple
*s
;
716 down_write(&nvm_lock
);
717 dev
= nvm_find_nvm_dev(create
->dev
);
720 pr_err("nvm: device not found\n");
724 if (create
->conf
.type
!= NVM_CONFIG_TYPE_SIMPLE
) {
725 pr_err("nvm: config type not valid\n");
730 if (s
->lun_begin
> s
->lun_end
|| s
->lun_end
> dev
->nr_luns
) {
731 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
732 s
->lun_begin
, s
->lun_end
, dev
->nr_luns
);
736 return nvm_create_target(dev
, create
);
739 static int __nvm_configure_remove(struct nvm_ioctl_remove
*remove
)
741 struct nvm_target
*t
= NULL
;
745 down_write(&nvm_lock
);
746 list_for_each_entry(dev
, &nvm_devices
, devices
)
747 list_for_each_entry(t
, &dev
->online_targets
, list
) {
748 if (!strcmp(remove
->tgtname
, t
->disk
->disk_name
)) {
749 nvm_remove_target(t
);
757 pr_err("nvm: target \"%s\" doesn't exist.\n", remove
->tgtname
);
764 #ifdef CONFIG_NVM_DEBUG
765 static int nvm_configure_show(const char *val
)
768 char opcode
, devname
[DISK_NAME_LEN
];
771 ret
= sscanf(val
, "%c %32s", &opcode
, devname
);
773 pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
777 down_write(&nvm_lock
);
778 dev
= nvm_find_nvm_dev(devname
);
781 pr_err("nvm: device not found\n");
788 dev
->mt
->lun_info_print(dev
);
793 static int nvm_configure_remove(const char *val
)
795 struct nvm_ioctl_remove remove
;
799 ret
= sscanf(val
, "%c %256s", &opcode
, remove
.tgtname
);
801 pr_err("nvm: invalid command. Use \"d targetname\".\n");
807 return __nvm_configure_remove(&remove
);
810 static int nvm_configure_create(const char *val
)
812 struct nvm_ioctl_create create
;
814 int lun_begin
, lun_end
, ret
;
816 ret
= sscanf(val
, "%c %256s %256s %48s %u:%u", &opcode
, create
.dev
,
817 create
.tgtname
, create
.tgttype
,
818 &lun_begin
, &lun_end
);
820 pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
825 create
.conf
.type
= NVM_CONFIG_TYPE_SIMPLE
;
826 create
.conf
.s
.lun_begin
= lun_begin
;
827 create
.conf
.s
.lun_end
= lun_end
;
829 return __nvm_configure_create(&create
);
833 /* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
834 static int nvm_configure_by_str_event(const char *val
,
835 const struct kernel_param
*kp
)
840 ret
= sscanf(val
, "%c", &opcode
);
842 pr_err("nvm: string must have the format of \"cmd ...\"\n");
848 return nvm_configure_create(val
);
850 return nvm_configure_remove(val
);
852 return nvm_configure_show(val
);
854 pr_err("nvm: invalid command\n");
861 static int nvm_configure_get(char *buf
, const struct kernel_param
*kp
)
864 char *buf_start
= buf
;
867 buf
+= sprintf(buf
, "available devices:\n");
868 down_write(&nvm_lock
);
869 list_for_each_entry(dev
, &nvm_devices
, devices
) {
870 if (sz
> 4095 - DISK_NAME_LEN
)
872 buf
+= sprintf(buf
, " %32s\n", dev
->name
);
876 return buf
- buf_start
- 1;
879 static const struct kernel_param_ops nvm_configure_by_str_event_param_ops
= {
880 .set
= nvm_configure_by_str_event
,
881 .get
= nvm_configure_get
,
884 #undef MODULE_PARAM_PREFIX
885 #define MODULE_PARAM_PREFIX "lnvm."
887 module_param_cb(configure_debug
, &nvm_configure_by_str_event_param_ops
, NULL
,
890 #endif /* CONFIG_NVM_DEBUG */
892 static long nvm_ioctl_info(struct file
*file
, void __user
*arg
)
894 struct nvm_ioctl_info
*info
;
895 struct nvm_tgt_type
*tt
;
898 if (!capable(CAP_SYS_ADMIN
))
901 info
= memdup_user(arg
, sizeof(struct nvm_ioctl_info
));
905 info
->version
[0] = NVM_VERSION_MAJOR
;
906 info
->version
[1] = NVM_VERSION_MINOR
;
907 info
->version
[2] = NVM_VERSION_PATCH
;
909 down_write(&nvm_lock
);
910 list_for_each_entry(tt
, &nvm_targets
, list
) {
911 struct nvm_ioctl_info_tgt
*tgt
= &info
->tgts
[tgt_iter
];
913 tgt
->version
[0] = tt
->version
[0];
914 tgt
->version
[1] = tt
->version
[1];
915 tgt
->version
[2] = tt
->version
[2];
916 strncpy(tgt
->tgtname
, tt
->name
, NVM_TTYPE_NAME_MAX
);
921 info
->tgtsize
= tgt_iter
;
924 if (copy_to_user(arg
, info
, sizeof(struct nvm_ioctl_info
))) {
933 static long nvm_ioctl_get_devices(struct file
*file
, void __user
*arg
)
935 struct nvm_ioctl_get_devices
*devices
;
939 if (!capable(CAP_SYS_ADMIN
))
942 devices
= kzalloc(sizeof(struct nvm_ioctl_get_devices
), GFP_KERNEL
);
946 down_write(&nvm_lock
);
947 list_for_each_entry(dev
, &nvm_devices
, devices
) {
948 struct nvm_ioctl_device_info
*info
= &devices
->info
[i
];
950 sprintf(info
->devname
, "%s", dev
->name
);
952 info
->bmversion
[0] = dev
->mt
->version
[0];
953 info
->bmversion
[1] = dev
->mt
->version
[1];
954 info
->bmversion
[2] = dev
->mt
->version
[2];
955 sprintf(info
->bmname
, "%s", dev
->mt
->name
);
957 sprintf(info
->bmname
, "none");
962 pr_err("nvm: max 31 devices can be reported.\n");
968 devices
->nr_devices
= i
;
970 if (copy_to_user(arg
, devices
,
971 sizeof(struct nvm_ioctl_get_devices
))) {
980 static long nvm_ioctl_dev_create(struct file
*file
, void __user
*arg
)
982 struct nvm_ioctl_create create
;
984 if (!capable(CAP_SYS_ADMIN
))
987 if (copy_from_user(&create
, arg
, sizeof(struct nvm_ioctl_create
)))
990 create
.dev
[DISK_NAME_LEN
- 1] = '\0';
991 create
.tgttype
[NVM_TTYPE_NAME_MAX
- 1] = '\0';
992 create
.tgtname
[DISK_NAME_LEN
- 1] = '\0';
994 if (create
.flags
!= 0) {
995 pr_err("nvm: no flags supported\n");
999 return __nvm_configure_create(&create
);
1002 static long nvm_ioctl_dev_remove(struct file
*file
, void __user
*arg
)
1004 struct nvm_ioctl_remove remove
;
1006 if (!capable(CAP_SYS_ADMIN
))
1009 if (copy_from_user(&remove
, arg
, sizeof(struct nvm_ioctl_remove
)))
1012 remove
.tgtname
[DISK_NAME_LEN
- 1] = '\0';
1014 if (remove
.flags
!= 0) {
1015 pr_err("nvm: no flags supported\n");
1019 return __nvm_configure_remove(&remove
);
1022 static long nvm_ctl_ioctl(struct file
*file
, uint cmd
, unsigned long arg
)
1024 void __user
*argp
= (void __user
*)arg
;
1028 return nvm_ioctl_info(file
, argp
);
1029 case NVM_GET_DEVICES
:
1030 return nvm_ioctl_get_devices(file
, argp
);
1031 case NVM_DEV_CREATE
:
1032 return nvm_ioctl_dev_create(file
, argp
);
1033 case NVM_DEV_REMOVE
:
1034 return nvm_ioctl_dev_remove(file
, argp
);
1039 static const struct file_operations _ctl_fops
= {
1040 .open
= nonseekable_open
,
1041 .unlocked_ioctl
= nvm_ctl_ioctl
,
1042 .owner
= THIS_MODULE
,
1043 .llseek
= noop_llseek
,
1046 static struct miscdevice _nvm_misc
= {
1047 .minor
= MISC_DYNAMIC_MINOR
,
1049 .nodename
= "lightnvm/control",
1053 MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR
);
1055 static int __init
nvm_mod_init(void)
1059 ret
= misc_register(&_nvm_misc
);
1061 pr_err("nvm: misc_register failed for control device");
1066 static void __exit
nvm_mod_exit(void)
1068 misc_deregister(&_nvm_misc
);
1071 MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
1072 MODULE_LICENSE("GPL v2");
1073 MODULE_VERSION("0.1");
1074 module_init(nvm_mod_init
);
1075 module_exit(nvm_mod_exit
);