lightnvm: rename dma helper functions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0dc9a80adb9443407aca24836f756b12c6ab5569..1873a3bc913d40becb32de05c620e5840d2f11b8 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
 #include <linux/sched/sysctl.h>
 #include <uapi/linux/lightnvm.h>
 
-static LIST_HEAD(nvm_targets);
+static LIST_HEAD(nvm_tgt_types);
 static LIST_HEAD(nvm_mgrs);
 static LIST_HEAD(nvm_devices);
+static LIST_HEAD(nvm_targets);
 static DECLARE_RWSEM(nvm_lock);
 
+static struct nvm_target *nvm_find_target(const char *name)
+{
+       struct nvm_target *tgt;
+
+       list_for_each_entry(tgt, &nvm_targets, list)
+               if (!strcmp(name, tgt->disk->disk_name))
+                       return tgt;
+
+       return NULL;
+}
+
 static struct nvm_tgt_type *nvm_find_target_type(const char *name)
 {
        struct nvm_tgt_type *tt;
 
-       list_for_each_entry(tt, &nvm_targets, list)
+       list_for_each_entry(tt, &nvm_tgt_types, list)
                if (!strcmp(name, tt->name))
                        return tt;
 
        return NULL;
 }
 
-int nvm_register_target(struct nvm_tgt_type *tt)
+int nvm_register_tgt_type(struct nvm_tgt_type *tt)
 {
        int ret = 0;
 
@@ -54,14 +66,14 @@ int nvm_register_target(struct nvm_tgt_type *tt)
        if (nvm_find_target_type(tt->name))
                ret = -EEXIST;
        else
-               list_add(&tt->list, &nvm_targets);
+               list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_lock);
 
        return ret;
 }
-EXPORT_SYMBOL(nvm_register_target);
+EXPORT_SYMBOL(nvm_register_tgt_type);
 
-void nvm_unregister_target(struct nvm_tgt_type *tt)
+void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
 {
        if (!tt)
                return;
@@ -70,20 +82,20 @@ void nvm_unregister_target(struct nvm_tgt_type *tt)
        list_del(&tt->list);
        up_write(&nvm_lock);
 }
-EXPORT_SYMBOL(nvm_unregister_target);
+EXPORT_SYMBOL(nvm_unregister_tgt_type);
 
 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                                                        dma_addr_t *dma_handler)
 {
-       return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
+       return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                                dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_alloc);
 
-void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
                                                        dma_addr_t dma_handler)
 {
-       dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
+       dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_free);
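
A minimal sketch of how a caller might pair the renamed helpers, assuming a
struct nvm_dev *dev is in scope; the variable names and the elided error
handling are illustrative only, not taken from the commit:

	dma_addr_t dma_handler;
	struct ppa_addr *ppa_list;

	/* carve a PPA list out of the device's generic dma_pool */
	ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &dma_handler);
	if (!ppa_list)
		return -ENOMEM;

	/* ... fill ppa_list and issue I/O ... */

	/* return the allocation to the same pool */
	nvm_dev_dma_free(dev, ppa_list, dma_handler);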
 
@@ -239,33 +251,36 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 EXPORT_SYMBOL(nvm_generic_to_addr_mode);
 
 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
-                                       struct ppa_addr *ppas, int nr_ppas)
+                               struct ppa_addr *ppas, int nr_ppas, int vblk)
 {
        int i, plane_cnt, pl_idx;
 
-       if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
-               rqd->nr_pages = 1;
+       if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+               rqd->nr_pages = nr_ppas;
                rqd->ppa_addr = ppas[0];
 
                return 0;
        }
 
-       plane_cnt = dev->plane_mode;
-       rqd->nr_pages = plane_cnt * nr_ppas;
-
-       if (dev->ops->max_phys_sect < rqd->nr_pages)
-               return -EINVAL;
-
+       rqd->nr_pages = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }
 
-       for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+       if (!vblk) {
+               for (i = 0; i < nr_ppas; i++)
+                       rqd->ppa_list[i] = ppas[i];
+       } else {
+               plane_cnt = dev->plane_mode;
+               rqd->nr_pages *= plane_cnt;
+
                for (i = 0; i < nr_ppas; i++) {
-                       ppas[i].g.pl = pl_idx;
-                       rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+                       for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+                               ppas[i].g.pl = pl_idx;
+                               rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+                       }
                }
        }
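
For illustration: with plane_mode == NVM_PLANE_DOUBLE and nr_ppas == 2, the
vblk branch above yields rqd->nr_pages == 4 and a plane-major list:

	rqd->ppa_list[0] = ppas[0]  (g.pl = 0)
	rqd->ppa_list[1] = ppas[1]  (g.pl = 0)
	rqd->ppa_list[2] = ppas[0]  (g.pl = 1)
	rqd->ppa_list[3] = ppas[1]  (g.pl = 1)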
 
@@ -292,7 +307,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
 
        memset(&rqd, 0, sizeof(struct nvm_rq));
 
-       ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+       ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
        if (ret)
                return ret;
 
@@ -322,11 +337,10 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
        complete(waiting);
 }
 
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
-                               int opcode, int flags, void *buf, int len)
+int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+                                               int flags, void *buf, int len)
 {
        DECLARE_COMPLETION_ONSTACK(wait);
-       struct nvm_rq rqd;
        struct bio *bio;
        int ret;
        unsigned long hang_check;
@@ -335,23 +349,21 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
        if (IS_ERR_OR_NULL(bio))
                return -ENOMEM;
 
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-       ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+       nvm_generic_to_addr_mode(dev, rqd);
+
+       rqd->dev = dev;
+       rqd->opcode = opcode;
+       rqd->flags = flags;
+       rqd->bio = bio;
+       rqd->wait = &wait;
+       rqd->end_io = nvm_end_io_sync;
+
+       ret = dev->ops->submit_io(dev, rqd);
        if (ret) {
                bio_put(bio);
                return ret;
        }
 
-       rqd.opcode = opcode;
-       rqd.bio = bio;
-       rqd.wait = &wait;
-       rqd.dev = dev;
-       rqd.end_io = nvm_end_io_sync;
-       rqd.flags = flags;
-       nvm_generic_to_addr_mode(dev, &rqd);
-
-       ret = dev->ops->submit_io(dev, &rqd);
-
        /* Prevent hang_check timer from firing at us during very long I/O */
        hang_check = sysctl_hung_task_timeout_secs;
        if (hang_check)
@@ -359,12 +371,113 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
        else
                wait_for_completion_io(&wait);
 
+       return rqd->error;
+}
+
+/**
+ * nvm_submit_ppa_list - submit user-defined ppa list to device. The user
+ *                      must take care to free the ppa list if necessary.
+ * @dev:       device
+ * @ppa_list:  user created ppa_list
+ * @nr_ppas:   length of ppa_list
+ * @opcode:    device opcode
+ * @flags:     device flags
+ * @buf:       data buffer
+ * @len:       data buffer length
+ */
+int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
+                       int nr_ppas, int opcode, int flags, void *buf, int len)
+{
+       struct nvm_rq rqd;
+
+       if (dev->ops->max_phys_sect < nr_ppas)
+               return -EINVAL;
+
+       memset(&rqd, 0, sizeof(struct nvm_rq));
+
+       rqd.nr_pages = nr_ppas;
+       if (nr_ppas > 1)
+               rqd.ppa_list = ppa_list;
+       else
+               rqd.ppa_addr = ppa_list[0];
+
+       return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+}
+EXPORT_SYMBOL(nvm_submit_ppa_list);
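
A minimal caller sketch for the new export: a synchronous single-sector
read. NVM_OP_PREAD comes from <linux/lightnvm.h>; the zero flags value and
the ch/lun/blk/pg geometry are placeholder assumptions:

	struct ppa_addr ppa;
	void *buf;
	int ret;

	buf = kzalloc(dev->sec_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = ch;
	ppa.g.lun = lun;
	ppa.g.blk = blk;
	ppa.g.pg = pg;

	/* one entry: the list is taken as-is, no plane unfolding */
	ret = nvm_submit_ppa_list(dev, &ppa, 1, NVM_OP_PREAD, 0, buf,
							dev->sec_size);
	kfree(buf);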
+
+/**
+ * nvm_submit_ppa - submit PPAs to device. PPAs are automatically unfolded
+ *                 into single-, dual- or quad-plane PPAs based on device type.
+ * @dev:       device
+ * @ppa:       user created ppa_list
+ * @nr_ppas:   length of ppa_list
+ * @opcode:    device opcode
+ * @flags:     device flags
+ * @buf:       data buffer
+ * @len:       data buffer length
+ */
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+                               int opcode, int flags, void *buf, int len)
+{
+       struct nvm_rq rqd;
+       int ret;
+
+       memset(&rqd, 0, sizeof(struct nvm_rq));
+       ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
+       if (ret)
+               return ret;
+
+       ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+
        nvm_free_rqd_ppalist(dev, &rqd);
 
-       return rqd.error;
+       return ret;
 }
 EXPORT_SYMBOL(nvm_submit_ppa);
 
+/*
+ * folds a bad block list from its plane representation to its virtual
+ * block representation. The fold is done in place and the reduced size is
+ * returned.
+ *
+ * If the status of any plane is bad or grown bad, the virtual block is
+ * marked bad. Otherwise, the state of the first plane acts as the block state.
+ */
+int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+{
+       int blk, offset, pl, blktype;
+
+       if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+               return -EINVAL;
+
+       for (blk = 0; blk < dev->blks_per_lun; blk++) {
+               offset = blk * dev->plane_mode;
+               blktype = blks[offset];
+
+               /* Bad blocks on any plane take precedence over other types */
+               for (pl = 0; pl < dev->plane_mode; pl++) {
+                       if (blks[offset + pl] &
+                                       (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+                               blktype = blks[offset + pl];
+                               break;
+                       }
+               }
+
+               blks[blk] = blktype;
+       }
+
+       return dev->blks_per_lun;
+}
+EXPORT_SYMBOL(nvm_bb_tbl_fold);
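
Worked example, assuming plane_mode == 2 and blks_per_lun == 4 (so nr_blks
== 8 on input). If only the second plane of virtual block 2 is grown bad:

	in:  blks[] = { G, G,  G, G,  G, B,  G, G }   /* per-plane pairs */
	out: blks[] = { G, G, B, G }                  /* return value: 4 */

where G is a good block state and B is NVM_BLK_T_GRWN_BAD; the one bad
plane forces the whole virtual block bad.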
+
+int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
+{
+       ppa = generic_to_dev_addr(dev, ppa);
+
+       return dev->ops->get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_bb_tbl);
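
Putting the two helpers together, a sketch of reading and folding one LUN's
bad block table; ch and lun are hypothetical, and the buffer is sized for
the per-plane table the device reports:

	int nr_blks = dev->blks_per_lun * dev->plane_mode;
	struct ppa_addr ppa;
	u8 *blks;
	int ret;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = ch;
	ppa.g.lun = lun;

	/* generic-to-device address conversion happens inside the helper */
	ret = nvm_get_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	/* collapse per-plane entries; returns dev->blks_per_lun on success */
	ret = nvm_bb_tbl_fold(dev, blks, nr_blks);
out:
	kfree(blks);
	return ret;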
+
 static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 {
        int i;
@@ -414,6 +527,7 @@ static int nvm_core_init(struct nvm_dev *dev)
 {
        struct nvm_id *id = &dev->identity;
        struct nvm_id_group *grp = &id->groups[0];
+       int ret;
 
        /* device values */
        dev->nr_chnls = grp->num_ch;
@@ -421,6 +535,8 @@ static int nvm_core_init(struct nvm_dev *dev)
        dev->pgs_per_blk = grp->num_pg;
        dev->blks_per_lun = grp->num_blk;
        dev->nr_planes = grp->num_pln;
+       dev->fpg_size = grp->fpg_sz;
+       dev->pfpg_size = grp->fpg_sz * grp->num_pln;
        dev->sec_size = grp->csecs;
        dev->oob_size = grp->sos;
        dev->sec_per_pg = grp->fpg_sz / grp->csecs;
@@ -430,33 +546,16 @@ static int nvm_core_init(struct nvm_dev *dev)
        dev->plane_mode = NVM_PLANE_SINGLE;
        dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
 
-       if (grp->mtype != 0) {
-               pr_err("nvm: memory type not supported\n");
-               return -EINVAL;
-       }
-
-       switch (grp->fmtype) {
-       case NVM_ID_FMTYPE_SLC:
-               if (nvm_init_slc_tbl(dev, grp))
-                       return -ENOMEM;
-               break;
-       case NVM_ID_FMTYPE_MLC:
-               if (nvm_init_mlc_tbl(dev, grp))
-                       return -ENOMEM;
-               break;
-       default:
-               pr_err("nvm: flash type not supported\n");
-               return -EINVAL;
-       }
-
-       if (!dev->lps_per_blk)
-               pr_info("nvm: lower page programming table missing\n");
-
        if (grp->mpos & 0x020202)
                dev->plane_mode = NVM_PLANE_DOUBLE;
        if (grp->mpos & 0x040404)
                dev->plane_mode = NVM_PLANE_QUAD;
 
+       if (grp->mtype != 0) {
+               pr_err("nvm: memory type not supported\n");
+               return -EINVAL;
+       }
+
        /* calculated values */
        dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
        dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
@@ -468,11 +567,33 @@ static int nvm_core_init(struct nvm_dev *dev)
                                        sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;
-       INIT_LIST_HEAD(&dev->online_targets);
+
+       switch (grp->fmtype) {
+       case NVM_ID_FMTYPE_SLC:
+               if (nvm_init_slc_tbl(dev, grp)) {
+                       ret = -ENOMEM;
+                       goto err_fmtype;
+               }
+               break;
+       case NVM_ID_FMTYPE_MLC:
+               if (nvm_init_mlc_tbl(dev, grp)) {
+                       ret = -ENOMEM;
+                       goto err_fmtype;
+               }
+               break;
+       default:
+               pr_err("nvm: flash type not supported\n");
+               ret = -EINVAL;
+               goto err_fmtype;
+       }
+
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);
 
        return 0;
+err_fmtype:
+       kfree(dev->lun_map);
+       return ret;
 }
 
 static void nvm_free(struct nvm_dev *dev)
@@ -484,6 +605,7 @@ static void nvm_free(struct nvm_dev *dev)
                dev->mt->unregister_mgr(dev);
 
        kfree(dev->lptbl);
+       kfree(dev->lun_map);
 }
 
 static int nvm_init(struct nvm_dev *dev)
@@ -530,8 +652,8 @@ err:
 
 static void nvm_exit(struct nvm_dev *dev)
 {
-       if (dev->ppalist_pool)
-               dev->ops->destroy_dma_pool(dev->ppalist_pool);
+       if (dev->dma_pool)
+               dev->ops->destroy_dma_pool(dev->dma_pool);
        nvm_free(dev);
 
        pr_info("nvm: successfully unloaded\n");
@@ -565,9 +687,9 @@ int nvm_register(struct request_queue *q, char *disk_name,
        }
 
        if (dev->ops->max_phys_sect > 1) {
-               dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
-               if (!dev->ppalist_pool) {
-                       pr_err("nvm: could not create ppa pool\n");
+               dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+               if (!dev->dma_pool) {
+                       pr_err("nvm: could not create dma pool\n");
                        ret = -ENOMEM;
                        goto err_init;
                }
@@ -613,7 +735,6 @@ void nvm_unregister(char *disk_name)
        up_write(&nvm_lock);
 
        nvm_exit(dev);
-       kfree(dev->lun_map);
        kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
@@ -645,12 +766,11 @@ static int nvm_create_target(struct nvm_dev *dev,
                return -EINVAL;
        }
 
-       list_for_each_entry(t, &dev->online_targets, list) {
-               if (!strcmp(create->tgtname, t->disk->disk_name)) {
-                       pr_err("nvm: target name already exists.\n");
-                       up_write(&nvm_lock);
-                       return -EINVAL;
-               }
+       t = nvm_find_target(create->tgtname);
+       if (t) {
+               pr_err("nvm: target name already exists.\n");
+               up_write(&nvm_lock);
+               return -EINVAL;
        }
        up_write(&nvm_lock);
 
@@ -690,7 +810,7 @@ static int nvm_create_target(struct nvm_dev *dev,
        t->disk = tdisk;
 
        down_write(&nvm_lock);
-       list_add_tail(&t->list, &dev->online_targets);
+       list_add_tail(&t->list, &nvm_targets);
        up_write(&nvm_lock);
 
        return 0;
@@ -753,26 +873,19 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
 
 static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
 {
-       struct nvm_target *t = NULL;
-       struct nvm_dev *dev;
-       int ret = -1;
+       struct nvm_target *t;
 
        down_write(&nvm_lock);
-       list_for_each_entry(dev, &nvm_devices, devices)
-               list_for_each_entry(t, &dev->online_targets, list) {
-                       if (!strcmp(remove->tgtname, t->disk->disk_name)) {
-                               nvm_remove_target(t);
-                               ret = 0;
-                               break;
-                       }
-               }
-       up_write(&nvm_lock);
-
-       if (ret) {
+       t = nvm_find_target(remove->tgtname);
+       if (!t) {
                pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
+               up_write(&nvm_lock);
                return -EINVAL;
        }
 
+       nvm_remove_target(t);
+       up_write(&nvm_lock);
+
        return 0;
 }
 
@@ -921,7 +1034,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
        info->version[2] = NVM_VERSION_PATCH;
 
        down_write(&nvm_lock);
-       list_for_each_entry(tt, &nvm_targets, list) {
+       list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
 
                tgt->version[0] = tt->version[0];