drivers/lightnvm/rrpc.c
1 /*
2 * Copyright (C) 2015 IT University of Copenhagen
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
15 */
16
17 #include "rrpc.h"
18
19 static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
20 static DECLARE_RWSEM(rrpc_lock);
21
22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
23 struct nvm_rq *rqd, unsigned long flags);
24
25 #define rrpc_for_each_lun(rrpc, rlun, i) \
26 for ((i) = 0, rlun = &(rrpc)->luns[0]; \
27 (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
28
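/* Mark the physical page behind @a invalid in its block and clear the
 * matching reverse-map entry. Caller must hold rrpc->rev_lock.
 */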
29 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
30 {
31 struct rrpc_block *rblk = a->rblk;
32 unsigned int pg_offset;
33
34 lockdep_assert_held(&rrpc->rev_lock);
35
36 if (a->addr == ADDR_EMPTY || !rblk)
37 return;
38
39 spin_lock(&rblk->lock);
40
41 div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
42 WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
43 rblk->nr_invalid_pages++;
44
45 spin_unlock(&rblk->lock);
46
47 rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
48 }
49
50 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
51 unsigned len)
52 {
53 sector_t i;
54
55 spin_lock(&rrpc->rev_lock);
56 for (i = slba; i < slba + len; i++) {
57 struct rrpc_addr *gp = &rrpc->trans_map[i];
58
59 rrpc_page_invalidate(rrpc, gp);
60 gp->rblk = NULL;
61 }
62 spin_unlock(&rrpc->rev_lock);
63 }
64
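/* Allocate a request and try to take the in-flight lock for @pages logical
 * addresses starting at @laddr. Returns NULL when the range is currently
 * in flight, or an ERR_PTR on allocation failure.
 */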
65 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
66 sector_t laddr, unsigned int pages)
67 {
68 struct nvm_rq *rqd;
69 struct rrpc_inflight_rq *inf;
70
71 rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
72 if (!rqd)
73 return ERR_PTR(-ENOMEM);
74
75 inf = rrpc_get_inflight_rq(rqd);
76 if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
77 mempool_free(rqd, rrpc->rq_pool);
78 return NULL;
79 }
80
81 return rqd;
82 }
83
84 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
85 {
86 struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
87
88 rrpc_unlock_laddr(rrpc, inf);
89
90 mempool_free(rqd, rrpc->rq_pool);
91 }
92
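/* Handle a REQ_DISCARD bio: wait until the logical range can be locked,
 * then invalidate the mapped pages so they can be reclaimed by GC.
 */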
93 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
94 {
95 sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
96 sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
97 struct nvm_rq *rqd;
98
99 do {
100 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
101 schedule();
102 } while (!rqd);
103
104 if (IS_ERR(rqd)) {
105 pr_err("rrpc: unable to acquire inflight IO\n");
106 bio_io_error(bio);
107 return;
108 }
109
110 rrpc_invalidate_range(rrpc, slba, len);
111 rrpc_inflight_laddr_release(rrpc, rqd);
112 }
113
114 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
115 {
116 return (rblk->next_page == rrpc->dev->pgs_per_blk);
117 }
118
119 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
120 {
121 struct nvm_block *blk = rblk->parent;
122
123 return blk->id * rrpc->dev->pgs_per_blk;
124 }
125
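/* Decompose a linear physical page address into the device's generic
 * sector/page/block/lun/channel address format.
 */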
126 static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
127 struct ppa_addr r)
128 {
129 struct ppa_addr l;
130 int secs, pgs, blks, luns;
131 sector_t ppa = r.ppa;
132
133 l.ppa = 0;
134
135 div_u64_rem(ppa, dev->sec_per_pg, &secs);
136 l.g.sec = secs;
137
138 sector_div(ppa, dev->sec_per_pg);
139 div_u64_rem(ppa, dev->sec_per_blk, &pgs);
140 l.g.pg = pgs;
141
142 sector_div(ppa, dev->pgs_per_blk);
143 div_u64_rem(ppa, dev->blks_per_lun, &blks);
144 l.g.blk = blks;
145
146 sector_div(ppa, dev->blks_per_lun);
147 div_u64_rem(ppa, dev->luns_per_chnl, &luns);
148 l.g.lun = luns;
149
150 sector_div(ppa, dev->luns_per_chnl);
151 l.g.ch = ppa;
152
153 return l;
154 }
155
156 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
157 {
158 struct ppa_addr paddr;
159
160 paddr.ppa = addr;
161 return linear_to_generic_addr(dev, paddr);
162 }
163
164 /* requires lun->lock taken */
165 static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
166 {
167 struct rrpc *rrpc = rlun->rrpc;
168
169 BUG_ON(!rblk);
170
171 if (rlun->cur) {
172 spin_lock(&rlun->cur->lock);
173 WARN_ON(!block_is_full(rrpc, rlun->cur));
174 spin_unlock(&rlun->cur->lock);
175 }
176 rlun->cur = rblk;
177 }
178
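/* Get a free block from the media manager, move it to the LUN's open list
 * and reset the per-block write state.
 */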
179 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
180 unsigned long flags)
181 {
182 struct nvm_lun *lun = rlun->parent;
183 struct nvm_block *blk;
184 struct rrpc_block *rblk;
185
186 spin_lock(&lun->lock);
187 blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
188 if (!blk) {
189 pr_err("nvm: rrpc: cannot get new block from media manager\n");
190 spin_unlock(&lun->lock);
191 return NULL;
192 }
193
194 rblk = &rlun->blocks[blk->id];
195 list_add_tail(&rblk->list, &rlun->open_list);
196 spin_unlock(&lun->lock);
197
198 blk->priv = rblk;
199 bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
200 rblk->next_page = 0;
201 rblk->nr_invalid_pages = 0;
202 atomic_set(&rblk->data_cmnt_size, 0);
203
204 return rblk;
205 }
206
207 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
208 {
209 struct rrpc_lun *rlun = rblk->rlun;
210 struct nvm_lun *lun = rlun->parent;
211
212 spin_lock(&lun->lock);
213 nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
214 list_del(&rblk->list);
215 spin_unlock(&lun->lock);
216 }
217
218 static void rrpc_put_blks(struct rrpc *rrpc)
219 {
220 struct rrpc_lun *rlun;
221 int i;
222
223 for (i = 0; i < rrpc->nr_luns; i++) {
224 rlun = &rrpc->luns[i];
225 if (rlun->cur)
226 rrpc_put_blk(rrpc, rlun->cur);
227 if (rlun->gc_cur)
228 rrpc_put_blk(rrpc, rlun->gc_cur);
229 }
230 }
231
232 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
233 {
234 int next = atomic_inc_return(&rrpc->next_lun);
235
236 return &rrpc->luns[next % rrpc->nr_luns];
237 }
238
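/* Queue the per-LUN GC work item for every LUN managed by this target. */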
239 static void rrpc_gc_kick(struct rrpc *rrpc)
240 {
241 struct rrpc_lun *rlun;
242 unsigned int i;
243
244 for (i = 0; i < rrpc->nr_luns; i++) {
245 rlun = &rrpc->luns[i];
246 queue_work(rrpc->krqd_wq, &rlun->ws_gc);
247 }
248 }
249
250 /*
251 * timed GC every interval.
252 */
253 static void rrpc_gc_timer(unsigned long data)
254 {
255 struct rrpc *rrpc = (struct rrpc *)data;
256
257 rrpc_gc_kick(rrpc);
258 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
259 }
260
261 static void rrpc_end_sync_bio(struct bio *bio)
262 {
263 struct completion *waiting = bio->bi_private;
264
265 if (bio->bi_error)
266 pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
267
268 complete(waiting);
269 }
270
271 /*
272 * rrpc_move_valid_pages -- migrate live data off the block
273 * @rrpc: the 'rrpc' structure
274 * @block: the block from which to migrate live pages
275 *
276 * Description:
277 * GC algorithms may call this function to migrate remaining live
278 * pages off the block prior to erasing it. This function blocks
279 * further execution until the operation is complete.
280 */
281 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
282 {
283 struct request_queue *q = rrpc->dev->q;
284 struct rrpc_rev_addr *rev;
285 struct nvm_rq *rqd;
286 struct bio *bio;
287 struct page *page;
288 int slot;
289 int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
290 u64 phys_addr;
291 DECLARE_COMPLETION_ONSTACK(wait);
292
293 if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
294 return 0;
295
296 bio = bio_alloc(GFP_NOIO, 1);
297 if (!bio) {
298 pr_err("nvm: could not alloc bio to gc\n");
299 return -ENOMEM;
300 }
301
302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303 if (!page) {
304 bio_put(bio);
return -ENOMEM;
}
305
306 while ((slot = find_first_zero_bit(rblk->invalid_pages,
307 nr_pgs_per_blk)) < nr_pgs_per_blk) {
308
309 /* Lock laddr */
310 phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
311
312 try:
313 spin_lock(&rrpc->rev_lock);
314 /* Get logical address from physical to logical table */
315 rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
316 /* already updated by previous regular write */
317 if (rev->addr == ADDR_EMPTY) {
318 spin_unlock(&rrpc->rev_lock);
319 continue;
320 }
321
322 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
323 if (IS_ERR_OR_NULL(rqd)) {
324 spin_unlock(&rrpc->rev_lock);
325 schedule();
326 goto try;
327 }
328
329 spin_unlock(&rrpc->rev_lock);
330
331 /* Perform read to do GC */
332 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
333 bio->bi_rw = READ;
334 bio->bi_private = &wait;
335 bio->bi_end_io = rrpc_end_sync_bio;
336
337 /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
338 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
339
340 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
341 pr_err("rrpc: gc read failed.\n");
342 rrpc_inflight_laddr_release(rrpc, rqd);
343 goto finished;
344 }
345 wait_for_completion_io(&wait);
346 if (bio->bi_error) {
347 rrpc_inflight_laddr_release(rrpc, rqd);
348 goto finished;
349 }
350
351 bio_reset(bio);
352 reinit_completion(&wait);
353
354 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
355 bio->bi_rw = WRITE;
356 bio->bi_private = &wait;
357 bio->bi_end_io = rrpc_end_sync_bio;
358
359 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
360
361 /* turn the command around and write the data back to a new
362 * address
363 */
364 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
365 pr_err("rrpc: gc write failed.\n");
366 rrpc_inflight_laddr_release(rrpc, rqd);
367 goto finished;
368 }
369 wait_for_completion_io(&wait);
370
371 rrpc_inflight_laddr_release(rrpc, rqd);
372 if (bio->bi_error)
373 goto finished;
374
375 bio_reset(bio);
376 }
377
378 finished:
379 mempool_free(page, rrpc->page_pool);
380 bio_put(bio);
381
382 if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
383 pr_err("nvm: failed to garbage collect block\n");
384 return -EIO;
385 }
386
387 return 0;
388 }
389
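/* Per-block GC worker: migrate the remaining valid pages, erase the block
 * and hand it back to the media manager. On failure the block is put back
 * on the LUN's priority list for a later attempt.
 */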
390 static void rrpc_block_gc(struct work_struct *work)
391 {
392 struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
393 ws_gc);
394 struct rrpc *rrpc = gcb->rrpc;
395 struct rrpc_block *rblk = gcb->rblk;
396 struct nvm_dev *dev = rrpc->dev;
397 struct nvm_lun *lun = rblk->parent->lun;
398 struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
399
400 mempool_free(gcb, rrpc->gcb_pool);
401 pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
402
403 if (rrpc_move_valid_pages(rrpc, rblk))
404 goto put_back;
405
406 if (nvm_erase_blk(dev, rblk->parent))
407 goto put_back;
408
409 rrpc_put_blk(rrpc, rblk);
410
411 return;
412
413 put_back:
414 spin_lock(&rlun->lock);
415 list_add_tail(&rblk->prio, &rlun->prio_list);
416 spin_unlock(&rlun->lock);
417 }
418
419 /* the block with the highest number of invalid pages will be at the
420 * beginning of the list
421 */
422 static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
423 struct rrpc_block *rb)
424 {
425 if (ra->nr_invalid_pages == rb->nr_invalid_pages)
426 return ra;
427
428 return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
429 }
430
431 /* linearly find the block with highest number of invalid pages
432 * requires lun->lock
433 */
434 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
435 {
436 struct list_head *prio_list = &rlun->prio_list;
437 struct rrpc_block *rblock, *max;
438
439 BUG_ON(list_empty(prio_list));
440
441 max = list_first_entry(prio_list, struct rrpc_block, prio);
442 list_for_each_entry(rblock, prio_list, prio)
443 max = rblock_max_invalid(max, rblock);
444
445 return max;
446 }
447
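/* Per-LUN GC worker: while the LUN is below its free-block threshold, pick
 * the block with the most invalid pages from the priority list and queue it
 * for reclaim.
 */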
448 static void rrpc_lun_gc(struct work_struct *work)
449 {
450 struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
451 struct rrpc *rrpc = rlun->rrpc;
452 struct nvm_lun *lun = rlun->parent;
453 struct rrpc_block_gc *gcb;
454 unsigned int nr_blocks_need;
455
456 nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
457
458 if (nr_blocks_need < rrpc->nr_luns)
459 nr_blocks_need = rrpc->nr_luns;
460
461 spin_lock(&rlun->lock);
462 while (nr_blocks_need > lun->nr_free_blocks &&
463 !list_empty(&rlun->prio_list)) {
464 struct rrpc_block *rblock = block_prio_find_max(rlun);
465 struct nvm_block *block = rblock->parent;
466
467 if (!rblock->nr_invalid_pages)
468 break;
469
470 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
471 if (!gcb)
472 break;
473
474 list_del_init(&rblock->prio);
475
476 BUG_ON(!block_is_full(rrpc, rblock));
477
478 pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
479
480 gcb->rrpc = rrpc;
481 gcb->rblk = rblock;
482 INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
483
484 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
485
486 nr_blocks_need--;
487 }
488 spin_unlock(&rlun->lock);
489
490 /* TODO: Hint that request queue can be started again */
491 }
492
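/* Deferred work run when a block has been fully written: put it on the
 * LUN's priority list so GC can consider it for reclaim.
 */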
493 static void rrpc_gc_queue(struct work_struct *work)
494 {
495 struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
496 ws_gc);
497 struct rrpc *rrpc = gcb->rrpc;
498 struct rrpc_block *rblk = gcb->rblk;
499 struct nvm_lun *lun = rblk->parent->lun;
500 struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
501
502 spin_lock(&rlun->lock);
503 list_add_tail(&rblk->prio, &rlun->prio_list);
504 spin_unlock(&rlun->lock);
505
506 mempool_free(gcb, rrpc->gcb_pool);
507 pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
508 rblk->parent->id);
509 }
510
511 static const struct block_device_operations rrpc_fops = {
512 .owner = THIS_MODULE,
513 };
514
515 static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
516 {
517 unsigned int i;
518 struct rrpc_lun *rlun, *max_free;
519
520 if (!is_gc)
521 return get_next_lun(rrpc);
522
523 /* during GC we don't care about RR; instead we want to make
524 * sure that the number of free blocks stays even across the LUNs.
525 */
526 max_free = &rrpc->luns[0];
527 /* prevent a GC-ing lun from devouring pages of a lun with
528 * few free blocks. We don't take the lock as we only need an
529 * estimate.
530 */
531 rrpc_for_each_lun(rrpc, rlun, i) {
532 if (rlun->parent->nr_free_blocks >
533 max_free->parent->nr_free_blocks)
534 max_free = rlun;
535 }
536
537 return max_free;
538 }
539
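/* Install the laddr -> paddr mapping and the matching reverse entry,
 * invalidating any page the logical address previously pointed to.
 */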
540 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
541 struct rrpc_block *rblk, u64 paddr)
542 {
543 struct rrpc_addr *gp;
544 struct rrpc_rev_addr *rev;
545
546 BUG_ON(laddr >= rrpc->nr_pages);
547
548 gp = &rrpc->trans_map[laddr];
549 spin_lock(&rrpc->rev_lock);
550 if (gp->rblk)
551 rrpc_page_invalidate(rrpc, gp);
552
553 gp->addr = paddr;
554 gp->rblk = rblk;
555
556 rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
557 rev->addr = laddr;
558 spin_unlock(&rrpc->rev_lock);
559
560 return gp;
561 }
562
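/* Hand out the next free page in @rblk, or ADDR_EMPTY if the block is full. */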
563 static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
564 {
565 u64 addr = ADDR_EMPTY;
566
567 spin_lock(&rblk->lock);
568 if (block_is_full(rrpc, rblk))
569 goto out;
570
571 addr = block_to_addr(rrpc, rblk) + rblk->next_page;
572
573 rblk->next_page++;
574 out:
575 spin_unlock(&rblk->lock);
576 return addr;
577 }
578
579 /* Simple round-robin Logical to physical address translation.
580 *
581 * Retrieve the mapping using the active append point. Then update the ap for
582 * the next write to the disk.
583 *
584 * Returns the rrpc_addr entry holding the physical address and block for the
585 * request. The entry lives in rrpc->trans_map and is not separately freed.
586 */
587 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
588 int is_gc)
589 {
590 struct rrpc_lun *rlun;
591 struct rrpc_block *rblk;
592 struct nvm_lun *lun;
593 u64 paddr;
594
595 rlun = rrpc_get_lun_rr(rrpc, is_gc);
596 lun = rlun->parent;
597
598 if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
599 return NULL;
600
601 spin_lock(&rlun->lock);
602
603 rblk = rlun->cur;
604 retry:
605 paddr = rrpc_alloc_addr(rrpc, rblk);
606
607 if (paddr == ADDR_EMPTY) {
608 rblk = rrpc_get_blk(rrpc, rlun, 0);
609 if (rblk) {
610 rrpc_set_lun_cur(rlun, rblk);
611 goto retry;
612 }
613
614 if (is_gc) {
615 /* retry from emergency gc block */
616 paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
617 if (paddr == ADDR_EMPTY) {
618 rblk = rrpc_get_blk(rrpc, rlun, 1);
619 if (!rblk) {
620 pr_err("rrpc: no more blocks\n");
621 goto err;
622 }
623
624 rlun->gc_cur = rblk;
625 paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
626 }
627 rblk = rlun->gc_cur;
628 }
629 }
630
631 spin_unlock(&rlun->lock);
632 return rrpc_update_map(rrpc, laddr, rblk, paddr);
633 err:
634 spin_unlock(&rlun->lock);
635 return NULL;
636 }
637
638 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
639 {
640 struct rrpc_block_gc *gcb;
641
642 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
643 if (!gcb) {
644 pr_err("rrpc: unable to queue block for gc.\n");
645 return;
646 }
647
648 gcb->rrpc = rrpc;
649 gcb->rblk = rblk;
650
651 INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
652 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
653 }
654
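/* Write completion: count committed pages per block and, once a block is
 * fully written, move it to the closed list and schedule it for GC.
 */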
655 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
656 sector_t laddr, uint8_t npages)
657 {
658 struct rrpc_addr *p;
659 struct rrpc_block *rblk;
660 struct nvm_lun *lun;
661 int cmnt_size, i;
662
663 for (i = 0; i < npages; i++) {
664 p = &rrpc->trans_map[laddr + i];
665 rblk = p->rblk;
666 lun = rblk->parent->lun;
667
668 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
669 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
670 struct nvm_block *blk = rblk->parent;
671 struct rrpc_lun *rlun = rblk->rlun;
672
673 spin_lock(&lun->lock);
674 lun->nr_open_blocks--;
675 lun->nr_closed_blocks++;
676 blk->state &= ~NVM_BLK_ST_OPEN;
677 blk->state |= NVM_BLK_ST_CLOSED;
678 list_move_tail(&rblk->list, &rlun->closed_list);
679 spin_unlock(&lun->lock);
680
681 rrpc_run_gc(rrpc, rblk);
682 }
683 }
684 }
685
686 static void rrpc_end_io(struct nvm_rq *rqd)
687 {
688 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
689 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
690 uint8_t npages = rqd->nr_pages;
691 sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
692
693 if (bio_data_dir(rqd->bio) == WRITE)
694 rrpc_end_io_write(rrpc, rrqd, laddr, npages);
695
696 bio_put(rqd->bio);
697
698 if (rrqd->flags & NVM_IOTYPE_GC)
699 return;
700
701 rrpc_unlock_rq(rrpc, rqd);
702
703 if (npages > 1)
704 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
705 if (rqd->metadata)
706 nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
707
708 mempool_free(rqd, rrpc->rq_pool);
709 }
710
711 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
712 struct nvm_rq *rqd, unsigned long flags, int npages)
713 {
714 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
715 struct rrpc_addr *gp;
716 sector_t laddr = rrpc_get_laddr(bio);
717 int is_gc = flags & NVM_IOTYPE_GC;
718 int i;
719
720 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
721 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
722 return NVM_IO_REQUEUE;
723 }
724
725 for (i = 0; i < npages; i++) {
726 /* We assume that mapping occurs at 4KB granularity */
727 BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
728 gp = &rrpc->trans_map[laddr + i];
729
730 if (gp->rblk) {
731 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
732 gp->addr);
733 } else {
734 BUG_ON(is_gc);
735 rrpc_unlock_laddr(rrpc, r);
736 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
737 rqd->dma_ppa_list);
738 return NVM_IO_DONE;
739 }
740 }
741
742 rqd->opcode = NVM_OP_HBREAD;
743
744 return NVM_IO_OK;
745 }
746
747 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
748 unsigned long flags)
749 {
750 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
751 int is_gc = flags & NVM_IOTYPE_GC;
752 sector_t laddr = rrpc_get_laddr(bio);
753 struct rrpc_addr *gp;
754
755 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
756 return NVM_IO_REQUEUE;
757
758 BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
759 gp = &rrpc->trans_map[laddr];
760
761 if (gp->rblk) {
762 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
763 } else {
764 BUG_ON(is_gc);
765 rrpc_unlock_rq(rrpc, rqd);
766 return NVM_IO_DONE;
767 }
768
769 rqd->opcode = NVM_OP_HBREAD;
770 rrqd->addr = gp;
771
772 return NVM_IO_OK;
773 }
774
775 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
776 struct nvm_rq *rqd, unsigned long flags, int npages)
777 {
778 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
779 struct rrpc_addr *p;
780 sector_t laddr = rrpc_get_laddr(bio);
781 int is_gc = flags & NVM_IOTYPE_GC;
782 int i;
783
784 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
785 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
786 return NVM_IO_REQUEUE;
787 }
788
789 for (i = 0; i < npages; i++) {
790 /* We assume that mapping occurs at 4KB granularity */
791 p = rrpc_map_page(rrpc, laddr + i, is_gc);
792 if (!p) {
793 BUG_ON(is_gc);
794 rrpc_unlock_laddr(rrpc, r);
795 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
796 rqd->dma_ppa_list);
797 rrpc_gc_kick(rrpc);
798 return NVM_IO_REQUEUE;
799 }
800
801 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
802 p->addr);
803 }
804
805 rqd->opcode = NVM_OP_HBWRITE;
806
807 return NVM_IO_OK;
808 }
809
810 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
811 struct nvm_rq *rqd, unsigned long flags)
812 {
813 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
814 struct rrpc_addr *p;
815 int is_gc = flags & NVM_IOTYPE_GC;
816 sector_t laddr = rrpc_get_laddr(bio);
817
818 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
819 return NVM_IO_REQUEUE;
820
821 p = rrpc_map_page(rrpc, laddr, is_gc);
822 if (!p) {
823 BUG_ON(is_gc);
824 rrpc_unlock_rq(rrpc, rqd);
825 rrpc_gc_kick(rrpc);
826 return NVM_IO_REQUEUE;
827 }
828
829 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
830 rqd->opcode = NVM_OP_HBWRITE;
831 rrqd->addr = p;
832
833 return NVM_IO_OK;
834 }
835
836 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
837 struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
838 {
839 if (npages > 1) {
840 rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
841 &rqd->dma_ppa_list);
842 if (!rqd->ppa_list) {
843 pr_err("rrpc: not able to allocate ppa list\n");
844 return NVM_IO_ERR;
845 }
846
847 if (bio_rw(bio) == WRITE)
848 return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
849 npages);
850
851 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
852 }
853
854 if (bio_rw(bio) == WRITE)
855 return rrpc_write_rq(rrpc, bio, rqd, flags);
856
857 return rrpc_read_rq(rrpc, bio, rqd, flags);
858 }
859
860 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
861 struct nvm_rq *rqd, unsigned long flags)
862 {
863 int err;
864 struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
865 uint8_t nr_pages = rrpc_get_pages(bio);
866 int bio_size = bio_sectors(bio) << 9;
867
868 if (bio_size < rrpc->dev->sec_size)
869 return NVM_IO_ERR;
870 else if (bio_size > rrpc->dev->max_rq_size)
871 return NVM_IO_ERR;
872
873 err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
874 if (err)
875 return err;
876
877 bio_get(bio);
878 rqd->bio = bio;
879 rqd->ins = &rrpc->instance;
880 rqd->nr_pages = nr_pages;
881 rrq->flags = flags;
882
883 err = nvm_submit_io(rrpc->dev, rqd);
884 if (err) {
885 pr_err("rrpc: I/O submission failed: %d\n", err);
886 bio_put(bio);
887 if (!(flags & NVM_IOTYPE_GC)) {
888 rrpc_unlock_rq(rrpc, rqd);
889 if (rqd->nr_pages > 1)
890 nvm_dev_dma_free(rrpc->dev,
891 rqd->ppa_list, rqd->dma_ppa_list);
892 }
893 return NVM_IO_ERR;
894 }
895
896 return NVM_IO_OK;
897 }
898
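/* make_request entry point: translate the bio into an nvm_rq and submit it,
 * requeueing bios whose logical addresses cannot be locked right now.
 */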
899 static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
900 {
901 struct rrpc *rrpc = q->queuedata;
902 struct nvm_rq *rqd;
903 int err;
904
905 if (bio->bi_rw & REQ_DISCARD) {
906 rrpc_discard(rrpc, bio);
907 return BLK_QC_T_NONE;
908 }
909
910 rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
911 if (!rqd) {
912 pr_err_ratelimited("rrpc: not able to queue bio.\n");
913 bio_io_error(bio);
914 return BLK_QC_T_NONE;
915 }
916 memset(rqd, 0, sizeof(struct nvm_rq));
917
918 err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
919 switch (err) {
920 case NVM_IO_OK:
921 return BLK_QC_T_NONE;
922 case NVM_IO_ERR:
923 bio_io_error(bio);
924 break;
925 case NVM_IO_DONE:
926 bio_endio(bio);
927 break;
928 case NVM_IO_REQUEUE:
929 spin_lock(&rrpc->bio_lock);
930 bio_list_add(&rrpc->requeue_bios, bio);
931 spin_unlock(&rrpc->bio_lock);
932 queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
933 break;
934 }
935
936 mempool_free(rqd, rrpc->rq_pool);
937 return BLK_QC_T_NONE;
938 }
939
940 static void rrpc_requeue(struct work_struct *work)
941 {
942 struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
943 struct bio_list bios;
944 struct bio *bio;
945
946 bio_list_init(&bios);
947
948 spin_lock(&rrpc->bio_lock);
949 bio_list_merge(&bios, &rrpc->requeue_bios);
950 bio_list_init(&rrpc->requeue_bios);
951 spin_unlock(&rrpc->bio_lock);
952
953 while ((bio = bio_list_pop(&bios)))
954 rrpc_make_rq(rrpc->disk->queue, bio);
955 }
956
957 static void rrpc_gc_free(struct rrpc *rrpc)
958 {
959 struct rrpc_lun *rlun;
960 int i;
961
962 if (rrpc->krqd_wq)
963 destroy_workqueue(rrpc->krqd_wq);
964
965 if (rrpc->kgc_wq)
966 destroy_workqueue(rrpc->kgc_wq);
967
968 if (!rrpc->luns)
969 return;
970
971 for (i = 0; i < rrpc->nr_luns; i++) {
972 rlun = &rrpc->luns[i];
973
974 if (!rlun->blocks)
975 break;
976 vfree(rlun->blocks);
977 }
978 }
979
980 static int rrpc_gc_init(struct rrpc *rrpc)
981 {
982 rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
983 rrpc->nr_luns);
984 if (!rrpc->krqd_wq)
985 return -ENOMEM;
986
987 rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
988 if (!rrpc->kgc_wq)
989 return -ENOMEM;
990
991 setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
992
993 return 0;
994 }
995
996 static void rrpc_map_free(struct rrpc *rrpc)
997 {
998 vfree(rrpc->rev_trans_map);
999 vfree(rrpc->trans_map);
1000 }
1001
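/* Callback for dev->ops->get_l2p_tbl: seed the forward and reverse
 * translation maps from the L2P entries reported by the device.
 */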
1002 static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
1003 {
1004 struct rrpc *rrpc = (struct rrpc *)private;
1005 struct nvm_dev *dev = rrpc->dev;
1006 struct rrpc_addr *addr = rrpc->trans_map + slba;
1007 struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
1008 sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
1009 u64 elba = slba + nlb;
1010 u64 i;
1011
1012 if (unlikely(elba > dev->total_pages)) {
1013 pr_err("nvm: L2P data from device is out of bounds!\n");
1014 return -EINVAL;
1015 }
1016
1017 for (i = 0; i < nlb; i++) {
1018 u64 pba = le64_to_cpu(entries[i]);
1019 /* LNVM treats address-spaces as silos, LBA and PBA are
1020 * equally large and zero-indexed.
1021 */
1022 if (unlikely(pba >= max_pages && pba != U64_MAX)) {
1023 pr_err("nvm: L2P data entry is out of bounds!\n");
1024 return -EINVAL;
1025 }
1026
1027 /* Address zero is a special one. The first page on a disk is
1028 * protected, as it often holds internal device boot
1029 * information.
1030 */
1031 if (!pba)
1032 continue;
1033
1034 addr[i].addr = pba;
1035 raddr[pba].addr = slba + i;
1036 }
1037
1038 return 0;
1039 }
1040
1041 static int rrpc_map_init(struct rrpc *rrpc)
1042 {
1043 struct nvm_dev *dev = rrpc->dev;
1044 sector_t i;
1045 int ret;
1046
1047 rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
1048 if (!rrpc->trans_map)
1049 return -ENOMEM;
1050
1051 rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
1052 * rrpc->nr_pages);
1053 if (!rrpc->rev_trans_map)
1054 return -ENOMEM;
1055
1056 for (i = 0; i < rrpc->nr_pages; i++) {
1057 struct rrpc_addr *p = &rrpc->trans_map[i];
1058 struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
1059
1060 p->addr = ADDR_EMPTY;
1061 r->addr = ADDR_EMPTY;
1062 }
1063
1064 if (!dev->ops->get_l2p_tbl)
1065 return 0;
1066
1067 /* Bring up the mapping table from device */
1068 ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
1069 rrpc_l2p_update, rrpc);
1070 if (ret) {
1071 pr_err("nvm: rrpc: could not read L2P table.\n");
1072 return -EINVAL;
1073 }
1074
1075 return 0;
1076 }
1077
1078
1079 /* Minimum pages needed within a lun */
1080 #define PAGE_POOL_SIZE 16
1081 #define ADDR_POOL_SIZE 64
1082
1083 static int rrpc_core_init(struct rrpc *rrpc)
1084 {
1085 down_write(&rrpc_lock);
1086 if (!rrpc_gcb_cache) {
1087 rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
1088 sizeof(struct rrpc_block_gc), 0, 0, NULL);
1089 if (!rrpc_gcb_cache) {
1090 up_write(&rrpc_lock);
1091 return -ENOMEM;
1092 }
1093
1094 rrpc_rq_cache = kmem_cache_create("rrpc_rq",
1095 sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
1096 0, 0, NULL);
1097 if (!rrpc_rq_cache) {
1098 kmem_cache_destroy(rrpc_gcb_cache);
1099 up_write(&rrpc_lock);
1100 return -ENOMEM;
1101 }
1102 }
1103 up_write(&rrpc_lock);
1104
1105 rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
1106 if (!rrpc->page_pool)
1107 return -ENOMEM;
1108
1109 rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
1110 rrpc_gcb_cache);
1111 if (!rrpc->gcb_pool)
1112 return -ENOMEM;
1113
1114 rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
1115 if (!rrpc->rq_pool)
1116 return -ENOMEM;
1117
1118 spin_lock_init(&rrpc->inflights.lock);
1119 INIT_LIST_HEAD(&rrpc->inflights.reqs);
1120
1121 return 0;
1122 }
1123
1124 static void rrpc_core_free(struct rrpc *rrpc)
1125 {
1126 mempool_destroy(rrpc->page_pool);
1127 mempool_destroy(rrpc->gcb_pool);
1128 mempool_destroy(rrpc->rq_pool);
1129 }
1130
1131 static void rrpc_luns_free(struct rrpc *rrpc)
1132 {
1133 kfree(rrpc->luns);
1134 }
1135
1136 static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1137 {
1138 struct nvm_dev *dev = rrpc->dev;
1139 struct rrpc_lun *rlun;
1140 int i, j;
1141
1142 if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1143 pr_err("rrpc: number of pages per block too high.\n");
1144 return -EINVAL;
1145 }
1146
1147 spin_lock_init(&rrpc->rev_lock);
1148
1149 rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
1150 GFP_KERNEL);
1151 if (!rrpc->luns)
1152 return -ENOMEM;
1153
1154 /* 1:1 mapping */
1155 for (i = 0; i < rrpc->nr_luns; i++) {
1156 struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
1157
1158 rlun = &rrpc->luns[i];
1159 rlun->rrpc = rrpc;
1160 rlun->parent = lun;
1161 INIT_LIST_HEAD(&rlun->prio_list);
1162 INIT_LIST_HEAD(&rlun->open_list);
1163 INIT_LIST_HEAD(&rlun->closed_list);
1164
1165 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1166 spin_lock_init(&rlun->lock);
1167
1168 rrpc->total_blocks += dev->blks_per_lun;
1169 rrpc->nr_pages += dev->sec_per_lun;
1170
1171 rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
1172 rrpc->dev->blks_per_lun);
1173 if (!rlun->blocks)
1174 goto err;
1175
1176 for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
1177 struct rrpc_block *rblk = &rlun->blocks[j];
1178 struct nvm_block *blk = &lun->blocks[j];
1179
1180 rblk->parent = blk;
1181 rblk->rlun = rlun;
1182 INIT_LIST_HEAD(&rblk->prio);
1183 spin_lock_init(&rblk->lock);
1184 }
1185 }
1186
1187 return 0;
1188 err:
1189 return -ENOMEM;
1190 }
1191
1192 static void rrpc_free(struct rrpc *rrpc)
1193 {
1194 rrpc_gc_free(rrpc);
1195 rrpc_map_free(rrpc);
1196 rrpc_core_free(rrpc);
1197 rrpc_luns_free(rrpc);
1198
1199 kfree(rrpc);
1200 }
1201
1202 static void rrpc_exit(void *private)
1203 {
1204 struct rrpc *rrpc = private;
1205
1206 del_timer(&rrpc->gc_timer);
1207
1208 flush_workqueue(rrpc->krqd_wq);
1209 flush_workqueue(rrpc->kgc_wq);
1210
1211 rrpc_free(rrpc);
1212 }
1213
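/* Exposed capacity: subtract the reserved blocks of each LUN and keep about
 * 10% of the remaining pages as over-provisioning headroom for GC.
 */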
1214 static sector_t rrpc_capacity(void *private)
1215 {
1216 struct rrpc *rrpc = private;
1217 struct nvm_dev *dev = rrpc->dev;
1218 sector_t reserved, provisioned;
1219
1220 /* cur, gc, and two emergency blocks for each lun */
1221 reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
1222 provisioned = rrpc->nr_pages - reserved;
1223
1224 if (reserved > rrpc->nr_pages) {
1225 pr_err("rrpc: not enough space available to expose storage.\n");
1226 return 0;
1227 }
1228
1229 sector_div(provisioned, 10);
1230 return provisioned * 9 * NR_PHY_IN_LOG;
1231 }
1232
1233 /*
1234 * Look up each page's logical address in the reverse translation map and
1235 * check whether the forward mapping still points at this physical page.
1236 * Pages that do keep their block reference; stale pages are marked invalid.
1237 */
1238 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
1239 {
1240 struct nvm_dev *dev = rrpc->dev;
1241 int offset;
1242 struct rrpc_addr *laddr;
1243 u64 paddr, pladdr;
1244
1245 for (offset = 0; offset < dev->pgs_per_blk; offset++) {
1246 paddr = block_to_addr(rrpc, rblk) + offset;
1247
1248 pladdr = rrpc->rev_trans_map[paddr].addr;
1249 if (pladdr == ADDR_EMPTY)
1250 continue;
1251
1252 laddr = &rrpc->trans_map[pladdr];
1253
1254 if (paddr == laddr->addr) {
1255 laddr->rblk = rblk;
1256 } else {
1257 set_bit(offset, rblk->invalid_pages);
1258 rblk->nr_invalid_pages++;
1259 }
1260 }
1261 }
1262
1263 static int rrpc_blocks_init(struct rrpc *rrpc)
1264 {
1265 struct rrpc_lun *rlun;
1266 struct rrpc_block *rblk;
1267 int lun_iter, blk_iter;
1268
1269 for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
1270 rlun = &rrpc->luns[lun_iter];
1271
1272 for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
1273 blk_iter++) {
1274 rblk = &rlun->blocks[blk_iter];
1275 rrpc_block_map_update(rrpc, rblk);
1276 }
1277 }
1278
1279 return 0;
1280 }
1281
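/* Reserve a current write block and an emergency GC block for each LUN. */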
1282 static int rrpc_luns_configure(struct rrpc *rrpc)
1283 {
1284 struct rrpc_lun *rlun;
1285 struct rrpc_block *rblk;
1286 int i;
1287
1288 for (i = 0; i < rrpc->nr_luns; i++) {
1289 rlun = &rrpc->luns[i];
1290
1291 rblk = rrpc_get_blk(rrpc, rlun, 0);
1292 if (!rblk)
1293 goto err;
1294
1295 rrpc_set_lun_cur(rlun, rblk);
1296
1297 /* Emergency gc block */
1298 rblk = rrpc_get_blk(rrpc, rlun, 1);
1299 if (!rblk)
1300 goto err;
1301 rlun->gc_cur = rblk;
1302 }
1303
1304 return 0;
1305 err:
1306 rrpc_put_blks(rrpc);
1307 return -EINVAL;
1308 }
1309
1310 static struct nvm_tgt_type tt_rrpc;
1311
1312 static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
1313 int lun_begin, int lun_end)
1314 {
1315 struct request_queue *bqueue = dev->q;
1316 struct request_queue *tqueue = tdisk->queue;
1317 struct rrpc *rrpc;
1318 int ret;
1319
1320 if (!(dev->identity.dom & NVM_RSP_L2P)) {
1321 pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1322 dev->identity.dom);
1323 return ERR_PTR(-EINVAL);
1324 }
1325
1326 rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
1327 if (!rrpc)
1328 return ERR_PTR(-ENOMEM);
1329
1330 rrpc->instance.tt = &tt_rrpc;
1331 rrpc->dev = dev;
1332 rrpc->disk = tdisk;
1333
1334 bio_list_init(&rrpc->requeue_bios);
1335 spin_lock_init(&rrpc->bio_lock);
1336 INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
1337
1338 rrpc->nr_luns = lun_end - lun_begin + 1;
1339
1340 /* simple round-robin strategy */
1341 atomic_set(&rrpc->next_lun, -1);
1342
1343 ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
1344 if (ret) {
1345 pr_err("nvm: rrpc: could not initialize luns\n");
1346 goto err;
1347 }
1348
1349 rrpc->poffset = dev->sec_per_lun * lun_begin;
1350 rrpc->lun_offset = lun_begin;
1351
1352 ret = rrpc_core_init(rrpc);
1353 if (ret) {
1354 pr_err("nvm: rrpc: could not initialize core\n");
1355 goto err;
1356 }
1357
1358 ret = rrpc_map_init(rrpc);
1359 if (ret) {
1360 pr_err("nvm: rrpc: could not initialize maps\n");
1361 goto err;
1362 }
1363
1364 ret = rrpc_blocks_init(rrpc);
1365 if (ret) {
1366 pr_err("nvm: rrpc: could not initialize state for blocks\n");
1367 goto err;
1368 }
1369
1370 ret = rrpc_luns_configure(rrpc);
1371 if (ret) {
1372 pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1373 goto err;
1374 }
1375
1376 ret = rrpc_gc_init(rrpc);
1377 if (ret) {
1378 pr_err("nvm: rrpc: could not initialize gc\n");
1379 goto err;
1380 }
1381
1382 /* inherit the size from the underlying device */
1383 blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
1384 blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
1385
1386 pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1387 rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
1388
1389 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
1390
1391 return rrpc;
1392 err:
1393 rrpc_free(rrpc);
1394 return ERR_PTR(ret);
1395 }
1396
1397 /* round robin, page-based FTL, and cost-based GC */
1398 static struct nvm_tgt_type tt_rrpc = {
1399 .name = "rrpc",
1400 .version = {1, 0, 0},
1401
1402 .make_rq = rrpc_make_rq,
1403 .capacity = rrpc_capacity,
1404 .end_io = rrpc_end_io,
1405
1406 .init = rrpc_init,
1407 .exit = rrpc_exit,
1408 };
1409
1410 static int __init rrpc_module_init(void)
1411 {
1412 return nvm_register_target(&tt_rrpc);
1413 }
1414
1415 static void rrpc_module_exit(void)
1416 {
1417 nvm_unregister_target(&tt_rrpc);
1418 }
1419
1420 module_init(rrpc_module_init);
1421 module_exit(rrpc_module_exit);
1422 MODULE_LICENSE("GPL v2");
1423 MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");