/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

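/*
 * Mark one sector of a block invalid and clear its reverse-map entry.
 * Callers must hold rrpc->rev_lock (asserted below).
 */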
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
								unsigned len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

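/*
 * Take the inflight lock on [laddr, laddr + pages). Returns the request
 * on success, NULL if the range is already locked by another request,
 * or ERR_PTR(-ENOMEM) if no request could be allocated.
 */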
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

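/*
 * Discard only needs to invalidate the logical range; busy-wait until the
 * inflight range lock is ours (NULL means another request holds it).
 */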
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;
}

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
}

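/*
 * Decompose a linear sector number into the generic ppa format by peeling
 * off one level of the device geometry at a time:
 * sector -> page -> block -> lun -> channel.
 */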
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	BUG_ON(!rblk);

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

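/*
 * Claim a free block from the media manager, put it on the LUN's open
 * list and reset its write state. Returns NULL if no block is available.
 */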
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

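/* Round-robin LUN selection: an atomic counter taken modulo nr_luns. */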
static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

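	/*
	 * Pages whose invalid bit is still clear hold live data; read each
	 * one synchronously and write it back through the normal I/O path
	 * so it lands on a fresh block.
	 */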
	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* The block with the highest number of invalid pages will be at the
 * beginning of the list.
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

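/*
 * Per-LUN GC worker: while the number of free blocks is below the target
 * (1/GC_LIMIT_INVERSE of the LUN, but at least one block per LUN), pick
 * the closed block with the most invalid pages and queue it for reclaim.
 */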
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

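/*
 * Point laddr at the new (rblk, paddr) in the forward map, invalidate any
 * previous mapping, and record laddr in the reverse map for GC lookups.
 */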
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

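/* Hand out the next free sector in the block, or ADDR_EMPTY if it is full. */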
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin Logical to physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to return to
 * rrpc->addr_cache when request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

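/*
 * Schedule rrpc_gc_queue for a fully written block, which moves it from
 * the open to the closed list and makes it eligible for reclaim.
 */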
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

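/*
 * Write completion: count committed sectors per block; once every sector
 * of a block has been written, the block is handed over to GC.
 */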
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_pages;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
	if (rqd->metadata)
		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);
}

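/*
 * Resolve each logical page of a multi-page read to its physical address.
 * An unmapped page has no physical backing, so the bio is completed
 * without touching the media (NVM_IO_DONE).
 */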
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}

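/*
 * Map each logical page of a multi-page write to a fresh physical sector.
 * If a LUN runs out of blocks, back off, kick GC and requeue the bio.
 */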
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}

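/*
 * Set up the request: single-page bios use the embedded ppa_addr, while
 * multi-page bios first allocate a DMA-able ppa list.
 */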
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_pages = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_pages > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		if (!rlun->blocks)
			break;
		vfree(rlun->blocks);
	}
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

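/*
 * Callback for dev->ops->get_l2p_tbl: seed the forward and reverse maps
 * from the device's L2P table. U64_MAX denotes an unmapped entry.
 */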
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;
		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		addr[i].addr = pba;
		raddr[mod].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update,
									rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}


1091 | #define PAGE_POOL_SIZE 16 | |
1092 | #define ADDR_POOL_SIZE 64 | |
1093 | ||
1094 | static int rrpc_core_init(struct rrpc *rrpc) | |
1095 | { | |
1096 | down_write(&rrpc_lock); | |
1097 | if (!rrpc_gcb_cache) { | |
1098 | rrpc_gcb_cache = kmem_cache_create("rrpc_gcb", | |
1099 | sizeof(struct rrpc_block_gc), 0, 0, NULL); | |
1100 | if (!rrpc_gcb_cache) { | |
1101 | up_write(&rrpc_lock); | |
1102 | return -ENOMEM; | |
1103 | } | |
1104 | ||
1105 | rrpc_rq_cache = kmem_cache_create("rrpc_rq", | |
1106 | sizeof(struct nvm_rq) + sizeof(struct rrpc_rq), | |
1107 | 0, 0, NULL); | |
1108 | if (!rrpc_rq_cache) { | |
1109 | kmem_cache_destroy(rrpc_gcb_cache); | |
1110 | up_write(&rrpc_lock); | |
1111 | return -ENOMEM; | |
1112 | } | |
1113 | } | |
1114 | up_write(&rrpc_lock); | |
1115 | ||
1116 | rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0); | |
1117 | if (!rrpc->page_pool) | |
1118 | return -ENOMEM; | |
1119 | ||
1120 | rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns, | |
1121 | rrpc_gcb_cache); | |
1122 | if (!rrpc->gcb_pool) | |
1123 | return -ENOMEM; | |
1124 | ||
1125 | rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache); | |
1126 | if (!rrpc->rq_pool) | |
1127 | return -ENOMEM; | |
1128 | ||
1129 | spin_lock_init(&rrpc->inflights.lock); | |
1130 | INIT_LIST_HEAD(&rrpc->inflights.reqs); | |
1131 | ||
1132 | return 0; | |
1133 | } | |
1134 | ||
1135 | static void rrpc_core_free(struct rrpc *rrpc) | |
1136 | { | |
1137 | mempool_destroy(rrpc->page_pool); | |
1138 | mempool_destroy(rrpc->gcb_pool); | |
1139 | mempool_destroy(rrpc->rq_pool); | |
1140 | } | |
1141 | ||
1142 | static void rrpc_luns_free(struct rrpc *rrpc) | |
1143 | { | |
1144 | kfree(rrpc->luns); | |
1145 | } | |
1146 | ||
static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);

		rlun = &rrpc->luns[i];
		rlun->rrpc = rrpc;
		rlun->parent = lun;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		rrpc->total_blocks += dev->blks_per_lun;
		rrpc->nr_sects += dev->sec_per_lun;

		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks)
			goto err;

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}
	}

	return 0;
err:
	return -ENOMEM;
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Look up the logical address for each sector of the block in the reverse
 * trans map and check whether the mapping is still current by comparing it
 * against the forward (logical to physical) map. Current sectors keep
 * their block reference; stale ones are marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

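/* Reserve a current write block and an emergency GC block for each LUN. */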
static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_target(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_target(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");