/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"
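/*
 * On-media layout, for orientation: each arena starts with a 4K info
 * block, followed by the data blocks, the map (one 32-bit entry per
 * external LBA), the log (a pair of log entries per free-list lane),
 * and a backup copy of the info block at the end.  See alloc_arena()
 * and parse_arena_meta() below for how the offsets are derived.
 */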
enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static int btt_major;
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_read_bytes(ndns, offset, buf, n);
}
static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_write_bytes(ndns, offset, buf, n);
}
static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb));
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}
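/*
 * A map entry is one 32-bit word (MAP_ENT_SIZE bytes).  The low bits
 * hold the postmap block number; the top two bits are the 'zero' (Z)
 * and 'error' (E) flags.  Both flags set marks a normal entry, both
 * clear marks the initial state, where a premap LBA implicitly maps
 * to itself.  btt_map_read() below decodes these states.
 */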
/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
}
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping &= MAP_LBA_MASK;

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le);
}
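/*
 * Worked example of the encoding above (values illustrative): writing
 * block 0x1234 with z_flag == 0 and e_flag == 0 stores
 * 0x1234 | MAP_ENT_NORMAL, i.e. both flag bits set, while an entry
 * that still reads back as all zeroes has never been written and
 * therefore decodes as an identity (premap == postmap) mapping.
 */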
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
	e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
	ze = (z_flag << 1) + e_flag;
	postmap = raw_mapping & MAP_LBA_MASK;

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return 0;
}
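/*
 * Each lane owns a pair of log entries ("flog") laid out back to back
 * in the log area, which is why the pair for a lane lives at
 * 2 * lane * LOG_ENT_SIZE and can be fetched with one contiguous read.
 */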
static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE);
}
static struct dentry *debugfs_root;
static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}
static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}
/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If neither entry has ever been written, it initializes
 * entry [0]'s sequence number to make it the current one.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
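/*
 * Worked example for the above: sequence numbers advance 1 -> 2 -> 3
 * -> 1, with 0 meaning "never written".  For the pair (1, 2) the
 * difference is 1, so slot 0 is older; for (1, 3), the 3 wrapped
 * around to produce the 1, so slot 1 is the older entry.  Equal
 * sequence numbers, or a pair summing to more than 5, indicate
 * corruption.
 */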
static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}
/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_info(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, log[0].seq, log[1].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}
/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that that is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half);
}
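/*
 * Arithmetic behind log_half above: a log_entry is LOG_ENT_SIZE (32)
 * bytes, 16 of which are padding, so each half-write is 8 bytes.  The
 * first write covers lba and old_map, the second covers new_map and
 * seq, so the entry only becomes valid once the sequence number lands.
 */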
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	arena->freelist[lane].block = le32_to_cpu(ent->old_map);

	return ret;
}
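/*
 * Note the free-list advance above: 'sub' flips to the log slot just
 * written (making it the next one to be overwritten), and the
 * sequence number cycles 1 -> 2 -> 3 -> 1, skipping 0, which is
 * reserved to mean an unwritten log slot.
 */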
/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
	}

 free:
	kfree(zerobuf);

	return ret;
}
/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	int ret;
	u32 i;
	struct log_entry log, zerolog;

	memset(&zerolog, 0, sizeof(zerolog));

	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log);
		if (ret)
			return ret;
		ret = __btt_log_write(arena, i, 1, &zerolog);
		if (ret)
			return ret;
	}

	return 0;
}
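/*
 * At startup the freelist is reconstructed entirely from the flog:
 * the 'old' block recorded in each lane's newest log entry is, by
 * construction, the block that lane may overwrite next.
 */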
static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}
static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = 1;
	arena->version_minor = 1;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}
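/*
 * Sizing rationale for the split above: each internal block consumes
 * internal_lbasize bytes of data area plus MAP_ENT_SIZE bytes of map,
 * so internal_nlba is roughly available / (internal_lbasize +
 * MAP_ENT_SIZE), e.g. 516 bytes per block for 512-byte internal LBAs
 * and 4-byte map entries.  nfree of those blocks are then held back
 * from the external count to seed the free list.
 */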
static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}
/*
 * This function checks if the metadata layout is valid and error free
 */
static int arena_is_valid(struct arena_info *arena, struct btt_sb *super,
				u8 *uuid, u32 lbasize)
{
	u64 checksum;

	if (memcmp(super->uuid, uuid, 16))
		return 0;

	checksum = le64_to_cpu(super->checksum);
	super->checksum = 0;
	if (checksum != nd_btt_sb_checksum(super))
		return 0;
	super->checksum = cpu_to_le64(checksum);

	if (lbasize != le32_to_cpu(super->external_lbasize))
		return 0;

	/* TODO: figure out action for this */
	if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0)
		dev_info(to_dev(arena), "Found arena with an error flag\n");

	return 1;
}
/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!arena_is_valid(arena, super, btt->nd_btt->uuid,
				btt->lbasize)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_info(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}
static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}
/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid)
{
	int ret;
	struct btt_sb *super;

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->checksum = cpu_to_le64(nd_btt_sb_checksum(super));

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}
/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena, btt->nd_btt->uuid);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}
/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}
/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}
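/*
 * The lock index above hashes by cacheline rather than by entry:
 * with 4-byte map entries and (typically) 64-byte cachelines, 16
 * adjacent entries share one lock, so concurrent writers never do
 * read-modify-write on the same cacheline of map data.
 */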
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}
static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}
static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}
static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}
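/*
 * Integrity metadata is stored inline with each block: an internal
 * block is external_lbasize rounded up, and the btt_meta_size()
 * (lbasize - sector_size) bytes of metadata sit immediately after the
 * sector data, which is why meta_nsoff below begins sector_size bytes
 * past the start of the postmap block.
 */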
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return ret;
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &t_flag,
						&e_flag);
			if (ret)
				goto out_rtt;

			if (postmap == new_map)
				break;

			postmap = new_map;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret)
			goto out_rtt;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
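/*
 * The RTT dance in btt_read_pg() pairs with the writer below: a
 * reader publishes the postmap block it is using in rtt[lane], and
 * btt_write_pg() spins until no reader holds its candidate free block
 * before writing to it.  The re-read of the map entry closes the
 * window between the first map_read and the RTT store.
 */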
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			int rw, sector_t sector)
{
	int ret;

	if (rw == READ) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}
static void btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached. If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		err = -EIO;
		goto out;
	}

	do_acct = nd_iostat_start(bio, &start);
	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		/* Make sure len is in multiples of sector size. */
		/* XXX is this right? */
		BUG_ON(len < btt->sector_size);
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				rw, iter.bi_sector);
		if (err) {
			dev_info(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

 out:
	bio_endio(bio, err);
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int rw)
{
	struct btt *btt = bdev->bd_disk->private_data;

	btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
	page_endio(page, rw & WRITE, 0);
	return 0;
}
static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}
static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
	btt->btt_disk->major = btt_major;
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	add_disk(btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	revalidate_disk(btt->btt_disk);

	return 0;
}
static void btt_blk_cleanup(struct btt *btt)
{
	blk_integrity_unregister(btt->btt_disk);
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}
/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region providing the lanes for parallel requests
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct device *dev = &nd_btt->dev;

	btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		goto out_free;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		goto out_free;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			goto out_free;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			goto out_free;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		goto out_free;
	}

	btt_debugfs_init(btt);

	return btt;

 out_free:
	kfree(btt);
	return NULL;
}
/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
		kfree(btt);
	}
}
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize)
		return -ENODEV;

	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
	if (rawsize < ARENA_MIN_SIZE) {
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
static int __init nd_btt_init(void)
{
	int rc;

	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);

	btt_major = register_blkdev(0, "btt");
	if (btt_major < 0)
		return btt_major;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root)) {
		rc = -ENXIO;
		goto err_debugfs;
	}

	return 0;

 err_debugfs:
	unregister_blkdev(btt_major, "btt");

	return rc;
}
static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
	unregister_blkdev(btt_major, "btt");
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);