| 1 | #include <linux/err.h> |
| 2 | #include <linux/slab.h> |
| 3 | #include <linux/spinlock.h> |
| 4 | #include <linux/hardirq.h> |
| 5 | #include "ctree.h" |
| 6 | #include "extent_map.h" |
| 7 | |
| 9 | static struct kmem_cache *extent_map_cache; |
| 10 | |
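/*
 * Create the slab cache used for extent_map allocations. Called once
 * at module load; paired with extent_map_exit() at unload.
 */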
| 11 | int __init extent_map_init(void) |
| 12 | { |
| 13 | extent_map_cache = kmem_cache_create("btrfs_extent_map", |
| 14 | sizeof(struct extent_map), 0, |
| 15 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 16 | if (!extent_map_cache) |
| 17 | return -ENOMEM; |
| 18 | return 0; |
| 19 | } |
| 20 | |
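/*
 * Destroy the extent_map slab cache. Safe to call even if
 * extent_map_init() failed.
 */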
| 21 | void extent_map_exit(void) |
| 22 | { |
| 23 | if (extent_map_cache) |
| 24 | kmem_cache_destroy(extent_map_cache); |
| 25 | } |
| 26 | |
| 27 | /** |
| 28 | * extent_map_tree_init - initialize extent map tree |
| 29 | * @tree: tree to initialize |
| 30 | * |
 * Initialize the extent map tree @tree. Should be called for each new inode
| 32 | * or other user of the extent_map interface. |
| 33 | */ |
| 34 | void extent_map_tree_init(struct extent_map_tree *tree) |
| 35 | { |
| 36 | tree->map = RB_ROOT; |
| 37 | INIT_LIST_HEAD(&tree->modified_extents); |
| 38 | rwlock_init(&tree->lock); |
| 39 | } |
| 40 | |
| 41 | /** |
| 42 | * alloc_extent_map - allocate new extent map structure |
| 43 | * |
 * Allocate a new extent_map structure. The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map(). Returns NULL if the allocation fails.
| 47 | */ |
| 48 | struct extent_map *alloc_extent_map(void) |
| 49 | { |
| 50 | struct extent_map *em; |
| 51 | em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS); |
| 52 | if (!em) |
| 53 | return NULL; |
| 54 | RB_CLEAR_NODE(&em->rb_node); |
| 55 | em->flags = 0; |
| 56 | em->compress_type = BTRFS_COMPRESS_NONE; |
| 57 | em->generation = 0; |
| 58 | atomic_set(&em->refs, 1); |
| 59 | INIT_LIST_HEAD(&em->list); |
| 60 | return em; |
| 61 | } |
| 62 | |
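/*
 * Illustrative usage (hypothetical caller; start, len, tree and ret are
 * assumptions, not names from this file):
 *
 *	em = alloc_extent_map();
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);	// drop the local reference
 */
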
| 63 | /** |
| 64 | * free_extent_map - drop reference count of an extent_map |
| 65 | * @em: extent map beeing releasead |
| 66 | * |
| 67 | * Drops the reference out on @em by one and free the structure |
| 68 | * if the reference count hits zero. |
| 69 | */ |
| 70 | void free_extent_map(struct extent_map *em) |
| 71 | { |
| 72 | if (!em) |
| 73 | return; |
| 74 | WARN_ON(atomic_read(&em->refs) == 0); |
| 75 | if (atomic_dec_and_test(&em->refs)) { |
| 76 | WARN_ON(extent_map_in_tree(em)); |
| 77 | WARN_ON(!list_empty(&em->list)); |
| 78 | kmem_cache_free(extent_map_cache, em); |
| 79 | } |
| 80 | } |
| 81 | |
| 82 | /* simple helper to do math around the end of an extent, handling wrap */ |
| 83 | static u64 range_end(u64 start, u64 len) |
| 84 | { |
| 85 | if (start + len < start) |
| 86 | return (u64)-1; |
| 87 | return start + len; |
| 88 | } |
| 89 | |
| 90 | static int tree_insert(struct rb_root *root, struct extent_map *em) |
| 91 | { |
| 92 | struct rb_node **p = &root->rb_node; |
| 93 | struct rb_node *parent = NULL; |
| 94 | struct extent_map *entry = NULL; |
| 95 | struct rb_node *orig_parent = NULL; |
| 96 | u64 end = range_end(em->start, em->len); |
| 97 | |
| 98 | while (*p) { |
| 99 | parent = *p; |
| 100 | entry = rb_entry(parent, struct extent_map, rb_node); |
| 101 | |
| 102 | if (em->start < entry->start) |
| 103 | p = &(*p)->rb_left; |
| 104 | else if (em->start >= extent_map_end(entry)) |
| 105 | p = &(*p)->rb_right; |
| 106 | else |
| 107 | return -EEXIST; |
| 108 | } |
| 109 | |
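	/*
	 * The probe above only proves that em->start does not land inside
	 * an existing extent. Because extents have a length, [em->start,
	 * end) may still overlap a neighbor, so scan forward and backward
	 * from the insertion point and reject the insert on any overlap.
	 */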
| 110 | orig_parent = parent; |
| 111 | while (parent && em->start >= extent_map_end(entry)) { |
| 112 | parent = rb_next(parent); |
| 113 | entry = rb_entry(parent, struct extent_map, rb_node); |
| 114 | } |
	if (parent && end > entry->start && em->start < extent_map_end(entry))
		return -EEXIST;
| 118 | |
| 119 | parent = orig_parent; |
| 120 | entry = rb_entry(parent, struct extent_map, rb_node); |
| 121 | while (parent && em->start < entry->start) { |
| 122 | parent = rb_prev(parent); |
| 123 | entry = rb_entry(parent, struct extent_map, rb_node); |
| 124 | } |
	if (parent && end > entry->start && em->start < extent_map_end(entry))
		return -EEXIST;
| 128 | |
| 129 | rb_link_node(&em->rb_node, orig_parent, p); |
| 130 | rb_insert_color(&em->rb_node, root); |
| 131 | return 0; |
| 132 | } |
| 133 | |
| 134 | /* |
| 135 | * search through the tree for an extent_map with a given offset. If |
| 136 | * it can't be found, try to find some neighboring extents |
| 137 | */ |
| 138 | static struct rb_node *__tree_search(struct rb_root *root, u64 offset, |
| 139 | struct rb_node **prev_ret, |
| 140 | struct rb_node **next_ret) |
| 141 | { |
| 142 | struct rb_node *n = root->rb_node; |
| 143 | struct rb_node *prev = NULL; |
| 144 | struct rb_node *orig_prev = NULL; |
| 145 | struct extent_map *entry; |
| 146 | struct extent_map *prev_entry = NULL; |
| 147 | |
| 148 | while (n) { |
| 149 | entry = rb_entry(n, struct extent_map, rb_node); |
| 150 | prev = n; |
| 151 | prev_entry = entry; |
| 152 | |
| 153 | if (offset < entry->start) |
| 154 | n = n->rb_left; |
| 155 | else if (offset >= extent_map_end(entry)) |
| 156 | n = n->rb_right; |
| 157 | else |
| 158 | return n; |
| 159 | } |
| 160 | |
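	/*
	 * No extent contains @offset. Despite the names, *prev_ret is set
	 * to the first extent that ends beyond @offset (the next one in
	 * file order), and *next_ret to the last extent that starts at or
	 * before @offset (the previous one).
	 */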
| 161 | if (prev_ret) { |
| 162 | orig_prev = prev; |
| 163 | while (prev && offset >= extent_map_end(prev_entry)) { |
| 164 | prev = rb_next(prev); |
| 165 | prev_entry = rb_entry(prev, struct extent_map, rb_node); |
| 166 | } |
| 167 | *prev_ret = prev; |
| 168 | prev = orig_prev; |
| 169 | } |
| 170 | |
| 171 | if (next_ret) { |
| 172 | prev_entry = rb_entry(prev, struct extent_map, rb_node); |
| 173 | while (prev && offset < prev_entry->start) { |
| 174 | prev = rb_prev(prev); |
| 175 | prev_entry = rb_entry(prev, struct extent_map, rb_node); |
| 176 | } |
| 177 | *next_ret = prev; |
| 178 | } |
| 179 | return NULL; |
| 180 | } |
| 181 | |
| 182 | /* check to see if two extent_map structs are adjacent and safe to merge */ |
| 183 | static int mergable_maps(struct extent_map *prev, struct extent_map *next) |
| 184 | { |
| 185 | if (test_bit(EXTENT_FLAG_PINNED, &prev->flags)) |
| 186 | return 0; |
| 187 | |
| 188 | /* |
| 189 | * don't merge compressed extents, we need to know their |
| 190 | * actual size |
| 191 | */ |
| 192 | if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags)) |
| 193 | return 0; |
| 194 | |
| 195 | if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) || |
| 196 | test_bit(EXTENT_FLAG_LOGGING, &next->flags)) |
| 197 | return 0; |
| 198 | |
| 199 | /* |
| 200 | * We don't want to merge stuff that hasn't been written to the log yet |
| 201 | * since it may not reflect exactly what is on disk, and that would be |
| 202 | * bad. |
| 203 | */ |
| 204 | if (!list_empty(&prev->list) || !list_empty(&next->list)) |
| 205 | return 0; |
| 206 | |
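	/*
	 * Extents merge when prev ends exactly where next starts, their
	 * flags and device match, and their block addresses are either
	 * the same special value (hole, inline or delalloc) or are
	 * physically contiguous on disk.
	 */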
| 207 | if (extent_map_end(prev) == next->start && |
| 208 | prev->flags == next->flags && |
| 209 | prev->bdev == next->bdev && |
| 210 | ((next->block_start == EXTENT_MAP_HOLE && |
| 211 | prev->block_start == EXTENT_MAP_HOLE) || |
| 212 | (next->block_start == EXTENT_MAP_INLINE && |
| 213 | prev->block_start == EXTENT_MAP_INLINE) || |
| 214 | (next->block_start == EXTENT_MAP_DELALLOC && |
| 215 | prev->block_start == EXTENT_MAP_DELALLOC) || |
| 216 | (next->block_start < EXTENT_MAP_LAST_BYTE - 1 && |
| 217 | next->block_start == extent_map_block_end(prev)))) { |
| 218 | return 1; |
| 219 | } |
| 220 | return 0; |
| 221 | } |
| 222 | |
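/*
 * Try to merge @em with the extents immediately before and after it in
 * @tree. @em must already be linked into the tree and the caller must
 * hold the tree write lock.
 */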
| 223 | static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) |
| 224 | { |
| 225 | struct extent_map *merge = NULL; |
| 226 | struct rb_node *rb; |
| 227 | |
| 228 | if (em->start != 0) { |
| 229 | rb = rb_prev(&em->rb_node); |
| 230 | if (rb) |
| 231 | merge = rb_entry(rb, struct extent_map, rb_node); |
| 232 | if (rb && mergable_maps(merge, em)) { |
| 233 | em->start = merge->start; |
| 234 | em->orig_start = merge->orig_start; |
| 235 | em->len += merge->len; |
| 236 | em->block_len += merge->block_len; |
| 237 | em->block_start = merge->block_start; |
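			/*
			 * Grow the modified range backwards so it spans
			 * from merge->mod_start to the old end of em's
			 * modified range.
			 */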
| 238 | em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start; |
| 239 | em->mod_start = merge->mod_start; |
| 240 | em->generation = max(em->generation, merge->generation); |
| 241 | |
| 242 | rb_erase(&merge->rb_node, &tree->map); |
| 243 | RB_CLEAR_NODE(&merge->rb_node); |
| 244 | free_extent_map(merge); |
| 245 | } |
| 246 | } |
| 247 | |
| 248 | rb = rb_next(&em->rb_node); |
| 249 | if (rb) |
| 250 | merge = rb_entry(rb, struct extent_map, rb_node); |
| 251 | if (rb && mergable_maps(em, merge)) { |
| 252 | em->len += merge->len; |
| 253 | em->block_len += merge->block_len; |
| 254 | rb_erase(&merge->rb_node, &tree->map); |
| 255 | RB_CLEAR_NODE(&merge->rb_node); |
| 256 | em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start; |
| 257 | em->generation = max(em->generation, merge->generation); |
| 258 | free_extent_map(merge); |
| 259 | } |
| 260 | } |
| 261 | |
| 262 | /** |
| 263 | * unpin_extent_cache - unpin an extent from the cache |
| 264 | * @tree: tree to unpin the extent in |
| 265 | * @start: logical offset in the file |
| 266 | * @len: length of the extent |
| 267 | * @gen: generation that this extent has been modified in |
| 268 | * |
| 269 | * Called after an extent has been written to disk properly. Set the generation |
| 270 | * to the generation that actually added the file item to the inode so we know |
| 271 | * we need to sync this extent when we call fsync(). |
| 272 | */ |
| 273 | int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, |
| 274 | u64 gen) |
| 275 | { |
| 276 | int ret = 0; |
| 277 | struct extent_map *em; |
| 278 | bool prealloc = false; |
| 279 | |
| 280 | write_lock(&tree->lock); |
| 281 | em = lookup_extent_mapping(tree, start, len); |
| 282 | |
| 283 | WARN_ON(!em || em->start != start); |
| 284 | |
| 285 | if (!em) |
| 286 | goto out; |
| 287 | |
| 288 | if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) |
| 289 | list_move(&em->list, &tree->modified_extents); |
| 290 | em->generation = gen; |
| 291 | clear_bit(EXTENT_FLAG_PINNED, &em->flags); |
| 292 | em->mod_start = em->start; |
| 293 | em->mod_len = em->len; |
| 294 | |
| 295 | if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) { |
| 296 | prealloc = true; |
| 297 | clear_bit(EXTENT_FLAG_FILLING, &em->flags); |
| 298 | } |
| 299 | |
| 300 | try_merge_map(tree, em); |
| 301 | |
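	/*
	 * try_merge_map() above may have changed the modified range. A
	 * just-filled preallocated extent must be logged in full, so
	 * reset the range to cover the whole merged extent.
	 */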
| 302 | if (prealloc) { |
| 303 | em->mod_start = em->start; |
| 304 | em->mod_len = em->len; |
| 305 | } |
| 306 | |
| 307 | free_extent_map(em); |
| 308 | out: |
| 309 | write_unlock(&tree->lock); |
| 310 | return ret; |
}
| 313 | |
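/*
 * Clear the logging flag on @em and, if @em is still linked into @tree,
 * retry the merge that was skipped while the extent was being logged.
 * The caller must hold the tree write lock.
 */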
| 314 | void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) |
| 315 | { |
| 316 | clear_bit(EXTENT_FLAG_LOGGING, &em->flags); |
| 317 | if (extent_map_in_tree(em)) |
| 318 | try_merge_map(tree, em); |
| 319 | } |
| 320 | |
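/*
 * Take a tree reference on @em and set up its modified range. A
 * modified extent goes on the modified list for later logging; anything
 * else is merged with its neighbors where possible.
 */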
| 321 | static inline void setup_extent_mapping(struct extent_map_tree *tree, |
| 322 | struct extent_map *em, |
| 323 | int modified) |
| 324 | { |
| 325 | atomic_inc(&em->refs); |
| 326 | em->mod_start = em->start; |
| 327 | em->mod_len = em->len; |
| 328 | |
| 329 | if (modified) |
| 330 | list_move(&em->list, &tree->modified_extents); |
| 331 | else |
| 332 | try_merge_map(tree, em); |
| 333 | } |
| 334 | |
| 335 | /** |
| 336 | * add_extent_mapping - add new extent map to the extent tree |
| 337 | * @tree: tree to insert new map in |
| 338 | * @em: map to insert |
| 339 | * |
| 340 | * Insert @em into @tree or perform a simple forward/backward merge with |
| 341 | * existing mappings. The extent_map struct passed in will be inserted |
| 342 | * into the tree directly, with an additional reference taken, or a |
| 343 | * reference dropped if the merge attempt was successful. |
| 344 | */ |
| 345 | int add_extent_mapping(struct extent_map_tree *tree, |
| 346 | struct extent_map *em, int modified) |
| 347 | { |
| 348 | int ret = 0; |
| 349 | |
| 350 | ret = tree_insert(&tree->map, em); |
| 351 | if (ret) |
| 352 | goto out; |
| 353 | |
| 354 | setup_extent_mapping(tree, em, modified); |
| 355 | out: |
| 356 | return ret; |
| 357 | } |
| 358 | |
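/*
 * Core lookup helper. With @strict set, only an extent that actually
 * intersects [start, start + len) is returned; otherwise the nearest
 * extent found by the tree search is returned instead. A reference is
 * taken on any extent returned.
 */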
| 359 | static struct extent_map * |
| 360 | __lookup_extent_mapping(struct extent_map_tree *tree, |
| 361 | u64 start, u64 len, int strict) |
| 362 | { |
| 363 | struct extent_map *em; |
| 364 | struct rb_node *rb_node; |
| 365 | struct rb_node *prev = NULL; |
| 366 | struct rb_node *next = NULL; |
| 367 | u64 end = range_end(start, len); |
| 368 | |
| 369 | rb_node = __tree_search(&tree->map, start, &prev, &next); |
| 370 | if (!rb_node) { |
| 371 | if (prev) |
| 372 | rb_node = prev; |
| 373 | else if (next) |
| 374 | rb_node = next; |
| 375 | else |
| 376 | return NULL; |
| 377 | } |
| 378 | |
| 379 | em = rb_entry(rb_node, struct extent_map, rb_node); |
| 380 | |
| 381 | if (strict && !(end > em->start && start < extent_map_end(em))) |
| 382 | return NULL; |
| 383 | |
| 384 | atomic_inc(&em->refs); |
| 385 | return em; |
| 386 | } |
| 387 | |
| 388 | /** |
| 389 | * lookup_extent_mapping - lookup extent_map |
| 390 | * @tree: tree to lookup in |
| 391 | * @start: byte offset to start the search |
| 392 | * @len: length of the lookup range |
| 393 | * |
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range. There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
| 398 | */ |
| 399 | struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, |
| 400 | u64 start, u64 len) |
| 401 | { |
| 402 | return __lookup_extent_mapping(tree, start, len, 1); |
| 403 | } |
| 404 | |
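/*
 * Illustrative read-side pattern (hypothetical caller, not taken from
 * this file):
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		... use em ...
 *		free_extent_map(em);
 *	}
 */
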
| 405 | /** |
| 406 | * search_extent_mapping - find a nearby extent map |
| 407 | * @tree: tree to lookup in |
| 408 | * @start: byte offset to start the search |
| 409 | * @len: length of the lookup range |
| 410 | * |
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned instead.
| 415 | */ |
| 416 | struct extent_map *search_extent_mapping(struct extent_map_tree *tree, |
| 417 | u64 start, u64 len) |
| 418 | { |
| 419 | return __lookup_extent_mapping(tree, start, len, 0); |
| 420 | } |
| 421 | |
| 422 | /** |
| 423 | * remove_extent_mapping - removes an extent_map from the extent tree |
| 424 | * @tree: extent tree to remove from |
 * @em: extent map being removed
 *
 * Removes @em from @tree. No reference counts are dropped, and no checks
 * are done to see if the range is in use.
| 429 | */ |
| 430 | int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) |
| 431 | { |
| 432 | int ret = 0; |
| 433 | |
| 434 | WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags)); |
| 435 | rb_erase(&em->rb_node, &tree->map); |
| 436 | if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) |
| 437 | list_del_init(&em->list); |
| 438 | RB_CLEAR_NODE(&em->rb_node); |
| 439 | return ret; |
| 440 | } |
| 441 | |
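/*
 * Replace @cur with @new in @tree. @cur is unlinked but its reference
 * count is not dropped; the caller is expected to free it. @new takes
 * over the slot and has its modified range set up.
 */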
| 442 | void replace_extent_mapping(struct extent_map_tree *tree, |
| 443 | struct extent_map *cur, |
| 444 | struct extent_map *new, |
| 445 | int modified) |
| 446 | { |
| 447 | WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags)); |
| 448 | ASSERT(extent_map_in_tree(cur)); |
| 449 | if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags)) |
| 450 | list_del_init(&cur->list); |
| 451 | rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map); |
| 452 | RB_CLEAR_NODE(&cur->rb_node); |
| 453 | |
| 454 | setup_extent_mapping(tree, new, modified); |
| 455 | } |