f2fs: add a slab cache entry for small discards
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fa284d397199faed53f1c8ffb4de611440ad2e73..823526ec5243dc26304363349d5ed3c875b0a800 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
 #include "node.h"
 #include <trace/events/f2fs.h>
 
+#define __reverse_ffz(x) __reverse_ffs(~(x))
+
+static struct kmem_cache *discard_entry_slab;
+
+/*
+ * __reverse_ffs is copied from __ffs() in include/asm-generic/bitops/__ffs.h
+ * and adjusted since f2fs_set_bit numbers a byte's bits from MSB to LSB.
+ */
+static inline unsigned long __reverse_ffs(unsigned long word)
+{
+       int num = 0;
+
+#if BITS_PER_LONG == 64
+       if ((word & 0xffffffff) == 0) {
+               num += 32;
+               word >>= 32;
+       }
+#endif
+       if ((word & 0xffff) == 0) {
+               num += 16;
+               word >>= 16;
+       }
+       if ((word & 0xff) == 0) {
+               num += 8;
+               word >>= 8;
+       }
+       if ((word & 0xf0) == 0)
+               num += 4;
+       else
+               word >>= 4;
+       if ((word & 0xc) == 0)
+               num += 2;
+       else
+               word >>= 2;
+       if ((word & 0x2) == 0)
+               num += 1;
+       return num;
+}
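+
+/*
+ * Example: __reverse_ffs(0x80) == 0 and __reverse_ffs(0x01) == 7, since
+ * f2fs bit 0 of a byte is stored in that byte's MSB.
+ */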
+
+/*
+ * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c and adjusted
+ * because f2fs_set_bit numbers a byte's bits from MSB to LSB.
+ * Example:
+ *                             LSB <--> MSB
+ *   f2fs_set_bit(0, bitmap) => 0000 0001
+ *   f2fs_set_bit(7, bitmap) => 1000 0000
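+ *
+ * So, e.g., if only f2fs bit 1 is set in @addr,
+ *   __find_rev_next_bit(addr, 8, 0) returns 1 and
+ *   __find_rev_next_zero_bit(addr, 8, 1) returns 2.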
+ */
+static unsigned long __find_rev_next_bit(const unsigned long *addr,
+                       unsigned long size, unsigned long offset)
+{
+       const unsigned long *p = addr + BIT_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long tmp;
+       unsigned long mask, submask;
+       unsigned long quot, rest;
+
+       if (offset >= size)
+               return size;
+
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (!offset)
+               goto aligned;
+
+       tmp = *(p++);
+       quot = (offset >> 3) << 3;
+       rest = offset & 0x7;
+       mask = ~0UL << quot;
+       submask = (unsigned char)(0xff << rest) >> rest;
+       /* clear the reversed bits below @offset in its byte, keep all higher bytes */
+       mask &= ~((0xffUL ^ submask) << quot);
+       tmp &= mask;
+       if (size < BITS_PER_LONG)
+               goto found_first;
+       if (tmp)
+               goto found_middle;
+
+       size -= BITS_PER_LONG;
+       result += BITS_PER_LONG;
+aligned:
+       while (size & ~(BITS_PER_LONG-1)) {
+               tmp = *(p++);
+               if (tmp)
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+found_first:
+       tmp &= (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + __reverse_ffs(tmp);
+}
+
+static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
+                       unsigned long size, unsigned long offset)
+{
+       const unsigned long *p = addr + BIT_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long tmp;
+       unsigned long mask, submask;
+       unsigned long quot, rest;
+
+       if (offset >= size)
+               return size;
+
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (!offset)
+               goto aligned;
+
+       tmp = *(p++);
+       quot = (offset >> 3) << 3;
+       rest = offset & 0x7;
+       mask = ~(~0UL << quot);
+       submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
+       submask <<= quot;
+       mask += submask;
+       tmp |= mask;
+       if (size < BITS_PER_LONG)
+               goto found_first;
+       if (~tmp)
+               goto found_middle;
+
+       size -= BITS_PER_LONG;
+       result += BITS_PER_LONG;
+aligned:
+       while (size & ~(BITS_PER_LONG - 1)) {
+               tmp = *(p++);
+               if (~tmp)
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp |= ~0UL << size;
+       if (tmp == ~0UL)        /* Are any bits zero? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + __reverse_ffz(tmp);
+}
+
 /*
  * This function balances dirty node and dentry pages.
  * In addition, it controls garbage collection.
@@ -459,13 +609,18 @@ static void __next_free_blkoff(struct f2fs_sb_info *sbi,
                        struct curseg_info *seg, block_t start)
 {
        struct seg_entry *se = get_seg_entry(sbi, seg->segno);
-       block_t ofs;
-       for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
-               if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
-                       && !f2fs_test_bit(ofs, se->cur_valid_map))
-                       break;
-       }
-       seg->next_blkoff = ofs;
+       int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
+       unsigned long target_map[entries];
+       unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
+       unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
+       int i, pos;
+
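+       /*
+        * A block is in use if it is valid in either the last checkpoint
+        * bitmap or the current in-memory bitmap, so OR the two maps and
+        * take the first zero (reversed) bit at or after @start.
+        */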
+       for (i = 0; i < entries; i++)
+               target_map[i] = ckpt_map[i] | cur_map[i];
+
+       pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
+
+       seg->next_blkoff = pos;
 }
 
 /*
@@ -1645,6 +1800,10 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
        sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
        sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;
 
+       INIT_LIST_HEAD(&sm_info->discard_list);
+       sm_info->nr_discards = 0;
+       sm_info->max_discards = 0;
+
        err = build_sit_info(sbi);
        if (err)
                return err;
@@ -1760,3 +1919,17 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
        sbi->sm_info = NULL;
        kfree(sm_info);
 }
+
+int __init create_segment_manager_caches(void)
+{
+       discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
+                       sizeof(struct discard_entry), NULL);
+       if (!discard_entry_slab)
+               return -ENOMEM;
+       return 0;
+}
+
+void destroy_segment_manager_caches(void)
+{
+       kmem_cache_destroy(discard_entry_slab);
+}
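
A quick way to see the bit convention these helpers rely on is a small
userspace sketch (not part of the patch; set_rev_bit() below is a hypothetical
stand-in for f2fs_set_bit, which keeps bit 0 of each byte in that byte's MSB):

#include <stdio.h>

/* mimic the reversed layout: bit n lives at bit (7 - (n & 7)) of byte (n >> 3) */
static void set_rev_bit(unsigned int nr, unsigned char *addr)
{
	addr[nr >> 3] |= 1 << (7 - (nr & 7));
}

int main(void)
{
	unsigned char map[8] = { 0 };

	set_rev_bit(0, map);		/* byte 0 becomes 0x80 (f2fs bit 0 = MSB) */
	set_rev_bit(7, map);		/* byte 0 becomes 0x81 */
	printf("%02x\n", map[0]);	/* prints 81 */
	return 0;
}

Because of this layout, the stock find_next_bit()/find_next_zero_bit() helpers
would scan each byte from its LSB and report wrong offsets, which is why the
patch adds the __find_rev_* variants above. Elsewhere in this patch (not shown
in these hunks), discard_entry objects are presumably allocated from
discard_entry_slab, queued on sm_info->discard_list up to max_discards, and
released once the discards have been issued.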