diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3fe86b54d50be37e88f83504cff5de90820b5fb7..d8129ec93ebdebea943aac2b43147a786f445055 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -46,7 +46,7 @@ static inline char *bmname(struct bitmap *bitmap)
  * allocated while we're using it
  */
 static int bitmap_checkpage(struct bitmap_counts *bitmap,
-                           unsigned long page, int create)
+                           unsigned long page, int create, int no_hijack)
 __releases(bitmap->lock)
 __acquires(bitmap->lock)
 {
@@ -90,6 +90,9 @@ __acquires(bitmap->lock)
 
        if (mappage == NULL) {
                pr_debug("md/bitmap: map page allocation failed, hijacking\n");
+               /* We don't support hijack for cluster raid */
+               if (no_hijack)
+                       return -ENOMEM;
                /* failed - set the hijacked flag so that we can use the
                 * pointer as a counter */
                if (!bitmap->bp[page].map)
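
When kmalloc of a counter page fails, md normally "hijacks" the bp[] slot: the page-pointer field itself is reused as a pair of 16-bit counters that cover, at much coarser granularity, the range the page would have tracked. A clustered array needs fully backed counter pages on every node, so the new no_hijack argument turns that fallback into a hard -ENOMEM. A minimal user-space sketch of the pointer-reuse idea (layout illustrative, not the kernel's exact encoding):

	#include <stdint.h>

	union bp_slot {
		void *map;		/* normal case: pointer to a page of counters */
		uint16_t counter[2];	/* hijacked case: two coarse in-place counters */
	};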
@@ -756,7 +759,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
                bytes += sizeof(bitmap_super_t);
 
        num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
-       offset = slot_number * (num_pages - 1);
+       offset = slot_number * num_pages;
 
        store->filemap = kmalloc(sizeof(struct page *)
                                 * num_pages, GFP_KERNEL);
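
This hunk fixes the per-slot offset into the shared bitmap file: with slot_number * (num_pages - 1), every slot after the first started on the last page of the previous slot's range, so neighbouring nodes clobbered each other's bitmap pages whenever num_pages > 1. A toy check of the arithmetic (standalone C, names mine):

	#include <stdio.h>

	int main(void)
	{
		unsigned long num_pages = 3, slot;

		for (slot = 0; slot < 3; slot++)
			printf("slot %lu: old start %lu, fixed start %lu\n",
			       slot,
			       slot * (num_pages - 1),	/* old: ranges overlap */
			       slot * num_pages);	/* new: ranges are disjoint */
		return 0;
	}

With num_pages = 3 the old math puts slot 1 at page 2, still inside slot 0's pages 0-2; the fixed math starts it cleanly at page 3.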
@@ -900,6 +903,11 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
        struct page *page;
        void *kaddr;
        unsigned long chunk = block >> bitmap->counts.chunkshift;
+       struct bitmap_storage *store = &bitmap->storage;
+       unsigned long node_offset = 0;
+
+       if (mddev_is_clustered(bitmap->mddev))
+               node_offset = bitmap->cluster_slot * store->file_pages;
 
        page = filemap_get_page(&bitmap->storage, chunk);
        if (!page)
@@ -915,7 +923,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
        kunmap_atomic(kaddr);
        pr_debug("set file bit %lu page %lu\n", bit, page->index);
        /* record page number so it gets flushed to disk when unplug occurs */
-       set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
+       set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
 }
 
 static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
@@ -924,6 +932,11 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
        struct page *page;
        void *paddr;
        unsigned long chunk = block >> bitmap->counts.chunkshift;
+       struct bitmap_storage *store = &bitmap->storage;
+       unsigned long node_offset = 0;
+
+       if (mddev_is_clustered(bitmap->mddev))
+               node_offset = bitmap->cluster_slot * store->file_pages;
 
        page = filemap_get_page(&bitmap->storage, chunk);
        if (!page)
@@ -935,8 +948,8 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
        else
                clear_bit_le(bit, paddr);
        kunmap_atomic(paddr);
-       if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
-               set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
+       if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
+               set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
                bitmap->allclean = 0;
        }
 }
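
bitmap_file_set_bit() and bitmap_file_clear_bit() above get the same translation: the page attribute arrays behind set_page_attr()/test_page_attr() hold file_pages entries, one per page of this node's region, while page->index is a global index across all nodes' regions of the shared file. Subtracting node_offset converts one to the other; a sketch of the mapping (helper name mine):

	static unsigned long attr_index(unsigned long page_index,
					unsigned long cluster_slot,
					unsigned long file_pages)
	{
		/* this node's pages start at cluster_slot * file_pages */
		return page_index - cluster_slot * file_pages;
	}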
@@ -1321,7 +1334,7 @@ __acquires(bitmap->lock)
        sector_t csize;
        int err;
 
-       err = bitmap_checkpage(bitmap, page, create);
+       err = bitmap_checkpage(bitmap, page, create, 0);
 
        if (bitmap->bp[page].hijacked ||
            bitmap->bp[page].map == NULL)
@@ -1594,6 +1607,27 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
 }
 EXPORT_SYMBOL(bitmap_cond_end_sync);
 
+void bitmap_sync_with_cluster(struct mddev *mddev,
+                             sector_t old_lo, sector_t old_hi,
+                             sector_t new_lo, sector_t new_hi)
+{
+       struct bitmap *bitmap = mddev->bitmap;
+       sector_t sector, blocks = 0;
+
+       for (sector = old_lo; sector < new_lo; ) {
+               bitmap_end_sync(bitmap, sector, &blocks, 0);
+               sector += blocks;
+       }
+       WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
+
+       for (sector = old_hi; sector < new_hi; ) {
+               bitmap_start_sync(bitmap, sector, &blocks, 0);
+               sector += blocks;
+       }
+       WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
+}
+EXPORT_SYMBOL(bitmap_sync_with_cluster);
+
 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
 {
        /* For each chunk covered by any of these sectors, set the
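
bitmap_sync_with_cluster() lets a node track another node's moving resync window: sectors that fell out of the bottom of the window, [old_lo, new_lo), are marked complete with bitmap_end_sync(), and sectors newly covered at the top, [old_hi, new_hi), are marked in progress with bitmap_start_sync(). A hedged sketch of the expected caller on receipt of a RESYNCING message (struct fields and function name assumed, not copied from md-cluster.c):

	static void handle_resync_window(struct mddev *mddev,
					 sector_t new_lo, sector_t new_hi)
	{
		struct md_cluster_info *cinfo = mddev->cluster_info;

		/* advance from the last recorded window to the new one */
		bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi,
					 new_lo, new_hi);
		cinfo->sync_low = new_lo;
		cinfo->sync_hi = new_hi;
	}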
@@ -1814,6 +1848,9 @@ int bitmap_load(struct mddev *mddev)
        if (!bitmap)
                goto out;
 
+       if (mddev_is_clustered(mddev))
+               md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
+
        /* Clear out old bitmap info first:  Either there is none, or we
         * are resuming after someone else has possibly changed things,
         * so we should forget old cached info.
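
For clustered arrays, bitmap_load() now has the cluster module gather every node's bitmap state before the local bitmap is reinitialized. The call assumes a new hook in the cluster ops table, roughly (sketch of the member as used here):

	struct md_cluster_operations {
		/* ... existing join/leave/resync callbacks ... */
		void (*load_bitmaps)(struct mddev *mddev, int total_slots);
	};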
@@ -1890,14 +1927,14 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
 
        if (clear_bits) {
                bitmap_update_sb(bitmap);
-               /* Setting this for the ev_page should be enough.
-                * And we do not require both write_all and PAGE_DIRT either
-                */
+               /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
+                * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
                for (i = 0; i < bitmap->storage.file_pages; i++)
-                       set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
-               bitmap_write_all(bitmap);
+                       if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
+                               set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
                bitmap_unplug(bitmap);
        }
+       bitmap_unplug(mddev->bitmap);
        *low = lo;
        *high = hi;
 err:
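
The reworked clear_bits path closes a flushing gap: bitmap_file_clear_bit() only marks pages BITMAP_PAGE_PENDING, and bitmap_unplug() writes out only DIRTY or NEEDWRITE pages, so the pending pages were never guaranteed to reach disk. Promoting PENDING to NEEDWRITE flushes exactly the pages that changed, instead of dirtying every page and calling bitmap_write_all(). Condensed shape of the unplug-side filter this relies on (simplified from bitmap_unplug()):

	for (i = 0; i < bitmap->storage.file_pages; i++) {
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write)
			write_page(bitmap, bitmap->storage.filemap[i], 0);
		/* pages that are only BITMAP_PAGE_PENDING are skipped */
	}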
@@ -2032,6 +2069,35 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
                     chunks << chunkshift);
 
        spin_lock_irq(&bitmap->counts.lock);
+       /* For cluster raid, need to pre-allocate bitmap */
+       if (mddev_is_clustered(bitmap->mddev)) {
+               unsigned long page;
+               for (page = 0; page < pages; page++) {
+                       ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
+                       if (ret) {
+                               unsigned long k;
+
+                               /* deallocate the page memory */
+                               for (k = 0; k < page; k++) {
+                                       kfree(new_bp[k].map);
+                               }
+
+                               /* restore some fields from old_counts */
+                               bitmap->counts.bp = old_counts.bp;
+                               bitmap->counts.pages = old_counts.pages;
+                               bitmap->counts.missing_pages = old_counts.pages;
+                               bitmap->counts.chunkshift = old_counts.chunkshift;
+                               bitmap->counts.chunks = old_counts.chunks;
+                               bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
+                                                                            BITMAP_BLOCK_SHIFT);
+                               blocks = old_counts.chunks << old_counts.chunkshift;
+                               pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+                               break;
+                       } else
+                               bitmap->counts.bp[page].count += 1;
+               }
+       }
+
        for (block = 0; block < blocks; ) {
                bitmap_counter_t *bmc_old, *bmc_new;
                int set;
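
The pre-allocation loop walks every counter page with create=1 and no_hijack=1, so a clustered array either gets a fully backed in-memory bitmap or the resize rolls back: pages allocated so far are freed and the geometry in bitmap->counts is restored from old_counts (note that, at least within this hunk, the new_bp array itself is not freed on that path). The count += 1 on each success looks intended to keep the daemon from lazily freeing the pre-allocated pages later. The error handling follows the usual allocate-all-or-roll-back pattern; in generic, standalone form (plain C, names mine):

	#include <stdlib.h>

	int prealloc_all(void **pages, unsigned long n, size_t size)
	{
		unsigned long i, k;

		for (i = 0; i < n; i++) {
			pages[i] = calloc(1, size);
			if (!pages[i]) {
				for (k = 0; k < i; k++)	/* undo partial work */
					free(pages[k]);
				return -1;	/* caller restores its old state */
			}
		}
		return 0;
	}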