zram: revive swap_slot_free_notify
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e7414cec220b3bce6cd5abbdd6a769f1f2c334a5..e72efb109fde5e5dc23007fa302a21cba7d103c5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -281,7 +281,6 @@ struct mapping_area {
 #endif
        char *vm_addr; /* address of kmap_atomic()'ed pages */
        enum zs_mapmode vm_mm; /* mapping mode */
-       bool huge;
 };
 
 static int create_handle_cache(struct zs_pool *pool)
@@ -309,7 +308,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle)
 
 static void record_obj(unsigned long handle, unsigned long obj)
 {
-       *(unsigned long *)handle = obj;
+       /*
+        * The lsb of @obj represents the handle lock while the other
+        * bits hold the object value the handle points to, so the
+        * update must not be subject to store tearing.
+        */
+       WRITE_ONCE(*(unsigned long *)handle, obj);
 }
 
 /* zpool driver */
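
For readers tracking the locking argument in the new record_obj() comment: a handle is a pointer to an unsigned long whose lsb doubles as a per-object lock. A sketch of the pin helpers that take and release that lock (the bit name matches the file; treat the bodies as illustrative rather than verbatim):

#define HANDLE_PIN_BIT	0	/* lsb of the word @handle points at */

static int trypin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	/* acquire semantics: set the lsb, fail if it was already set */
	return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
}

static void pin_tag(unsigned long handle)
{
	/* busy-wait until the lsb lock is acquired */
	while (!trypin_tag(handle))
		;
}

static void unpin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	/* release semantics: clear the lsb lock */
	clear_bit_unlock(HANDLE_PIN_BIT, ptr);
}

Since pin_tag() readers poll the same word that record_obj() stores to, WRITE_ONCE() ensures the object value and the lock bit it carries become visible as one word-sized store rather than several partial ones.
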
@@ -490,6 +494,8 @@ static void __exit zs_stat_exit(void)
        debugfs_remove_recursive(zs_stat_root);
 }
 
+static unsigned long zs_can_compact(struct size_class *class);
+
 static int zs_stats_size_show(struct seq_file *s, void *v)
 {
        int i;
@@ -497,14 +503,15 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
        struct size_class *class;
        int objs_per_zspage;
        unsigned long class_almost_full, class_almost_empty;
-       unsigned long obj_allocated, obj_used, pages_used;
+       unsigned long obj_allocated, obj_used, pages_used, freeable;
        unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
        unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+       unsigned long total_freeable = 0;
 
-       seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
+       seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
                        "class", "size", "almost_full", "almost_empty",
                        "obj_allocated", "obj_used", "pages_used",
-                       "pages_per_zspage");
+                       "pages_per_zspage", "freeable");
 
        for (i = 0; i < zs_size_classes; i++) {
                class = pool->size_class[i];
@@ -517,6 +524,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
                class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
                obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
                obj_used = zs_stat_get(class, OBJ_USED);
+               freeable = zs_can_compact(class);
                spin_unlock(&class->lock);
 
                objs_per_zspage = get_maxobj_per_zspage(class->size,
@@ -524,23 +532,25 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
                pages_used = obj_allocated / objs_per_zspage *
                                class->pages_per_zspage;
 
-               seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
+               seq_printf(s, " %5u %5u %11lu %12lu %13lu"
+                               " %10lu %10lu %16d %8lu\n",
                        i, class->size, class_almost_full, class_almost_empty,
                        obj_allocated, obj_used, pages_used,
-                       class->pages_per_zspage);
+                       class->pages_per_zspage, freeable);
 
                total_class_almost_full += class_almost_full;
                total_class_almost_empty += class_almost_empty;
                total_objs += obj_allocated;
                total_used_objs += obj_used;
                total_pages += pages_used;
+               total_freeable += freeable;
        }
 
        seq_puts(s, "\n");
-       seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
+       seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
                        "Total", "", total_class_almost_full,
                        total_class_almost_empty, total_objs,
-                       total_used_objs, total_pages);
+                       total_used_objs, total_pages, "", total_freeable);
 
        return 0;
 }
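
The new "freeable" column comes from zs_can_compact(), forward-declared above so the stats path can use it before its definition (presumably added later in this file, hence the forward declaration). As a hedged sketch of the shape such a helper takes, this illustrative version counts allocated-but-unused object slots, rounds them down to whole zspages, and reports the result in pages:

static unsigned long zs_can_compact(struct size_class *class)
{
	/* object slots that exist in zspages but hold no live data */
	unsigned long obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
				   zs_stat_get(class, OBJ_USED);

	/* how many completely drainable zspages that amounts to */
	obj_wasted /= get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);

	/* expressed in raw pages, like the pages_used column */
	return obj_wasted * class->pages_per_zspage;
}

Note that the value is sampled under class->lock together with the other per-class counters, so each row of the output is internally consistent.
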
@@ -1122,11 +1132,9 @@ static void __zs_unmap_object(struct mapping_area *area,
                goto out;
 
        buf = area->vm_buf;
-       if (!area->huge) {
-               buf = buf + ZS_HANDLE_SIZE;
-               size -= ZS_HANDLE_SIZE;
-               off += ZS_HANDLE_SIZE;
-       }
+       buf = buf + ZS_HANDLE_SIZE;
+       size -= ZS_HANDLE_SIZE;
+       off += ZS_HANDLE_SIZE;
 
        sizes[0] = PAGE_SIZE - off;
        sizes[1] = size - sizes[0];
@@ -1635,6 +1643,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                free_obj = obj_malloc(d_page, class, handle);
                zs_object_copy(free_obj, used_obj, class);
                index++;
+               /*
+                * record_obj() updates the handle's value to free_obj, which
+                * would clear the lock bit (i.e. HANDLE_PIN_BIT) of the
+                * handle and break synchronization via pin_tag() (e.g. in
+                * zs_free()), so keep the lock bit set.
+                */
+               free_obj |= BIT(HANDLE_PIN_BIT);
                record_obj(handle, free_obj);
                unpin_tag(handle);
                obj_free(pool, class, used_obj);
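
Why the pin bit must survive record_obj(): without it, the store to *handle clears the lsb and implicitly unlocks the handle, even though migrate_zspage() still performs an explicit unpin_tag() afterwards. An illustrative interleaving of the resulting race:

	CPU0 (migrate_zspage)                  CPU1 (zs_free)
	pin_tag(handle)
	record_obj(handle, free_obj)
	  /* lsb cleared: implicit unpin */
	                                       pin_tag(handle) -> succeeds
	unpin_tag(handle)
	  /* releases the lock CPU1 holds */
	                                       proceeds to free the object
	                                       with no exclusion left

ORing BIT(HANDLE_PIN_BIT) into free_obj lets record_obj() publish the new object location without ever dropping the lock, leaving the explicit unpin_tag() as the single release point.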