mm/sl[aou]b: Extract a common function for kmem_cache_destroy
index 24aa362edef7607e33dca49f775767d8957b8caf..724adea343845e06e6c7afe8eb228ad97749ce3b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -624,7 +624,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
        print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
        va_list args;
        char buf[100];
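
This first hunk only touches the prototype: fmt becomes const char *, so callers can pass pointers to string literals (such as the text argument threaded through in the hunks below) without discarding constness. The body is elided by the hunk; it presumably follows the standard va_list/vsnprintf pattern, sketched here for orientation. The helper calls after the formatting step are an assumption, not shown in this diff:

	/* Hedged sketch of the (elided) slab_err() body: format the
	 * caller's arguments into the on-stack buf[100] shown above,
	 * then report.  slab_bug()/print_page_info() are assumed SLUB
	 * reporting helpers. */
	static void slab_err(struct kmem_cache *s, struct page *page,
				const char *fmt, ...)
	{
		va_list args;
		char buf[100];

		va_start(args, fmt);
		vsnprintf(buf, sizeof(buf), fmt, args); /* render caller's format */
		va_end(args);
		slab_bug(s, "%s", buf);                 /* assumed: print the bug banner */
		print_page_info(page);                  /* assumed: dump the page state */
		dump_stack();
	}
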
@@ -3146,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                                     sizeof(long), GFP_ATOMIC);
        if (!map)
                return;
-       slab_err(s, page, "%s", text);
+       slab_err(s, page, text, s->name);
        slab_lock(page);
 
        get_map(s, page, map);
@@ -3178,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        discard_slab(s, page);
                } else {
                        list_slab_objects(s, page,
-                               "Objects remaining on kmem_cache_close()");
+                       "Objects remaining in %s on kmem_cache_close()");
                }
        }
 }
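
These two hunks work as a pair, and they rely on the const char * prototype change above: the literal format string now lives at the free_partial() call site and carries a %s, and list_slab_objects() forwards it to slab_err() together with s->name as the matching argument. The net effect is a diagnostic that names the offending cache. Roughly (surrounding code abbreviated):

	/* How the pair of hunks fits together (sketch): */
	static void list_slab_objects(struct kmem_cache *s, struct page *page,
					const char *text)
	{
		/* ... bitmap allocation as in the hunk above ... */
		slab_err(s, page, text, s->name); /* text is the format, s->name fills %s */
		/* ... walk the slab and print the remaining objects ... */
	}

	/* Call site in free_partial(); prints e.g.
	 * "Objects remaining in kmalloc-64 on kmem_cache_close()". */
	list_slab_objects(s, page,
			"Objects remaining in %s on kmem_cache_close()");
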
@@ -3191,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
        int node;
 
        flush_all(s);
-       free_percpu(s->cpu_slab);
        /* Attempt to free all objects */
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);
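
This hunk and the next move free_percpu(s->cpu_slab) from before the node scan to after it. The ordering matters once shutdown failures become recoverable: if any node still holds objects, kmem_cache_close() now returns 1 before releasing the per-cpu slabs, so a cache that fails to shut down is left intact rather than half torn down. The resulting function reads approximately as follows (the partial-list draining between the two shown hunks is elided here, as it is in the diff):

	/* kmem_cache_close() after the patch (sketch): */
	static inline int kmem_cache_close(struct kmem_cache *s)
	{
		int node;

		flush_all(s);
		/* Attempt to free all objects */
		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = get_node(s, node);

			/* ... drain n's partial list (elided context) ... */
			if (n->nr_partial || slabs_node(s, node))
				return 1; /* objects remain: bail out, cache stays usable */
		}
		free_percpu(s->cpu_slab);  /* only freed once shutdown is certain */
		free_kmem_cache_nodes(s);
		return 0;
	}
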
@@ -3200,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
                if (n->nr_partial || slabs_node(s, node))
                        return 1;
        }
+       free_percpu(s->cpu_slab);
        free_kmem_cache_nodes(s);
        return 0;
 }
 
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-       mutex_lock(&slab_mutex);
-       s->refcount--;
-       if (!s->refcount) {
-               list_del(&s->list);
-               mutex_unlock(&slab_mutex);
-               if (kmem_cache_close(s)) {
-                       printk(KERN_ERR "SLUB %s: %s called for cache that "
-                               "still has objects.\n", s->name, __func__);
-                       dump_stack();
-               }
-               if (s->flags & SLAB_DESTROY_BY_RCU)
-                       rcu_barrier();
-               sysfs_slab_remove(s);
-       } else
-               mutex_unlock(&slab_mutex);
+       return kmem_cache_close(s);
+}
+
+void __kmem_cache_destroy(struct kmem_cache *s)
+{
+       sysfs_slab_remove(s);
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /********************************************************************
  *             Kmalloc subsystem
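
The last hunk is the extraction the subject line announces: SLUB's open-coded kmem_cache_destroy() (refcounting, list removal, the error report, the RCU barrier) disappears, leaving only two allocator-specific hooks, __kmem_cache_shutdown() and __kmem_cache_destroy(). The shared logic moves to mm/slab_common.c so that slab, slob and slub all run the same sequence. That common function is not part of this file's diff; reconstructed from the code removed above, it plausibly looks like this (the exact message and list handling are assumptions):

	/* Hedged reconstruction of the common kmem_cache_destroy()
	 * in mm/slab_common.c (not shown in this diff): */
	void kmem_cache_destroy(struct kmem_cache *s)
	{
		mutex_lock(&slab_mutex);
		s->refcount--;
		if (!s->refcount) {
			list_del(&s->list);
			if (!__kmem_cache_shutdown(s)) {
				if (s->flags & SLAB_DESTROY_BY_RCU)
					rcu_barrier(); /* wait out RCU-deferred frees */
				__kmem_cache_destroy(s); /* sysfs teardown in SLUB */
			} else {
				/* shutdown failed: keep the cache alive */
				list_add(&s->list, &slab_caches);
				printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
					s->name);
				dump_stack();
			}
		}
		mutex_unlock(&slab_mutex);
	}
	EXPORT_SYMBOL(kmem_cache_destroy);

Note how the failure path leaves the cache registered and usable, which is exactly why the earlier hunks had to defer free_percpu(s->cpu_slab) until after the objects-remaining check.
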