A recent change to use slab allocations earlier in boot exposed a bug
where SLUB can call schedule_work() and try to register caches with
sysfs before it is safe to do so.
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
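---

A note on the approach: the fix gates the deferred sysfs registration
on how far boot has progressed, as tracked by slab_state. Below is a
minimal userspace sketch of that gating pattern, illustrative only;
the names boot_state, queue_sysfs_add() and create_cache() are
stand-ins, not identifiers from mm/slub.c.

	#include <stdio.h>

	/* Stand-in for slab_state: values ordered by boot progress. */
	enum boot_state { BOOT_EARLY, BOOT_SYSFS };

	static enum boot_state state = BOOT_EARLY;

	/* Stand-in for schedule_work(&sysfs_add_work). */
	static void queue_sysfs_add(void)
	{
		printf("deferred sysfs add queued\n");
	}

	static void create_cache(void)
	{
		/*
		 * Before sysfs is up, do nothing here: the sysfs
		 * initcall will register every cache that already
		 * exists when it runs.
		 */
		if (state >= BOOT_SYSFS)
			queue_sysfs_add();
	}

	int main(void)
	{
		create_cache();		/* early boot: nothing queued */
		state = BOOT_SYSFS;	/* after the sysfs initcall runs */
		create_cache();		/* now deferring the add is safe */
		return 0;
	}

The patch applies the same slab_state >= SYSFS check twice: once when
deciding whether to set __SYSFS_ADD_DEFERRED, and again before calling
schedule_work().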
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ ... @@ dma_kmalloc_cache
 	struct kmem_cache *s;
 	char *text;
 	size_t realsize;
+	unsigned long slabflags;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ ... @@
 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			 (unsigned int)realsize);
 	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+	/*
+	 * Must defer sysfs creation to a workqueue because we don't know
+	 * what context we are called from. Before sysfs comes up, we don't
+	 * need to do anything because our sysfs initcall will start by
+	 * adding all existing slabs to sysfs.
+	 */
+	slabflags = SLAB_CACHE_DMA;
+	if (slab_state >= SYSFS)
+		slabflags |= __SYSFS_ADD_DEFERRED;
+
 	if (!s || !text || !kmem_cache_open(s, flags, text,
-			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
@@ ... @@
 	list_add(&s->list, &slab_caches);
 	kmalloc_caches_dma[index] = s;
 
-	schedule_work(&sysfs_add_work);
+	if (slab_state >= SYSFS)
+		schedule_work(&sysfs_add_work);
 
 unlock_out:
 	up_write(&slub_lock);