Set c->node to -1 if we allocate from a debug slab, instead of testing
SlabDebug(), which requires access to the page struct cacheline.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
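
For context, a minimal standalone sketch of the idea (not the actual SLUB
code; the struct and function names below are illustrative stand-ins):
record the node in the per-cpu structure, use -1 to mark a debug slab, and
let the free fastpath test that field instead of reading SlabDebug() from
the page struct, which lives in a different cacheline.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins, not the kernel's kmem_cache_cpu / page structs. */
struct cpu_slab {
	void **freelist;	/* per-cpu list of free objects */
	int node;		/* NUMA node of the slab, or -1 for a debug slab */
};

struct fake_page {
	bool slab_debug;	/* stand-in for the SlabDebug(page) flag */
};

/* Old-style check: dereferences the page struct, touching its cacheline. */
static bool fastpath_ok_old(const struct fake_page *page)
{
	return !page->slab_debug;
}

/* New-style check: only reads the per-cpu structure. */
static bool fastpath_ok_new(const struct cpu_slab *c)
{
	return c->node >= 0;
}

int main(void)
{
	struct fake_page page = { .slab_debug = true };
	struct cpu_slab c = { .freelist = NULL, .node = -1 };	/* debug slab */

	printf("old check: %d, new check: %d\n",
	       fastpath_ok_old(&page), fastpath_ok_new(&c));
	return 0;
}
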
c->page->inuse++;
c->page->freelist = object[c->offset];
+ c->node = -1;
slab_unlock(c->page);
return object;
}
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
- if (unlikely(!c->page || !c->freelist ||
- !node_match(c, node)))
+ if (unlikely(!c->freelist || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
local_irq_save(flags);
debug_check_no_locks_freed(object, s->objsize);
c = get_cpu_slab(s, smp_processor_id());
- if (likely(page == c->page && !SlabDebug(page))) {
+ if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
} else
for_each_possible_cpu(cpu) {
struct page *page;
struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
if (!c)
continue;
page = c->page;
+ node = c->node;
+ if (node < 0)
+ continue;
if (page) {
if (flags & SO_CPU) {
int x = 0;