From 833b706cc8b7b555e18d3426e9616bd066883a7a Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Wed, 6 Aug 2014 16:04:33 -0700
Subject: [PATCH] slab: destroy a slab without holding any alien cache lock

I haven't heard that this alien cache lock is contended, but reducing
the chance of contention is generally better. With this change, we can
also simplify the complex lockdep annotation in the slab code; that is
done in the following patch.

Signed-off-by: Joonsoo Kim
Acked-by: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index e4ce73c32a7a..e4dc0896b891 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1050,10 +1050,10 @@ static void free_alien_cache(struct alien_cache **alc_ptr)
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache *ac, int node)
+				struct array_cache *ac, int node,
+				struct list_head *list)
 {
 	struct kmem_cache_node *n = get_node(cachep, node);
-	LIST_HEAD(list);
 
 	if (ac->avail) {
 		spin_lock(&n->list_lock);
@@ -1065,10 +1065,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		if (n->shared)
 			transfer_objects(n->shared, ac, ac->limit);
 
-		free_block(cachep, ac->entry, ac->avail, node, &list);
+		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
 		spin_unlock(&n->list_lock);
-		slabs_destroy(cachep, &list);
 	}
 }
 
@@ -1086,8 +1085,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 		if (alc) {
 			ac = &alc->ac;
 			if (ac->avail && spin_trylock_irq(&alc->lock)) {
-				__drain_alien_cache(cachep, ac, node);
+				LIST_HEAD(list);
+
+				__drain_alien_cache(cachep, ac, node, &list);
 				spin_unlock_irq(&alc->lock);
+				slabs_destroy(cachep, &list);
 			}
 		}
 	}
@@ -1104,10 +1106,13 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	for_each_online_node(i) {
 		alc = alien[i];
 		if (alc) {
+			LIST_HEAD(list);
+
 			ac = &alc->ac;
 			spin_lock_irqsave(&alc->lock, flags);
-			__drain_alien_cache(cachep, ac, i);
+			__drain_alien_cache(cachep, ac, i, &list);
 			spin_unlock_irqrestore(&alc->lock, flags);
+			slabs_destroy(cachep, &list);
 		}
 	}
 }
@@ -1138,10 +1143,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid);
+			__drain_alien_cache(cachep, ac, nodeid, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
+		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);
--
2.34.1
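
For readers unfamiliar with the pattern the patch applies, below is a minimal, standalone sketch in plain C with pthreads, not mm/slab.c itself: while the lock is held, the objects to be destroyed are only detached onto a caller-provided local list, and the expensive destruction runs after the lock has been dropped. The names cache, obj, drain() and destroy_all() are hypothetical stand-ins for the alien cache, its cached objects, __drain_alien_cache() and slabs_destroy().

/*
 * Sketch only: detach under the lock, destroy outside it.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
	int id;
};

struct cache {
	pthread_mutex_t lock;
	struct obj *head;	/* objects currently held by the cache */
};

/* Caller must hold c->lock: move everything onto *list, pointer work only. */
static void drain(struct cache *c, struct obj **list)
{
	*list = c->head;
	c->head = NULL;
}

/* Expensive teardown, deliberately done with no lock held. */
static void destroy_all(struct obj *list)
{
	while (list) {
		struct obj *next = list->next;
		printf("destroying obj %d\n", list->id);
		free(list);
		list = next;
	}
}

int main(void)
{
	struct cache c = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
	struct obj *list = NULL;

	/* Populate the cache with a few objects. */
	for (int i = 0; i < 3; i++) {
		struct obj *o = malloc(sizeof(*o));
		o->id = i;
		o->next = c.head;
		c.head = o;
	}

	pthread_mutex_lock(&c.lock);
	drain(&c, &list);	/* critical section: list manipulation only */
	pthread_mutex_unlock(&c.lock);

	destroy_all(list);	/* heavy work after the lock is dropped */
	return 0;
}

Keeping only list manipulation inside the critical section shortens the lock hold time, and it means the destruction path no longer runs with the alien cache lock held, which is what allows the complex lockdep annotation to be simplified in the following patch.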