mm: slub: work around unneeded lockdep warning
author Dave Hansen <dave.hansen@linux.intel.com>
Fri, 24 Jan 2014 15:20:23 +0000 (07:20 -0800)
committer Pekka Enberg <penberg@kernel.org>
Fri, 31 Jan 2014 11:41:26 +0000 (13:41 +0200)
The slub code does some setup during early boot in
early_kmem_cache_node_alloc() with some local data.  There is no
possible way that another CPU can see this data, so the slub code
doesn't bother locking it.  However, some new lockdep asserts
check to make sure that add_partial() _always_ has the list_lock
held.

Just add the locking, even though it is technically unnecessary.
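
For illustration only, here is a minimal userspace sketch of the same
pattern (not the mm/slub.c code): a debug assertion insists the list
lock is held whenever the list is updated, so even a single-threaded
init path takes the lock purely to keep the checker quiet.  All names
below (node_list, add_partial_stub, assert_list_lock_held,
early_node_init) are hypothetical stand-ins, and the owner check is a
crude substitute for lockdep.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for a kmem_cache_node's partial list. */
struct node_list {
        pthread_mutex_t lock;
        pthread_t       owner;       /* debug-only: who holds 'lock' */
        int             nr_partial;
};

/* Crude analogue of a lockdep assertion: complain if the caller does
 * not hold the lock, even when no race is actually possible. */
static void assert_list_lock_held(struct node_list *n)
{
        assert(pthread_equal(n->owner, pthread_self()));
}

static void add_partial_stub(struct node_list *n)
{
        assert_list_lock_held(n);    /* the check that forces the early lock */
        n->nr_partial++;             /* real code would splice a page in here */
}

/* Mirrors the early-boot situation: 'n' is still private to this
 * thread, so the lock is taken only to satisfy the assertion above. */
static void early_node_init(struct node_list *n)
{
        pthread_mutex_init(&n->lock, NULL);
        n->nr_partial = 0;

        pthread_mutex_lock(&n->lock);
        n->owner = pthread_self();
        add_partial_stub(n);
        pthread_mutex_unlock(&n->lock);
}

int main(void)
{
        struct node_list n;

        early_node_init(&n);
        printf("nr_partial = %d\n", n.nr_partial);
        return 0;
}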

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
mm/slub.c

index a99e9e67c60e9b5f02e3308510f5b3f6f6fc3dc5..432bddf484bbf943da517f364f759ea5f448ef29 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2890,7 +2890,13 @@ static void early_kmem_cache_node_alloc(int node)
        init_kmem_cache_node(n);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
+       /*
+        * the lock is for lockdep's sake, not for any actual
+        * race protection
+        */
+       spin_lock(&n->list_lock);
        add_partial(n, page, DEACTIVATE_TO_HEAD);
+       spin_unlock(&n->list_lock);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)