Merge tag 'v4.5-rc6' into locking/core, to pick up fixes
author     Ingo Molnar <mingo@kernel.org>
           Mon, 29 Feb 2016 08:55:22 +0000 (09:55 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 29 Feb 2016 08:55:22 +0000 (09:55 +0100)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
18 files changed:
arch/c6x/kernel/setup.c
arch/microblaze/kernel/setup.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/s390/kernel/early.c
arch/sparc/kernel/head_64.S
arch/x86/include/asm/barrier.h
arch/x86/kernel/process.c
arch/x86/lguest/boot.c
include/linux/compiler.h
include/linux/lockdep.h
init/main.c
kernel/futex.c
kernel/locking/lockdep.c
lib/test_static_keys.c
tools/lib/lockdep/common.c
tools/lib/lockdep/include/liblockdep/common.h
tools/lib/lockdep/preload.c

diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index 72e17f7ebd6ff0fba193ac07190d42b41e106e76..786e36e2f61de91c8f446cc941cff895cdb91578 100644
@@ -281,8 +281,6 @@ notrace void __init machine_init(unsigned long dt_ptr)
         */
        set_ist(_vectors_start);
 
-       lockdep_init();
-
        /*
         * dtb is passed in from bootloader.
         * fdt is linked in blob.
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 89a2a93949274b16b3508b314d0cbdf6db5946e6..f31ebb5dc26c21922f240545d038f0d6b5699300 100644
@@ -130,8 +130,6 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
        memset(__bss_start, 0, __bss_stop-__bss_start);
        memset(_ssbss, 0, _esbss-_ssbss);
 
-       lockdep_init();
-
 /* initialize device tree for usage in early_printk */
        early_init_devtree(_fdt_start);
 
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index ad8c9db61237223c3b807e3a9afed1487c44af1e..d544fa31175766bf48790f75bcd5458d6a505080 100644
@@ -114,8 +114,6 @@ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
 
 notrace void __init machine_init(u64 dt_ptr)
 {
-       lockdep_init();
-
        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5c03a6a9b0542fac3d042f2481f367aae9178d38..f98be8383a39994868ba41d0f8492e99f48eb1fe 100644
@@ -255,9 +255,6 @@ void __init early_setup(unsigned long dt_ptr)
        setup_paca(&boot_paca);
        fixup_boot_paca();
 
-       /* Initialize lockdep early or else spinlocks will blow */
-       lockdep_init();
-
        /* -------- printk is now safe to use ------- */
 
        /* Enable early debugging if any specified (see udbg.h) */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c55576bbaa1f7555479994d6848c9aa5b60f189b..a0684de5a93b99ae199f9bf6906327f4513f662e 100644
@@ -448,7 +448,6 @@ void __init startup_init(void)
        rescue_initrd();
        clear_bss_section();
        init_kernel_storage_key();
-       lockdep_init();
        lockdep_off();
        setup_lowcore_early();
        setup_facility_list();
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index f2d30cab5b3f388fa9b446cf5b12afda92fa0a9d..cd1f592cd3479f8c94c599bfc589087184d4d180 100644
@@ -696,14 +696,6 @@ tlb_fixup_done:
        call    __bzero
         sub    %o1, %o0, %o1
 
-#ifdef CONFIG_LOCKDEP
-       /* We have this call this super early, as even prom_init can grab
-        * spinlocks and thus call into the lockdep code.
-        */
-       call    lockdep_init
-        nop
-#endif
-
        call    prom_init
         mov    %l7, %o0                        ! OpenPROM cif handler
 
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index a584e1c50918406a0398cf4a013432e47abf143e..bfb28caf97b1be1f2d6aa8893bd905a39030e310 100644
@@ -6,18 +6,17 @@
 
 /*
  * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
+ * And yes, this might be required on UP too when we're talking
  * to devices.
  */
 
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+                                     X86_FEATURE_XMM2) ::: "memory", "cc")
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+                                      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+                                      X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
 #define mb()   asm volatile("mfence":::"memory")
 #define rmb()  asm volatile("lfence":::"memory")
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9f7c21c22477e59462d72e930d79a4c2a238a051..9decee2bfdbeedb795d3e1f423e606cdec5e49e0 100644
@@ -418,9 +418,9 @@ static void mwait_idle(void)
        if (!current_set_polling_and_test()) {
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
-                       smp_mb(); /* quirk */
+                       mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
-                       smp_mb(); /* quirk */
+                       mb(); /* quirk */
                }
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4ba229ac3f4ff127dddf0c33fb91cc42f2e60b22..f56cc418c87de8c783f3071bf598be3878b17acb 100644
@@ -1520,12 +1520,6 @@ __init void lguest_init(void)
         */
        reserve_top_address(lguest_data.reserve_mem);
 
-       /*
-        * If we don't initialize the lock dependency checker now, it crashes
-        * atomic_notifier_chain_register, then paravirt_disable_iospace.
-        */
-       lockdep_init();
-
        /* Hook in our special panic hypercall code. */
        atomic_notifier_chain_register(&panic_notifier_list, &paniced);
 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 48f5aab117ae12625d041cd18555031e87c178fc..a27f4f17c382b1cbf8884ac33b8134dafe624b2d 100644
@@ -263,8 +263,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
- * compile-time warning.
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
+ * least two memcpy()s: one for the __builtin_memcpy() and then one for
+ * the macro doing the copy of variable - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
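
The reworded comment counts two copies on the aggregate fallback path.
A compact standalone sketch of that pattern, loosely modelled on the
kernel macro (DEMO_READ_ONCE is a simplified stand-in, not the verbatim
include/linux/compiler.h definition):

#include <stdio.h>
#include <string.h>

struct pair { long a, b; };	/* wider than one machine word */

#define DEMO_READ_ONCE(x)						\
({									\
	union { __typeof__(x) __val; char __c[sizeof(x)]; } __u;	\
	memcpy(__u.__c, (const void *)&(x), sizeof(x));			\
	__u.__val;							\
})

static struct pair shared = { 1, 2 };

int main(void)
{
	/* Copy one: memcpy() into the stack temporary '__u' inside the
	 * macro. Copy two: assigning __u.__val into snap. */
	struct pair snap = DEMO_READ_ONCE(shared);

	printf("%ld %ld\n", snap.a, snap.b);
	return 0;
}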
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4dca42fd32f52d17326e436c4d9fcbd86a0e8d24..d026b190c53066d25753ce98f0d7c66d864a6c0a 100644
@@ -261,7 +261,6 @@ struct held_lock {
 /*
  * Initialization, self-test and debugging-output methods:
  */
-extern void lockdep_init(void);
 extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +391,6 @@ static inline void lockdep_on(void)
 # define lockdep_set_current_reclaim_state(g)  do { } while (0)
 # define lockdep_clear_current_reclaim_state() do { } while (0)
 # define lockdep_trace_alloc(g)                        do { } while (0)
-# define lockdep_init()                                do { } while (0)
 # define lockdep_info()                                do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
diff --git a/init/main.c b/init/main.c
index 58c9e374704bb20cfff41fa92afcf7cafe498c32..b3008bcfb1dc42d72b5faed1776c2e8c3618919d 100644
@@ -499,11 +499,6 @@ asmlinkage __visible void __init start_kernel(void)
        char *command_line;
        char *after_dashes;
 
-       /*
-        * Need to run as early as possible, to initialize the
-        * lockdep hash:
-        */
-       lockdep_init();
        set_task_stack_end_magic(&init_task);
        smp_setup_processor_id();
        debug_objects_early_init();
diff --git a/kernel/futex.c b/kernel/futex.c
index 5d6ce6413ef1d227b32c99a4330bee1289f9f571..bae542e4b2e9af5e47507c507f459977f200609d 100644
  *   futex_wait(futex, val);
  *
  *   waiters++; (a)
- *   mb(); (A) <-- paired with -.
- *                              |
- *   lock(hash_bucket(futex));  |
- *                              |
- *   uval = *futex;             |
- *                              |        *futex = newval;
- *                              |        sys_futex(WAKE, futex);
- *                              |          futex_wake(futex);
- *                              |
- *                              `------->  mb(); (B)
+ *   smp_mb(); (A) <-- paired with -.
+ *                                  |
+ *   lock(hash_bucket(futex));      |
+ *                                  |
+ *   uval = *futex;                 |
+ *                                  |        *futex = newval;
+ *                                  |        sys_futex(WAKE, futex);
+ *                                  |          futex_wake(futex);
+ *                                  |
+ *                                  `--------> smp_mb(); (B)
  *   if (uval == val)
  *     queue();
  *     unlock(hash_bucket(futex));
@@ -334,7 +334,7 @@ static inline void futex_get_mm(union futex_key *key)
        /*
         * Ensure futex_get_mm() implies a full barrier such that
         * get_futex_key() implies a full barrier. This is relied upon
-        * as full barrier (B), see the ordering comment above.
+        * as smp_mb(); (B), see the ordering comment above.
         */
        smp_mb__after_atomic();
 }
@@ -407,10 +407,10 @@ static void get_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               ihold(key->shared.inode); /* implies MB (B) */
+               ihold(key->shared.inode); /* implies smp_mb(); (B) */
                break;
        case FUT_OFF_MMSHARED:
-               futex_get_mm(key); /* implies MB (B) */
+               futex_get_mm(key); /* implies smp_mb(); (B) */
                break;
        default:
                /*
@@ -418,7 +418,7 @@ static void get_futex_key_refs(union futex_key *key)
                 * mm, therefore the only purpose of calling get_futex_key_refs
                 * is because we need the barrier for the lockless waiter check.
                 */
-               smp_mb(); /* explicit MB (B) */
+               smp_mb(); /* explicit smp_mb(); (B) */
        }
 }
 
@@ -497,7 +497,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
        if (!fshared) {
                key->private.mm = mm;
                key->private.address = address;
-               get_futex_key_refs(key);  /* implies MB (B) */
+               get_futex_key_refs(key);  /* implies smp_mb(); (B) */
                return 0;
        }
 
@@ -520,7 +520,20 @@ again:
        else
                err = 0;
 
-       lock_page(page);
+       /*
+        * The treatment of mapping from this point on is critical. The page
+        * lock protects many things but in this context the page lock
+        * stabilizes mapping, prevents inode freeing in the shared
+        * file-backed region case and guards against movement to swap cache.
+        *
+        * Strictly speaking the page lock is not needed in all cases being
+        * considered here and page lock forces unnecessarily serialization
+        * From this point on, mapping will be re-verified if necessary and
+        * page lock will be acquired only if it is unavoidable
+        */
+       page = compound_head(page);
+       mapping = READ_ONCE(page->mapping);
+
        /*
         * If page->mapping is NULL, then it cannot be a PageAnon
         * page; but it might be the ZERO_PAGE or in the gate area or
@@ -536,19 +549,31 @@ again:
         * shmem_writepage move it from filecache to swapcache beneath us:
         * an unlikely race, but we do need to retry for page->mapping.
         */
-       mapping = compound_head(page)->mapping;
-       if (!mapping) {
-               int shmem_swizzled = PageSwapCache(page);
+       if (unlikely(!mapping)) {
+               int shmem_swizzled;
+
+               /*
+                * Page lock is required to identify which special case above
+                * applies. If this is really a shmem page then the page lock
+                * will prevent unexpected transitions.
+                */
+               lock_page(page);
+               shmem_swizzled = PageSwapCache(page) || page->mapping;
                unlock_page(page);
                put_page(page);
+
                if (shmem_swizzled)
                        goto again;
+
                return -EFAULT;
        }
 
        /*
         * Private mappings are handled in a simple way.
         *
+        * If the futex key is stored on an anonymous page, then the associated
+        * object is the mm which is implicitly pinned by the calling process.
+        *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
@@ -566,16 +591,74 @@ again:
                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
+
+               get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
        } else {
+               struct inode *inode;
+
+               /*
+                * The associated futex object in this case is the inode and
+                * the page->mapping must be traversed. Ordinarily this should
+                * be stabilised under page lock but it's not strictly
+                * necessary in this case as we just want to pin the inode, not
+                * update the radix tree or anything like that.
+                *
+                * The RCU read lock is taken as the inode is finally freed
+                * under RCU. If the mapping still matches expectations then the
+                * mapping->host can be safely accessed as being a valid inode.
+                */
+               rcu_read_lock();
+
+               if (READ_ONCE(page->mapping) != mapping) {
+                       rcu_read_unlock();
+                       put_page(page);
+
+                       goto again;
+               }
+
+               inode = READ_ONCE(mapping->host);
+               if (!inode) {
+                       rcu_read_unlock();
+                       put_page(page);
+
+                       goto again;
+               }
+
+               /*
+                * Take a reference unless it is about to be freed. Previously
+                * this reference was taken by ihold under the page lock
+                * pinning the inode in place so i_lock was unnecessary. The
+                * only way for this check to fail is if the inode was
+                * truncated in parallel so warn for now if this happens.
+                *
+                * We are not calling into get_futex_key_refs() in file-backed
+                * cases, therefore a successful atomic_inc return below will
+                * guarantee that get_futex_key() will still imply smp_mb(); (B).
+                */
+               if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+                       rcu_read_unlock();
+                       put_page(page);
+
+                       goto again;
+               }
+
+               /* Should be impossible but lets be paranoid for now */
+               if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
+                       err = -EFAULT;
+                       rcu_read_unlock();
+                       iput(inode);
+
+                       goto out;
+               }
+
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-               key->shared.inode = mapping->host;
+               key->shared.inode = inode;
                key->shared.pgoff = basepage_index(page);
+               rcu_read_unlock();
        }
 
-       get_futex_key_refs(key); /* implies MB (B) */
-
 out:
-       unlock_page(page);
        put_page(page);
        return err;
 }
@@ -1864,7 +1947,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 
        q->lock_ptr = &hb->lock;
 
-       spin_lock(&hb->lock); /* implies MB (A) */
+       spin_lock(&hb->lock); /* implies smp_mb(); (A) */
        return hb;
 }
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 716547fdb8731dcc395d70f2664ec6fb22378db6..3261214323fa1104287c0ab2fdac76cab9c88848 100644
@@ -123,8 +123,6 @@ static inline int debug_locks_off_graph_unlock(void)
        return ret;
 }
 
-static int lockdep_initialized;
-
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
@@ -433,19 +431,6 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
-/*
- * We cannot printk in early bootup code. Not even early_printk()
- * might work. So we mark any initialization errors and printk
- * about it later on, in lockdep_info().
- */
-static int lockdep_init_error;
-static const char *lock_init_error;
-static unsigned long lockdep_init_trace_data[20];
-static struct stack_trace lockdep_init_trace = {
-       .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
-       .entries = lockdep_init_trace_data,
-};
-
 /*
  * Various lockdep statistics:
  */
@@ -669,20 +654,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        struct hlist_head *hash_head;
        struct lock_class *class;
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-       /*
-        * If the architecture calls into lockdep before initializing
-        * the hashes then we'll warn about it later. (we cannot printk
-        * right now)
-        */
-       if (unlikely(!lockdep_initialized)) {
-               lockdep_init();
-               lockdep_init_error = 1;
-               lock_init_error = lock->name;
-               save_stack_trace(&lockdep_init_trace);
-       }
-#endif
-
        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
                debug_locks_off();
                printk(KERN_ERR
@@ -4013,28 +3984,6 @@ out_restore:
        raw_local_irq_restore(flags);
 }
 
-void lockdep_init(void)
-{
-       int i;
-
-       /*
-        * Some architectures have their own start_kernel()
-        * code which calls lockdep_init(), while we also
-        * call lockdep_init() from the start_kernel() itself,
-        * and we want to initialize the hashes only once:
-        */
-       if (lockdep_initialized)
-               return;
-
-       for (i = 0; i < CLASSHASH_SIZE; i++)
-               INIT_HLIST_HEAD(classhash_table + i);
-
-       for (i = 0; i < CHAINHASH_SIZE; i++)
-               INIT_HLIST_HEAD(chainhash_table + i);
-
-       lockdep_initialized = 1;
-}
-
 void __init lockdep_info(void)
 {
        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
@@ -4061,14 +4010,6 @@ void __init lockdep_info(void)
 
        printk(" per task-struct memory footprint: %lu bytes\n",
                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-       if (lockdep_init_error) {
-               printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
-               printk("Call stack leading to lockdep invocation was:\n");
-               print_stack_trace(&lockdep_init_trace, 0);
-       }
-#endif
 }
 
 static void
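
What makes deleting lockdep_init() safe is that the class and chain
hash tables were converted to hlists, and an empty hlist_head is just a
NULL pointer: a static array of them in .bss is fully initialized
before any C code runs, so the INIT_HLIST_HEAD() loops above became
redundant. A standalone sketch of that property follows; the types
mimic the kernel's hlist, and CLASSHASH_SIZE here is only an example
value.

#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define CLASSHASH_SIZE 4096

/* Zero-filled at load time: every bucket already reads as an empty
 * list, with no runtime initialization pass needed. */
static struct hlist_head classhash_table[CLASSHASH_SIZE];

int main(void)
{
	printf("bucket 0 empty: %d\n", classhash_table[0].first == NULL);
	return 0;
}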
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
index c61b299e367ffac86450ac59c123d0925285b96c..915d75df20864d07428376750c1bd148c216a585 100644
@@ -46,8 +46,11 @@ struct test_key {
        bool                    (*test_key)(void);
 };
 
-#define test_key_func(key, branch) \
-       ({bool func(void) { return branch(key); } func; })
+#define test_key_func(key, branch)     \
+static bool key ## _ ## branch(void)   \
+{                                      \
+       return branch(&key);            \
+}
 
 static void invert_key(struct static_key *key)
 {
@@ -92,6 +95,25 @@ static int verify_keys(struct test_key *keys, int size, bool invert)
        return 0;
 }
 
+test_key_func(old_true_key, static_key_true)
+test_key_func(old_false_key, static_key_false)
+test_key_func(true_key, static_branch_likely)
+test_key_func(true_key, static_branch_unlikely)
+test_key_func(false_key, static_branch_likely)
+test_key_func(false_key, static_branch_unlikely)
+test_key_func(base_old_true_key, static_key_true)
+test_key_func(base_inv_old_true_key, static_key_true)
+test_key_func(base_old_false_key, static_key_false)
+test_key_func(base_inv_old_false_key, static_key_false)
+test_key_func(base_true_key, static_branch_likely)
+test_key_func(base_true_key, static_branch_unlikely)
+test_key_func(base_inv_true_key, static_branch_likely)
+test_key_func(base_inv_true_key, static_branch_unlikely)
+test_key_func(base_false_key, static_branch_likely)
+test_key_func(base_false_key, static_branch_unlikely)
+test_key_func(base_inv_false_key, static_branch_likely)
+test_key_func(base_inv_false_key, static_branch_unlikely)
+
 static int __init test_static_key_init(void)
 {
        int ret;
@@ -102,95 +124,95 @@ static int __init test_static_key_init(void)
                {
                        .init_state     = true,
                        .key            = &old_true_key,
-                       .test_key       = test_key_func(&old_true_key, static_key_true),
+                       .test_key       = &old_true_key_static_key_true,
                },
                {
                        .init_state     = false,
                        .key            = &old_false_key,
-                       .test_key       = test_key_func(&old_false_key, static_key_false),
+                       .test_key       = &old_false_key_static_key_false,
                },
                /* internal keys - new keys */
                {
                        .init_state     = true,
                        .key            = &true_key.key,
-                       .test_key       = test_key_func(&true_key, static_branch_likely),
+                       .test_key       = &true_key_static_branch_likely,
                },
                {
                        .init_state     = true,
                        .key            = &true_key.key,
-                       .test_key       = test_key_func(&true_key, static_branch_unlikely),
+                       .test_key       = &true_key_static_branch_unlikely,
                },
                {
                        .init_state     = false,
                        .key            = &false_key.key,
-                       .test_key       = test_key_func(&false_key, static_branch_likely),
+                       .test_key       = &false_key_static_branch_likely,
                },
                {
                        .init_state     = false,
                        .key            = &false_key.key,
-                       .test_key       = test_key_func(&false_key, static_branch_unlikely),
+                       .test_key       = &false_key_static_branch_unlikely,
                },
                /* external keys - old keys */
                {
                        .init_state     = true,
                        .key            = &base_old_true_key,
-                       .test_key       = test_key_func(&base_old_true_key, static_key_true),
+                       .test_key       = &base_old_true_key_static_key_true,
                },
                {
                        .init_state     = false,
                        .key            = &base_inv_old_true_key,
-                       .test_key       = test_key_func(&base_inv_old_true_key, static_key_true),
+                       .test_key       = &base_inv_old_true_key_static_key_true,
                },
                {
                        .init_state     = false,
                        .key            = &base_old_false_key,
-                       .test_key       = test_key_func(&base_old_false_key, static_key_false),
+                       .test_key       = &base_old_false_key_static_key_false,
                },
                {
                        .init_state     = true,
                        .key            = &base_inv_old_false_key,
-                       .test_key       = test_key_func(&base_inv_old_false_key, static_key_false),
+                       .test_key       = &base_inv_old_false_key_static_key_false,
                },
                /* external keys - new keys */
                {
                        .init_state     = true,
                        .key            = &base_true_key.key,
-                       .test_key       = test_key_func(&base_true_key, static_branch_likely),
+                       .test_key       = &base_true_key_static_branch_likely,
                },
                {
                        .init_state     = true,
                        .key            = &base_true_key.key,
-                       .test_key       = test_key_func(&base_true_key, static_branch_unlikely),
+                       .test_key       = &base_true_key_static_branch_unlikely,
                },
                {
                        .init_state     = false,
                        .key            = &base_inv_true_key.key,
-                       .test_key       = test_key_func(&base_inv_true_key, static_branch_likely),
+                       .test_key       = &base_inv_true_key_static_branch_likely,
                },
                {
                        .init_state     = false,
                        .key            = &base_inv_true_key.key,
-                       .test_key       = test_key_func(&base_inv_true_key, static_branch_unlikely),
+                       .test_key       = &base_inv_true_key_static_branch_unlikely,
                },
                {
                        .init_state     = false,
                        .key            = &base_false_key.key,
-                       .test_key       = test_key_func(&base_false_key, static_branch_likely),
+                       .test_key       = &base_false_key_static_branch_likely,
                },
                {
                        .init_state     = false,
                        .key            = &base_false_key.key,
-                       .test_key       = test_key_func(&base_false_key, static_branch_unlikely),
+                       .test_key       = &base_false_key_static_branch_unlikely,
                },
                {
                        .init_state     = true,
                        .key            = &base_inv_false_key.key,
-                       .test_key       = test_key_func(&base_inv_false_key, static_branch_likely),
+                       .test_key       = &base_inv_false_key_static_branch_likely,
                },
                {
                        .init_state     = true,
                        .key            = &base_inv_false_key.key,
-                       .test_key       = test_key_func(&base_inv_false_key, static_branch_unlikely),
+                       .test_key       = &base_inv_false_key_static_branch_unlikely,
                },
        };
 
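
The macro rework above replaces a GCC nested function inside a
statement expression with ordinary top-level functions whose names are
pasted together from the key and the branch primitive, which is why the
table entries now reference symbols like old_true_key_static_key_true.
A tiny standalone analogue of the token-pasting pattern is sketched
below; DEFINE_TEST_FUNC and read_plain are illustrative, not the
kernel's macros.

#include <stdbool.h>
#include <stdio.h>

static bool my_key;

#define DEFINE_TEST_FUNC(key, op)	\
static bool key ## _ ## op(void)	\
{					\
	return op(&key);		\
}

#define read_plain(k) (*(k))

DEFINE_TEST_FUNC(my_key, read_plain)	/* defines my_key_read_plain() */

int main(void)
{
	my_key = true;
	printf("%d\n", my_key_read_plain());
	return 0;
}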
diff --git a/tools/lib/lockdep/common.c b/tools/lib/lockdep/common.c
index 9be663340f0a4c48e9ec51c996b13c3a8b596fdc..d1c89cc06f5f6710471c42dc5b2a475a54ee7658 100644
@@ -11,11 +11,6 @@ static __thread struct task_struct current_obj;
 bool debug_locks = true;
 bool debug_locks_silent;
 
-__attribute__((constructor)) static void liblockdep_init(void)
-{
-       lockdep_init();
-}
-
 __attribute__((destructor)) static void liblockdep_exit(void)
 {
        debug_check_no_locks_held();
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
index a60c14b9662aefd6f17b47dec534bce2f53d9f03..6e66277ec4375c84693b25756fdf8336faf79fcf 100644
@@ -44,7 +44,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 void lock_release(struct lockdep_map *lock, int nested,
                        unsigned long ip);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void lockdep_init(void);
 
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 21cdf869a01b8b7facc084c4dc66cf73afa163bb..52844847569c99b878c031da8f25a13d1c0b5838 100644
@@ -439,7 +439,5 @@ __attribute__((constructor)) static void init_preload(void)
        ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
 #endif
 
-       lockdep_init();
-
        __init_state = done;
 }