Merge tag 'v4.6-rc6' into locking/core, to pick up fixes
author    Ingo Molnar <mingo@kernel.org>
          Thu, 5 May 2016 07:57:27 +0000 (09:57 +0200)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 5 May 2016 07:57:27 +0000 (09:57 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/lockdep.c

diff --combined kernel/locking/lockdep.c
index 7cc43ef856c1f86d302c5b86f136125587e70547,78c1c0ee6dc1256904e1afb90611818813fc031c..874d53eaf389e3034d9f345903fc86746ebee295
@@@ -708,7 -708,7 +708,7 @@@ look_up_lock_class(struct lockdep_map *
   * yet. Otherwise we look it up. We cache the result in the lock object
   * itself, so actual lookup of the hash should be once per lock object.
   */
 -static inline struct lock_class *
 +static struct lock_class *
  register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
  {
        struct lockdep_subclass_key *key;
@@@ -2176,15 -2176,37 +2176,37 @@@ cache_hit
        chain->irq_context = hlock->irq_context;
        i = get_first_held_lock(curr, hlock);
        chain->depth = curr->lockdep_depth + 1 - i;
+       BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
+       BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
+       BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
        if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
                chain->base = nr_chain_hlocks;
-               nr_chain_hlocks += chain->depth;
                for (j = 0; j < chain->depth - 1; j++, i++) {
                        int lock_id = curr->held_locks[i].class_idx - 1;
                        chain_hlocks[chain->base + j] = lock_id;
                }
                chain_hlocks[chain->base + j] = class - lock_classes;
        }
+       if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
+               nr_chain_hlocks += chain->depth;
+ #ifdef CONFIG_DEBUG_LOCKDEP
+       /*
+        * Important for check_no_collision().
+        */
+       if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+               if (debug_locks_off_graph_unlock())
+                       return 0;
+               print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+               dump_stack();
+               return 0;
+       }
+ #endif
        hlist_add_head_rcu(&chain->entry, hash_head);
        debug_atomic_inc(chain_lookup_misses);
        inc_chains();
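
The hunk above does two things: it moves the nr_chain_hlocks increment out of the table-full check so an overflow of chain_hlocks[] is still accounted for (and, under CONFIG_DEBUG_LOCKDEP, reported once via print_lockdep_off()), and it adds compile-time assertions tying the table sizes to the field widths that index them. As a rough illustration of the BUILD_BUG_ON pattern used here, the following is a minimal userspace sketch; chain_entry_t, MAX_CLASSES and the array sizes are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Classic compile-time assertion: negative array size if cond is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

typedef unsigned short chain_entry_t;   /* stand-in for chain_hlocks[] entries */

#define MAX_CLASSES       8192          /* stand-in for the lock_classes bound */
#define MAX_CHAIN_HLOCKS  (1 << 15)     /* stand-in table size */

static chain_entry_t chain_hlocks[MAX_CHAIN_HLOCKS];
static int lock_classes[MAX_CLASSES];

int main(void)
{
	/*
	 * Mirrors the third BUILD_BUG_ON above: a class index must fit in
	 * one chain_hlocks[] entry, i.e. 2^(8 * entry size) must exceed
	 * the number of lock classes, or the build fails.
	 */
	BUILD_BUG_ON((1UL << 8 * sizeof(chain_hlocks[0]))
		     <= sizeof(lock_classes) / sizeof(lock_classes[0]));

	printf("class indices fit in %zu-bit chain entries\n",
	       8 * sizeof(chain_hlocks[0]));
	return 0;
}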
@@@ -2932,6 -2954,11 +2954,11 @@@ static int mark_irqflags(struct task_st
        return 1;
  }
  
+ static inline unsigned int task_irq_context(struct task_struct *task)
+ {
+       return 2 * !!task->hardirq_context + !!task->softirq_context;
+ }
  static int separate_irq_context(struct task_struct *curr,
                struct held_lock *hlock)
  {
        /*
         * Keep track of points where we cross into an interrupt context:
         */
-       hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
-                               curr->softirq_context;
        if (depth) {
                struct held_lock *prev_hlock;
  
@@@ -2973,6 -2998,11 +2998,11 @@@ static inline int mark_irqflags(struct 
        return 1;
  }
  
+ static inline unsigned int task_irq_context(struct task_struct *task)
+ {
+       return 0;
+ }
  static inline int separate_irq_context(struct task_struct *curr,
                struct held_lock *hlock)
  {
@@@ -3241,6 -3271,7 +3271,7 @@@ static int __lock_acquire(struct lockde
        hlock->acquire_ip = ip;
        hlock->instance = lock;
        hlock->nest_lock = nest_lock;
+       hlock->irq_context = task_irq_context(curr);
        hlock->trylock = trylock;
        hlock->read = read;
        hlock->check = check;
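
With the helper in place, __lock_acquire() snapshots the context once per acquisition and stores it in hlock->irq_context, alongside the other held_lock fields; when lockdep's irq-state tracking is compiled out, the stub variant above always returns 0, so every acquisition is treated as occurring in a single context.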