# define rwsem_release(l, n, i) do { } while (0)
#endif
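+/*
+ * map_acquire()/map_release() mark a lockdep_map as acquired and then
+ * released, without taking any real lock, so that lockdep records
+ * dependencies against it (full validation when PROVE_LOCKING is set,
+ * basic checks otherwise).
+ */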
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, _THIS_IP_)
+# else
+#  define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, _THIS_IP_)
+# endif
+# define map_release(l) lock_release(l, 1, _THIS_IP_)
+#else
+# define map_acquire(l) do { } while (0)
+# define map_release(l) do { } while (0)
+#endif
+
#endif /* __LINUX_LOCKDEP_H */
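For illustration only, and not part of the patch: the workqueue.c hunks below all apply the same annotation pattern. Assuming the workqueue's struct lockdep_map has been initialised elsewhere (e.g. via lockdep_init_map()), a path that waits for queued work issues an empty acquire/release pair on the map, while run_workqueue() brackets the callback with a matching pair. That lets lockdep connect "waits for the workqueue" with "runs inside the workqueue" and flag deadlocks such as flushing a workqueue while holding a lock that one of its work functions also takes. A minimal sketch of the waiting side:

	map_acquire(&wq->lockdep_map);	/* this context waits on wq's work */
	map_release(&wq->lockdep_map);
	/* ... then actually wait for the queued work items to finish ... */
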
BUG_ON(get_wq_data(work) != cwq);
work_clear_pending(work);
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ map_acquire(&cwq->wq->lockdep_map);
+ map_acquire(&lockdep_map);
f(work);
- lock_release(&lockdep_map, 1, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ map_release(&lockdep_map);
+ map_release(&cwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
int cpu;
might_sleep();
- lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+ map_acquire(&wq->lockdep_map);
+ map_release(&wq->lockdep_map);
for_each_cpu_mask_nr(cpu, *cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
if (!cwq)
return 0;
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ map_acquire(&cwq->wq->lockdep_map);
+ map_release(&cwq->wq->lockdep_map);
prev = NULL;
spin_lock_irq(&cwq->lock);
might_sleep();
- lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&work->lockdep_map, 1, _THIS_IP_);
+ map_acquire(&work->lockdep_map);
+ map_release(&work->lockdep_map);
cwq = get_wq_data(work);
if (!cwq)
if (cwq->thread == NULL)
return;
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ map_acquire(&cwq->wq->lockdep_map);
+ map_release(&cwq->wq->lockdep_map);
flush_cpu_workqueue(cwq);
/*