staging: lustre: libcfs: add lock-class for cfs_percpt_lock
author     Liang Zhen <liang.zhen@intel.com>
           Mon, 28 Mar 2016 00:26:26 +0000 (20:26 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 29 Mar 2016 19:10:20 +0000 (12:10 -0700)
initialise lock-class for each sublock of cfs_percpt_lock
to eliminate false alarm "possible recursive locking detected"

Signed-off-by: Liang Zhen <liang.zhen@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6432
Reviewed-on: http://review.whamcloud.com/14368
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
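
For context on the change below: lockdep assigns every spinlock initialised from the same spin_lock_init() call site to a single lock class, so nesting two sublocks of one cfs_percpt_lock (as happens when the whole per-CPU-partition lock is taken) is reported as "possible recursive locking detected" even though the locks are distinct. The following is a minimal illustrative sketch, not part of this commit, of how a static lock_class_key per lock plus lockdep_set_class() separates the classes; all names and the array size are made up for the example.

/*
 * Illustrative only: two spinlocks initialised at the same call site
 * share one lockdep class; nesting them trips the recursive-locking
 * check.  A distinct static lock_class_key per lock, as this patch
 * arranges for cfs_percpt_lock sublocks, puts each lock in its own class.
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define EXAMPLE_NLOCKS	2

static spinlock_t example_locks[EXAMPLE_NLOCKS];
/* keys must live in static storage; lockdep keeps pointers to them */
static struct lock_class_key example_keys[EXAMPLE_NLOCKS];

static void example_init_locks(void)
{
	int i;

	for (i = 0; i < EXAMPLE_NLOCKS; i++) {
		spin_lock_init(&example_locks[i]);
		/* without this, both locks end up in the same class */
		lockdep_set_class(&example_locks[i], &example_keys[i]);
	}
}

static void example_lock_all(void)
{
	int i;

	/* nested acquisition; distinct classes keep lockdep quiet */
	for (i = 0; i < EXAMPLE_NLOCKS; i++)
		spin_lock(&example_locks[i]);
	for (i = EXAMPLE_NLOCKS - 1; i >= 0; i--)
		spin_unlock(&example_locks[i]);
}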
drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
drivers/staging/lustre/lnet/libcfs/libcfs_lock.c

diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 71ad93b2c6af41aa7cad1fbbe3b4819d2dec2e1c..81d8079e3b5ea8c6e09502d6ddf8e6508b524027 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -256,8 +256,8 @@ struct cfs_percpt_lock {
  * create a cpu-partition lock based on CPU partition table \a cptab,
  * each private lock has extra \a psize bytes padding data
  */
-struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
-
+struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+                                              struct lock_class_key *keys);
 /* destroy a cpu-partition lock */
 void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
 
@@ -267,6 +267,21 @@ void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
 /* unlock private lock \a index of \a pcl */
 void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
 
+#define CFS_PERCPT_LOCK_KEYS   256
+
+/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
+#define cfs_percpt_lock_alloc(cptab)                                   \
+({                                                                     \
+       static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS];     \
+       struct cfs_percpt_lock *___lk;                                  \
+                                                                       \
+       if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS)               \
+               ___lk = cfs_percpt_lock_create(cptab, NULL);            \
+       else                                                            \
+               ___lk = cfs_percpt_lock_create(cptab, ___keys);         \
+       ___lk;                                                          \
+})
+
 /**
  * iterate over all CPU partitions in \a cptab
  */
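
A hedged usage sketch of the new interface (the caller below is hypothetical and not from the tree, and assumes libcfs_cpu.h is included): because cfs_percpt_lock_alloc() is now a macro, each call site expands its own static lock_class_key array and hands it to cfs_percpt_lock_create(), so percpt locks created at different call sites get distinct lockdep classes.

static struct cfs_percpt_lock *example_pcl;	/* hypothetical caller */

static int example_setup(struct cfs_cpt_table *cptab)
{
	/* expands a static key array private to this call site */
	example_pcl = cfs_percpt_lock_alloc(cptab);
	if (!example_pcl)
		return -ENOMEM;

	/* lock/unlock the private lock of CPU partition 0 */
	cfs_percpt_lock(example_pcl, 0);
	cfs_percpt_unlock(example_pcl, 0);
	return 0;
}

static void example_teardown(void)
{
	cfs_percpt_lock_free(example_pcl);
}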
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index d38954a38f9e6a7c5c4a874c5639558a2608125b..83543f928279823d5db029956e05f031edd8f063 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free);
  * reason we always allocate cacheline-aligned memory block.
  */
 struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+                      struct lock_class_key *keys)
 {
        struct cfs_percpt_lock  *pcl;
        spinlock_t              *lock;
@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
                return NULL;
        }
 
-       cfs_percpt_for_each(lock, i, pcl->pcl_locks)
+       if (!keys)
+               CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n");
+
+       cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
                spin_lock_init(lock);
+               if (keys != NULL)
+                       lockdep_set_class(lock, &keys[i]);
+       }
 
        return pcl;
 }
-EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_create);
 
 /**
  * lock a CPU partition