git.proxmox.com Git - mirror_spl.git/commitdiff
Add rwsem_tryupgrade for 4.9.20-rt16 kernel (SPL)
author: Tony Hutter <hutter2@llnl.gov>
Wed, 15 Aug 2018 18:58:54 +0000 (11:58 -0700)
committer: Tony Hutter <hutter2@llnl.gov>
Fri, 6 Jul 2018 09:45:01 +0000 (02:45 -0700)
(This is the SPL backported code from 11d0525cb)

The RT rwsem implementation was changed to allow multiple readers
as of the 4.9.20-rt16 patch set.  This results in a build failure
because the existing implementation was forced to directly access
the rwsem structure which has changed.

While this could be accommodated by adding additional compatibility
code, this patch resolves the build issue by simply assuming the
rwsem can never be upgraded.  This functionality is a performance
optimization and all callers must already handle this case.

Converting the last remaining use of __SPIN_LOCK_UNLOCKED to
spin_lock_init() was additionally required to get a clean build.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #7589

include/sys/isa_defs.h
include/sys/rwlock.h
module/spl/spl-rwlock.c
module/spl/spl-vnode.c

index 55597828054b010234d5a198818548ee1794ff24..13dcb357c29c654cbd98d483e4fd6b680a15db1b 100644 (file)
 
 #include <sys/byteorder.h>
 
+/*
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS will be defined by the Linux
+ * kernel for architectures which support efficient unaligned access.
+ */
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#define        HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+
 #if defined(__LITTLE_ENDIAN) && !defined(_LITTLE_ENDIAN)
 #define        _LITTLE_ENDIAN __LITTLE_ENDIAN
 #endif
index 325dfc499c98c75a728fb8767c1b3818c9441a49..2699229684addd6ba8e5321350a178f744a62ce1 100644 (file)
@@ -135,7 +135,7 @@ RW_LOCK_HELD(krwlock_t *rwp)
 }
 
 /*
- * The following functions must be a #define   and not static inline.
+ * The following functions must be a #define and not static inline.
  * This ensures that the native linux semaphore functions (down/up)
  * will be correctly located in the users code which is important
  * for the built in kernel lock analysis tools
@@ -151,10 +151,10 @@ RW_LOCK_HELD(krwlock_t *rwp)
        spl_rw_set_type(rwp, type);                                     \
 })
 
-#define        rw_destroy(rwp)                                                 \
-({                                                                     \
-       VERIFY(!RW_LOCK_HELD(rwp));                                     \
-})
+/*
+ * The Linux rwsem implementation does not require a matching destroy.
+ */
+#define        rw_destroy(rwp)         ((void) 0)
 
 #define        rw_tryenter(rwp, rw)                                            \
 ({                                                                     \
index bf7ee2f837f12d935ff9a6f913d2a025569aea68..ac28c91899b89975d9d56bfb17517761f2937b96 100644 (file)
 static int
 __rwsem_tryupgrade(struct rw_semaphore *rwsem)
 {
-
+#if defined(READER_BIAS) && defined(WRITER_BIAS)
+       /*
+        * After the 4.9.20-rt16 kernel the realtime patch series lifted the
+        * single reader restriction.  While this could be accommodated by
+        * adding additional compatibility code, assume the rwsem can never
+        * be upgraded.  All callers must already cleanly handle this case.
+        */
+       return (0);
+#else
        ASSERT((struct task_struct *)
            ((unsigned long)rwsem->lock.owner & ~RT_MUTEX_OWNER_MASKALL) ==
            current);
 
        /*
-        * Under the realtime patch series, rwsem is implemented as a
-        * single mutex held by readers and writers alike. However,
-        * this implementation would prevent a thread from taking a
-        * read lock twice, as the mutex would already be locked on
+        * Prior to the 4.9.20-rt16 kernel, the realtime patch series
+        * implemented rwsem as a single mutex held by readers and writers
+        * alike.  However, this implementation would prevent a thread from
+        * taking a read lock twice, as the mutex would already be locked on
         * the second attempt. Therefore the implementation allows a
         * single thread to take a rwsem as read lock multiple times
         * tracking that nesting as read_depth counter.
@@ -59,6 +67,7 @@ __rwsem_tryupgrade(struct rw_semaphore *rwsem)
                return (1);
        }
        return (0);
+#endif
 }
 #elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
 static int
index 74ae8fe06f5c44fd8ce90df0332955df212cafb2..cd0015f6b857edcf314dbb06594de4d2734b5fd1 100644 (file)
@@ -670,6 +670,8 @@ vn_file_cache_destructor(void *buf, void *cdrarg)
 int
 spl_vn_init(void)
 {
+       spin_lock_init(&vn_file_lock);
+
        vn_cache = kmem_cache_create("spl_vn_cache",
            sizeof (struct vnode), 64, vn_cache_constructor,
            vn_cache_destructor, NULL, NULL, NULL, 0);