From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Tony Hutter <hutter2@llnl.gov>
Date: Wed, 15 Aug 2018 11:58:54 -0700
Subject: [PATCH] Add rwsem_tryupgrade for 4.9.20-rt16 kernel (SPL)

(This is the SPL backported code from 11d0525cb)

The RT rwsem implementation was changed to allow multiple readers
as of the 4.9.20-rt16 patch set. This results in a build failure
because the existing implementation was forced to directly access
the rwsem structure, which has changed.

While this could be accommodated by adding additional compatibility
code, this patch resolves the build issue by simply assuming the
rwsem can never be upgraded. This functionality is a performance
optimization, and all callers must already handle this case.
17
18Converting the last remaining use of __SPIN_LOCK_UNLOCKED to
19spin_lock_init() was additionally required to get a clean build.
20
21Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
22Closes #7589
23
24Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
25---
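Note (illustrative, not part of the applied diff): with __rwsem_tryupgrade()
forced to report failure on post-4.9.20-rt16 realtime kernels, callers are
expected to fall back to re-acquiring the lock as a writer. A minimal sketch
of that pattern against the SPL rwlock API, using a hypothetical
example_lock, looks roughly like:

	static krwlock_t example_lock;	/* hypothetical; rw_init() done elsewhere */

	static void
	example_upgrade_path(void)
	{
		rw_enter(&example_lock, RW_READER);
		if (!rw_tryupgrade(&example_lock)) {
			/*
			 * Upgrade refused (always the case on rt16+ RT
			 * kernels): drop the read lock, take the write lock,
			 * and revalidate any state observed under the read
			 * lock before relying on it.
			 */
			rw_exit(&example_lock);
			rw_enter(&example_lock, RW_WRITER);
		}
		/* ... update shared state under the write lock ... */
		rw_exit(&example_lock);
	}
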
 include/sys/isa_defs.h  |  8 ++++++++
 include/sys/rwlock.h    | 10 +++++-----
 module/spl/spl-rwlock.c | 19 ++++++++++++++-----
 module/spl/spl-vnode.c  |  2 ++
 4 files changed, 29 insertions(+), 10 deletions(-)

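Note (illustrative, not part of the applied diff): the __SPIN_LOCK_UNLOCKED
conversion mentioned in the commit message boils down to replacing a static
initializer with a runtime spin_lock_init() call; the "before" line below is
an assumption for illustration, only the added call appears in the
spl-vnode.c hunk:

	/* before: initialized statically at its definition */
	static spinlock_t vn_file_lock = __SPIN_LOCK_UNLOCKED(vn_file_lock);

	/* after: plain definition ... */
	static spinlock_t vn_file_lock;

	/* ... initialized at runtime during module setup, as spl_vn_init() now does */
	spin_lock_init(&vn_file_lock);
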
diff --git a/include/sys/isa_defs.h b/include/sys/isa_defs.h
index 5559782..13dcb35 100644
--- a/include/sys/isa_defs.h
+++ b/include/sys/isa_defs.h
@@ -210,6 +210,14 @@

 #include <sys/byteorder.h>

+/*
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS will be defined by the Linux
+ * kernel for architectures which support efficient unaligned access.
+ */
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#define HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+
 #if defined(__LITTLE_ENDIAN) && !defined(_LITTLE_ENDIAN)
 #define _LITTLE_ENDIAN __LITTLE_ENDIAN
 #endif
diff --git a/include/sys/rwlock.h b/include/sys/rwlock.h
index 325dfc4..2699229 100644
--- a/include/sys/rwlock.h
+++ b/include/sys/rwlock.h
@@ -135,7 +135,7 @@ RW_LOCK_HELD(krwlock_t *rwp)
 }

 /*
- * The following functions must be a #define and not static inline.
+ * The following functions must be a #define and not static inline.
 * This ensures that the native linux semaphore functions (down/up)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools
@@ -151,10 +151,10 @@ RW_LOCK_HELD(krwlock_t *rwp)
 spl_rw_set_type(rwp, type); \
 })

-#define rw_destroy(rwp) \
-({ \
- VERIFY(!RW_LOCK_HELD(rwp)); \
-})
+/*
+ * The Linux rwsem implementation does not require a matching destroy.
+ */
+#define rw_destroy(rwp) ((void) 0)

 #define rw_tryenter(rwp, rw) \
 ({ \
diff --git a/module/spl/spl-rwlock.c b/module/spl/spl-rwlock.c
index bf7ee2f..ac28c91 100644
--- a/module/spl/spl-rwlock.c
+++ b/module/spl/spl-rwlock.c
@@ -34,16 +34,24 @@
 static int
 __rwsem_tryupgrade(struct rw_semaphore *rwsem)
 {
-
+#if defined(READER_BIAS) && defined(WRITER_BIAS)
+ /*
+ * After the 4.9.20-rt16 kernel, the realtime patch series lifted the
+ * single reader restriction. While this could be accommodated by
+ * adding additional compatibility code, assume the rwsem can never
+ * be upgraded. All callers must already cleanly handle this case.
+ */
+ return (0);
+#else
 ASSERT((struct task_struct *)
 ((unsigned long)rwsem->lock.owner & ~RT_MUTEX_OWNER_MASKALL) ==
 current);

 /*
- * Under the realtime patch series, rwsem is implemented as a
- * single mutex held by readers and writers alike. However,
- * this implementation would prevent a thread from taking a
- * read lock twice, as the mutex would already be locked on
+ * Prior to the 4.9.20-rt16 realtime patch series, rwsem is
+ * implemented as a single mutex held by readers and writers alike.
+ * However, this implementation would prevent a thread from taking
+ * a read lock twice, as the mutex would already be locked on
 * the second attempt. Therefore the implementation allows a
 * single thread to take a rwsem as read lock multiple times
 * tracking that nesting as read_depth counter.
@@ -59,6 +67,7 @@ __rwsem_tryupgrade(struct rw_semaphore *rwsem)
 return (1);
 }
 return (0);
+#endif
 }
 #elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
 static int
diff --git a/module/spl/spl-vnode.c b/module/spl/spl-vnode.c
index 74ae8fe..cd0015f 100644
--- a/module/spl/spl-vnode.c
+++ b/module/spl/spl-vnode.c
@@ -670,6 +670,8 @@ vn_file_cache_destructor(void *buf, void *cdrarg)
 int
 spl_vn_init(void)
 {
+ spin_lock_init(&vn_file_lock);
+
 vn_cache = kmem_cache_create("spl_vn_cache",
 sizeof (struct vnode), 64, vn_cache_constructor,
 vn_cache_destructor, NULL, NULL, NULL, 0);
--
2.11.0
