From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: James Cowgill <jcowgill@users.noreply.github.com>
Date: Mon, 30 Oct 2017 18:16:56 +0000
Subject: [PATCH] Remove all spin_is_locked calls
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On systems with CONFIG_SMP turned off, spin_is_locked always returns
false, causing these assertions to fail. Remove them as suggested in
zfsonlinux/zfs#6558.

Reviewed-by: George Melikov <mail@gmelikov.ru>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: James Cowgill <james.cowgill@mips.com>
Closes #665
(cherry picked from commit 9e573b7f9a5cf3e1cb6bb1b66bc35d5bb93cfaa9)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 module/spl/spl-kmem-cache.c |  4 ----
 module/spl/spl-taskq.c      | 13 -------------
 module/spl/spl-tsd.c        |  1 -
 module/spl/spl-vnode.c      |  2 --
 4 files changed, 20 deletions(-)

diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c
index 45576b9..3668669 100644
--- a/module/spl/spl-kmem-cache.c
+++ b/module/spl/spl-kmem-cache.c
@@ -382,7 +382,6 @@ spl_slab_free(spl_kmem_slab_t *sks,

skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(spin_is_locked(&skc->skc_lock));

/*
* Update slab/objects counters in the cache, then remove the
@@ -583,7 +582,6 @@ __spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)

ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
- ASSERT(spin_is_locked(&skc->skc_lock));

for (i = 0; i < count; i++)
spl_cache_shrink(skc, skm->skm_objs[i]);
@@ -1125,7 +1123,6 @@ spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)

ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(sks->sks_magic == SKS_MAGIC);
- ASSERT(spin_is_locked(&skc->skc_lock));

sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
ASSERT(sko->sko_magic == SKO_MAGIC);
@@ -1396,7 +1393,6 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
spl_kmem_obj_t *sko = NULL;

ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(spin_is_locked(&skc->skc_lock));

sko = spl_sko_from_obj(skc, obj);
ASSERT(sko->sko_magic == SKO_MAGIC);
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 89c53a5..ae26bdb 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -103,7 +103,6 @@ task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
int count = 0;

ASSERT(tq);
- ASSERT(spin_is_locked(&tq->tq_lock));
retry:
/* Acquire taskq_ent_t's from free list if available */
if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
@@ -168,7 +167,6 @@ task_free(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
- ASSERT(spin_is_locked(&tq->tq_lock));
ASSERT(list_empty(&t->tqent_list));
ASSERT(!timer_pending(&t->tqent_timer));

@@ -185,7 +183,6 @@ task_done(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
- ASSERT(spin_is_locked(&tq->tq_lock));

/* Wake tasks blocked in taskq_wait_id() */
wake_up_all(&t->tqent_waitq);
@@ -274,7 +271,6 @@ taskq_lowest_id(taskq_t *tq)
taskq_thread_t *tqt;

ASSERT(tq);
- ASSERT(spin_is_locked(&tq->tq_lock));

if (!list_empty(&tq->tq_pend_list)) {
t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
@@ -312,7 +308,6 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)

ASSERT(tq);
ASSERT(tqt);
- ASSERT(spin_is_locked(&tq->tq_lock));

list_for_each_prev(l, &tq->tq_active_list) {
w = list_entry(l, taskq_thread_t, tqt_active_list);
@@ -335,8 +330,6 @@ taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
struct list_head *l;
taskq_ent_t *t;

- ASSERT(spin_is_locked(&tq->tq_lock));
-
list_for_each(l, lh) {
t = list_entry(l, taskq_ent_t, tqent_list);

@@ -363,8 +356,6 @@ taskq_find(taskq_t *tq, taskqid_t id)
struct list_head *l;
taskq_ent_t *t;

- ASSERT(spin_is_locked(&tq->tq_lock));
-
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
return (t);
@@ -774,8 +765,6 @@ taskq_next_ent(taskq_t *tq)
{
struct list_head *list;

- ASSERT(spin_is_locked(&tq->tq_lock));
-
if (!list_empty(&tq->tq_prio_list))
list = &tq->tq_prio_list;
else if (!list_empty(&tq->tq_pend_list))
@@ -840,8 +829,6 @@ taskq_thread_spawn(taskq_t *tq)
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
- ASSERT(spin_is_locked(&tq->tq_lock));
-
if (!(tq->tq_flags & TASKQ_DYNAMIC))
return (0);

diff --git a/module/spl/spl-tsd.c b/module/spl/spl-tsd.c
index bf82350..4c80029 100644
--- a/module/spl/spl-tsd.c
+++ b/module/spl/spl-tsd.c
@@ -315,7 +315,6 @@ tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid)
static void
tsd_hash_del(tsd_hash_table_t *table, tsd_hash_entry_t *entry)
{
- ASSERT(spin_is_locked(&table->ht_lock));
hlist_del(&entry->he_list);
list_del_init(&entry->he_key_list);
list_del_init(&entry->he_pid_list);
diff --git a/module/spl/spl-vnode.c b/module/spl/spl-vnode.c
index 77bfb45..a804e10 100644
--- a/module/spl/spl-vnode.c
+++ b/module/spl/spl-vnode.c
@@ -414,8 +414,6 @@ file_find(int fd, struct task_struct *task)
{
file_t *fp;

- ASSERT(spin_is_locked(&vn_file_lock));
-
list_for_each_entry(fp, &vn_file_list, f_list) {
if (fd == fp->f_fd && fp->f_task == task) {
ASSERT(atomic_read(&fp->f_ref) != 0);
--
2.14.2
