From 9d02a406fe5f64f282832e7d0ab8fcd2631fc15a Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Tue, 24 Oct 2017 11:22:48 +0100
Subject: [PATCH 126/241] locking/barriers: Convert users of
 lockless_dereference() to READ_ONCE()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

[ Note, this is a Git cherry-pick of the following commit:

    506458efaf15 ("locking/barriers: Convert users of lockless_dereference() to READ_ONCE()")

  ... for easier x86 PTI code testing and back-porting. ]

READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.

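Editorial note on the pattern being converted: a writer publishes a fully
initialised object by storing its pointer with smp_store_release(), and a
reader picks the pointer up with a dependency-ordered load (previously
lockless_dereference(), now plain READ_ONCE()), after which dereferencing
the loaded pointer is guaranteed to observe the writer's initialisation.
Below is a minimal user-space sketch of the same idea using C11 atomics
rather than the kernel primitives; struct cfg, shared_cfg, publish() and
consume() are made-up names for illustration, not taken from this patch.

/*
 * Illustrative sketch only: a user-space analogue of the kernel's
 * smp_store_release() / READ_ONCE() publication pattern, modelled
 * with C11 atomics.  All names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cfg {
	int value;
};

static _Atomic(struct cfg *) shared_cfg;

/* Writer: fully initialise the object, then publish the pointer with
 * release semantics (the kernel side uses smp_store_release()). */
static void publish(int value)
{
	struct cfg *c = malloc(sizeof(*c));

	c->value = value;
	atomic_store_explicit(&shared_cfg, c, memory_order_release);
}

/* Reader: a dependency-ordered (consume) load of the pointer suffices,
 * because the later dereference depends on the loaded address.  This is
 * what lockless_dereference() provided; in the kernel READ_ONCE() now
 * gives the same ordering, hence the conversion in this patch. */
static int consume(void)
{
	struct cfg *c = atomic_load_explicit(&shared_cfg, memory_order_consume);

	return c ? c->value : -1;
}

int main(void)
{
	publish(42);
	printf("consumed value: %d\n", consume());
	free(atomic_load_explicit(&shared_cfg, memory_order_relaxed));
	return 0;
}

(In practice compilers implement memory_order_consume as the stronger
memory_order_acquire; the kernel keeps the lighter dependency ordering
because READ_ONCE() carries an implicit smp_read_barrier_depends(), as
noted above.)
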
Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 3382290ed2d5e275429cef510ab21889d3ccd164)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 7252704bfd83e951d00ec75526ed2bf64a7f6ee1)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/mmu_context.h |  4 ++--
 fs/overlayfs/ovl_entry.h           |  2 +-
 include/linux/rculist.h            |  4 ++--
 include/linux/rcupdate.h           |  4 ++--
 mm/slab.h                          |  2 +-
 arch/x86/events/core.c             |  2 +-
 arch/x86/kernel/ldt.c              |  2 +-
 drivers/md/dm-mpath.c              | 20 ++++++++++----------
 fs/dcache.c                        |  4 ++--
 fs/overlayfs/readdir.c             |  2 +-
 kernel/events/core.c               |  4 ++--
 kernel/seccomp.c                   |  2 +-
 kernel/task_work.c                 |  2 +-
 13 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 3c856a15b98e..efc530642f7d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -72,8 +72,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
 
-	/* lockless_dereference synchronizes with smp_store_release */
-	ldt = lockless_dereference(mm->context.ldt);
+	/* READ_ONCE synchronizes with smp_store_release */
+	ldt = READ_ONCE(mm->context.ldt);
 
 	/*
 	 * Any change to mm->context.ldt is followed by an IPI to all
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 25d9b5adcd42..36b49bd09264 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
 
 static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
 {
-	return lockless_dereference(oi->__upperdentry);
+	return READ_ONCE(oi->__upperdentry);
 }
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..3a2bb7d8ed4d 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -274,7 +274,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-	container_of(lockless_dereference(ptr), type, member)
+	container_of(READ_ONCE(ptr), type, member)
 
 /**
  * Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -367,7 +367,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * example is when items are added to the list, but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
-	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
 
 /**
  * list_for_each_entry_lockless - iterate over rcu list of given type
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f816fc72b51e..ae494eb7b401 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -341,7 +341,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define __rcu_dereference_check(p, c, space) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
 	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(________p1)); \
@@ -355,7 +355,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_dereference_raw(p) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(p) ________p1 = lockless_dereference(p); \
+	typeof(p) ________p1 = READ_ONCE(p); \
 	((typeof(*p) __force __kernel *)(________p1)); \
 })
 
diff --git a/mm/slab.h b/mm/slab.h
index 6885e1192ec5..494cccef822a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -257,7 +257,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
 	 * memcg_caches issues a write barrier to match this (see
 	 * memcg_create_kmem_cache()).
 	 */
-	cachep = lockless_dereference(arr->entries[idx]);
+	cachep = READ_ONCE(arr->entries[idx]);
 	rcu_read_unlock();
 
 	return cachep;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 939050169d12..18685de61288 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2336,7 +2336,7 @@ static unsigned long get_segment_base(unsigned int segment)
 		struct ldt_struct *ldt;
 
 		/* IRQs are off, so this synchronizes with smp_store_release */
-		ldt = lockless_dereference(current->active_mm->context.ldt);
+		ldt = READ_ONCE(current->active_mm->context.ldt);
 		if (!ldt || idx >= ldt->nr_entries)
 			return 0;
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0402d44deb4d..b8be2413cb74 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -102,7 +102,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 static void install_ldt(struct mm_struct *current_mm,
 			struct ldt_struct *ldt)
 {
-	/* Synchronizes with lockless_dereference in load_mm_ldt. */
+	/* Synchronizes with READ_ONCE in load_mm_ldt. */
 	smp_store_release(&current_mm->context.ldt, ldt);
 
 	/* Activate the LDT for all CPUs using current_mm. */
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d24e4b05f5da..731b7ffc7e37 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
 
 	pgpath = path_to_pgpath(path);
 
-	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 		/* Only update current_pgpath if pg changed */
 		spin_lock_irqsave(&m->lock, flags);
 		m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	}
 
 	/* Were we instructed to switch PG? */
-	if (lockless_dereference(m->next_pg)) {
+	if (READ_ONCE(m->next_pg)) {
 		spin_lock_irqsave(&m->lock, flags);
 		pg = m->next_pg;
 		if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 
 	/* Don't change PG until it has no remaining paths */
 check_current_pg:
-	pg = lockless_dereference(m->current_pg);
+	pg = READ_ONCE(m->current_pg);
 	if (pg) {
 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 		if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	struct request *clone;
 
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		pgpath = choose_pgpath(m, nr_bytes);
 
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	bool queue_io;
 
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 	if (!pgpath || !queue_io)
 		pgpath = choose_pgpath(m, nr_bytes);
@@ -1799,7 +1799,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	struct pgpath *current_pgpath;
 	int r;
 
-	current_pgpath = lockless_dereference(m->current_pgpath);
+	current_pgpath = READ_ONCE(m->current_pgpath);
 	if (!current_pgpath)
 		current_pgpath = choose_pgpath(m, 0);
 
@@ -1821,7 +1821,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	}
 
 	if (r == -ENOTCONN) {
-		if (!lockless_dereference(m->current_pg)) {
+		if (!READ_ONCE(m->current_pg)) {
 			/* Path status changed, redo selection */
 			(void) choose_pgpath(m, 0);
 		}
@@ -1890,9 +1890,9 @@ static int multipath_busy(struct dm_target *ti)
 		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
 
 	/* Guess which priority_group will be used at next mapping time */
-	pg = lockless_dereference(m->current_pg);
-	next_pg = lockless_dereference(m->next_pg);
-	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+	pg = READ_ONCE(m->current_pg);
+	next_pg = READ_ONCE(m->next_pg);
+	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
 		pg = next_pg;
 
 	if (!pg) {
diff --git a/fs/dcache.c b/fs/dcache.c
index 3203470c59c2..ccc2bcdcfdfb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 {
 	/*
 	 * Be careful about RCU walk racing with rename:
-	 * use 'lockless_dereference' to fetch the name pointer.
+	 * use 'READ_ONCE' to fetch the name pointer.
 	 *
 	 * NOTE! Even if a rename will mean that the length
 	 * was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 	 * early because the data cannot match (there can
 	 * be no NUL in the ct/tcount data)
 	 */
-	const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 
 	return dentry_string_cmp(cs, ct, tcount);
 }
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 3ff960372cb9..7920a3f62c19 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -440,7 +440,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
 	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
 		struct inode *inode = file_inode(file);
 
-		realfile = lockless_dereference(od->upperfile);
+		realfile = READ_ONCE(od->upperfile);
 		if (!realfile) {
 			struct path upperpath;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5d4398d1fa19..9f51738bf32e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4221,7 +4221,7 @@ static void perf_remove_from_owner(struct perf_event *event)
 	 * indeed free this event, otherwise we need to serialize on
 	 * owner->perf_event_mutex.
 	 */
-	owner = lockless_dereference(event->owner);
+	owner = READ_ONCE(event->owner);
 	if (owner) {
 		/*
 		 * Since delayed_put_task_struct() also drops the last
@@ -4318,7 +4318,7 @@ int perf_event_release_kernel(struct perf_event *event)
 	 * Cannot change, child events are not migrated, see the
 	 * comment with perf_event_ctx_lock_nested().
 	 */
-	ctx = lockless_dereference(child->ctx);
+	ctx = READ_ONCE(child->ctx);
 	/*
 	 * Since child_mutex nests inside ctx::mutex, we must jump
 	 * through hoops. We start by grabbing a reference on the ctx.
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 34aced9ff3ff..3fd2c4b23697 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -188,7 +188,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
 	u32 ret = SECCOMP_RET_ALLOW;
 	/* Make sure cross-thread synced filter points somewhere sane. */
 	struct seccomp_filter *f =
-			lockless_dereference(current->seccomp.filter);
+			READ_ONCE(current->seccomp.filter);
 
 	/* Ensure unexpected behavior doesn't result in failing open. */
 	if (unlikely(WARN_ON(f == NULL)))
diff --git a/kernel/task_work.c b/kernel/task_work.c
index e056d5429783..0371093a2331 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -67,7 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	while ((work = lockless_dereference(*pprev))) {
+	while ((work = READ_ONCE(*pprev))) {
 		if (work->func != func)
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
-- 
2.14.2
