// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 */
#include "sched.h"

/*
 * Bitmask made from an OR of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK			\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE			\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
#endif

#define MEMBARRIER_CMD_BITMASK						\
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED			\
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED				\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED			\
	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)

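/*
 * Illustrative usage sketch (not kernel code; assumes a raw syscall(2)
 * wrapper and the UAPI definitions from <linux/membarrier.h>): user space
 * typically probes which commands are available by issuing
 * MEMBARRIER_CMD_QUERY, which returns a mask of supported commands
 * (derived from MEMBARRIER_CMD_BITMASK in sys_membarrier() below), and
 * then tests the bits it needs:
 *
 *	int mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);
 *
 *	if (mask >= 0 && (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED)) {
 *		// Private expedited membarrier is supported here.
 *	}
 */
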
static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}

static int membarrier_global_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;

		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm && (atomic_read(&p->mm->membarrier_state) &
				   MEMBARRIER_STATE_GLOBAL_EXPEDITED)) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}

static int membarrier_private_expedited(int flags)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		if (!(atomic_read(&current->mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
			return -EPERM;
	} else {
		if (!(atomic_read(&current->mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
			return -EPERM;
	}

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm == current->mm) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */

	return 0;
}

static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	if (atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1) {
		/*
		 * For a single-threaded process that is the sole user of
		 * its mm, we can simply issue a memory barrier after
		 * setting MEMBARRIER_STATE_GLOBAL_EXPEDITED to guarantee
		 * that no memory access following registration is
		 * reordered before registration.
		 */
		smp_mb();
	} else {
		/*
		 * Otherwise the mm is shared by several threads and/or
		 * processes, so we need to ensure all future scheduler
		 * executions will observe the new state for this mm.
		 */
		synchronize_rcu();
	}
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);

	return 0;
}

static int membarrier_register_private_expedited(int flags)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY;

	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
	}

	/*
	 * We need to consider threads belonging to different thread
	 * groups which use the same mm (CLONE_VM but not CLONE_THREAD).
	 */
	if (atomic_read(&mm->membarrier_state) & state)
		return 0;
	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
		atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE,
			  &mm->membarrier_state);
	if (!(atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1)) {
		/*
		 * Ensure all future scheduler executions will observe the
		 * new thread flag state for this process.
		 */
		synchronize_rcu();
	}
	atomic_or(state, &mm->membarrier_state);

	return 0;
}
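
/*
 * Illustrative usage sketch (not kernel code; assumes a raw syscall(2)
 * wrapper and the UAPI definitions from <linux/membarrier.h>): a process
 * registers its intent once, early on, and only then issues expedited
 * private membarriers; issuing MEMBARRIER_CMD_PRIVATE_EXPEDITED without
 * prior registration is rejected with -EPERM by
 * membarrier_private_expedited() above:
 *
 *	if (syscall(__NR_membarrier,
 *		    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
 *		handle_error();	// hypothetical error handler
 *
 *	// ... later, on a slow path:
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
 */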

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * this system call is guaranteed to always return the same value until
 * reboot.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb()   sys_membarrier()
 *        barrier()          X           X              O
 *        smp_mb()           X           O              O
 *        sys_membarrier()   O           O              O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_rcu();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited(0);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited(0);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	default:
		return -EINVAL;
	}
}
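
/*
 * Illustrative usage sketch of the ordering table above (user-space code
 * with hypothetical helper names; "compiler_barrier()" stands for a pure
 * compiler barrier such as asm volatile("" ::: "memory")): fast paths in
 * other threads of the process only pay for a compiler barrier, while a
 * slow path upgrades those to full memory barriers by issuing an expedited
 * membarrier, assuming registration was performed as sketched earlier:
 *
 *	// Fast path, runs concurrently in any thread of the process:
 *	enter_read_side();		// hypothetical: e.g. set a per-thread flag
 *	compiler_barrier();
 *	access_shared_data();		// hypothetical
 *
 *	// Slow path, runs in the updater thread:
 *	publish_update();		// hypothetical
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
 *	wait_for_read_sides();		// hypothetical: now ordered vs. fast paths
 */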