/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/syscalls.h>
#include <linux/membarrier.h>
#include <linux/tick.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>

#include "sched.h"	/* for cpu_rq(). */

/*
 * Bitmask made from a bitwise OR of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#define MEMBARRIER_CMD_BITMASK	\
	(MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
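
/*
 * Illustrative userspace sketch (not kernel code): callers are expected
 * to probe the available commands with MEMBARRIER_CMD_QUERY before
 * relying on any of them. glibc provides no wrapper for this system
 * call, so the membarrier() helper below is an assumed wrapper around
 * syscall(2):
 *
 *	#include <linux/membarrier.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int membarrier(int cmd, int flags)
 *	{
 *		return syscall(__NR_membarrier, cmd, flags);
 *	}
 *
 *	int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
 *
 *	if (mask < 0 || !(mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
 *		handle_unsupported();	// hypothetical fallback path
 */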

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing, but be paranoid. */
}
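
/*
 * Note: ipi_mb() above runs in interrupt context on each CPU targeted
 * by the smp_call_function_many()/smp_call_function_single() calls
 * below, so the smp_mb() executes on the remote CPU itself rather than
 * on the caller.
 */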

static int membarrier_private_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

	if (!(atomic_read(&current->mm->membarrier_state)
			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
		return -EPERM;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm == current->mm) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
	if (!fallback) {
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}

static void membarrier_register_private_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm. (CLONE_VM but not
	 * CLONE_THREAD.)
	 */
	if (atomic_read(&mm->membarrier_state)
			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
		return;
	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
			&mm->membarrier_state);
}
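
/*
 * Illustrative userspace sketch (not kernel code; membarrier() is the
 * assumed syscall wrapper from the example near the top of this file):
 * a process registers once, typically at library init, before issuing
 * private expedited barriers. Without registration,
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED fails with -EPERM, as enforced in
 * membarrier_private_expedited() above:
 *
 *	// Once per process, at init:
 *	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0);
 *
 *	// Later, whenever a slow path needs a full barrier on all
 *	// threads of this process:
 *	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
 */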

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * this system call is guaranteed to always return the same value until
 * reboot.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb() sys_membarrier()
 *        barrier()          X           X            O
 *        smp_mb()           X           O            O
 *        sys_membarrier()   O           O            O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_SHARED;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_SHARED:
		/* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_sched();
		return 0;
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited();
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		membarrier_register_private_expedited();
		return 0;
	default:
		return -EINVAL;
	}
}
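
/*
 * Illustrative userspace sketch (adapted from the example in the
 * membarrier(2) man page; not kernel code): the ordering table above
 * means a heavyweight sys_membarrier() on a slow path can pair with a
 * mere compiler barrier() on a fast path, replacing a matching pair of
 * smp_mb()s:
 *
 *	static volatile int x, y;
 *
 *	static void fast_path(int *read_y)
 *	{
 *		x = 1;
 *		asm volatile ("" : : : "memory");	// compiler barrier
 *		*read_y = y;
 *	}
 *
 *	static void slow_path(int *read_x)
 *	{
 *		y = 1;
 *		membarrier(MEMBARRIER_CMD_SHARED, 0);	// full barrier on
 *							// all running threads
 *		*read_x = x;
 *	}
 *
 * After both functions complete, *read_x == 0 implies *read_y == 1 and
 * *read_y == 0 implies *read_x == 1; the store-buffering outcome where
 * both loads return 0 is forbidden, just as if both sides had used
 * smp_mb().
 */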