/*
 * include/linux/smp.h
 * (mirror_ubuntu-artful-kernel.git, at commit "arm/arm64: KVM: Advertise SMCCC v1.1")
 */
1 #ifndef __LINUX_SMP_H
2 #define __LINUX_SMP_H
3
4 /*
5 * Generic SMP support
6 * Alan Cox. <alan@redhat.com>
7 */
8
9 #include <linux/errno.h>
10 #include <linux/types.h>
11 #include <linux/list.h>
12 #include <linux/cpumask.h>
13 #include <linux/init.h>
14 #include <linux/llist.h>
15
/* Signature of a function that can be run on a remote CPU. */
typedef void (*smp_call_func_t)(void *info);

/*
 * Descriptor for one cross-CPU function call: the function to run, its
 * argument, and a lock-less list node used to queue the request on the
 * target CPU.  @flags holds internal state bits managed by the generic
 * smp code, not by callers.
 */
struct call_single_data {
	struct llist_node llist;	/* entry in the target CPU's call queue */
	smp_call_func_t func;		/* function to invoke on the target CPU */
	void *info;			/* opaque argument passed to @func */
	unsigned int flags;		/* internal state bits (generic smp code only) */
};
23
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

/*
 * Run @func(@info) on CPU @cpuid.  When @wait is non-zero the caller
 * blocks until the function has completed on the remote CPU.
 */
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.  @gfp_flags is presumably used for an internal cpumask
 * allocation -- see kernel/smp.c to confirm.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		smp_call_func_t func, void *info, bool wait,
		gfp_t gfp_flags);

/*
 * Like smp_call_function_single() but returns without waiting; @csd
 * must remain valid until the call completes on the remote CPU.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd);
52
#ifdef CONFIG_X86
/*
 * indicate usage of IBRS to control execution speculation
 *
 * use_ibrs is a bitmask: bit 0 = in use, bit 1 = supported,
 * bit 2 = administratively disabled (see the helpers below).
 */
extern int use_ibrs;
extern u32 sysctl_ibrs_enabled;		/* sysctl-visible IBRS state */
extern struct mutex spec_ctrl_mutex;	/* presumably serializes spec-ctrl updates -- verify at definition site */
#define ibrs_supported		(use_ibrs & 0x2)
#define ibrs_disabled		(use_ibrs & 0x4)
60 static inline void set_ibrs_inuse(void)
61 {
62 if (ibrs_supported)
63 use_ibrs |= 0x1;
64 }
65 static inline void clear_ibrs_inuse(void)
66 {
67 use_ibrs &= ~0x1;
68 }
/*
 * Report whether IBRS is currently in use.
 *
 * Returns 1 when the in-use bit is set, 0 otherwise.  On the negative
 * path an rmb() is issued before returning so the CPU cannot speculate
 * past the check and execute code as though IBRS were active.
 */
static inline int check_ibrs_inuse(void)
{
	if (use_ibrs & 0x1)
		return 1;
	else
		/* rmb to prevent wrong speculation for security */
		rmb();
	return 0;
}
78 static inline void set_ibrs_supported(void)
79 {
80 use_ibrs |= 0x2;
81 if (!ibrs_disabled)
82 set_ibrs_inuse();
83 }
84 static inline void set_ibrs_disabled(void)
85 {
86 use_ibrs |= 0x4;
87 if (check_ibrs_inuse())
88 clear_ibrs_inuse();
89 }
90 static inline void clear_ibrs_disabled(void)
91 {
92 use_ibrs &= ~0x4;
93 set_ibrs_inuse();
94 }
/* Convenience alias; note the rmb() side effect inside check_ibrs_inuse(). */
#define ibrs_inuse		(check_ibrs_inuse())

/*
 * indicate usage of IBPB to control execution speculation
 *
 * use_ibpb mirrors the use_ibrs layout: bit 0 = in use,
 * bit 1 = supported, bit 2 = administratively disabled.
 */
extern int use_ibpb;
extern u32 sysctl_ibpb_enabled;		/* sysctl-visible IBPB state */
#define ibpb_supported		(use_ibpb & 0x2)
#define ibpb_disabled		(use_ibpb & 0x4)
102 static inline void set_ibpb_inuse(void)
103 {
104 if (ibpb_supported)
105 use_ibpb |= 0x1;
106 }
107 static inline void clear_ibpb_inuse(void)
108 {
109 use_ibpb &= ~0x1;
110 }
/*
 * Report whether IBPB is currently in use.
 *
 * Returns 1 when the in-use bit is set, 0 otherwise.  As with
 * check_ibrs_inuse(), the negative path issues an rmb() so the CPU
 * cannot speculate past the check.
 */
static inline int check_ibpb_inuse(void)
{
	if (use_ibpb & 0x1)
		return 1;
	else
		/* rmb to prevent wrong speculation for security */
		rmb();
	return 0;
}
120 static inline void set_ibpb_supported(void)
121 {
122 use_ibpb |= 0x2;
123 if (!ibpb_disabled)
124 set_ibpb_inuse();
125 }
126 static inline void set_ibpb_disabled(void)
127 {
128 use_ibpb |= 0x4;
129 if (check_ibpb_inuse())
130 clear_ibpb_inuse();
131 }
132 static inline void clear_ibpb_disabled(void)
133 {
134 use_ibpb &= ~0x4;
135 set_ibpb_inuse();
136 }
/* Convenience alias; note the rmb() side effect inside check_ibpb_inuse(). */
#define ibpb_inuse		(check_ibpb_inuse())
#endif /* CONFIG_X86 */
139
140 #ifdef CONFIG_SMP
141
142 #include <linux/preempt.h>
143 #include <linux/kernel.h>
144 #include <linux/compiler.h>
145 #include <linux/thread_info.h>
146 #include <asm/smp.h>
147
/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

/*
 * Run @func on any single online CPU in @mask (presumably preferring
 * the current CPU when it is in @mask -- see kernel/smp.c).
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;	/* presumably the "maxcpus=" boot limit -- verify at definition site */
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

/* ID of the CPU that booted the system. */
extern int __boot_cpu_id;
211
/* Return the ID of the boot CPU (reads the arch-maintained __boot_cpu_id). */
static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}
216
#else /* !SMP */

/* UP stub: there are no other CPUs to stop. */
static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
/* UP stub: no remote CPUs, so cross-calls do nothing and report success. */
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

/* UP: the only CPU is 0, so "any CPU in @mask" is always CPU 0. */
static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

/* UP: the boot CPU is always CPU 0. */
static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */
261
/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

/* Disable preemption and return the current CPU id; pair with put_cpu(). */
#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

/* Arch hooks bracketing the re-onlining of non-boot CPUs. */
extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);

void smp_setup_processor_id(void);

/*
 * Run @func(@par) on @cpu; @phys presumably pins the call to the
 * physical CPU -- see the definition in kernel/smp.c to confirm.
 */
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);

/* SMP core functions: CPU hotplug callbacks for call-function data. */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */