/* kernel/up.c — captured from a git blame view (mirror_ubuntu-jammy-kernel) */
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
53ce3d95
AM
2/*
3 * Uniprocessor-only support functions. The counterpart to kernel/smp.c
4 */
5
6e962814 6#include <linux/interrupt.h>
53ce3d95 7#include <linux/kernel.h>
9984de1a 8#include <linux/export.h>
53ce3d95 9#include <linux/smp.h>
47ae4b05 10#include <linux/hypervisor.h>
53ce3d95
AM
11
12int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
13 int wait)
14{
081192b2
DD
15 unsigned long flags;
16
1e474b28
PM
17 if (cpu != 0)
18 return -ENXIO;
93423b86 19
081192b2
DD
20 local_irq_save(flags);
21 func(info);
22 local_irq_restore(flags);
93423b86 23
53ce3d95
AM
24 return 0;
25}
26EXPORT_SYMBOL(smp_call_function_single);
fa688207 27
966a9671 28int smp_call_function_single_async(int cpu, call_single_data_t *csd)
40c01e8b
CH
29{
30 unsigned long flags;
31
32 local_irq_save(flags);
33 csd->func(csd->info);
34 local_irq_restore(flags);
08eed44c 35 return 0;
40c01e8b 36}
c46fff2a 37EXPORT_SYMBOL(smp_call_function_single_async);
40c01e8b 38
caa75932 39void on_each_cpu(smp_call_func_t func, void *info, int wait)
bff2dc42
DD
40{
41 unsigned long flags;
42
43 local_irq_save(flags);
44 func(info);
45 local_irq_restore(flags);
bff2dc42
DD
46}
47EXPORT_SYMBOL(on_each_cpu);
48
fa688207
DD
49/*
50 * Note we still need to test the mask even for UP
51 * because we actually can get an empty mask from
52 * code that on SMP might call us without the local
53 * CPU in the mask.
54 */
55void on_each_cpu_mask(const struct cpumask *mask,
56 smp_call_func_t func, void *info, bool wait)
57{
58 unsigned long flags;
59
60 if (cpumask_test_cpu(0, mask)) {
61 local_irq_save(flags);
62 func(info);
63 local_irq_restore(flags);
64 }
65}
66EXPORT_SYMBOL(on_each_cpu_mask);
67
68/*
69 * Preemption is disabled here to make sure the cond_func is called under the
70 * same condtions in UP and SMP.
71 */
5671d814 72void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
cb923159 73 void *info, bool wait, const struct cpumask *mask)
fa688207
DD
74{
75 unsigned long flags;
76
77 preempt_disable();
78 if (cond_func(0, info)) {
79 local_irq_save(flags);
80 func(info);
81 local_irq_restore(flags);
82 }
83 preempt_enable();
84}
7d49b28a
RR
85EXPORT_SYMBOL(on_each_cpu_cond_mask);
86
5671d814 87void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
cb923159 88 void *info, bool wait)
7d49b28a 89{
cb923159 90 on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
7d49b28a 91}
fa688207 92EXPORT_SYMBOL(on_each_cpu_cond);
df8ce9d7
JG
93
94int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
95{
96 int ret;
97
98 if (cpu != 0)
99 return -ENXIO;
100
101 if (phys)
102 hypervisor_pin_vcpu(0);
103 ret = func(par);
104 if (phys)
105 hypervisor_pin_vcpu(-1);
106
107 return ret;
108}
109EXPORT_SYMBOL_GPL(smp_call_on_cpu);