b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1965aae3 PA |
2 | #ifndef _ASM_X86_SMP_H |
3 | #define _ASM_X86_SMP_H | |
c27cfeff | 4 | #ifndef __ASSEMBLY__ |
53ebef49 | 5 | #include <linux/cpumask.h> |
7e1efc0c | 6 | #include <asm/percpu.h> |
53ebef49 | 7 | |
b23dab08 GC |
8 | /* |
9 | * We need the APIC definitions automatically as part of 'smp.h' | |
10 | */ | |
11 | #ifdef CONFIG_X86_LOCAL_APIC | |
12 | # include <asm/mpspec.h> | |
13 | # include <asm/apic.h> | |
14 | # ifdef CONFIG_X86_IO_APIC | |
15 | # include <asm/io_apic.h> | |
16 | # endif | |
17 | #endif | |
b23dab08 | 18 | #include <asm/thread_info.h> |
fb8fd077 | 19 | #include <asm/cpumask.h> |
b23dab08 | 20 | |
53ebef49 GC |
21 | extern int smp_num_siblings; |
22 | extern unsigned int num_processors; | |
c27cfeff | 23 | |
0816b0f0 VZ |
24 | DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); |
25 | DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); | |
2e4c54da | 26 | DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map); |
b3d7336d | 27 | /* cpus sharing the last level cache: */ |
0816b0f0 VZ |
28 | DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); |
29 | DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); | |
30 | DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); | |
23ca4bba | 31 | |
/* Return the mask of CPUs that share a last-level cache with @cpu. */
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}
36 | ||
0816b0f0 | 37 | DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); |
3e9e57fa | 38 | DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid); |
0816b0f0 | 39 | DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid); |
4e62445b | 40 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) |
0816b0f0 | 41 | DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid); |
4c321ff8 | 42 | #endif |
7e1efc0c | 43 | |
8239c25f TG |
44 | struct task_struct; |
45 | ||
/*
 * Platform hooks for SMP bringup, teardown and cross-CPU signalling.
 * The active implementation lives in the global 'smp_ops' instance
 * (native by default; paravirt guests install their own callbacks).
 */
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);		/* early boot-CPU setup */
	void (*smp_prepare_cpus)(unsigned max_cpus);	/* pre-bringup preparation */
	void (*smp_cpus_done)(unsigned max_cpus);	/* called after all CPUs are up */

	void (*stop_other_cpus)(int wait);		/* halt other CPUs; wait != 0 blocks for ack */
	void (*crash_stop_other_cpus)(void);		/* stop CPUs on the crash/kexec path */
	void (*smp_send_reschedule)(int cpu);		/* kick @cpu into the scheduler */

	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);	/* boot @cpu with idle task @tidle */
	int (*cpu_disable)(void);			/* offline the current CPU */
	void (*cpu_die)(unsigned int cpu);		/* wait for @cpu to finish dying */
	void (*play_dead)(void);			/* final idle/park of a dead CPU */

	void (*send_call_func_ipi)(const struct cpumask *mask);	/* IPI all CPUs in @mask */
	void (*send_call_func_single_ipi)(int cpu);		/* IPI a single CPU */
};
63 | ||
14522076 GC |
64 | /* Globals due to paravirt */ |
65 | extern void set_cpu_sibling_map(int cpu); | |
66 | ||
c76cb368 GC |
67 | #ifdef CONFIG_SMP |
68 | extern struct smp_ops smp_ops; | |
8678969e | 69 | |
/* Stop all other CPUs without waiting for them to acknowledge (wait == 0). */
static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}
74 | ||
/* Stop all other CPUs and wait for them to acknowledge (wait == 1). */
static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}
79 | ||
/* Early SMP setup on the boot CPU, via the installed smp_ops hook. */
static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}
84 | ||
/* Prepare for bringing up to @max_cpus CPUs, via the installed smp_ops hook. */
static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}
89 | ||
/* Finalize SMP bringup once all CPUs are online, via the installed smp_ops hook. */
static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}
94 | ||
/*
 * Boot @cpu with idle task @tidle.  Returns 0 on success or a negative
 * errno from the platform's cpu_up hook.
 */
static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	return smp_ops.cpu_up(cpu, tidle);
}
99 | ||
/* Take the current CPU offline; returns 0 on success or a negative errno. */
static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}
104 | ||
/* Called on a surviving CPU to wait for @cpu to finish dying. */
static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}
109 | ||
/* Final parking of an offlined CPU; does not return to the caller's context. */
static inline void play_dead(void)
{
	smp_ops.play_dead();
}
114 | ||
/* Send a reschedule IPI to @cpu, via the installed smp_ops hook. */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
64b1a21e | 119 | |
/* Arch hook for the generic smp_call_function code: IPI a single CPU. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}
124 | ||
/* Arch hook for the generic smp_call_function code: IPI every CPU in @mask. */
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}
71d19549 | 129 | |
8227dce7 | 130 | void cpu_disable_common(void); |
1e3fac83 | 131 | void native_smp_prepare_boot_cpu(void); |
7557da67 | 132 | void native_smp_prepare_cpus(unsigned int max_cpus); |
63e708f8 | 133 | void calculate_max_logical_packages(void); |
c5597649 | 134 | void native_smp_cpus_done(unsigned int max_cpus); |
66c7ceb4 | 135 | int common_cpu_up(unsigned int cpunum, struct task_struct *tidle); |
5cdaf183 | 136 | int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); |
93be71b6 | 137 | int native_cpu_disable(void); |
2a442c9c | 138 | int common_cpu_die(unsigned int cpu); |
93be71b6 | 139 | void native_cpu_die(unsigned int cpu); |
406f992e | 140 | void hlt_play_dead(void); |
93be71b6 | 141 | void native_play_dead(void); |
a21f5d88 | 142 | void play_dead_common(void); |
a7b480e7 BP |
143 | void wbinvd_on_cpu(int cpu); |
144 | int wbinvd_on_all_cpus(void); | |
93be71b6 | 145 | |
d0a7166b | 146 | void native_smp_send_reschedule(int cpu); |
bcda016e | 147 | void native_send_call_func_ipi(const struct cpumask *mask); |
3b16cf87 | 148 | void native_send_call_func_single_ipi(int cpu); |
7eb43a6d | 149 | void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle); |
93b016f8 | 150 | |
30106c17 | 151 | void smp_store_boot_cpu_info(void); |
1d89a7f0 | 152 | void smp_store_cpu_info(int id); |
89f579ce YW |
153 | |
154 | asmlinkage __visible void smp_reboot_interrupt(void); | |
155 | __visible void smp_reschedule_interrupt(struct pt_regs *regs); | |
156 | __visible void smp_call_function_interrupt(struct pt_regs *regs); | |
157 | __visible void smp_call_function_single_interrupt(struct pt_regs *r); | |
158 | ||
c70dcb74 | 159 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) |
3e9e57fa | 160 | #define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu) |
a9c057c1 | 161 | |
0f08c3b2 DL |
162 | /* |
163 | * This function is needed by all SMP systems. It must _always_ be valid | |
164 | * from the initial startup. We map APIC_BASE very early in page_setup(), | |
165 | * so this is correct in the x86 case. | |
166 | */ | |
9ed7d75b PZ |
167 | #define raw_smp_processor_id() this_cpu_read(cpu_number) |
168 | #define __smp_processor_id() __this_cpu_read(cpu_number) | |
0f08c3b2 DL |
169 | |
170 | #ifdef CONFIG_X86_32 | |
171 | extern int safe_smp_processor_id(void); | |
172 | #else | |
173 | # define safe_smp_processor_id() smp_processor_id() | |
174 | #endif | |
175 | ||
a7b480e7 BP |
176 | #else /* !CONFIG_SMP */ |
177 | #define wbinvd_on_cpu(cpu) wbinvd() | |
/* !CONFIG_SMP stub: flush caches on the only CPU and report success. */
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
14adf855 | 183 | #endif /* CONFIG_SMP */ |
a9c057c1 | 184 | |
148f9bb8 | 185 | extern unsigned disabled_cpus; |
2fe60147 | 186 | |
1b000843 | 187 | #ifdef CONFIG_X86_LOCAL_APIC |
1b000843 | 188 | extern int hard_smp_processor_id(void); |
1b000843 GC |
189 | |
190 | #else /* CONFIG_X86_LOCAL_APIC */ | |
7b6e1062 | 191 | #define hard_smp_processor_id() 0 |
1b000843 GC |
192 | #endif /* CONFIG_X86_LOCAL_APIC */ |
193 | ||
99e8b9ca DZ |
194 | #ifdef CONFIG_DEBUG_NMI_SELFTEST |
195 | extern void nmi_selftest(void); | |
196 | #else | |
197 | #define nmi_selftest() do { } while (0) | |
198 | #endif | |
199 | ||
c27cfeff | 200 | #endif /* __ASSEMBLY__ */ |
1965aae3 | 201 | #endif /* _ASM_X86_SMP_H */ |