#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

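/*
 * Added note (not in the original file): cpu_number is the value behind
 * smp_processor_id() on x86 -- raw_smp_processor_id() is defined as
 * this_cpu_read(cpu_number) in arch/x86/include/asm/smp.h, so each CPU
 * must have its own copy filled in below before that API is usable.
 */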
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

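/*
 * Added note: this initial offset covers early boot, before the real
 * percpu areas exist.  On x86_64 the percpu section is linked at address
 * zero, so percpu variables only become valid pointers once the base of
 * the initial copy (__per_cpu_load) is added; on x86_32 percpu symbols
 * keep their ordinary link addresses, so a zero offset is already correct.
 */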
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
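
/*
 * Added note: per_cpu(var, cpu) resolves to roughly
 *
 *	*(typeof(var) *)((char *)&var + __per_cpu_offset[cpu])
 *
 * so rewriting __per_cpu_offset[] in setup_per_cpu_areas() below is all
 * it takes to retarget every static percpu access at the newly
 * allocated first chunk.
 */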

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32-bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
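	/* added note: the bootmem "goal" steers allocations above the DMA zone */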
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

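/*
 * Added note: LOCAL_DISTANCE and REMOTE_DISTANCE are the generic
 * SLIT-style values (10 and 20) from <linux/topology.h>;
 * pcpu_embed_first_chunk() uses this callback to group CPUs that are
 * local to each other into the same allocation group.
 */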
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

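/*
 * Added note: on 32-bit, percpu accesses go through the %fs segment, so
 * each CPU needs a GDT entry whose base is that CPU's percpu offset.
 * 64-bit loads the offset into MSR_GS_BASE instead and needs no
 * descriptor here, hence the empty body in that configuration.
 */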
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_rw(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32-bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
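	/*
	 * Added note: pcpu_chosen_fc defaults to PCPU_FC_AUTO and can be
	 * overridden with the percpu_alloc= boot parameter (mm/percpu.c).
	 */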
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64-bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32-bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
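	/*
	 * Added note: pcpu_base_addr is where the allocator placed the
	 * first chunk; __per_cpu_start is the link-time start of the
	 * static percpu section.  delta rebases link-time percpu
	 * addresses onto the new chunk, and pcpu_unit_offsets[] then
	 * selects each CPU's unit within it.
	 */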
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE;
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the
		 * boot cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is
		 * set up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

#ifdef CONFIG_X86_32
	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * using vmalloc_fault(), because exception dispatch needs
	 * percpu data.
	 */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * Sync back the low identity map too.  It is used for example
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif
}