#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

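/*
 * Boot-time offsets: until setup_per_cpu_areas() runs, percpu accesses
 * must land in the initial percpu section.  x86_64 links percpu symbols
 * zero-based, so the boot offset is the section's load address,
 * __per_cpu_load; x86_32 links them at their final addresses, so no
 * offset is needed.
 */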
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code must be reachable with
 * 32-bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk, which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere; there is no need to reserve space in the first
 * chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

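/*
 * The first chunk thus serves three kinds of percpu data: the static
 * area copied from __per_cpu_load, the reserve above for module static
 * percpu variables (x86_64 only), and the head of the dynamic percpu
 * allocation space.
 */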
/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Large page remap allocator
 *
 * This allocator uses a PMD page as its unit.  A PMD page is allocated
 * for each cpu and each is remapped into the vmalloc area using PMD
 * mapping.  As a PMD page is quite large, only part of it is used for
 * the first chunk.  The unused part is returned to the bootmem
 * allocator.
 *
 * The PMD pages are therefore mapped twice: once in the linear physical
 * mapping and once in the vmalloc area for the first percpu chunk.  The
 * double mapping adds the pressure of one extra PMD TLB entry, but that
 * is still much better than using only 4k mappings while remaining NUMA
 * friendly.
 */
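/*
 * A sketch of one cpu's PMD-sized unit, assuming x86_64 where
 * PERCPU_FIRST_CHUNK_RESERVE == PERCPU_MODULE_RESERVE (sizes are
 * config dependent):
 *
 *   |<---------------------- PMD_SIZE ---------------------->|
 *   | static copy | module reserve | dynamic | unused tail   |
 *   |<------------- pcpul_size ------------->|<- to bootmem  |
 */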
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pcpul_ent {
	unsigned int	cpu;
	void		*ptr;
};

static size_t pcpul_size;
static struct pcpul_ent *pcpul_map;
static struct vm_struct pcpul_vm;

static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpul_size)
		return NULL;

	return virt_to_page(pcpul_map[cpu].ptr + off);
}

static ssize_t __init setup_pcpu_lpage(size_t static_size)
{
	size_t map_size, dyn_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 */
	if (!cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only a single PMD page per cpu.  Supporting
	 * multiple pages won't be too difficult if it ever becomes
	 * necessary.
	 */
	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpul_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
	pcpul_map = alloc_bootmem(map_size);

	for_each_possible_cpu(cpu) {
		pcpul_map[cpu].cpu = cpu;
		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
							PMD_SIZE);
		if (!pcpul_map[cpu].ptr)
			goto enomem;

		/*
		 * Only use pcpul_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
			     PMD_SIZE - pcpul_size);

		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
	}

	/* allocate address and map */
	pcpul_vm.flags = VM_ALLOC;
	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&pcpul_vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd, pmd_v;

		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
					 cpu * PMD_SIZE);
		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
				PAGE_KERNEL_LARGE);
		set_pmd(pmd, pmd_v);
	}
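
	/*
	 * Each cpu's PMD page is now visible at two addresses: its
	 * linear address pcpul_map[cpu].ptr and, via the mapping just
	 * installed, pcpul_vm.addr + cpu * PMD_SIZE.  The latter is
	 * what the first chunk uses.
	 */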

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", pcpul_vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, pcpul_vm.addr, NULL);

	/* sort pcpul_map by ->ptr so pcpu_lpage_remapped() can binary search */
	for (i = 0; i < num_possible_cpus() - 1; i++)
		for (j = i + 1; j < num_possible_cpus(); j++)
			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
				struct pcpul_ent tmp = pcpul_map[i];
				pcpul_map[i] = pcpul_map[j];
				pcpul_map[j] = tmp;
			}

	return ret;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpul_map[cpu].ptr)
			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
	free_bootmem(__pa(pcpul_map), map_size);
	return -ENOMEM;
}

/**
 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
 * @kaddr: the kernel address in question
 *
 * Determine whether @kaddr falls in the pcpul recycled area.  This is
 * used by pageattr to detect VM aliases and break up the pcpu PMD
 * mapping such that the same physical page is not mapped under
 * different attributes.
 *
 * The recycled area is always at the tail of a partially used PMD
 * page.
 *
 * RETURNS:
 * Address of corresponding remapped pcpu address if match is found;
 * otherwise, NULL.
 */
void *pcpu_lpage_remapped(void *kaddr)
{
	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
	int left = 0, right = num_possible_cpus() - 1;
	int pos;

	/* pcpul in use at all? */
	if (!pcpul_map)
		return NULL;

	/* okay, perform binary search */
	while (left <= right) {
		pos = (left + right) / 2;

		if (pcpul_map[pos].ptr < pmd_addr)
			left = pos + 1;
		else if (pcpul_map[pos].ptr > pmd_addr)
			right = pos - 1;
		else {
			/* it shouldn't be in the area for the first chunk */
			WARN_ON(offset < pcpul_size);

			return pcpul_vm.addr +
				pcpul_map[pos].cpu * PMD_SIZE + offset;
		}
	}

	return NULL;
}
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus the
 * module and dynamic reserves, and is embedded in the linear physical
 * mapping so that it can use PMD mappings without additional TLB
 * pressure.
 */
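/*
 * Like the other setup_pcpu_*() helpers, this returns the unit size on
 * success (recorded as pcpu_unit_size by setup_per_cpu_areas()) or
 * -errno so the caller can fall back to the next allocator.
 */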
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page and most of the initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

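/*
 * pcpu_setup_first_chunk() calls this for each percpu page it maps into
 * the vmalloc area, making sure the page tables for that address are
 * populated before the first access.
 */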
static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

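/*
 * On 32-bit, percpu data is addressed through a segment register (%fs
 * here), so each cpu needs a GDT_ENTRY_PERCPU descriptor whose base is
 * that cpu's percpu offset.  The pack_descriptor() call below encodes a
 * read/write data segment with a 4GB limit (limit 0xFFFFF with 4k
 * granularity).  64-bit sets the %gs base via MSR instead, so there is
 * nothing to do.
 */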
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate the percpu area.  If PSE is supported, try to make
	 * use of large page mappings.  Please read the comments on top
	 * of each allocator for details.
	 */
	ret = setup_pcpu_lpage(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
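	/*
	 * Example: a percpu variable linked at address &var is reached
	 * for cpu N at &var + per_cpu_offset(N); delta relocates the
	 * linked address into the new first chunk and N * pcpu_unit_size
	 * selects cpu N's unit within it.
	 */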
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .data.init area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is right even when the
	 * boot cpu is on a node that has no memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}