/*
 * arch/x86/kernel/setup_percpu.c
 */
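/*
 * Boot-time setup of the x86 per-cpu data areas: each possible cpu
 * gets its own copy of the per-cpu section, after which the early
 * boot-time maps (cpu -> apicid, cpu -> node) are migrated into
 * those areas.
 */
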
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);
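		/* nr_cpu_ids - 1: the boot cpu keeps its static pda (see below) */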

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}

#endif /* CONFIG_SMP && CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				      __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					      __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						   __pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif
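		/* record this cpu's offset and copy in the initial per-cpu data */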
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
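		/* num is left holding the highest possible node number */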
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

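/*
 * Record a cpu -> node mapping.  Before the per-cpu areas exist the
 * mapping goes into the early static array; afterwards it goes into
 * the cpu's per-cpu variable (and its pda's nodenumber).
 */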
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

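/*
 * Debug version of cpu_to_node(): warns (with a stack trace) when it
 * is used before the per-cpu areas are set up, then falls back to the
 * early map.
 */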
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */