#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

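/* DBG() compiles away to nothing unless CONFIG_DEBUG_PER_CPU_MAPS is set. */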
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

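/*
 * The EARLY_PER_CPU variables below are backed by static arrays that
 * are usable before the per-cpu areas are allocated; once
 * setup_per_cpu_maps() has copied them over, the early pointers are
 * cleared and the real per-cpu variables take over.
 */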
/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

/*
 * Define load_pda_offset() and per-cpu __pda for x86_64.
 * load_pda_offset() is responsible for loading the offset of pda into
 * %gs.
 *
 * On SMP, the pda offset also doubles as the percpu base address and
 * thus it should be at the start of the per-cpu area.  To achieve
 * this, it's preallocated in vmlinux_64.lds.S directly instead of
 * using DEFINE_PER_CPU().
 */
#ifdef CONFIG_X86_64
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}
#ifndef CONFIG_SMP
DEFINE_PER_CPU(struct x8664_pda, __pda);
#endif
EXPORT_PER_CPU_SYMBOL(__pda);
#endif /* CONFIG_X86_64 */

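/*
 * On 64-bit the cpu masks below are cpumask_var_t and need explicit
 * bootmem allocation; on 32-bit they are presumed statically sized,
 * which is why the 32-bit setup_cpu_local_masks() is a no-op.
 */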
#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

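/*
 * Offset of each CPU's per-cpu area from the per-cpu symbols.  On
 * x86_64 the boot CPU's slot is pre-initialized to __per_cpu_load so
 * that per-cpu accesses work before setup_per_cpu_areas() runs.
 */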
#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
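/*
 * Allocate an area for each possible CPU (node-local when the node is
 * known and online), copy the initial per-cpu section into it, and
 * record the resulting offsets.
 */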
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

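	/*
	 * Prefer node-local bootmem for each CPU's area; fall back to
	 * any bootmem when the node is unknown or has no memory.
	 */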
	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				      __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					      __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						   __pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif

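		/*
		 * Populate the new area from the initial per-cpu section
		 * and record its offset so per_cpu() accesses for this
		 * CPU resolve into the new copy.
		 */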
		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			(char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
		/*
		 * CPU0 modified pda in the init data area, reload pda
		 * offset for CPU0 and clear the area for others.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

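/*
 * Record @cpu's node in the cpu-to-node map.  Before the per-cpu areas
 * exist this writes the early static map; afterwards it writes the
 * per-cpu variable (and the pda's nodenumber).
 */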
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

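/*
 * numa_add_cpu()/numa_remove_cpu(): the plain versions below poke
 * node_to_cpumask_map directly; the CONFIG_DEBUG_PER_CPU_MAPS versions
 * further down add NULL-map checks and logging.
 */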
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

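/*
 * Debug version of cpu_to_node(): falls back to the early map, with a
 * warning and backtrace, if it is called before the per-cpu areas are
 * set up.
 */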
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack,
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
		       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "node_to_cpumask(%d): node >= nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */