git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - arch/x86/kernel/setup_percpu.c
x86: smp.h move cpu_callin_mask and cpu_callin_map declaration to cpumask.h
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/cpumask.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(x86_cpu_to_apicid, cpu) =
                                early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                                early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                                early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
        }

        /* indicate the early static arrays will soon be gone */
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
        char *pda;
        struct x8664_pda **new_cpu_pda;
        unsigned long size;
        int cpu;

        size = roundup(sizeof(struct x8664_pda), cache_line_size());

        /* allocate cpu_pda array and pointer table */
        {
                unsigned long tsize = nr_cpu_ids * sizeof(void *);
                unsigned long asize = size * (nr_cpu_ids - 1);

                tsize = roundup(tsize, cache_line_size());
                new_cpu_pda = alloc_bootmem(tsize + asize);
                pda = (char *)new_cpu_pda + tsize;
        }

        /* initialize pointer table to static pda's */
        for_each_possible_cpu(cpu) {
                if (cpu == 0) {
                        /* leave boot cpu pda in place */
                        new_cpu_pda[0] = cpu_pda(0);
                        continue;
                }
                new_cpu_pda[cpu] = (struct x8664_pda *)pda;
                new_cpu_pda[cpu]->in_bootmem = 1;
                pda += size;
        }

        /* point to new pointer table */
        _cpu_pda = new_cpu_pda;
}

#endif /* CONFIG_SMP && CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
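/*
 * Allocate a per-cpu area for every possible CPU, copy the initial
 * .data.percpu section into each one, and record the resulting offset
 * in per_cpu_offset() so later per_cpu() accesses resolve to the new
 * areas.  Also sets up the cpu_pda table, the early-to-percpu maps,
 * the node_to_cpumask map and the cpu local masks.
 */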
void __init setup_per_cpu_areas(void)
{
        ssize_t size, old_size;
        char *ptr;
        int cpu;
        unsigned long align = 1;

        /* Setup cpu_pda map */
        setup_cpu_pda_map();

        /* Copy section for each CPU (we discard the original) */
        old_size = PERCPU_ENOUGH_ROOM;
        align = max_t(unsigned long, PAGE_SIZE, align);
        size = roundup(old_size, align);

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

        for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = __alloc_bootmem(size, align,
                                      __pa(MAX_DMA_ADDRESS));
#else
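                /*
                 * Prefer memory on the cpu's own node when that node is
                 * online and has a NODE_DATA(); otherwise fall back to
                 * any available bootmem.
                 */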
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = __alloc_bootmem(size, align,
                                              __pa(MAX_DMA_ADDRESS));
                        pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                        pr_debug("per cpu data for cpu%d at %016lx\n",
                                 cpu, __pa(ptr));
                } else {
                        ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
                                                   __pa(MAX_DMA_ADDRESS));
                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
                                 cpu, node, __pa(ptr));
                }
#endif
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
        }

        /* Setup percpu data maps */
        setup_per_cpu_maps();

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;
        cpumask_t *map;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

        pr_debug("Node to cpumask map at %p for %d nodes\n",
                 map, nr_node_ids);

        /* node_to_cpumask() will now work */
        node_to_cpumask_map = map;
}

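/*
 * Record the node for @cpu: in the cpu's pda, and in whichever
 * cpu-to-node map is currently live (the early static map before the
 * per-cpu areas exist, the per-cpu variable afterwards).
 */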
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        if (cpu_pda(cpu) && node != NUMA_NO_NODE)
                cpu_pda(cpu)->nodenumber = node;

        if (cpu_to_node_map)
                cpu_to_node_map[cpu] = node;

        else if (per_cpu_offset(cpu))
                per_cpu(x86_cpu_to_node_map, cpu) = node;

        else
                pr_debug("Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

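/* add/remove a cpu to/from its node's cpumask in node_to_cpumask_map */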
void __cpuinit numa_add_cpu(int cpu)
{
        cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = cpu_to_node(cpu);
        cpumask_t *mask;
        char buf[64];

        if (node_to_cpumask_map == NULL) {
                printk(KERN_ERR "node_to_cpumask_map NULL\n");
                dump_stack();
                return;
        }

        mask = &node_to_cpumask_map[node];
        if (enable)
                cpu_set(cpu, *mask);
        else
                cpu_clear(cpu, *mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

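/*
 * Debug version of cpu_to_node(): warns (with a stack dump) when it is
 * used before the per-cpu cpu-to-node map has been set up, and falls
 * back to the early static map in that case.
 */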
int cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!per_cpu_offset(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}


/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return (const cpumask_t *)&cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return &cpu_mask_none;
        }
        return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
                dump_stack();
                return cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_mask_none;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */