/* arch/x86/kernel/setup.c */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

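/*
 * Early boot runs before the per-cpu areas exist, so the maps below
 * start out in static NR_CPUS-sized arrays reached through the
 * early_per_cpu*() helpers (see asm/percpu.h). Once setup_per_cpu_maps()
 * has copied them into the real per-cpu area and cleared the early
 * pointers, an access such as
 *
 *	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 *
 * resolves to the per-cpu variable instead of the static array.
 */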
/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas. These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

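/*
 * With CONFIG_HAVE_CPUMASK_OF_CPU_MAP, cpumask_of_cpu(cpu) can index
 * this bootmem-allocated array of single-bit masks instead of building
 * a one-bit cpumask_t on the stack each time it is used.
 */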
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

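/*
 * 32-bit keeps its per-cpu offsets in the __per_cpu_offset[] array
 * below; 64-bit SMP reaches each CPU's area through its PDA (via %gs),
 * so it must set up the PDA pointer table first.
 */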
#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

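/*
 * Runs once per boot, after the MP table / MADT have been parsed (so
 * num_processors and the possible map are known) and before any per-cpu
 * variable is used. Every possible CPU gets its own copy of the
 * .data.percpu section, node-local where NUMA information is already
 * available.
 */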
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size;
	char *ptr;
	int cpu;

	/* no processors found in the MP table or MADT; assume one */
	if (!num_processors)
		num_processors = 1;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#else
	nr_cpu_ids = num_processors;
#endif

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
	       size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
			       cpu, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
	       NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif

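/*
 * Walk the singly linked list of setup_data blobs that the boot loader
 * passes via boot_params (boot protocol 2.09+). Each entry is mapped,
 * dispatched on its type, and then unmapped again.
 */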
void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_ioremap(pa_data, PAGE_SIZE);
		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data, pa_data);
			break;
		default:
			break;
		}
#ifndef CONFIG_DEBUG_BOOT_PARAMS
		free_early(pa_data, pa_data + sizeof(*data) + data->len);
#endif
		pa_data = data->next;
		early_iounmap(data, PAGE_SIZE);
	}
}

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

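/*
 * Record the cpu->node mapping in whichever store is usable at this
 * point in boot: the pda's nodenumber is updated whenever a pda exists;
 * the map entry goes into the early static array before the per-cpu
 * areas are set up, and into the per-cpu variable afterwards.
 */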
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

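/*
 * The fast variants below poke node_to_cpumask_map directly. With
 * CONFIG_DEBUG_PER_CPU_MAPS, checked versions that validate the map and
 * log every transition are used instead.
 */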
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

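/*
 * The debug build also provides checked cpu_to_node() and
 * early_cpu_to_node() that warn, with a stack dump, when they are
 * called too early in boot.
 */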
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return &cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */

/*
 * --------- Crashkernel reservation ------------------------------
 */

static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
	total += highend_pfn - highstart_pfn;
#endif

	return total << PAGE_SHIFT;
}

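/*
 * Reserve memory for a crash/kdump kernel as requested on the command
 * line, e.g. "crashkernel=64M@16M" for 64MB at physical address 16MB.
 * Note that this code insists on an explicit base address.
 */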
#ifdef CONFIG_KEXEC
void __init reserve_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();

	ret = parse_crashkernel(boot_command_line, total_mem,
			&crash_size, &crash_base);
	if (ret == 0 && crash_size > 0) {
		if (crash_base == 0) {
			printk(KERN_INFO "crashkernel reservation failed - "
					"you have to specify a base address\n");
			return;
		}

		if (reserve_bootmem_generic(crash_base, crash_size,
					BOOTMEM_EXCLUSIVE) < 0) {
			printk(KERN_INFO "crashkernel reservation failed - "
					"memory is in use\n");
			return;
		}

		printk(KERN_INFO "Reserving %luMB of memory at %luMB "
				"for crashkernel (System RAM: %luMB)\n",
				(unsigned long)(crash_size >> 20),
				(unsigned long)(crash_base >> 20),
				(unsigned long)(total_mem >> 20));

		crashk_res.start = crash_base;
		crashk_res.end   = crash_base + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_res);
	}
}
#else
void __init reserve_crashkernel(void)
{}
#endif