/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
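
/*
 * Note: with DEBUG defined above, DBG() output goes to whichever early
 * udbg backend udbg_early_init() selects (a CONFIG_PPC_EARLY_DEBUG_*
 * option), so it is usable long before the regular console comes up.
 */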

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);
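
/*
 * Example: booting with "smt-enabled=2" brings up at most two threads
 * per core, while "smt-enabled=off" (smt_enabled_at_boot == 0) should
 * leave only the primary thread of each core online.
 */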

#endif /* CONFIG_SMP */

/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
}

static void __init configure_exceptions(void)
{
	/*
	 * Set up the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
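		/*
		 * LPCR[AIL] = 3 takes interrupts with relocation on, at
		 * the vectors' high virtual addresses, rather than in
		 * real mode at 0x0 (per the AIL definition in ISA
		 * v2.07).
		 */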
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Fix up HFSCR:TM based on CPU features. The bit is set by our
	 * early asm init because at that point we haven't updated our
	 * CPU features from firmware and device-tree. Here we have,
	 * so let's do it.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can optionally do
 * some very early initializations from the probe() routine, but
 * this is not recommended: be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, set up the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc.
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set, and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (even after the above DBG()).
	 *
	 * Right after we return from this function, we turn on the MMU,
	 * which means the real-mode access trick that btext does will
	 * no longer work; it needs to switch to using a real MMU
	 * mapping. This call ensures that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/*
	 * All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non-SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

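	/*
	 * The secondaries are left spinning in the kernel image down at
	 * physical address zero; when this kernel is built to run
	 * relocated (PHYSICAL_START != 0, e.g. a kdump kernel),
	 * subtracting PHYSICAL_START should point us back into the copy
	 * they are actually spinning in.
	 */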
	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by
 * cache flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

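	/*
	 * sets == 0 is our encoding for fully associative (see
	 * parse_cache_info()), hence the 0xffff sentinel. Otherwise,
	 * e.g. a 64K cache with 128-byte lines and 64 sets works out
	 * to 65536 / (64 * 128) = 8-way.
	 */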
	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

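/*
 * Returns false if any of the size/block-size/line-size properties is
 * missing; info is still filled with fallback values, so callers may
 * treat the failure as advisory.
 */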
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird... it represents fully associative caches
	 * as "1 way", which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these.
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128, 0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
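	/*
	 * SID_SHIFT is 28 (256MB segments) and SID_SHIFT_1T is 40 (1TB
	 * segments), so this returns 1TB when 1T segments are in use
	 * and 256MB otherwise.
	 */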
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Emergency stacks are used for a range of things, from asynchronous
 * NMIs (system reset, machine check) to synchronous faults taken in
 * process context. We set preempt_count to zero, even though that isn't
 * necessarily correct. To get the right value we'd need to copy it from
 * the previous thread_info, but doing that might fault, causing more
 * problems.
 * TODO: what to do with accounting?
 */
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
{
	ti->task = NULL;
	ti->cpu = cpu;
	ti->preempt_count = 0;
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);
}

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB; we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
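		/*
		 * Each sp below is pointed at the top of its THREAD_SIZE
		 * region, since stacks grow down.
		 */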
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
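
	/*
	 * Stash each CPU's offset in its paca as well: on ppc64 the
	 * per-cpu accessors read local_paca->data_offset (r13-relative)
	 * rather than a global, which is why fixup_boot_paca() zeroed
	 * it earlier.
	 */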
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
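/*
 * The perf-based detector takes its sample period in processor cycles:
 * watchdog_thresh seconds at ppc_proc_freq Hz.
 */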
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}

/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
	hardlockup_detector_disable();

	return 0;
}
early_initcall(disable_hardlockup_detector);
#endif