/*
 * Copyright 2017, Nicholas Piggin, IBM Corporation
 * Licensed under GPLv2.
 */

#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mmu.h>
#include <asm/oprofile_impl.h>
#include <asm/prom.h>
#include <asm/setup.h>


/* Device-tree visible constants follow */
#define ISA_V2_07B	2070
#define ISA_V3_0B	3000

#define USABLE_PR	(1U << 0)
#define USABLE_OS	(1U << 1)
#define USABLE_HV	(1U << 2)

#define HV_SUPPORT_HFSCR	(1U << 0)
#define OS_SUPPORT_FSCR		(1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE		0xffffffffU
#define OS_SUPPORT_NONE		0xffffffffU

struct dt_cpu_feature {
	const char *name;
	uint32_t isa;
	uint32_t usable_privilege;
	uint32_t hv_support;
	uint32_t os_support;
	uint32_t hfscr_bit_nr;
	uint32_t fscr_bit_nr;
	uint32_t hwcap_bit_nr;
	/* fdt parsing */
	unsigned long node;
	int enabled;
	int disabled;
};

#define CPU_FTRS_BASE \
	(CPU_FTR_USE_TB | \
	 CPU_FTR_LWSYNC | \
	 CPU_FTR_FPU_UNAVAILABLE |\
	 CPU_FTR_NODSISRALIGN |\
	 CPU_FTR_NOEXECUTE |\
	 CPU_FTR_COHERENT_ICACHE | \
	 CPU_FTR_STCX_CHECKS_ADDRESS |\
	 CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	 CPU_FTR_DAWR | \
	 CPU_FTR_ARCH_206 |\
	 CPU_FTR_ARCH_207S)

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)
/*
 * Set up the base CPU
 */

extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

static int hv_mode;

static struct {
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
} system_registers;

static void (*init_pmu_registers)(void);

static void cpufeatures_flush_tlb(void)
{
	unsigned long rb;
	unsigned int i, num_sets;

	/*
	 * This is a temporary measure to keep equivalent TLB flush as the
	 * cputable based setup code.
	 */
	switch (PVR_VER(mfspr(SPRN_PVR))) {
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		num_sets = POWER8_TLB_SETS;
		break;
	case PVR_POWER9:
		num_sets = POWER9_TLB_SETS_HASH;
		break;
	default:
		num_sets = 1;
		pr_err("unknown CPU version for boot TLB flush\n");
		break;
	}

	asm volatile("ptesync" : : : "memory");
	rb = TLBIEL_INVAL_SET;
	for (i = 0; i < num_sets; i++) {
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
	asm volatile("ptesync" : : : "memory");
}

static void __restore_cpu_cpufeatures(void)
{
	/*
	 * LPCR is restored by the power on engine already. It can be changed
	 * after early init e.g., by radix enable, and we have no unified API
	 * for saving and restoring such SPRs.
	 *
	 * This ->restore hook should really be removed from idle and register
	 * restore moved directly into the idle restore code, because this code
	 * doesn't know how idle is implemented or what it needs restored here.
	 *
	 * The best we can do to accommodate secondary boot and idle restore
	 * for now is "or" LPCR with existing.
	 */

	mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
	if (hv_mode) {
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();

	cpufeatures_flush_tlb();
}

static char dt_cpu_name[64];

static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name = NULL,
	.cpu_features = CPU_FTRS_BASE,
	.cpu_user_features = COMMON_USER_BASE,
	.cpu_user_features2 = COMMON_USER2_BASE,
	.mmu_features = 0,
	.icache_bsize = 32, /* minimum block size, fixed by */
	.dcache_bsize = 32, /* cache info init.             */
	.num_pmcs = 0,
	.pmc_type = PPC_PMC_DEFAULT,
	.oprofile_cpu_type = NULL,
	.oprofile_type = PPC_OPROFILE_INVALID,
	.cpu_setup = NULL,
	.cpu_restore = __restore_cpu_cpufeatures,
	.flush_tlb = NULL,
	.machine_check_early = NULL,
	.platform = NULL,
};

static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}

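/*
 * Generic enable path for features the kernel does not know by name:
 * apply the HFSCR/FSCR bit numbers and hwcap bit supplied by the device
 * tree, provided firmware describes a known enable recipe.
 */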
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_enable(struct dt_cpu_feature *f)
{
	if (f->hv_support != HV_SUPPORT_NONE) {
		if (f->hfscr_bit_nr != -1) {
			u64 hfscr = mfspr(SPRN_HFSCR);
			hfscr |= 1UL << f->hfscr_bit_nr;
			mtspr(SPRN_HFSCR, hfscr);
		}
	}

	if (f->os_support != OS_SUPPORT_NONE) {
		if (f->fscr_bit_nr != -1) {
			u64 fscr = mfspr(SPRN_FSCR);
			fscr |= 1UL << f->fscr_bit_nr;
			mtspr(SPRN_FSCR, fscr);
		}
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}

static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}

static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}

static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
	return 1;
}

static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISA 207 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

	return 1;
}

static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISAv3.0B */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}


static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	feat_enable(f);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |= (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static void hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}

static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}

static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power8";
	cur_cpu_spec->flush_tlb = __flush_tlb_power8;
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

	return 1;
}

static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";

	return 1;
}

static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}

static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power9";
	cur_cpu_spec->flush_tlb = __flush_tlb_power9;
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

	return 1;
}

static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";

	return 1;
}

static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}

static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}

static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}

static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;
	feat_enable(f);

	return 1;
}

static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts, including those in OPAL XICS compatibility
	 * mode, are always delivered as hypervisor virtualization interrupts
	 * (HVI) rather than EE.
	 *
	 * However, LPES0 is not set here: in case an EE does somehow get
	 * delivered to the host, the EE handler would not expect it to be
	 * delivered in LPES0 mode (e.g., using SRR[01]). That could happen
	 * if there is a bug in the interrupt controller code, or if the IC
	 * is misconfigured in systemsim.
	 */

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE;	/* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}

struct dt_cpu_feature_match {
	const char *name;
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;
};

static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
646 {"no-execute", feat_enable, 0},
647 {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
648 {"cache-inhibited-large-page", feat_enable_large_ci, 0},
649 {"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX},
650 {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
651 {"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
652 {"wait", feat_enable, 0},
653 {"atomic-memory-operations", feat_enable, 0},
654 {"branch-v3", feat_enable, 0},
655 {"copy-paste", feat_enable, 0},
656 {"decimal-floating-point-v3", feat_enable, 0},
657 {"decimal-integer-v3", feat_enable, 0},
658 {"fixed-point-v3", feat_enable, 0},
659 {"floating-point-v3", feat_enable, 0},
660 {"group-start-register", feat_enable, 0},
661 {"pc-relative-addressing", feat_enable, 0},
662 {"machine-check-power9", feat_enable_mce_power9, 0},
663 {"performance-monitor-power9", feat_enable_pmu_power9, 0},
664 {"event-based-branch-v3", feat_enable, 0},
665 {"random-number-generator", feat_enable, 0},
666 {"system-call-vectored", feat_disable, 0},
667 {"trace-interrupt-v3", feat_enable, 0},
668 {"vector-v3", feat_enable, 0},
669 {"vector-binary128", feat_enable, 0},
670 {"vector-binary16", feat_enable, 0},
671 {"wait-v3", feat_enable, 0},
672};
673
static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;

static int __init dt_cpu_ftrs_parse(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;
	else if (!strcmp(str, "known"))
		enable_unknown = false;
	else
		return 1;

	return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);

static void __init cpufeatures_setup_start(u32 isa)
{
	pr_info("setup for ISA %d\n", isa);

	if (isa >= 3000) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
	}
}

static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
	const struct dt_cpu_feature_match *m;
	bool known = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
		m = &dt_cpu_feature_match_table[i];
		if (!strcmp(f->name, m->name)) {
			known = true;
			if (m->enable(f))
				break;

			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	if (!known && enable_unknown) {
		if (!feat_try_enable_unknown(f)) {
			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	if (m->cpu_ftr_bit_mask)
		cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;

	if (known)
		pr_debug("enabling: %s\n", f->name);
	else
		pr_debug("enabling: %s (unknown)\n", f->name);

	return true;
}

static __init void cpufeatures_cpu_quirks(void)
{
	int version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	if ((version & 0xffffff00) == 0x004e0100)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
}

static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);

	cpufeatures_flush_tlb();

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}

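/*
 * The early command line has not been parsed when this runs, so look for
 * "dt_cpu_ftrs=off" directly in the /chosen bootargs of the flat device
 * tree rather than relying on the early_param handler above.
 */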
static int __init disabled_on_cmdline(void)
{
	unsigned long root, chosen;
	const char *p;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return false;

	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
	if (!p)
		return false;

	if (strstr(p, "dt_cpu_ftrs=off"))
		return true;

	return false;
}

static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
					int depth, void *data)
{
	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
	    && of_get_flat_dt_prop(node, "isa", NULL))
		return 1;

	return 0;
}

bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}

bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}

static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;

static int __init process_cpufeatures_node(unsigned long node,
					   const char *uname, int i)
{
	const __be32 *prop;
	struct dt_cpu_feature *f;
	int len;

	f = &dt_cpu_features[i];
	memset(f, 0, sizeof(struct dt_cpu_feature));

	f->node = node;

	f->name = uname;

	prop = of_get_flat_dt_prop(node, "isa", &len);
	if (!prop) {
		pr_warn("%s: missing isa property\n", uname);
		return 0;
	}
	f->isa = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
	if (!prop) {
		pr_warn("%s: missing usable-privilege property\n", uname);
		return 0;
	}
	f->usable_privilege = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "hv-support", &len);
	if (prop)
		f->hv_support = be32_to_cpup(prop);
	else
		f->hv_support = HV_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "os-support", &len);
	if (prop)
		f->os_support = be32_to_cpup(prop);
	else
		f->os_support = OS_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
	if (prop)
		f->hfscr_bit_nr = be32_to_cpup(prop);
	else
		f->hfscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
	if (prop)
		f->fscr_bit_nr = be32_to_cpup(prop);
	else
		f->fscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
	if (prop)
		f->hwcap_bit_nr = be32_to_cpup(prop);
	else
		f->hwcap_bit_nr = -1;

	if (f->usable_privilege & USABLE_HV) {
		if (!(mfmsr() & MSR_HV)) {
			pr_warn("%s: HV feature passed to guest\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_HFSCR) {
			if (f->hfscr_bit_nr == -1) {
				pr_warn("%s: missing hfscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (f->usable_privilege & USABLE_OS) {
		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
			return 0;
		}

		if (f->os_support == OS_SUPPORT_FSCR) {
			if (f->fscr_bit_nr == -1) {
				pr_warn("%s: missing fscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (!(f->usable_privilege & USABLE_PR)) {
		if (f->hwcap_bit_nr != -1) {
			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
			return 0;
		}
	}

	/* Do all the independent features in the first pass */
	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
		if (cpufeatures_process_feature(f))
			f->enabled = 1;
		else
			f->disabled = 1;
	}

	return 0;
}

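/*
 * Resolve a feature's "dependencies" phandle list recursively: each
 * dependency is enabled first, and if any of them ends up disabled,
 * this feature is disabled as well.
 */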
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
	const __be32 *prop;
	int len;
	int nr_deps;
	int i;

	if (f->enabled || f->disabled)
		return;

	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
	if (!prop) {
		pr_warn("%s: missing dependencies property\n", f->name);
		return;
	}

	nr_deps = len / sizeof(int);

	for (i = 0; i < nr_deps; i++) {
		unsigned long phandle = be32_to_cpu(prop[i]);
		int j;

		for (j = 0; j < nr_dt_cpu_features; j++) {
			struct dt_cpu_feature *d = &dt_cpu_features[j];

			if (of_get_flat_dt_phandle(d->node) == phandle) {
				cpufeatures_deps_enable(d);
				if (d->disabled) {
					f->disabled = 1;
					return;
				}
			}
		}
	}

	if (cpufeatures_process_feature(f))
		f->enabled = 1;
	else
		f->disabled = 1;
}

static int __init scan_cpufeatures_subnodes(unsigned long node,
					    const char *uname,
					    void *data)
{
	int *count = data;

	process_cpufeatures_node(node, uname, *count);

	(*count)++;

	return 0;
}

static int __init count_cpufeatures_subnodes(unsigned long node,
					     const char *uname,
					     void *data)
{
	int *count = data;

	(*count)++;

	return 0;
}

static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
				 &nr_dt_cpu_features);
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursively enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}

void __init dt_cpu_ftrs_scan(void)
{
	if (!using_dt_cpu_ftrs)
		return;

	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}