// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#define pr_fmt(fmt) "lpar: " fmt

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
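
/*
 * The top two bits of each translation specifier form the control field:
 * 0b01 marks a request, 0b10 a response, and 0b11 the end of the list
 * (note HBR_END == HBR_REQUEST | HBR_RESPONSE). HBR_AVPN and HBR_ANDCOND
 * select how the hypervisor matches the HPTE to remove.
 */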

EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that. All SPLPAR support SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}
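
/*
 * The dtl_enable_mask value 2 written above corresponds to logging only
 * preempt/dispatch events (DTL_LOG_PREEMPT in lppaca.h); the remaining
 * event classes stay off until something widens the mask later.
 */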

#ifdef CONFIG_PPC_BOOK3S_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;
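
	/*
	 * H_COALESCE_CAND hints that the page is a candidate for page
	 * coalescing (memory deduplication) under XCMO firmware; only
	 * pages not marked no-execute qualify here, presumably because
	 * read-only text pages are the typical duplicates.
	 */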

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		pr_devel("Hash table group is full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
		return -2;
	}

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
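
/*
 * This lock serializes hypervisor TLB invalidations on LPARs whose
 * firmware cannot handle concurrent tlbie (no MMU_FTR_LOCKLESS_TLBIE);
 * see the lock_tlbie checks in the flush paths below.
 */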
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	int i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				i, lpar_rc);
			continue;
		}
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & 7) | H_AVPN;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;
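
	/*
	 * HPTE_R_PP0 is the most significant bit of the second dword, so
	 * the shift right by 55 lands it in bit 8 of the hcall flags,
	 * i.e. bit 55 in IBM big-endian numbering as noted above.
	 */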

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				hpte_group, lpar_rc);
			continue;
		}

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;

	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * As defined in the PAPR's section 14.5.4.1.8
 * The control mask doesn't include the returned reference and change bit from
 * the processed PTE.
 */
#define HBLKR_AVPN		0x0100000000000000UL
#define HBLKR_CTRL_MASK		0xf800000000000000UL
#define HBLKR_CTRL_SUCCESS	0x8000000000000000UL
#define HBLKR_CTRL_ERRNOTFOUND	0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY	0xa000000000000000UL

/*
 * H_BLOCK_REMOVE caller.
 * @idx should point to the latest @param entry set with a PTEX.
 * If a PTE cannot be processed because another CPU has already locked that
 * group, those entries are put back in @param starting at index 1.
 * If entries have to be retried and @retry_busy is set to true, these entries
 * are retried until success. If @retry_busy is set to false, the returned
 * value is the number of entries yet to process.
 */
static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
				       bool retry_busy)
{
	unsigned long i, rc, new_idx;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
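
	/*
	 * Layout reminder: param[0] carries the AVPN of the 8 page block
	 * and param[1..8] carry the translation specifiers, so a single
	 * call can process at most PLPAR_HCALL9_BUFSIZE - 1 PTEs.
	 */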

	if (idx < 2) {
		pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
		return 0;
	}

again:
	new_idx = 0;
	if (idx > PLPAR_HCALL9_BUFSIZE) {
		pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
		idx = PLPAR_HCALL9_BUFSIZE;
	} else if (idx < PLPAR_HCALL9_BUFSIZE)
		param[idx] = HBR_END;

	rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
			  param[0], /* AVA */
			  param[1], param[2], param[3], param[4], /* TS0-7 */
			  param[5], param[6], param[7], param[8]);
	if (rc == H_SUCCESS)
		return 0;

	BUG_ON(rc != H_PARTIAL);

	/* Check that the unprocessed entries were 'not found' or 'busy' */
	for (i = 0; i < idx - 1; i++) {
		unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;

		if (ctrl == HBLKR_CTRL_ERRBUSY) {
			param[++new_idx] = param[i + 1];
			continue;
		}

		BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
		       && ctrl != HBLKR_CTRL_ERRNOTFOUND);
	}

	/*
	 * If there were entries found busy, retry these entries if requested,
	 * or if all the entries have to be retried.
	 */
	if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE - 1))) {
		idx = new_idx + 1;
		goto again;
	}

	return new_idx;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12
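
/*
 * With 12 entries per batch, the H_BULK_REMOVE fallback below (4 PTEs
 * per hcall) issues at most the 3 iterations mentioned above for each
 * acquisition of the tlbie lock.
 */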

static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
				      int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long shift, current_vpgb, vpgb;
	int i, pix = 0;

	shift = mmu_psize_defs[psize].shift;

	for (i = 0; i < count; i++) {
		/*
		 * Shifting 3 more bits to the right gives an
		 * 8 page aligned virtual address.
		 */
		vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
		if (!pix || vpgb != current_vpgb) {
			/*
			 * Need to start a new 8 page block, flush
			 * the current one if needed.
			 */
			if (pix)
				(void)call_block_remove(pix, param, true);
			current_vpgb = vpgb;
			param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix = 1;
		}

		param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
		if (pix == PLPAR_HCALL9_BUFSIZE) {
			pix = call_block_remove(pix, param, false);
			/*
			 * pix = 0 means that all the entries were
			 * removed, we can start a new block.
			 * Otherwise, this means that there are entries
			 * to retry, and pix points to the latest one, so
			 * we should increment it and try to continue
			 * the same block.
			 */
			if (pix)
				pix++;
		}
	}
	if (pix)
		(void)call_block_remove(pix, param, true);
}

static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
				     int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}

	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}
}

static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
						      unsigned long *vpn,
						      int count, int psize,
						      int ssize)
{
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
		hugepage_block_invalidate(slot, vpn, count, psize, ssize);
	else
		hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);

	return 0;
}

static inline unsigned long compute_slot(real_pte_t pte,
					 unsigned long vpn,
					 unsigned long index,
					 unsigned long shift,
					 int ssize)
{
	unsigned long slot, hash, hidx;

	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(pte, index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	return slot;
}

/*
 * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
 * "all within the same naturally aligned 8 page virtual address block".
 */
static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
			    unsigned long *param)
{
	unsigned long vpn;
	unsigned long i, pix = 0;
	unsigned long index, shift, slot, current_vpgb, vpgb;
	real_pte_t pte;
	int psize, ssize;

	psize = batch->psize;
	ssize = batch->ssize;

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			/*
			 * Shifting 3 more bits to the right gives an
			 * 8 page aligned virtual address.
			 */
			vpgb = (vpn >> (shift - VPN_SHIFT + 3));
			if (!pix || vpgb != current_vpgb) {
				/*
				 * Need to start a new 8 page block, flush
				 * the current one if needed.
				 */
				if (pix)
					(void)call_block_remove(pix, param,
								true);
				current_vpgb = vpgb;
				param[0] = hpte_encode_avpn(vpn, psize,
							    ssize);
				pix = 1;
			}

			slot = compute_slot(pte, vpn, index, shift, ssize);
			param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;

			if (pix == PLPAR_HCALL9_BUFSIZE) {
				pix = call_block_remove(pix, param, false);
				/*
				 * pix = 0 means that all the entries were
				 * removed, we can start a new block.
				 * Otherwise, this means that there are entries
				 * to retry, and pix points to the latest one,
				 * so we should increment it and try to
				 * continue the same block.
				 */
				if (pix)
					pix++;
			}
		} pte_iterate_hashed_end();
	}

	if (pix)
		(void)call_block_remove(pix, param, true);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long index, shift, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) {
		do_block_remove(number, batch, param);
		goto out;
	}

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			slot = compute_slot(pte, vpn, index, shift, ssize);
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}

	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

out:
	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		pr_info("Disabling BULK_REMOVE firmware feature");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}

	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};
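
/*
 * Live HPT resizing is a two-phase hypervisor protocol: H_RESIZE_HPT_PREPARE
 * asks the hypervisor to build the new table (polled below while it keeps
 * returning "long busy"), then H_RESIZE_HPT_COMMIT performs the switch. The
 * commit runs under stop_machine() because no CPU may touch the old HPT
 * while it is being replaced.
 */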

static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;

	return 0;
}
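
/*
 * The mask above follows from the HPT geometry: each HPTE group is
 * 8 HPTEs of 16 bytes, i.e. 128 bytes, hence (size >> 7) - 1.
 */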

/* Must be called in user context */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	might_sleep();

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	pr_info("Attempting to resize HPT to shift %lu\n", shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
					rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		/* Continue on */
		break;

	case H_PARAMETER:
		pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
		return -EINVAL;
	case H_RESOURCE:
		pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
		return -EPERM;
	default:
		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			pr_warn("Hash collision while resizing HPT\n");
			return -ENOSPC;

		default:
			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
				state.commit_rc);
			return -EIO;
		}
	}

	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
		shift, (long long) ktime_ms_delta(t1, t0),
		(long long) ktime_ms_delta(t2, t1));

	return 0;
}

static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = 0;

	if (table_size)
		flags |= PROC_TABLE_NEW;
	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	else
		flags |= PROC_TABLE_HPT_SLB;
	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}
	return rc;
}
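
/*
 * Note that pseries_lpar_register_process_table() only sets
 * PROC_TABLE_NEW when an actual table is being registered
 * (table_size != 0).
 */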

void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted	 = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
	register_process_table		 = pseries_lpar_register_process_table;

	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}

void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");
	register_process_table = pseries_lpar_register_process_table;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		pr_info("%s: CMO free page hinting is not active.\n", __func__);
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	pr_info("%s: CMO free page hinting is active.\n", __func__);

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_TRACEPOINTS

#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
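
/*
 * With jump labels, the hcall wrappers test this key so the tracing
 * branch costs a single patched instruction until a tracepoint is
 * actually registered below.
 */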

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}

static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
{
	unsigned long protovsid;
	unsigned long va_bits = VA_BITS;
	unsigned long modinv, vsid_modulus;
	unsigned long max_mod_inv, tmp_modinv;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		modinv = VSID_MULINV_256M;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
	} else {
		modinv = VSID_MULINV_1T;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
	}

	/*
	 * vsid outside our range.
	 */
	if (vsid >= vsid_modulus)
		return 0;

	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
	 *	protovsid = (vsid * modinv) % vsid_modulus
	 */

	/* Check if (vsid * modinv) overflows (63 bits) */
	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % vsid_modulus;

	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
	protovsid = (protovsid + vsid * modinv) % vsid_modulus;

	return protovsid;
}
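
/*
 * The tail of vsid_unscramble() avoids needing a 128 bit multiply:
 * modinv is split as tmp_modinv * max_mod_inv + (modinv % max_mod_inv),
 * with max_mod_inv chosen so each partial product with vsid stays
 * within 63 bits before the modulo is applied.
 */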

static int __init reserve_vrma_context_id(void)
{
	unsigned long protovsid;

	/*
	 * Reserve context ids which map to reserved virtual addresses. For now
	 * we only reserve the context id which maps to the VRMA VSID. We ignore
	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
	 * enable adjunct support via the "ibm,client-architecture-support"
	 * interface.
	 */
	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);

#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
			     loff_t *pos)
{
	int cpu = (long)filp->private_data;
	struct lppaca *lppaca = &lppaca_of(cpu);

	return simple_read_from_buffer(buf, len, pos, lppaca,
				sizeof(struct lppaca));
}

static const struct file_operations vpa_fops = {
	.open		= simple_open,
	.read		= vpa_file_read,
	.llseek		= default_llseek,
};

static int __init vpa_debugfs_init(void)
{
	char name[16];
	long i;
	static struct dentry *vpa_dir;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
	if (!vpa_dir) {
		pr_warn("%s: can't create vpa root dir\n", __func__);
		return -ENOMEM;
	}

	/* set up the per-cpu vpa file */
	for_each_possible_cpu(i) {
		struct dentry *d;

		sprintf(name, "cpu-%ld", i);

		d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
					&vpa_fops);
		if (!d) {
			pr_warn("%s: can't create per-cpu vpa file\n",
					__func__);
			return -ENOMEM;
		}
	}

	return 0;
}
machine_arch_initcall(pseries, vpa_debugfs_init);
#endif /* CONFIG_DEBUG_FS */