// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#define pr_fmt(fmt) "lpar: " fmt

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
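
/*
 * For reference: the flush paths below queue H_BULK_REMOVE work as pairs of
 * 64-bit words, a request word followed by the encoded AVPN, e.g.
 *
 *	param[pix]   = HBR_REQUEST | HBR_AVPN | slot;
 *	param[pix+1] = hpte_encode_avpn(vpn, psize, ssize);
 *
 * and terminate a partial parameter list with HBR_END.
 */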

EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static u8 dtl_mask = DTL_LOG_PREEMPT;
#else
static u8 dtl_mask;
#endif

void alloc_dtl_buffers(unsigned long *time_limit)
{
	int cpu;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		if (pp->dispatch_log)
			continue;
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
			pr_warn("Stolen time statistics will be unreliable\n");
#endif
			break;
		}

		pp->dtl_ridx = 0;
		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
		pp->dtl_curr = dtl;

		if (time_limit && time_after(jiffies, *time_limit)) {
			cond_resched();
			*time_limit = jiffies + HZ;
		}
	}
}

void register_dtl_buffer(int cpu)
{
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;
	int hwcpu = get_hard_smp_processor_id(cpu);

	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl && dtl_mask) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
			       cpu, hwcpu, ret);

		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
	}
}

#ifdef CONFIG_PPC_SPLPAR
struct dtl_worker {
	struct delayed_work work;
	int cpu;
};

struct vcpu_dispatch_data {
	int last_disp_cpu;

	int total_disp;

	int same_cpu_disp;
	int same_chip_disp;
	int diff_chip_disp;
	int far_chip_disp;

	int numa_home_disp;
	int numa_remote_disp;
	int numa_far_disp;
};

/*
 * This represents the number of cpus in the hypervisor. Since there is no
 * architected way to discover the number of processors in the host, we
 * provision for dealing with NR_CPUS. This is currently 2048 by default, and
 * is sufficient for our purposes. This will need to be tweaked if
 * CONFIG_NR_CPUS is changed.
 */
#define NR_CPUS_H	NR_CPUS

DEFINE_RWLOCK(dtl_access_lock);
static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
static DEFINE_PER_CPU(u64, dtl_entry_ridx);
static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
static enum cpuhp_state dtl_worker_state;
static DEFINE_MUTEX(dtl_enable_mutex);
static int vcpudispatch_stats_on __read_mostly;
static int vcpudispatch_stats_freq = 50;
static __be32 *vcpu_associativity, *pcpu_associativity;

static void free_dtl_buffers(unsigned long *time_limit)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	int cpu;
	struct paca_struct *pp;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		if (!pp->dispatch_log)
			continue;
		kmem_cache_free(dtl_cache, pp->dispatch_log);
		pp->dtl_ridx = 0;
		pp->dispatch_log = NULL;
		pp->dispatch_log_end = NULL;
		pp->dtl_curr = NULL;

		if (time_limit && time_after(jiffies, *time_limit)) {
			cond_resched();
			*time_limit = jiffies + HZ;
		}
	}
#endif
}

static int init_cpu_associativity(void)
{
	vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
	pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);

	if (!vcpu_associativity || !pcpu_associativity) {
		pr_err("error allocating memory for associativity information\n");
		return -ENOMEM;
	}

	return 0;
}

static void destroy_cpu_associativity(void)
{
	kfree(vcpu_associativity);
	kfree(pcpu_associativity);
	vcpu_associativity = pcpu_associativity = NULL;
}

static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
{
	__be32 *assoc;
	int rc = 0;

	assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
	if (!assoc[0]) {
		rc = hcall_vphn(cpu, flag, &assoc[0]);
		if (rc)
			return NULL;
	}

	return assoc;
}

static __be32 *get_pcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
}

static __be32 *get_vcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
}

static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
{
	__be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;

	if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
		return -EINVAL;

	last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
	cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);

	if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
		return -EIO;

	return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
}

static int cpu_home_node_dispatch_distance(int disp_cpu)
{
	__be32 *disp_cpu_assoc, *vcpu_assoc;
	int vcpu_id = smp_processor_id();

	if (disp_cpu >= NR_CPUS_H) {
		pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
						disp_cpu, NR_CPUS_H);
		return -EINVAL;
	}

	disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
	vcpu_assoc = get_vcpu_associativity(vcpu_id);

	if (!disp_cpu_assoc || !vcpu_assoc)
		return -EIO;

	return cpu_distance(disp_cpu_assoc, vcpu_assoc);
}

static void update_vcpu_disp_stat(int disp_cpu)
{
	struct vcpu_dispatch_data *disp;
	int distance;

	disp = this_cpu_ptr(&vcpu_disp_data);
	if (disp->last_disp_cpu == -1) {
		disp->last_disp_cpu = disp_cpu;
		return;
	}

	disp->total_disp++;

	if (disp->last_disp_cpu == disp_cpu ||
		(cpu_first_thread_sibling(disp->last_disp_cpu) ==
					cpu_first_thread_sibling(disp_cpu)))
		disp->same_cpu_disp++;
	else {
		distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
								disp_cpu);
		if (distance < 0)
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
					smp_processor_id());
		else {
			switch (distance) {
			case 0:
				disp->same_chip_disp++;
				break;
			case 1:
				disp->diff_chip_disp++;
				break;
			case 2:
				disp->far_chip_disp++;
				break;
			default:
				pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
						 smp_processor_id(),
						 disp->last_disp_cpu,
						 disp_cpu,
						 distance);
			}
		}
	}

	distance = cpu_home_node_dispatch_distance(disp_cpu);
	if (distance < 0)
		pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
				smp_processor_id());
	else {
		switch (distance) {
		case 0:
			disp->numa_home_disp++;
			break;
		case 1:
			disp->numa_remote_disp++;
			break;
		case 2:
			disp->numa_far_disp++;
			break;
		default:
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
						 smp_processor_id(),
						 disp_cpu,
						 distance);
		}
	}

	disp->last_disp_cpu = disp_cpu;
}

static void process_dtl_buffer(struct work_struct *work)
{
	struct dtl_entry dtle;
	u64 i = __this_cpu_read(dtl_entry_ridx);
	struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);

	if (!local_paca->dispatch_log)
		return;

	/* if we have been migrated away, we cancel ourself */
	if (d->cpu != smp_processor_id()) {
		pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
						smp_processor_id());
		return;
	}

	if (i == be64_to_cpu(vpa->dtl_idx))
		goto out;

	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtle = *dtl;
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
				d->cpu,
				be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}

	__this_cpu_write(dtl_entry_ridx, i);

out:
	schedule_delayed_work_on(d->cpu, to_delayed_work(work),
					HZ / vcpudispatch_stats_freq);
}

static int dtl_worker_online(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	memset(d, 0, sizeof(*d));
	INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
	d->cpu = cpu;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	per_cpu(dtl_entry_ridx, cpu) = 0;
	register_dtl_buffer(cpu);
#else
	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
#endif

	schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
	return 0;
}

static int dtl_worker_offline(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	cancel_delayed_work_sync(&d->work);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unregister_dtl(get_hard_smp_processor_id(cpu));
#endif

	return 0;
}

static void set_global_dtl_mask(u8 mask)
{
	int cpu;

	dtl_mask = mask;
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}

static void reset_global_dtl_mask(void)
{
	int cpu;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	dtl_mask = DTL_LOG_PREEMPT;
#else
	dtl_mask = 0;
#endif
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}

static int dtl_worker_enable(unsigned long *time_limit)
{
	int rc = 0, state;

	if (!write_trylock(&dtl_access_lock)) {
		rc = -EBUSY;
		goto out;
	}

	set_global_dtl_mask(DTL_LOG_ALL);

	/* Setup dtl buffers and register those */
	alloc_dtl_buffers(time_limit);

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
					dtl_worker_online, dtl_worker_offline);
	if (state < 0) {
		pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
		free_dtl_buffers(time_limit);
		reset_global_dtl_mask();
		write_unlock(&dtl_access_lock);
		rc = -EINVAL;
		goto out;
	}
	dtl_worker_state = state;

out:
	return rc;
}

static void dtl_worker_disable(unsigned long *time_limit)
{
	cpuhp_remove_state(dtl_worker_state);
	free_dtl_buffers(time_limit);
	reset_global_dtl_mask();
	write_unlock(&dtl_access_lock);
}

static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
		size_t count, loff_t *ppos)
{
	unsigned long time_limit = jiffies + HZ;
	struct vcpu_dispatch_data *disp;
	int rc, cmd, cpu;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &cmd);
	if (rc || cmd < 0 || cmd > 1) {
		pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
		return rc ? rc : -EINVAL;
	}

	mutex_lock(&dtl_enable_mutex);

	if ((cmd == 0 && !vcpudispatch_stats_on) ||
			(cmd == 1 && vcpudispatch_stats_on))
		goto out;

	if (cmd) {
		rc = init_cpu_associativity();
		if (rc)
			goto out;

		for_each_possible_cpu(cpu) {
			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
			memset(disp, 0, sizeof(*disp));
			disp->last_disp_cpu = -1;
		}

		rc = dtl_worker_enable(&time_limit);
		if (rc) {
			destroy_cpu_associativity();
			goto out;
		}
	} else {
		dtl_worker_disable(&time_limit);
		destroy_cpu_associativity();
	}

	vcpudispatch_stats_on = cmd;

out:
	mutex_unlock(&dtl_enable_mutex);
	if (rc)
		return rc;
	return count;
}

static int vcpudispatch_stats_display(struct seq_file *p, void *v)
{
	int cpu;
	struct vcpu_dispatch_data *disp;

	if (!vcpudispatch_stats_on) {
		seq_puts(p, "off\n");
		return 0;
	}

	for_each_online_cpu(cpu) {
		disp = per_cpu_ptr(&vcpu_disp_data, cpu);
		seq_printf(p, "cpu%d", cpu);
		seq_put_decimal_ull(p, " ", disp->total_disp);
		seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
		seq_put_decimal_ull(p, " ", disp->same_chip_disp);
		seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
		seq_put_decimal_ull(p, " ", disp->far_chip_disp);
		seq_put_decimal_ull(p, " ", disp->numa_home_disp);
		seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
		seq_put_decimal_ull(p, " ", disp->numa_far_disp);
		seq_puts(p, "\n");
	}

	return 0;
}

static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_display, NULL);
}

static const struct file_operations vcpudispatch_stats_proc_ops = {
	.open		= vcpudispatch_stats_open,
	.read		= seq_read,
	.write		= vcpudispatch_stats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t vcpudispatch_stats_freq_write(struct file *file,
		const char __user *p, size_t count, loff_t *ppos)
{
	int rc, freq;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &freq);
	if (rc || freq < 1 || freq > HZ) {
		pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
				HZ);
		return rc ? rc : -EINVAL;
	}

	vcpudispatch_stats_freq = freq;

	return count;
}

static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
{
	seq_printf(p, "%d\n", vcpudispatch_stats_freq);
	return 0;
}

static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_freq_display, NULL);
}

static const struct file_operations vcpudispatch_stats_freq_proc_ops = {
	.open		= vcpudispatch_stats_freq_open,
	.read		= seq_read,
	.write		= vcpudispatch_stats_freq_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vcpudispatch_stats_procfs_init(void)
{
	if (!lppaca_shared_proc(get_lppaca()))
		return 0;

	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
					&vcpudispatch_stats_proc_ops))
		pr_err("vcpudispatch_stats: error creating procfs file\n");
	else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
					&vcpudispatch_stats_freq_proc_ops))
		pr_err("vcpudispatch_stats_freq: error creating procfs file\n");

	return 0;
}

machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
#endif /* CONFIG_PPC_SPLPAR */

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	register_dtl_buffer(cpu);
}

#ifdef CONFIG_PPC_BOOK3S_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		pr_devel("Hash table group is full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/*
	 * Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				i, lpar_rc);
			continue;
		}
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient
	 * place to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & 7) | H_AVPN;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				hpte_group, lpar_rc);
			continue;
		}

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;

	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * As defined in the PAPR's section 14.5.4.1.8
 * The control mask doesn't include the returned reference and change bit from
 * the processed PTE.
 */
#define HBLKR_AVPN		0x0100000000000000UL
#define HBLKR_CTRL_MASK		0xf800000000000000UL
#define HBLKR_CTRL_SUCCESS	0x8000000000000000UL
#define HBLKR_CTRL_ERRNOTFOUND	0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY	0xa000000000000000UL

/*
 * H_BLOCK_REMOVE caller.
 * @idx should point to the latest @param entry set with a PTEX.
 * If a PTE cannot be processed because another CPU has already locked that
 * group, those entries are put back in @param starting at index 1.
 * If entries have to be retried and @retry_busy is set to true, these entries
 * are retried until success. If @retry_busy is set to false, the returned
 * value is the number of entries yet to process.
 */
static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
				       bool retry_busy)
{
	unsigned long i, rc, new_idx;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (idx < 2) {
		pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
		return 0;
	}
again:
	new_idx = 0;
	if (idx > PLPAR_HCALL9_BUFSIZE) {
		pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
		idx = PLPAR_HCALL9_BUFSIZE;
	} else if (idx < PLPAR_HCALL9_BUFSIZE)
		param[idx] = HBR_END;

	rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
			  param[0], /* AVA */
			  param[1], param[2], param[3], param[4], /* TS0-7 */
			  param[5], param[6], param[7], param[8]);
	if (rc == H_SUCCESS)
		return 0;

	BUG_ON(rc != H_PARTIAL);

	/* Check that the unprocessed entries were 'not found' or 'busy' */
	for (i = 0; i < idx-1; i++) {
		unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;

		if (ctrl == HBLKR_CTRL_ERRBUSY) {
			param[++new_idx] = param[i+1];
			continue;
		}

		BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
		       && ctrl != HBLKR_CTRL_ERRNOTFOUND);
	}

	/*
	 * If there were entries found busy, retry these entries if requested,
	 * or if all the entries have to be retried.
	 */
	if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) {
		idx = new_idx + 1;
		goto again;
	}

	return new_idx;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
				      int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long shift, current_vpgb, vpgb;
	int i, pix = 0;

	shift = mmu_psize_defs[psize].shift;

	for (i = 0; i < count; i++) {
		/*
		 * Shift 3 more bits to the right to get an
		 * 8-page aligned virtual address.
		 */
		vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
		if (!pix || vpgb != current_vpgb) {
			/*
			 * Need to start a new 8 pages block, flush
			 * the current one if needed.
			 */
			if (pix)
				(void)call_block_remove(pix, param, true);
			current_vpgb = vpgb;
			param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix = 1;
		}

		param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
		if (pix == PLPAR_HCALL9_BUFSIZE) {
			pix = call_block_remove(pix, param, false);
			/*
			 * pix = 0 means that all the entries were
			 * removed, we can start a new block.
			 * Otherwise, this means that there are entries
			 * to retry, and pix points to latest one, so
			 * we should increment it and try to continue
			 * the same block.
			 */
			if (pix)
				pix++;
		}
	}
	if (pix)
		(void)call_block_remove(pix, param, true);
}

static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
				     int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}
}

static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
						      unsigned long *vpn,
						      int count, int psize,
						      int ssize)
{
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
		hugepage_block_invalidate(slot, vpn, count, psize, ssize);
	else
		hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);

	return 0;
}

static inline unsigned long compute_slot(real_pte_t pte,
					 unsigned long vpn,
					 unsigned long index,
					 unsigned long shift,
					 int ssize)
{
	unsigned long slot, hash, hidx;

	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(pte, index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	return slot;
}

/*
 * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed
 * are "all within the same naturally aligned 8 page virtual address block".
 */
static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
			    unsigned long *param)
{
	unsigned long vpn;
	unsigned long i, pix = 0;
	unsigned long index, shift, slot, current_vpgb, vpgb;
	real_pte_t pte;
	int psize, ssize;

	psize = batch->psize;
	ssize = batch->ssize;

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			/*
			 * Shift 3 more bits to the right to get an
			 * 8-page aligned virtual address.
			 */
			vpgb = (vpn >> (shift - VPN_SHIFT + 3));
			if (!pix || vpgb != current_vpgb) {
				/*
				 * Need to start a new 8 pages block, flush
				 * the current one if needed.
				 */
				if (pix)
					(void)call_block_remove(pix, param,
								true);
				current_vpgb = vpgb;
				param[0] = hpte_encode_avpn(vpn, psize,
							    ssize);
				pix = 1;
			}

			slot = compute_slot(pte, vpn, index, shift, ssize);
			param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;

			if (pix == PLPAR_HCALL9_BUFSIZE) {
				pix = call_block_remove(pix, param, false);
				/*
				 * pix = 0 means that all the entries were
				 * removed, we can start a new block.
				 * Otherwise, this means that there are entries
				 * to retry, and pix points to latest one, so
				 * we should increment it and try to continue
				 * the same block.
				 */
				if (pix)
					pix++;
			}
		} pte_iterate_hashed_end();
	}

	if (pix)
		(void)call_block_remove(pix, param, true);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long index, shift, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) {
		do_block_remove(number, batch, param);
		goto out;
	}

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			slot = compute_slot(pte, vpn, index, shift, ssize);
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

out:
	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		pr_info("Disabling BULK_REMOVE firmware feature");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}

	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};

static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;

	return 0;
}

/*
 * Must be called in process context. The caller must hold the
 * cpus_lock.
 */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	might_sleep();

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	pr_info("Attempting to resize HPT to shift %lu\n", shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
					rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		/* Continue on */
		break;
	case H_PARAMETER:
		pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
		return -EINVAL;
	case H_RESOURCE:
		pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
		return -EPERM;
	default:
		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
				     &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			return -ENOSPC;
		default:
			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
				state.commit_rc);
			return -EIO;
		}
	}

	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
		shift, (long long) ktime_ms_delta(t1, t0),
		(long long) ktime_ms_delta(t2, t1));

	return 0;
}

static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = 0;

	if (table_size)
		flags |= PROC_TABLE_NEW;
	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	else
		flags |= PROC_TABLE_HPT_SLB;
	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}
	return rc;
}

void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted   = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;

	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;

	/*
	 * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
	 * to inform the hypervisor that we wish to use the HPT.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pseries_lpar_register_process_table(0, 0, 0);
}

void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");

	pseries_lpar_register_process_table(__pa(process_tb),
						0, PRTB_SIZE_SHIFT - 12);
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;

	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		pr_info("%s: CMO free page hinting is not active.\n", __func__);
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	pr_info("%s: CMO free page hinting is active.\n", __func__);

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}

static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
{
	unsigned long protovsid;
	unsigned long va_bits = VA_BITS;
	unsigned long modinv, vsid_modulus;
	unsigned long max_mod_inv, tmp_modinv;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		modinv = VSID_MULINV_256M;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
	} else {
		modinv = VSID_MULINV_1T;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
	}

	/*
	 * vsid outside our range.
	 */
	if (vsid >= vsid_modulus)
		return 0;

	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
	 *	protovsid = (vsid * modinv) % vsid_modulus
	 */

	/* Check if (vsid * modinv) overflow (63 bits) */
	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % vsid_modulus;

	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
	protovsid = (protovsid + vsid * modinv) % vsid_modulus;

	return protovsid;
}
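
/*
 * A sketch of why the overflow handling above is safe: writing
 * modinv = tmp_modinv * max_mod_inv + (modinv % max_mod_inv) gives
 *
 *	vsid * modinv = vsid * max_mod_inv * tmp_modinv
 *			+ vsid * (modinv % max_mod_inv)
 *
 * and both vsid * max_mod_inv and vsid * (modinv % max_mod_inv) fit in
 * 63 bits because max_mod_inv = 2^63 / vsid. Reducing each term modulo
 * vsid_modulus therefore yields the same protovsid as the direct
 * (overflowing) product would.
 */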

static int __init reserve_vrma_context_id(void)
{
	unsigned long protovsid;

	/*
	 * Reserve context ids which map to reserved virtual addresses. For now
	 * we only reserve the context id which maps to the VRMA VSID. We ignore
	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
	 * enable adjunct support via the "ibm,client-architecture-support"
	 * interface.
	 */
	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);

#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
			     loff_t *pos)
{
	int cpu = (long)filp->private_data;
	struct lppaca *lppaca = &lppaca_of(cpu);

	return simple_read_from_buffer(buf, len, pos, lppaca,
				sizeof(struct lppaca));
}

static const struct file_operations vpa_fops = {
	.open		= simple_open,
	.read		= vpa_file_read,
	.llseek		= default_llseek,
};

static int __init vpa_debugfs_init(void)
{
	char name[16];
	long i;
	static struct dentry *vpa_dir;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
	if (!vpa_dir) {
		pr_warn("%s: can't create vpa root dir\n", __func__);
		return -ENOMEM;
	}

	/* set up the per-cpu vpa file */
	for_each_possible_cpu(i) {
		struct dentry *d;

		sprintf(name, "cpu-%ld", i);

		d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
					&vpa_fops);
		if (!d) {
			pr_warn("%s: can't create per-cpu vpa file\n",
					__func__);
			return -ENOMEM;
		}
	}

	return 0;
}
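
/*
 * Example (illustrative, assuming debugfs is mounted at /sys/kernel/debug):
 * the raw VPA of cpu 0 can be dumped with
 *
 *	hexdump -C /sys/kernel/debug/powerpc/vpa/cpu-0
 */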
machine_arch_initcall(pseries, vpa_debugfs_init);
#endif /* CONFIG_DEBUG_FS */