// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries LPAR support.
 *
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 */

/* Enables debugging of low-level hash table routines - careful! */

#define pr_fmt(fmt) "lpar: " fmt

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
/*
 * H_BLOCK_REMOVE supported block size for this page size in segment whose base
 * page size is that page size.
 *
 * The first index is the segment base page size, the second one is the actual
 * page size.
 */
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;

/*
 * Due to the involved complexity, and that the current hypervisor is only
 * returning this value or 0, we are limiting the support of the H_BLOCK_REMOVE
 * buffer size to a block size of 8.
 */
#define HBLKRM_SUPPORTED_BLOCK_SIZE	8
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static u8 dtl_mask = DTL_LOG_PREEMPT;
#else
static u8 dtl_mask;
#endif
void alloc_dtl_buffers(unsigned long *time_limit)
{
	int cpu;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		if (pp->dispatch_log)
			continue;
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
			pr_warn("Stolen time statistics will be unreliable\n");
#endif
			break;
		}

		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;

		if (time_limit && time_after(jiffies, *time_limit)) {
			cond_resched();
			*time_limit = jiffies + HZ;
		}
	}
}
void register_dtl_buffer(int cpu)
{
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;
	int hwcpu = get_hard_smp_processor_id(cpu);

	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl && dtl_mask) {
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
			       cpu, hwcpu, ret);

		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
	}
}
#ifdef CONFIG_PPC_SPLPAR
struct dtl_worker {
	struct delayed_work work;
	int cpu;
};

struct vcpu_dispatch_data {
	int last_disp_cpu;

	int total_disp;

	int same_cpu_disp;
	int same_chip_disp;
	int diff_chip_disp;
	int far_chip_disp;

	int numa_home_disp;
	int numa_remote_disp;
	int numa_far_disp;
};

/*
 * This represents the number of cpus in the hypervisor. Since there is no
 * architected way to discover the number of processors in the host, we
 * provision for dealing with NR_CPUS. This is currently 2048 by default, and
 * is sufficient for our purposes. This will need to be tweaked if
 * CONFIG_NR_CPUS is changed.
 */
#define NR_CPUS_H	NR_CPUS

DEFINE_RWLOCK(dtl_access_lock);
static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
static DEFINE_PER_CPU(u64, dtl_entry_ridx);
static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
static enum cpuhp_state dtl_worker_state;
static DEFINE_MUTEX(dtl_enable_mutex);
static int vcpudispatch_stats_on __read_mostly;
static int vcpudispatch_stats_freq = 50;
static __be32 *vcpu_associativity, *pcpu_associativity;
static void free_dtl_buffers(unsigned long *time_limit)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	int cpu;
	struct paca_struct *pp;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		if (!pp->dispatch_log)
			continue;
		kmem_cache_free(dtl_cache, pp->dispatch_log);
		pp->dispatch_log = 0;
		pp->dispatch_log_end = 0;

		if (time_limit && time_after(jiffies, *time_limit)) {
			cond_resched();
			*time_limit = jiffies + HZ;
		}
	}
#endif
}
static int init_cpu_associativity(void)
{
	vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
	pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);

	if (!vcpu_associativity || !pcpu_associativity) {
		pr_err("error allocating memory for associativity information\n");
		return -ENOMEM;
	}

	return 0;
}
static void destroy_cpu_associativity(void)
{
	kfree(vcpu_associativity);
	kfree(pcpu_associativity);
	vcpu_associativity = pcpu_associativity = 0;
}
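/*
 * Return the VPHN associativity buffer slot for @cpu's core from @cpu_assoc,
 * populating it via hcall_vphn() when it has not been fetched yet.
 */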
static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
{
	__be32 *assoc;
	int rc = 0;

	assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
	if (!assoc[0]) {
		rc = hcall_vphn(cpu, flag, &assoc[0]);
		if (rc)
			return NULL;
	}

	return assoc;
}
static __be32 *get_pcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
}

static __be32 *get_vcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
}
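/*
 * Associativity distance between the physical cpus of the previous and the
 * current dispatch, or a negative value on error.
 */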
static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
{
	__be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;

	if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
		return -EINVAL;

	last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
	cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);
	if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
		return -EIO;

	return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
}
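/*
 * Associativity distance between the physical cpu of the current dispatch and
 * the home node of the vcpu running on this logical cpu, or a negative value
 * on error.
 */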
static int cpu_home_node_dispatch_distance(int disp_cpu)
{
	__be32 *disp_cpu_assoc, *vcpu_assoc;
	int vcpu_id = smp_processor_id();

	if (disp_cpu >= NR_CPUS_H) {
		pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
						disp_cpu, NR_CPUS_H);
		return -EINVAL;
	}

	disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
	vcpu_assoc = get_vcpu_associativity(vcpu_id);
	if (!disp_cpu_assoc || !vcpu_assoc)
		return -EIO;

	return cpu_distance(disp_cpu_assoc, vcpu_assoc);
}
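/*
 * Classify this dispatch relative to the previous one (same cpu/core, same
 * chip, different chip, far chip) and relative to the vcpu's home node, and
 * update the per-cpu dispatch statistics accordingly.
 */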
static void update_vcpu_disp_stat(int disp_cpu)
{
	struct vcpu_dispatch_data *disp;
	int distance;

	disp = this_cpu_ptr(&vcpu_disp_data);
	if (disp->last_disp_cpu == -1) {
		disp->last_disp_cpu = disp_cpu;
		return;
	}

	disp->total_disp++;

	if (disp->last_disp_cpu == disp_cpu ||
		(cpu_first_thread_sibling(disp->last_disp_cpu) ==
					cpu_first_thread_sibling(disp_cpu)))
		disp->same_cpu_disp++;
	else {
		distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
								disp_cpu);
		if (distance < 0)
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
					smp_processor_id());
		else {
			switch (distance) {
			case 0:
				disp->same_chip_disp++;
				break;
			case 1:
				disp->diff_chip_disp++;
				break;
			case 2:
				disp->far_chip_disp++;
				break;
			default:
				pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
						 smp_processor_id(),
						 disp->last_disp_cpu,
						 disp_cpu,
						 distance);
			}
		}
	}

	distance = cpu_home_node_dispatch_distance(disp_cpu);
	if (distance < 0)
		pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
				smp_processor_id());
	else {
		switch (distance) {
		case 0:
			disp->numa_home_disp++;
			break;
		case 1:
			disp->numa_remote_disp++;
			break;
		case 2:
			disp->numa_far_disp++;
			break;
		default:
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
						 smp_processor_id(),
						 disp_cpu,
						 distance);
		}
	}

	disp->last_disp_cpu = disp_cpu;
}
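/*
 * Delayed work that consumes the dispatch trace log entries added since the
 * last read index, feeds them to update_vcpu_disp_stat() and reschedules
 * itself at the configured sampling frequency.
 */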
static void process_dtl_buffer(struct work_struct *work)
{
	struct dtl_entry dtle;
	u64 i = __this_cpu_read(dtl_entry_ridx);
	struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);

	if (!local_paca->dispatch_log)
		return;

	/* if we have been migrated away, we cancel ourself */
	if (d->cpu != smp_processor_id()) {
		pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
						smp_processor_id());
		return;
	}

	if (i == be64_to_cpu(vpa->dtl_idx))
		goto out;

	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtle = *dtl;
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
				d->cpu,
				be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}

	__this_cpu_write(dtl_entry_ridx, i);

out:
	schedule_delayed_work_on(d->cpu, to_delayed_work(work),
					HZ / vcpudispatch_stats_freq);
}
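/*
 * CPU hotplug callbacks: start the per-cpu DTL worker (registering a DTL
 * buffer where needed) when a cpu comes online, and stop it again when the
 * cpu goes offline.
 */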
static int dtl_worker_online(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	memset(d, 0, sizeof(*d));
	INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
	d->cpu = cpu;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	per_cpu(dtl_entry_ridx, cpu) = 0;
	register_dtl_buffer(cpu);
#else
	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
#endif

	schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
	return 0;
}
static int dtl_worker_offline(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	cancel_delayed_work_sync(&d->work);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unregister_dtl(get_hard_smp_processor_id(cpu));
#endif

	return 0;
}
static void set_global_dtl_mask(u8 mask)
{
	int cpu;

	dtl_mask = mask;
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}

static void reset_global_dtl_mask(void)
{
	int cpu;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	dtl_mask = DTL_LOG_PREEMPT;
#else
	dtl_mask = 0;
#endif
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}
static int dtl_worker_enable(unsigned long *time_limit)
{
	int rc = 0, state;

	if (!write_trylock(&dtl_access_lock)) {
		rc = -EBUSY;
		goto out;
	}

	set_global_dtl_mask(DTL_LOG_ALL);

	/* Setup dtl buffers and register those */
	alloc_dtl_buffers(time_limit);

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
					dtl_worker_online, dtl_worker_offline);
	if (state < 0) {
		pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
		free_dtl_buffers(time_limit);
		reset_global_dtl_mask();
		write_unlock(&dtl_access_lock);
		rc = -EINVAL;
		goto out;
	}
	dtl_worker_state = state;

out:
	return rc;
}

static void dtl_worker_disable(unsigned long *time_limit)
{
	cpuhp_remove_state(dtl_worker_state);
	free_dtl_buffers(time_limit);
	reset_global_dtl_mask();
	write_unlock(&dtl_access_lock);
}
static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
		size_t count, loff_t *ppos)
{
	unsigned long time_limit = jiffies + HZ;
	struct vcpu_dispatch_data *disp;
	int rc, cmd, cpu;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &cmd);
	if (rc || cmd < 0 || cmd > 1) {
		pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
		return rc ? rc : -EINVAL;
	}

	mutex_lock(&dtl_enable_mutex);

	if ((cmd == 0 && !vcpudispatch_stats_on) ||
			(cmd == 1 && vcpudispatch_stats_on))
		goto out;

	if (cmd) {
		rc = init_cpu_associativity();
		if (rc)
			goto out;

		for_each_possible_cpu(cpu) {
			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
			memset(disp, 0, sizeof(*disp));
			disp->last_disp_cpu = -1;
		}

		rc = dtl_worker_enable(&time_limit);
		if (rc) {
			destroy_cpu_associativity();
			goto out;
		}
	} else {
		dtl_worker_disable(&time_limit);
		destroy_cpu_associativity();
	}

	vcpudispatch_stats_on = cmd;

out:
	mutex_unlock(&dtl_enable_mutex);
	if (rc)
		return rc;
	return count;
}
static int vcpudispatch_stats_display(struct seq_file *p, void *v)
{
	int cpu;
	struct vcpu_dispatch_data *disp;

	if (!vcpudispatch_stats_on) {
		seq_puts(p, "off\n");
		return 0;
	}

	for_each_online_cpu(cpu) {
		disp = per_cpu_ptr(&vcpu_disp_data, cpu);
		seq_printf(p, "cpu%d", cpu);
		seq_put_decimal_ull(p, " ", disp->total_disp);
		seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
		seq_put_decimal_ull(p, " ", disp->same_chip_disp);
		seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
		seq_put_decimal_ull(p, " ", disp->far_chip_disp);
		seq_put_decimal_ull(p, " ", disp->numa_home_disp);
		seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
		seq_put_decimal_ull(p, " ", disp->numa_far_disp);
		seq_puts(p, "\n");
	}

	return 0;
}

static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_display, NULL);
}
static const struct proc_ops vcpudispatch_stats_proc_ops = {
	.proc_open	= vcpudispatch_stats_open,
	.proc_read	= seq_read,
	.proc_write	= vcpudispatch_stats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
static ssize_t vcpudispatch_stats_freq_write(struct file *file,
		const char __user *p, size_t count, loff_t *ppos)
{
	int rc, freq;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &freq);
	if (rc || freq < 1 || freq > HZ) {
		pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
				HZ);
		return rc ? rc : -EINVAL;
	}

	vcpudispatch_stats_freq = freq;

	return count;
}
static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
{
	seq_printf(p, "%d\n", vcpudispatch_stats_freq);
	return 0;
}

static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_freq_display, NULL);
}

static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
	.proc_open	= vcpudispatch_stats_freq_open,
	.proc_read	= seq_read,
	.proc_write	= vcpudispatch_stats_freq_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
static int __init vcpudispatch_stats_procfs_init(void)
{
	/*
	 * Avoid smp_processor_id while preemptible. All CPUs should have
	 * the same value for lppaca_shared_proc.
	 */
	if (!lppaca_shared_proc(get_lppaca())) {
		return 0;
	}

	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
					&vcpudispatch_stats_proc_ops))
		pr_err("vcpudispatch_stats: error creating procfs file\n");
	else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
					&vcpudispatch_stats_freq_proc_ops))
		pr_err("vcpudispatch_stats_freq: error creating procfs file\n");

	return 0;
}

machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
#endif /* CONFIG_PPC_SPLPAR */
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	register_dtl_buffer(cpu);
}
#ifdef CONFIG_PPC_BOOK3S_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		pr_devel("Hash table group is full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   HPTE_V_BOLTED, &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}
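/*
 * Read the whole hash page table in batches of 4 PTEs and invalidate every
 * valid entry that is not part of the VRMA; used when the H_CLEAR_HPT hcall
 * is unavailable or fails.
 */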
static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				i, lpar_rc);
			continue;
		}
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}
static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}
/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & 7) | H_AVPN;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				hpte_group, lpar_rc);
			continue;
		}

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/*
	 * We try to keep bolted entries always in primary hash
	 * But in some cases we can find them in secondary too.
	 */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0) {
		/* Try in secondary */
		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
		if (slot < 0)
			return -1;
	}
	return hpte_group + slot;
}
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}
/*
 * As defined in the PAPR's section 14.5.4.1.8
 * The control mask doesn't include the returned reference and change bit from
 * the processed PTE.
 */
#define HBLKR_AVPN		0x0100000000000000UL
#define HBLKR_CTRL_MASK		0xf800000000000000UL
#define HBLKR_CTRL_SUCCESS	0x8000000000000000UL
#define HBLKR_CTRL_ERRNOTFOUND	0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY	0xa000000000000000UL

/*
 * Returns true if we are supporting this block size for the specified segment
 * base page size and actual page size.
 *
 * Currently, we only support a block size of 8.
 */
static inline bool is_supported_hlbkrm(int bpsize, int psize)
{
	return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
}
/*
 * H_BLOCK_REMOVE caller.
 * @idx should point to the latest @param entry set with a PTEX.
 * If a PTE cannot be processed because another CPU has already locked that
 * group, those entries are put back in @param starting at index 1.
 * If entries have to be retried and @retry_busy is set to true, these entries
 * are retried until success. If @retry_busy is set to false, the return value
 * is the number of entries yet to process.
 */
static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
				       bool retry_busy)
{
	unsigned long i, rc, new_idx;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (idx < 2) {
		pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
		return 0;
	}
again:
	new_idx = 0;
	if (idx > PLPAR_HCALL9_BUFSIZE) {
		pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
		idx = PLPAR_HCALL9_BUFSIZE;
	} else if (idx < PLPAR_HCALL9_BUFSIZE)
		param[idx] = HBR_END;

	rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
			  param[0], /* AVA */
			  param[1],  param[2],  param[3],  param[4], /* TS0-7 */
			  param[5],  param[6],  param[7],  param[8]);
	if (rc == H_SUCCESS)
		return 0;

	BUG_ON(rc != H_PARTIAL);

	/* Check that the unprocessed entries were 'not found' or 'busy' */
	for (i = 0; i < idx-1; i++) {
		unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;

		if (ctrl == HBLKR_CTRL_ERRBUSY) {
			param[++new_idx] = param[i+1];
			continue;
		}

		BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
		       && ctrl != HBLKR_CTRL_ERRNOTFOUND);
	}

	/*
	 * If there were entries found busy, retry these entries if requested,
	 * or if all the entries have to be retried.
	 */
	if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) {
		idx = new_idx + 1;
		goto again;
	}

	return new_idx;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
				      int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0;
	unsigned long shift, current_vpgb, vpgb;

	shift = mmu_psize_defs[psize].shift;

	for (i = 0; i < count; i++) {
		/*
		 * Shifting 3 bits more on the right to get an
		 * 8-page aligned virtual address.
		 */
		vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
		if (!pix || vpgb != current_vpgb) {
			/*
			 * Need to start a new 8 pages block, flush
			 * the current one if needed.
			 */
			if (pix)
				(void)call_block_remove(pix, param, true);
			current_vpgb = vpgb;
			param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix = 1;
		}

		param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
		if (pix == PLPAR_HCALL9_BUFSIZE) {
			pix = call_block_remove(pix, param, false);
			/*
			 * pix = 0 means that all the entries were
			 * removed, we can start a new block.
			 * Otherwise, this means that there are entries
			 * to retry, and pix points to latest one, so
			 * we should increment it and try to continue
			 * the same block.
			 */
			if (pix)
				pix++;
		}
	}
	if (pix)
		(void)call_block_remove(pix, param, true);
}
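/*
 * Invalidate the given huge page HPTEs with H_BULK_REMOVE, two buffer slots
 * per entry and at most four entries per hcall, falling back to individual
 * invalidations when the firmware does not support bulk removal.
 */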
static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
				     int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}
}
static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
						      unsigned long *vpn,
						      int count, int psize,
						      int ssize)
{
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	/* Assuming THP size is 16M */
	if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
		hugepage_block_invalidate(slot, vpn, count, psize, ssize);
	else
		hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif
static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);

	return 0;
}
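/*
 * Compute the hash page table slot number for a (vpn, subpage index) pair
 * from the hash value and the hidx bits cached in the real PTE.
 */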
static inline unsigned long compute_slot(real_pte_t pte,
					 unsigned long vpn,
					 unsigned long index,
					 unsigned long shift,
					 int ssize)
{
	unsigned long slot, hash, hidx;

	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(pte, index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	return slot;
}
/*
 * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
 * "all within the same naturally aligned 8 page virtual address block".
 */
static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
			    unsigned long *param)
{
	unsigned long vpn;
	unsigned long i, pix = 0;
	unsigned long index, shift, slot, current_vpgb, vpgb;
	real_pte_t pte;
	int psize, ssize;

	psize = batch->psize;
	ssize = batch->ssize;

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			/*
			 * Shifting 3 bits more on the right to get an
			 * 8-page aligned virtual address.
			 */
			vpgb = (vpn >> (shift - VPN_SHIFT + 3));
			if (!pix || vpgb != current_vpgb) {
				/*
				 * Need to start a new 8 pages block, flush
				 * the current one if needed.
				 */
				if (pix)
					(void)call_block_remove(pix, param,
								true);
				current_vpgb = vpgb;
				param[0] = hpte_encode_avpn(vpn, psize,
							    ssize);
				pix = 1;
			}

			slot = compute_slot(pte, vpn, index, shift, ssize);
			param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;

			if (pix == PLPAR_HCALL9_BUFSIZE) {
				pix = call_block_remove(pix, param, false);
				/*
				 * pix = 0 means that all the entries were
				 * removed, we can start a new block.
				 * Otherwise, this means that there are entries
				 * to retry, and pix points to latest one, so
				 * we should increment it and try to continue
				 * the same block.
				 */
				if (pix)
					pix++;
			}
		} pte_iterate_hashed_end();
	}

	if (pix)
		(void)call_block_remove(pix, param, true);
}
/*
 * TLB Block Invalidate Characteristics
 *
 * These characteristics define the size of the block the hcall H_BLOCK_REMOVE
 * is able to process for each pair (segment base page size, actual page size).
 *
 * The ibm,get-system-parameter RTAS call returns a buffer with the
 * following layout:
 *
 * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
 *
 * TLB Block Invalidate Specifiers:
 * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
 * [ 1 byte Number of page sizes (N) that are supported for the specified
 *          TLB invalidate block size ]
 * [ 1 byte Encoded segment base page size and actual page size
 *          MSB=0 means 4k segment base page size and actual page size
 *          MSB=1 the penc value in mmu_psize_def ]
 *          ...
 *
 * Next TLB Block Invalidate Specifiers...
 */
static inline void set_hblkrm_bloc_size(int bpsize, int psize,
					unsigned int block_size)
{
	if (block_size > hblkrm_size[bpsize][psize])
		hblkrm_size[bpsize][psize] = block_size;
}
/*
 * Decode the Encoded segment base page size and actual page size.
 * PAPR specifies:
 *   - bit 7 is the L bit
 *   - bits 0-5 are the penc value
 * If the L bit is 0, this means 4K segment base page size and actual page size
 * otherwise the penc value should be read.
 */
#define HBLKRM_L_MASK		0x80
#define HBLKRM_PENC_MASK	0x3f
static inline void __init check_lp_set_hblkrm(unsigned int lp,
					      unsigned int block_size)
{
	unsigned int bpsize, psize;

	/* First, check the L bit, if not set, this means 4K */
	if ((lp & HBLKRM_L_MASK) == 0) {
		set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
		return;
	}

	lp &= HBLKRM_PENC_MASK;
	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
		struct mmu_psize_def *def = &mmu_psize_defs[bpsize];

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			if (def->penc[psize] == lp) {
				set_hblkrm_bloc_size(bpsize, psize, block_size);
				return;
			}
		}
	}
}
#define SPLPAR_TLB_BIC_TOKEN		50

/*
 * The size of the TLB Block Invalidate Characteristics is variable. But at the
 * maximum it will be the number of possible page sizes *2 + 10 bytes.
 * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
 * (128 bytes) for the buffer to get plenty of space.
 */
#define SPLPAR_TLB_BIC_MAXLENGTH	128

void __init pseries_lpar_read_hblkrm_characteristics(void)
{
	unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
	int call_status, len, idx, bpsize;

	if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
		return;

	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				SPLPAR_TLB_BIC_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);
	memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
	local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
	spin_unlock(&rtas_data_buf_lock);

	if (call_status != 0) {
		pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
			__FILE__, __func__, call_status);
		return;
	}

	/*
	 * The first two (2) bytes of the data in the buffer are the length of
	 * the returned data, not counting these first two (2) bytes.
	 */
	len = be16_to_cpu(*((u16 *)local_buffer)) + 2;
	if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
		pr_warn("%s too large returned buffer %d", __func__, len);
		return;
	}

	idx = 2;
	while (idx < len) {
		u8 block_shift = local_buffer[idx++];
		u32 block_size;
		unsigned int npsize;

		if (!block_shift)
			break;

		block_size = 1 << block_shift;

		for (npsize = local_buffer[idx++];
		     npsize > 0 && idx < len; npsize--)
			check_lp_set_hblkrm((unsigned int) local_buffer[idx++],
					    block_size);
	}

	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
			if (hblkrm_size[bpsize][idx])
				pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d",
					bpsize, idx, hblkrm_size[bpsize][idx]);
}
/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long index, shift, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (is_supported_hlbkrm(batch->psize, batch->psize)) {
		do_block_remove(number, batch, param);
		goto out;
	}

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			slot = compute_slot(pte, vpn, index, shift, ssize);
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

out:
	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		pr_info("Disabling BULK_REMOVE firmware feature");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}

	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);
#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};
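/*
 * Runs under stop_machine(): commit the prepared HPT resize via
 * H_RESIZE_HPT_COMMIT and, on success, update the globals describing the
 * hash page table geometry.
 */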
static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;

	return 0;
}
/*
 * Must be called in process context. The caller must hold the
 * cpus_lock.
 */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	might_sleep();

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	pr_info("Attempting to resize HPT to shift %lu\n", shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
					rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		/* Continue on */
		break;
	case H_PARAMETER:
		pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
		return -EINVAL;
	case H_RESOURCE:
		pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
		return -EPERM;
	default:
		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
				     &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			return -ENOSPC;
		default:
			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
				state.commit_rc);
			return -EIO;
		}
	}

	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
		shift, (long long) ktime_ms_delta(t1, t0),
		(long long) ktime_ms_delta(t2, t1));

	return 0;
}
static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = 0;

	if (table_size)
		flags |= PROC_TABLE_NEW;
	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	else
		flags |= PROC_TABLE_HPT_SLB;
	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}
	return rc;
}
void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted   = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;

	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;

	/*
	 * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
	 * to inform the hypervisor that we wish to use the HPT.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pseries_lpar_register_process_table(0, 0, 0);
}
void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");

	pseries_lpar_register_process_table(__pa(process_tb),
						0, PRTB_SIZE_SHIFT - 12);
}
#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;

	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		pr_info("%s: CMO free page hinting is not active.\n", __func__);
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	pr_info("%s: CMO free page hinting is active.\n", __func__);

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);
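/*
 * Tell the hypervisor the state (e.g. unused) of every CMO-sized page backing
 * the given 2^order page range, one H_PAGE_INIT hcall per CMO page.
 */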
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}
void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}
void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif /* CONFIG_TRACEPOINTS */
/*
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}
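/*
 * Invert the VSID scrambling: recover the proto-VSID from a scrambled VSID by
 * multiplying with the modular multiplicative inverse of the VSID multiplier,
 * taking care not to overflow the 64-bit multiplication.
 */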
static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
{
	unsigned long protovsid;
	unsigned long va_bits = VA_BITS;
	unsigned long modinv, vsid_modulus;
	unsigned long max_mod_inv, tmp_modinv;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		modinv = VSID_MULINV_256M;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
	} else {
		modinv = VSID_MULINV_1T;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
	}

	/*
	 * vsid outside our range.
	 */
	if (vsid >= vsid_modulus)
		return 0;

	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
	 *	protovsid = (vsid * modinv) % vsid_modulus
	 */

	/* Check if (vsid * modinv) overflow (63 bits) */
	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % vsid_modulus;

	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
	protovsid = (protovsid + vsid * modinv) % vsid_modulus;

	return protovsid;
}
static int __init reserve_vrma_context_id(void)
{
	unsigned long protovsid;

	/*
	 * Reserve context ids which map to reserved virtual addresses. For now
	 * we only reserve the context id which maps to the VRMA VSID. We ignore
	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
	 * enable adjunct support via the "ibm,client-architecture-support"
	 * interface.
	 */
	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
			     loff_t *pos)
{
	int cpu = (long)filp->private_data;
	struct lppaca *lppaca = &lppaca_of(cpu);

	return simple_read_from_buffer(buf, len, pos, lppaca,
				sizeof(struct lppaca));
}

static const struct file_operations vpa_fops = {
	.open		= simple_open,
	.read		= vpa_file_read,
	.llseek		= default_llseek,
};

static int __init vpa_debugfs_init(void)
{
	char name[16];
	long i;
	struct dentry *vpa_dir;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);

	/* set up the per-cpu vpa file*/
	for_each_possible_cpu(i) {
		sprintf(name, "cpu-%ld", i);
		debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops);
	}

	return 0;
}
machine_arch_initcall(pseries, vpa_debugfs_init);
#endif /* CONFIG_DEBUG_FS */