// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#define pr_fmt(fmt) "lpar: " fmt

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
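/*
 * Illustrative sketch (editorial, not from this file): the calling shape of
 * the exported wrappers. plpar_hcall() fills up to 4 return words into
 * retbuf; the opcode and hcall arguments follow. The H_GET_TERM_CHAR use
 * below is only an example of the shape:
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	long rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
 *	if (rc == H_SUCCESS)
 *		...;	// retbuf[0..3] hold the returned values
 */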
/*
 * H_BLOCK_REMOVE supported block size for this page size in segment whose base
 * page size is that page size.
 *
 * The first index is the segment base page size, the second one is the actual
 * page size.
 */
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;

/*
 * Due to the involved complexity, and that the current hypervisor is only
 * returning this value or 0, we are limiting the support of the H_BLOCK_REMOVE
 * buffer size to a block size of 8.
 */
#define HBLKRM_SUPPORTED_BLOCK_SIZE 8
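/*
 * Worked example (illustrative, made-up values): if the hypervisor reports an
 * 8-entry invalidate block for 64K pages mapped in a 64K base page size
 * segment, the parsing code further down ends up with
 *
 *	hblkrm_size[MMU_PAGE_64K][MMU_PAGE_64K] == 8
 *
 * which equals HBLKRM_SUPPORTED_BLOCK_SIZE, so H_BLOCK_REMOVE is used for
 * those mappings.
 */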
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static u8 dtl_mask = DTL_LOG_PREEMPT;
#else
static u8 dtl_mask;
#endif
void alloc_dtl_buffers(unsigned long *time_limit)
{
	int cpu;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		if (pp->dispatch_log)
			continue;
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
			pr_warn("Stolen time statistics will be unreliable\n");
#endif
			break;
		}

		pp->dtl_ridx = 0;
		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
		pp->dtl_curr = dtl;

		if (time_limit && time_after(jiffies, *time_limit)) {
			cond_resched();
			*time_limit = jiffies + HZ;
		}
	}
}
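/*
 * Sizing note (editorial): N_DISPATCH_LOG is defined in <asm/lppaca.h> as
 * DISPATCH_LOG_BYTES / sizeof(struct dtl_entry), so the allocation above is
 * exactly DISPATCH_LOG_BYTES (4096 bytes) per CPU.
 */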
void register_dtl_buffer(int cpu)
{
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;
	int hwcpu = get_hard_smp_processor_id(cpu);

	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl && dtl_mask) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
			       cpu, hwcpu, ret);

		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
	}
}
#ifdef CONFIG_PPC_SPLPAR
struct dtl_worker {
	struct delayed_work work;
	int cpu;
};

struct vcpu_dispatch_data {
	int last_disp_cpu;

	int total_disp;

	int same_cpu_disp;
	int same_chip_disp;
	int diff_chip_disp;
	int far_chip_disp;

	int numa_home_disp;
	int numa_remote_disp;
	int numa_far_disp;
};

/*
 * This represents the number of cpus in the hypervisor. Since there is no
 * architected way to discover the number of processors in the host, we
 * provision for dealing with NR_CPUS. This is currently 2048 by default, and
 * is sufficient for our purposes. This will need to be tweaked if
 * CONFIG_NR_CPUS is changed.
 */
#define NR_CPUS_H	NR_CPUS

DEFINE_RWLOCK(dtl_access_lock);
static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
static DEFINE_PER_CPU(u64, dtl_entry_ridx);
static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
static enum cpuhp_state dtl_worker_state;
static DEFINE_MUTEX(dtl_enable_mutex);
static int vcpudispatch_stats_on __read_mostly;
static int vcpudispatch_stats_freq = 50;
static __be32 *vcpu_associativity, *pcpu_associativity;
static void free_dtl_buffers(unsigned long *time_limit)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	int cpu;
	struct paca_struct *pp;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		if (!pp->dispatch_log)
			continue;
		kmem_cache_free(dtl_cache, pp->dispatch_log);
		pp->dtl_ridx = 0;
		pp->dispatch_log = 0;
		pp->dispatch_log_end = 0;
		pp->dtl_curr = 0;

		if (time_limit && time_after(jiffies, *time_limit)) {
			cond_resched();
			*time_limit = jiffies + HZ;
		}
	}
#endif
}
static int init_cpu_associativity(void)
{
	vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
	pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);

	if (!vcpu_associativity || !pcpu_associativity) {
		pr_err("error allocating memory for associativity information\n");
		return -ENOMEM;
	}

	return 0;
}
static void destroy_cpu_associativity(void)
{
	kfree(vcpu_associativity);
	kfree(pcpu_associativity);
	vcpu_associativity = pcpu_associativity = 0;
}
static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
{
	__be32 *assoc;
	int rc = 0;

	assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
	if (!assoc[0]) {
		rc = hcall_vphn(cpu, flag, &assoc[0]);
		if (rc)
			return NULL;
	}

	return assoc;
}
static __be32 *get_pcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
}

static __be32 *get_vcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
}
static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
{
	__be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;

	if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
		return -EINVAL;

	last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
	cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);

	if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
		return -EIO;

	return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
}
static int cpu_home_node_dispatch_distance(int disp_cpu)
{
	__be32 *disp_cpu_assoc, *vcpu_assoc;
	int vcpu_id = smp_processor_id();

	if (disp_cpu >= NR_CPUS_H) {
		pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
						disp_cpu, NR_CPUS_H);
		return -EINVAL;
	}

	disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
	vcpu_assoc = get_vcpu_associativity(vcpu_id);

	if (!disp_cpu_assoc || !vcpu_assoc)
		return -EIO;

	return cpu_distance(disp_cpu_assoc, vcpu_assoc);
}
static void update_vcpu_disp_stat(int disp_cpu)
{
	struct vcpu_dispatch_data *disp;
	int distance;

	disp = this_cpu_ptr(&vcpu_disp_data);
	if (disp->last_disp_cpu == -1) {
		disp->last_disp_cpu = disp_cpu;
		return;
	}

	disp->total_disp++;

	if (disp->last_disp_cpu == disp_cpu ||
		(cpu_first_thread_sibling(disp->last_disp_cpu) ==
					cpu_first_thread_sibling(disp_cpu)))
		disp->same_cpu_disp++;
	else {
		distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
								disp_cpu);
		if (distance < 0)
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
					smp_processor_id());
		else {
			switch (distance) {
			case 0:
				disp->same_chip_disp++;
				break;
			case 1:
				disp->diff_chip_disp++;
				break;
			case 2:
				disp->far_chip_disp++;
				break;
			default:
				pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
						 smp_processor_id(),
						 disp->last_disp_cpu,
						 disp_cpu, distance);
			}
		}
	}

	distance = cpu_home_node_dispatch_distance(disp_cpu);
	if (distance < 0)
		pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
				smp_processor_id());
	else {
		switch (distance) {
		case 0:
			disp->numa_home_disp++;
			break;
		case 1:
			disp->numa_remote_disp++;
			break;
		case 2:
			disp->numa_far_disp++;
			break;
		default:
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
						 smp_processor_id(),
						 disp_cpu, distance);
		}
	}

	disp->last_disp_cpu = disp_cpu;
}
static void process_dtl_buffer(struct work_struct *work)
{
	struct dtl_entry dtle;
	u64 i = __this_cpu_read(dtl_entry_ridx);
	struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);

	if (!local_paca->dispatch_log)
		return;

	/* if we have been migrated away, we cancel ourselves */
	if (d->cpu != smp_processor_id()) {
		pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
						smp_processor_id());
		return;
	}

	if (i == be64_to_cpu(vpa->dtl_idx))
		goto out;

	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtle = *dtl;
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
				d->cpu,
				be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}

	__this_cpu_write(dtl_entry_ridx, i);

out:
	schedule_delayed_work_on(d->cpu, to_delayed_work(work),
					HZ / vcpudispatch_stats_freq);
}
static int dtl_worker_online(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	memset(d, 0, sizeof(*d));
	INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
	d->cpu = cpu;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	per_cpu(dtl_entry_ridx, cpu) = 0;
	register_dtl_buffer(cpu);
#else
	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
#endif

	schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
	return 0;
}
static int dtl_worker_offline(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	cancel_delayed_work_sync(&d->work);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unregister_dtl(get_hard_smp_processor_id(cpu));
#endif

	return 0;
}
static void set_global_dtl_mask(u8 mask)
{
	int cpu;

	dtl_mask = mask;
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}

static void reset_global_dtl_mask(void)
{
	int cpu;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	dtl_mask = DTL_LOG_PREEMPT;
#else
	dtl_mask = 0;
#endif
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}
static int dtl_worker_enable(unsigned long *time_limit)
{
	int rc = 0, state;

	if (!write_trylock(&dtl_access_lock)) {
		rc = -EBUSY;
		goto out;
	}

	set_global_dtl_mask(DTL_LOG_ALL);

	/* Setup dtl buffers and register those */
	alloc_dtl_buffers(time_limit);

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
					dtl_worker_online, dtl_worker_offline);
	if (state < 0) {
		pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
		free_dtl_buffers(time_limit);
		reset_global_dtl_mask();
		write_unlock(&dtl_access_lock);
		rc = -EINVAL;
		goto out;
	}
	dtl_worker_state = state;

out:
	return rc;
}

static void dtl_worker_disable(unsigned long *time_limit)
{
	cpuhp_remove_state(dtl_worker_state);
	free_dtl_buffers(time_limit);
	reset_global_dtl_mask();
	write_unlock(&dtl_access_lock);
}
static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
		size_t count, loff_t *ppos)
{
	unsigned long time_limit = jiffies + HZ;
	struct vcpu_dispatch_data *disp;
	int rc, cmd, cpu;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &cmd);
	if (rc || cmd < 0 || cmd > 1) {
		pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
		return rc ? rc : -EINVAL;
	}

	mutex_lock(&dtl_enable_mutex);

	if ((cmd == 0 && !vcpudispatch_stats_on) ||
			(cmd == 1 && vcpudispatch_stats_on))
		goto out;

	if (cmd) {
		rc = init_cpu_associativity();
		if (rc)
			goto out;

		for_each_possible_cpu(cpu) {
			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
			memset(disp, 0, sizeof(*disp));
			disp->last_disp_cpu = -1;
		}

		rc = dtl_worker_enable(&time_limit);
		if (rc) {
			destroy_cpu_associativity();
			goto out;
		}
	} else {
		dtl_worker_disable(&time_limit);
		destroy_cpu_associativity();
	}

	vcpudispatch_stats_on = cmd;

out:
	mutex_unlock(&dtl_enable_mutex);
	if (rc)
		return rc;
	return count;
}
static int vcpudispatch_stats_display(struct seq_file *p, void *v)
{
	int cpu;
	struct vcpu_dispatch_data *disp;

	if (!vcpudispatch_stats_on) {
		seq_puts(p, "off\n");
		return 0;
	}

	for_each_online_cpu(cpu) {
		disp = per_cpu_ptr(&vcpu_disp_data, cpu);
		seq_printf(p, "cpu%d", cpu);
		seq_put_decimal_ull(p, " ", disp->total_disp);
		seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
		seq_put_decimal_ull(p, " ", disp->same_chip_disp);
		seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
		seq_put_decimal_ull(p, " ", disp->far_chip_disp);
		seq_put_decimal_ull(p, " ", disp->numa_home_disp);
		seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
		seq_put_decimal_ull(p, " ", disp->numa_far_disp);
		seq_puts(p, "\n");
	}

	return 0;
}
static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_display, NULL);
}

static const struct file_operations vcpudispatch_stats_proc_ops = {
	.open		= vcpudispatch_stats_open,
	.read		= seq_read,
	.write		= vcpudispatch_stats_write,
	.llseek		= no_llseek,
	.release	= single_release,
};
static ssize_t vcpudispatch_stats_freq_write(struct file *file,
		const char __user *p, size_t count, loff_t *ppos)
{
	int rc, freq;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &freq);
	if (rc || freq < 1 || freq > HZ) {
		pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
				HZ);
		return rc ? rc : -EINVAL;
	}

	vcpudispatch_stats_freq = freq;

	return count;
}
static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
{
	seq_printf(p, "%d\n", vcpudispatch_stats_freq);
	return 0;
}

static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_freq_display, NULL);
}

static const struct file_operations vcpudispatch_stats_freq_proc_ops = {
	.open		= vcpudispatch_stats_freq_open,
	.read		= seq_read,
	.write		= vcpudispatch_stats_freq_write,
	.llseek		= no_llseek,
	.release	= single_release,
};
static int __init vcpudispatch_stats_procfs_init(void)
{
	if (!lppaca_shared_proc(get_lppaca()))
		return 0;

	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
					&vcpudispatch_stats_proc_ops))
		pr_err("vcpudispatch_stats: error creating procfs file\n");
	else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
					&vcpudispatch_stats_freq_proc_ops))
		pr_err("vcpudispatch_stats_freq: error creating procfs file\n");

	return 0;
}

machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
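/*
 * Usage sketch (illustrative; the sample numbers are made up):
 *
 *	# echo 1 > /proc/powerpc/vcpudispatch_stats
 *	# cat /proc/powerpc/vcpudispatch_stats
 *	cpu0 4300 4010 138 100 52 3800 400 100
 *
 * The eight per-CPU counters are, in the order printed by
 * vcpudispatch_stats_display() above: total dispatches, same cpu/core,
 * same chip, different chip, far chip, then the NUMA home/remote/far
 * dispatch counts.
 */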
#endif /* CONFIG_PPC_SPLPAR */
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	register_dtl_buffer(cpu);
}
#ifdef CONFIG_PPC_BOOK3S_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		pr_devel("Hash table group is full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
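/*
 * Worked example (illustrative) of the return encoding above: an HPTE
 * placed in slot 5 of the secondary bucket yields
 * (5 & 7) | (1 << 3) == 13; callers use bit 3 as the secondary-group flag
 * and the low 3 bits as the slot within the group.
 */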
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}
static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				i, lpar_rc);
			continue;
		}
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}
static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}
/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & 7) | H_AVPN;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				hpte_group, lpar_rc);
			continue;
		}

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}
/*
 * As defined in the PAPR's section 14.5.4.1.8
 * The control mask doesn't include the returned reference and change bit from
 * the processed PTE.
 */
#define HBLKR_AVPN		0x0100000000000000UL
#define HBLKR_CTRL_MASK		0xf800000000000000UL
#define HBLKR_CTRL_SUCCESS	0x8000000000000000UL
#define HBLKR_CTRL_ERRNOTFOUND	0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY	0xa000000000000000UL

/*
 * Returns true if this block size is supported for the specified segment
 * base page size and actual page size.
 *
 * Currently, only a block size of 8 is supported.
 */
static inline bool is_supported_hlbkrm(int bpsize, int psize)
{
	return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
}
/*
 * H_BLOCK_REMOVE caller.
 * @idx should point to the latest @param entry set with a PTEX.
 * If a PTE cannot be processed because another CPU has already locked that
 * group, those entries are put back in @param starting at index 1.
 * If entries have to be retried and @retry_busy is set to true, these entries
 * are retried until success. If @retry_busy is set to false, the returned
 * value is the number of entries yet to process.
 */
static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
				       bool retry_busy)
{
	unsigned long i, rc, new_idx;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (idx < 2) {
		pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
		return 0;
	}
again:
	new_idx = 0;
	if (idx > PLPAR_HCALL9_BUFSIZE) {
		pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
		idx = PLPAR_HCALL9_BUFSIZE;
	} else if (idx < PLPAR_HCALL9_BUFSIZE)
		param[idx] = HBR_END;

	rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
			  param[0], /* AVA */
			  param[1], param[2], param[3], param[4], /* TS0-7 */
			  param[5], param[6], param[7], param[8]);
	if (rc == H_SUCCESS)
		return 0;

	BUG_ON(rc != H_PARTIAL);

	/* Check that the unprocessed entries were 'not found' or 'busy' */
	for (i = 0; i < idx-1; i++) {
		unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;

		if (ctrl == HBLKR_CTRL_ERRBUSY) {
			param[++new_idx] = param[i+1];
			continue;
		}

		BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
		       && ctrl != HBLKR_CTRL_ERRNOTFOUND);
	}

	/*
	 * If there were entries found busy, retry these entries if requested,
	 * or if all the entries have to be retried.
	 */
	if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) {
		idx = new_idx + 1;
		goto again;
	}

	return new_idx;
}
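/*
 * Illustrative param[] layout for one H_BLOCK_REMOVE invocation (assumption,
 * following call_block_remove() above):
 *
 *	param[0] = hpte_encode_avpn(vpn, psize, ssize);	// AVA of the block
 *	param[1] = HBR_REQUEST | HBLKR_AVPN | ptex0;	// TS0
 *	...
 *	param[8] = HBR_REQUEST | HBLKR_AVPN | ptex7;	// TS7
 *
 * A shorter list is terminated by writing HBR_END at param[idx].
 */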
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
				      int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long shift, current_vpgb, vpgb;
	int i, pix = 0;

	shift = mmu_psize_defs[psize].shift;

	for (i = 0; i < count; i++) {
		/*
		 * Shifting 3 bits more on the right to get an
		 * 8-page aligned virtual address.
		 */
		vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
		if (!pix || vpgb != current_vpgb) {
			/*
			 * Need to start a new 8 pages block, flush
			 * the current one if needed.
			 */
			if (pix)
				(void)call_block_remove(pix, param, true);
			current_vpgb = vpgb;
			param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix = 1;
		}

		param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
		if (pix == PLPAR_HCALL9_BUFSIZE) {
			pix = call_block_remove(pix, param, false);
			/*
			 * pix = 0 means that all the entries were
			 * removed, we can start a new block.
			 * Otherwise, this means that there are entries
			 * to retry, and pix points to latest one, so
			 * we should increment it and try to continue
			 * the same block.
			 */
			if (pix)
				pix++;
		}
	}
	if (pix)
		(void)call_block_remove(pix, param, true);
}
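/*
 * Worked example (illustrative) of the vpgb arithmetic above: with 64K pages
 * (shift == 16) and VPN_SHIFT == 12, vpgb = vpn >> 7, i.e. all VPNs inside
 * the same naturally aligned 8-page (8 << 16 bytes) block share one vpgb
 * value and are batched into a single H_BLOCK_REMOVE call.
 */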
static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
				     int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}
}
static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
						      unsigned long *vpn,
						      int count, int psize,
						      int ssize)
{
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	/* Assuming THP size is 16M */
	if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
		hugepage_block_invalidate(slot, vpn, count, psize, ssize);
	else
		hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif
static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);

	return 0;
}
static inline unsigned long compute_slot(real_pte_t pte,
					 unsigned long vpn,
					 unsigned long index,
					 unsigned long shift,
					 int ssize)
{
	unsigned long slot, hash, hidx;

	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(pte, index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	return slot;
}
/*
 * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
 * "all within the same naturally aligned 8 page virtual address block".
 */
static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
			    unsigned long *param)
{
	unsigned long vpn;
	unsigned long i, pix = 0;
	unsigned long index, shift, slot, current_vpgb, vpgb;
	real_pte_t pte;
	int psize, ssize;

	psize = batch->psize;
	ssize = batch->ssize;

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			/*
			 * Shifting 3 bits more on the right to get an
			 * 8-page aligned virtual address.
			 */
			vpgb = (vpn >> (shift - VPN_SHIFT + 3));
			if (!pix || vpgb != current_vpgb) {
				/*
				 * Need to start a new 8 pages block, flush
				 * the current one if needed.
				 */
				if (pix)
					(void)call_block_remove(pix, param,
								true);
				current_vpgb = vpgb;
				param[0] = hpte_encode_avpn(vpn, psize,
							    ssize);
				pix = 1;
			}

			slot = compute_slot(pte, vpn, index, shift, ssize);
			param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;

			if (pix == PLPAR_HCALL9_BUFSIZE) {
				pix = call_block_remove(pix, param, false);
				/*
				 * pix = 0 means that all the entries were
				 * removed, we can start a new block.
				 * Otherwise, this means that there are entries
				 * to retry, and pix points to latest one, so
				 * we should increment it and try to continue
				 * the same block.
				 */
				if (pix)
					pix++;
			}
		} pte_iterate_hashed_end();
	}

	if (pix)
		(void)call_block_remove(pix, param, true);
}
/*
 * TLB Block Invalidate Characteristics
 *
 * These characteristics define the size of the block the hcall H_BLOCK_REMOVE
 * is able to process for each pair (segment base page size, actual page size).
 *
 * The ibm,get-system-parameter property returns a buffer with the
 * following layout:
 *
 * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
 * -----------------
 * TLB Block Invalidate Specifiers:
 * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
 * [ 1 byte Number of page sizes (N) that are supported for the specified
 *          TLB invalidate block size ]
 * [ 1 byte Encoded segment base page size and actual page size
 *          MSB=0 means 4k segment base page size and actual page size
 *          MSB=1 the penc value in mmu_psize_def ]
 * ...
 * -----------------
 * Next TLB Block Invalidate Specifiers...
 * -----------------
 * [ 0 ]
 */
static inline void set_hblkrm_bloc_size(int bpsize, int psize,
					unsigned int block_size)
{
	if (block_size > hblkrm_size[bpsize][psize])
		hblkrm_size[bpsize][psize] = block_size;
}

/*
 * Decode the Encoded segment base page size and actual page size.
 * PAPR specifies:
 *   - bit 7 is the L bit
 *   - bits 0-5 are the penc value
 * If the L bit is 0, this means 4K segment base page size and actual page size
 * otherwise the penc value should be read.
 */
#define HBLKRM_L_MASK		0x80
#define HBLKRM_PENC_MASK	0x3f
static inline void __init check_lp_set_hblkrm(unsigned int lp,
					      unsigned int block_size)
{
	unsigned int bpsize, psize;

	/* First, check the L bit, if not set, this means 4K */
	if ((lp & HBLKRM_L_MASK) == 0) {
		set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
		return;
	}

	lp &= HBLKRM_PENC_MASK;
	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
		struct mmu_psize_def *def = &mmu_psize_defs[bpsize];
		unsigned int psize;

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			if (def->penc[psize] == lp) {
				set_hblkrm_bloc_size(bpsize, psize, block_size);
				return;
			}
		}
	}
}
#define SPLPAR_TLB_BIC_TOKEN		50

/*
 * The size of the TLB Block Invalidate Characteristics is variable. But at the
 * maximum it will be the number of possible page sizes *2 + 10 bytes.
 * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
 * (128 bytes) for the buffer to get plenty of space.
 */
#define SPLPAR_TLB_BIC_MAXLENGTH	128

void __init pseries_lpar_read_hblkrm_characteristics(void)
{
	unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
	int call_status, len, idx, bpsize;

	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				SPLPAR_TLB_BIC_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);
	memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
	local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
	spin_unlock(&rtas_data_buf_lock);

	if (call_status != 0) {
		pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
			__FILE__, __func__, call_status);
		return;
	}

	/*
	 * The first two (2) bytes of the data in the buffer are the length of
	 * the returned data, not counting these first two (2) bytes.
	 */
	len = be16_to_cpu(*((u16 *)local_buffer)) + 2;
	if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
		pr_warn("%s too large returned buffer %d", __func__, len);
		return;
	}

	idx = 2;
	while (idx < len) {
		u8 block_shift = local_buffer[idx++];
		u32 block_size;
		unsigned int npsize;

		if (!block_shift)
			break;

		block_size = 1 << block_shift;

		for (npsize = local_buffer[idx++];
		     npsize > 0 && idx < len; npsize--)
			check_lp_set_hblkrm((unsigned int) local_buffer[idx++],
					    block_size);
	}

	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
			if (hblkrm_size[bpsize][idx])
				pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d",
					bpsize, idx, hblkrm_size[bpsize][idx]);
}
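/*
 * Worked parse (illustrative, made-up bytes): a returned buffer beginning
 *
 *	00 04  03 01 00  00
 *
 * decodes as: length = 4 (+2 = 6 total), one specifier with block_shift = 3
 * (block size 1 << 3 = 8), npsize = 1, and encoded page size byte 0x00,
 * i.e. an 8-entry block for 4K base/actual page size. The trailing zero
 * block_shift terminates the list.
 */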
/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long index, shift, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (is_supported_hlbkrm(batch->psize, batch->psize)) {
		do_block_remove(number, batch, param);
		goto out;
	}

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			slot = compute_slot(pte, vpn, index, shift, ssize);
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

out:
	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		pr_info("Disabling BULK_REMOVE firmware feature");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);
#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};

static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;

	return 0;
}
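/*
 * Sketch of the arithmetic above (editorial): one HPTE group is
 * 8 HPTEs * 16 bytes = 128 bytes, so an HPT of 2^shift bytes contains
 * (1UL << shift) >> 7 groups and the hash mask is that count minus one.
 * E.g. shift == 30 (a 1GB HPT) gives htab_hash_mask == 0x7fffff.
 */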
/*
 * Must be called in process context. The caller must hold the
 * cpus_lock.
 */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	might_sleep();

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	pr_info("Attempting to resize HPT to shift %lu\n", shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
					rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		/* Continue on */
		break;

	case H_PARAMETER:
		pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
		return -EINVAL;
	case H_RESOURCE:
		pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
		return -EPERM;
	default:
		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
				     &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			return -ENOSPC;

		default:
			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
				state.commit_rc);
			return -EIO;
		}
	}

	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
		shift, (long long) ktime_ms_delta(t1, t0),
		(long long) ktime_ms_delta(t2, t1));

	return 0;
}
static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = 0;

	if (table_size)
		flags |= PROC_TABLE_NEW;
	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	else
		flags |= PROC_TABLE_HPT_SLB;
	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}
	return rc;
}
void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted	 = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;

	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;

	/*
	 * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
	 * to inform the hypervisor that we wish to use the HPT.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pseries_lpar_register_process_table(0, 0, 0);
}
void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");

	pseries_lpar_register_process_table(__pa(process_tb),
						0, PRTB_SIZE_SHIFT - 12);
}
#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		pr_info("%s: CMO free page hinting is not active.\n", __func__);
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	pr_info("%s: CMO free page hinting is active.\n", __func__);

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif
/*
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);
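/*
 * Illustrative unpacking (made-up value), mirroring h_get_mpp() above:
 * for retbuf[3] == 0x0102ffffffffffffUL,
 *	mem_weight		== 0x01  (byte 7)
 *	unallocated_mem_weight	== 0x02  (byte 6)
 *	unallocated_entitlement	== 0xffffffffffffUL  (low 6 bytes)
 */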
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}
static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
{
	unsigned long protovsid;
	unsigned long va_bits = VA_BITS;
	unsigned long modinv, vsid_modulus;
	unsigned long max_mod_inv, tmp_modinv;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		modinv = VSID_MULINV_256M;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
	} else {
		modinv = VSID_MULINV_1T;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
	}

	/*
	 * vsid outside our range.
	 */
	if (vsid >= vsid_modulus)
		return 0;

	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
	 *	protovsid = (vsid * modinv) % vsid_modulus
	 */

	/* Check if (vsid * modinv) overflow (63 bits) */
	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % vsid_modulus;

	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
	protovsid = (protovsid + vsid * modinv) % vsid_modulus;

	return protovsid;
}
static int __init reserve_vrma_context_id(void)
{
	unsigned long protovsid;

	/*
	 * Reserve context ids which map to reserved virtual addresses. For now
	 * we only reserve the context id which maps to the VRMA VSID. We ignore
	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
	 * enable adjunct support via the "ibm,client-architecture-support"
	 * interface.
	 */
	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
			     loff_t *pos)
{
	int cpu = (long)filp->private_data;
	struct lppaca *lppaca = &lppaca_of(cpu);

	return simple_read_from_buffer(buf, len, pos, lppaca,
				sizeof(struct lppaca));
}

static const struct file_operations vpa_fops = {
	.open		= simple_open,
	.read		= vpa_file_read,
	.llseek		= default_llseek,
};

static int __init vpa_debugfs_init(void)
{
	char name[16];
	long i;
	static struct dentry *vpa_dir;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
	if (!vpa_dir) {
		pr_warn("%s: can't create vpa root dir\n", __func__);
		return -ENOMEM;
	}

	/* set up the per-cpu vpa file */
	for_each_possible_cpu(i) {
		struct dentry *d;

		sprintf(name, "cpu-%ld", i);

		d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
					&vpa_fops);
		if (!d) {
			pr_warn("%s: can't create per-cpu vpa file\n",
					__func__);
			return -ENOMEM;
		}
	}

	return 0;
}
machine_arch_initcall(pseries, vpa_debugfs_init);
#endif /* CONFIG_DEBUG_FS */