/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
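
/*
 * Illustrative sketch only (not part of the original header), assuming a
 * caller along the lines of an SPAPR TCE/VFIO-style driver; locking and
 * error handling are elided:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	mm_iommu_new(mm, ua, entries, &mem);		pin and pre-register
 *	mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa);	translate for a TCE
 *	mm_iommu_mapped_inc(mem);			while mapped in a table
 *	...
 *	mm_iommu_mapped_dec(mem);
 *	mm_iommu_put(mm, mem);				release / unpin
 */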
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}
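
/*
 * Note (added for clarity): with the Radix MMU the hardware context (PID)
 * is switched directly; the hash MMU path instead reloads the SLB for the
 * incoming mm via switch_slb().
 */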
extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return 0;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non book3s_64 should never find this called */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global as to
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
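
/*
 * Illustrative sketch only (not part of the original header): users of the
 * copro API, e.g. a coprocessor driver attaching this context to the Nest
 * MMU, are expected to pair the calls:
 *
 *	mm_context_add_copro(mm);
 *	... set up the coprocessor / NMMU with this context ...
 *	mm_context_remove_copro(mm);
 *
 * so that 'copros' and 'active_cpus' stay balanced and TLB invalidations
 * keep the required (global) scope while the copro is attached.
 */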
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}
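
/*
 * Illustrative sketch only (not part of the original header): the expected
 * calling pattern, e.g. from the generic exec path, is roughly:
 *
 *	old_mm = current->mm;
 *	current->mm = new_mm;
 *	activate_mm(old_mm, new_mm);
 */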

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

#ifdef CONFIG_PPC_BOOK3E_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* Forget the VDSO base address if its mapping is being torn down */
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */