/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

static inline void contextidr_thread_switch(struct task_struct *next)
{
        if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
                return;

        write_sysreg(task_pid_nr(next), contextidr_el1);
        isb();
}

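/*
 * Usage sketch (illustrative): the arch context-switch path calls this on
 * every switch, e.g. from __switch_to(), so that an external debugger or
 * trace unit sampling CONTEXTIDR_EL1 can attribute activity to the
 * incoming task's PID. It is a no-op unless CONFIG_PID_IN_CONTEXTIDR=y.
 */
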
/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

        write_sysreg(ttbr, ttbr0_el1);
        isb();
}

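/*
 * Minimal sketch (hypothetical helper, not part of this header's API):
 * how a caller would quiesce all user-VA (TTBR0) translations on the
 * local CPU. reserved_pg_dir contains only invalid entries, so any user
 * access afterwards faults instead of translating; this is exactly the
 * first step of cpu_uninstall_idmap()/cpu_install_idmap() below.
 */
static inline void example_quiesce_user_translations(void)
{
        cpu_set_reserved_ttbr0();       /* no new walks via TTBR0 */
        local_flush_tlb_all();          /* drop stale cached translations */
}
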
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUG_ON(pgd == swapper_pg_dir);
        cpu_set_reserved_ttbr0();
        cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

static inline bool __cpu_uses_extended_idmap(void)
{
        return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
        return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

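/*
 * Worked example (illustrative numbers, assuming 4K pages): with
 * VA_BITS = 48, TCR_T0SZ(48) = 64 - 48 = 16 and
 * ARM64_HW_PGTABLE_LEVELS(48) = (48 - 4) / 9 = 4. If RAM sits above the
 * 48-bit boundary and the ID map needs 52-bit VAs, idmap_t0sz = 12, so
 * __cpu_uses_extended_idmap() is true, and
 * ARM64_HW_PGTABLE_LEVELS(52) = 5 > CONFIG_PGTABLE_LEVELS = 4 means
 * __cpu_uses_extended_idmap_level() is true as well: the ID map carries
 * an extra root level that the regular swapper tables do not have.
 */
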
/*
 * Set TCR.T0SZ to its default value (based on VA_BITS)
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
        unsigned long tcr;

        if (!__cpu_uses_extended_idmap())
                return;

        tcr = read_sysreg(tcr_el1);
        tcr &= ~TCR_T0SZ_MASK;
        tcr |= t0sz << TCR_T0SZ_OFFSET;
        write_sysreg(tcr, tcr_el1);
        isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

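/*
 * Pairing sketch (illustrative): any window in which the ID map is live
 * widens T0SZ first and restores it afterwards, with the reserved tables
 * installed across the change to keep speculation safe:
 *
 *	cpu_set_reserved_ttbr0();
 *	local_flush_tlb_all();
 *	cpu_set_idmap_tcr_t0sz();	// possibly wider than VA_BITS
 *	... run via the identity mapping ...
 *	cpu_set_reserved_ttbr0();
 *	local_flush_tlb_all();
 *	cpu_set_default_tcr_t0sz();	// back to TCR_T0SZ(vabits_actual)
 *
 * cpu_install_idmap() and cpu_uninstall_idmap() below implement the two
 * halves of this pattern.
 */
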
/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
        struct mm_struct *mm = current->active_mm;

        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        if (mm != &init_mm && !system_uses_ttbr0_pan())
                cpu_switch_mm(mm->pgd, mm);
}

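/*
 * Usage sketch (illustrative): a freshly booted secondary CPU enters the
 * kernel running on the ID map; secondary_start_kernel() calls
 * cpu_uninstall_idmap() early so the CPU's TTBR0/T0SZ state matches the
 * rest of the system before it can run user tasks.
 */
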
static inline void cpu_install_idmap(void)
{
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();

        cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

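/*
 * Bracketing sketch (hypothetical helper; phys_fn and its signature are
 * assumptions for illustration): work that must run via the identity
 * mapping, e.g. code that switches TTBR1_EL1 out from under the kernel,
 * is wrapped like this. cpu_replace_ttbr1() below is the in-tree
 * instance of the pattern.
 */
static inline void example_run_via_idmap(void (*phys_fn)(void))
{
        cpu_install_idmap();    /* TTBR0 = idmap_pg_dir, idmap T0SZ */
        phys_fn();              /* must be a PA alias of idmapped code */
        cpu_uninstall_idmap();  /* back to the active mm (or reserved) */
}
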
/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
        ttbr_replace_func *replace_phys;

        /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
        phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

        if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
                /*
                 * cpu_replace_ttbr1() is used either early, when only the
                 * boot CPU is up (i.e. the cpufeature framework is not up
                 * yet), or later, when we enable CNP via cpufeature's
                 * enable() callback.
                 * Also we rely on the cpu_hwcap bit being set before
                 * calling the enable() function.
                 */
                ttbr1 |= TTBR_CNP_BIT;
        }

        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

        cpu_install_idmap();
        replace_phys(ttbr1);
        cpu_uninstall_idmap();
}

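/*
 * Usage sketch (illustrative): once the final kernel page tables have
 * been populated, the kernel moves onto them with, e.g.:
 *
 *	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 *
 * as paging_init() does; passing anything other than the swapper PGD
 * while CNP is supported trips the WARN_ON() above.
 */
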
/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while (0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

#define init_new_context(tsk, mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

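/*
 * Worked note (illustrative): context.id == 0 never matches a live
 * ASID-and-generation value handed out by the allocator, so an mm fresh
 * from init_new_context() always misses the fast path in
 * check_and_switch_context() and picks up a real ASID on first use.
 * Conversely, destroy_context() can be a no-op because a dead mm's ASID
 * simply falls out of use and becomes allocatable again at the next
 * generation rollover.
 */
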
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
        u64 ttbr;

        if (!system_uses_ttbr0_pan())
                return;

        if (mm == &init_mm)
                ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
        else
                ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
}
#endif

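/*
 * Layout sketch (illustrative numbers): the saved value mirrors the
 * hardware TTBR0_EL1 format, with the table base in the low bits and the
 * ASID in bits [63:48]. For example, a pgd at PA 0x40001000 and ASID 5
 * yields:
 *
 *	phys_to_ttbr(0x40001000) | (5UL << 48) == 0x0005000040001000
 *
 * which the uaccess-enable code can move straight into TTBR0_EL1.
 */
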
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /*
         * We don't actually care about the ttbr0 mapping, so point it at the
         * zero page.
         */
        update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
        unsigned int cpu = smp_processor_id();

        /*
         * init_mm.pgd does not contain any user mappings and it is always
         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
         */
        if (next == &init_mm) {
                cpu_set_reserved_ttbr0();
                return;
        }

        check_and_switch_context(next, cpu);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        if (prev != next)
                __switch_mm(next);

        /*
         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
         * value may not have been initialised yet (activate_mm caller) or the
         * ASID has changed since the last run (following the context switch
         * of another thread of the same process).
         */
        update_saved_ttbr0(tsk, next);
}

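/*
 * End-to-end sketch (illustrative): switching to a task tsk whose
 * process differs from prev's goes through:
 *
 *	switch_mm(prev, next, tsk)
 *	  -> __switch_mm(next)                 // prev != next
 *	       -> check_and_switch_context()   // ASID alloc/rollover + TTBR0
 *	  -> update_saved_ttbr0(tsk, next)     // refresh SW PAN shadow copy
 *
 * while a switch between threads of the same process skips __switch_mm()
 * but still refreshes the saved TTBR0, per the comment above.
 */
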
#define deactivate_mm(tsk, mm)	do { } while (0)
#define activate_mm(prev, next)	switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */