/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_MMU_CONTEXT_H
#define _ASM_SCORE_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/slab.h>

#include <asm-generic/mm_hooks.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/scoreregs.h>
/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long asid_cache;
extern unsigned long pgd_current;

/* Publish @pgd so the TLB-miss fast path can locate the active page table. */
#define TLBMISS_HANDLER_SETUP_PGD(pgd) (pgd_current = (unsigned long)(pgd))

/*
 * Boot-time setup: clear the context register and point the miss handler
 * at the kernel's initial page directory (swapper_pg_dir).
 *
 * Fix: the inner TLBMISS_HANDLER_SETUP_PGD() invocation previously lacked
 * a trailing semicolon, making every expansion of this macro a syntax
 * error ("(expr) } while (0)").
 */
#define TLBMISS_HANDLER_SETUP()				\
do {							\
	write_c0_context(0);				\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);	\
} while (0)
31 | ||
/*
 * All unused by hardware upper bits will be considered
 * as a software asid extension.
 */
#define ASID_VERSION_MASK 0xfffff000	/* software version bits above the hw ASID */
#define ASID_FIRST_VERSION 0x1000	/* used when the counter wraps to 0 (see get_new_mmu_context) */

/* PEVN --------- VPN ---------- --ASID--- -NA- */
/* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */
/* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */
#define ASID_INC 0x10	/* step between consecutive ASIDs (hw field starts at bit 4) */
#define ASID_MASK 0xff0	/* hardware ASID field within PEVN */
44 | ||
/* Lazy-TLB mode switch hook: nothing to do on this architecture. */
static inline void enter_lazy_tlb(struct mm_struct *mm,
			struct task_struct *tsk)
{}
48 | ||
49 | static inline void | |
50 | get_new_mmu_context(struct mm_struct *mm) | |
51 | { | |
52 | unsigned long asid = asid_cache + ASID_INC; | |
53 | ||
54 | if (!(asid & ASID_MASK)) { | |
55 | local_flush_tlb_all(); /* start new asid cycle */ | |
56 | if (!asid) /* fix version if needed */ | |
57 | asid = ASID_FIRST_VERSION; | |
58 | } | |
59 | ||
60 | mm->context = asid; | |
61 | asid_cache = asid; | |
62 | } | |
63 | ||
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* 0 marks "no ASID assigned yet"; a real one is handed out later. */
	mm->context = 0;
	return 0;	/* cannot fail */
}
74 | ||
75 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
76 | struct task_struct *tsk) | |
77 | { | |
78 | unsigned long flags; | |
79 | ||
80 | local_irq_save(flags); | |
81 | if ((next->context ^ asid_cache) & ASID_VERSION_MASK) | |
82 | get_new_mmu_context(next); | |
83 | ||
84 | pevn_set(next->context); | |
85 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | |
86 | local_irq_restore(flags); | |
87 | } | |
88 | ||
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{}	/* no per-mm resources are allocated, so nothing to release */
95 | ||
/* mm deactivation hook: no action needed on this architecture. */
static inline void
deactivate_mm(struct task_struct *task, struct mm_struct *mm)
{}
99 | ||
100 | /* | |
101 | * After we have set current->mm to a new value, this activates | |
102 | * the context for the new mm so we see the new mappings. | |
103 | */ | |
104 | static inline void | |
105 | activate_mm(struct mm_struct *prev, struct mm_struct *next) | |
106 | { | |
107 | unsigned long flags; | |
108 | ||
109 | local_irq_save(flags); | |
110 | get_new_mmu_context(next); | |
111 | pevn_set(next->context); | |
112 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | |
113 | local_irq_restore(flags); | |
114 | } | |

#endif /* _ASM_SCORE_MMU_CONTEXT_H */