]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
51533b61 MS |
2 | /* |
3 | * Low level TLB handling. | |
4 | * | |
5 | * Copyright (C) 2000-2003, Axis Communications AB. | |
6 | * | |
7 | * Authors: Bjorn Wesen <bjornw@axis.com> | |
8 | * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port. | |
9 | */ | |
589ee628 | 10 | #include <linux/mm_types.h> |
51533b61 MS |
11 | |
12 | #include <asm/tlb.h> | |
13 | #include <asm/mmu_context.h> | |
556dcee7 JN |
14 | #include <arch/hwregs/asm/mmu_defs_asm.h> |
15 | #include <arch/hwregs/supp_reg.h> | |
51533b61 MS |
16 | |
/*
 * Point RW_MM_TLB_SEL at TLB entry index @val in the currently
 * selected MMU bank, so a following HI/LO write hits that entry.
 */
#define UPDATE_TLB_SEL_IDX(val)					\
	do {							\
		unsigned long tlb_sel =				\
			REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
		SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel);		\
	} while (0)
24 | ||
/*
 * Write a (hi, lo) pair into the TLB entry currently selected via
 * RW_MM_TLB_SEL (see UPDATE_TLB_SEL_IDX).
 */
#define UPDATE_TLB_HILO(tlb_hi, tlb_lo) \
	do { \
		SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi); \
		SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo); \
	} while(0)
30 | ||
/*
 * The TLB can host up to 256 different mm contexts at the same time. The
 * running context is found in the PID register. Each TLB entry contains a
 * page_id that has to match the PID register to give a hit. page_id_map
 * keeps track of which mm is assigned to which page_id, making sure it's
 * known when to invalidate TLB entries.
 *
 * The last page_id is never running; it is used as an invalid page_id so
 * that it's possible to make TLB entries that will never match.
 *
 * Note: the flushes need to be atomic, otherwise an interrupt handler that
 * uses vmalloc'ed memory might cause a TLB load in the middle of a flush.
 */
44 | ||
45 | /* Flush all TLB entries. */ | |
46 | void | |
47 | __flush_tlb_all(void) | |
48 | { | |
49 | int i; | |
50 | int mmu; | |
51 | unsigned long flags; | |
52 | unsigned long mmu_tlb_hi; | |
53 | unsigned long mmu_tlb_sel; | |
54 | ||
55 | /* | |
56 | * Mask with 0xf so similar TLB entries aren't written in the same 4-way | |
57 | * entry group. | |
58 | */ | |
5cf885d0 | 59 | local_irq_save(flags); |
51533b61 MS |
60 | |
61 | for (mmu = 1; mmu <= 2; mmu++) { | |
62 | SUPP_BANK_SEL(mmu); /* Select the MMU */ | |
63 | for (i = 0; i < NUM_TLB_ENTRIES; i++) { | |
64 | /* Store invalid entry */ | |
65 | mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i); | |
66 | ||
67 | mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) | |
68 | | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf)); | |
69 | ||
70 | SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel); | |
71 | SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi); | |
72 | SUPP_REG_WR(RW_MM_TLB_LO, 0); | |
73 | } | |
74 | } | |
75 | ||
76 | local_irq_restore(flags); | |
77 | } | |
78 | ||
79 | /* Flush an entire user address space. */ | |
80 | void | |
81 | __flush_tlb_mm(struct mm_struct *mm) | |
82 | { | |
83 | int i; | |
84 | int mmu; | |
85 | unsigned long flags; | |
86 | unsigned long page_id; | |
87 | unsigned long tlb_hi; | |
88 | unsigned long mmu_tlb_hi; | |
89 | ||
90 | page_id = mm->context.page_id; | |
91 | ||
92 | if (page_id == NO_CONTEXT) | |
93 | return; | |
94 | ||
95 | /* Mark the TLB entries that match the page_id as invalid. */ | |
5cf885d0 | 96 | local_irq_save(flags); |
51533b61 MS |
97 | |
98 | for (mmu = 1; mmu <= 2; mmu++) { | |
99 | SUPP_BANK_SEL(mmu); | |
100 | for (i = 0; i < NUM_TLB_ENTRIES; i++) { | |
101 | UPDATE_TLB_SEL_IDX(i); | |
102 | ||
103 | /* Get the page_id */ | |
104 | SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi); | |
105 | ||
106 | /* Check if the page_id match. */ | |
107 | if ((tlb_hi & 0xff) == page_id) { | |
108 | mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, | |
109 | INVALID_PAGEID) | |
110 | | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, | |
111 | i & 0xf)); | |
112 | ||
113 | UPDATE_TLB_HILO(mmu_tlb_hi, 0); | |
114 | } | |
115 | } | |
116 | } | |
117 | ||
118 | local_irq_restore(flags); | |
119 | } | |
120 | ||
121 | /* Invalidate a single page. */ | |
122 | void | |
123 | __flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | |
124 | { | |
125 | int i; | |
126 | int mmu; | |
127 | unsigned long page_id; | |
128 | unsigned long flags; | |
129 | unsigned long tlb_hi; | |
130 | unsigned long mmu_tlb_hi; | |
131 | ||
132 | page_id = vma->vm_mm->context.page_id; | |
133 | ||
134 | if (page_id == NO_CONTEXT) | |
135 | return; | |
136 | ||
137 | addr &= PAGE_MASK; | |
138 | ||
139 | /* | |
140 | * Invalidate those TLB entries that match both the mm context and the | |
141 | * requested virtual address. | |
142 | */ | |
5cf885d0 | 143 | local_irq_save(flags); |
51533b61 MS |
144 | |
145 | for (mmu = 1; mmu <= 2; mmu++) { | |
146 | SUPP_BANK_SEL(mmu); | |
147 | for (i = 0; i < NUM_TLB_ENTRIES; i++) { | |
148 | UPDATE_TLB_SEL_IDX(i); | |
149 | SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi); | |
150 | ||
151 | /* Check if page_id and address matches */ | |
152 | if (((tlb_hi & 0xff) == page_id) && | |
153 | ((tlb_hi & PAGE_MASK) == addr)) { | |
154 | mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid, | |
155 | INVALID_PAGEID) | addr; | |
156 | ||
157 | UPDATE_TLB_HILO(mmu_tlb_hi, 0); | |
158 | } | |
159 | } | |
160 | } | |
161 | ||
162 | local_irq_restore(flags); | |
163 | } | |
164 | ||
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/*
	 * No page_id yet; one is assigned later via get_mmu_context()
	 * when switch_mm() first activates this mm.
	 */
	mm->context.page_id = NO_CONTEXT;
	return 0;
}
176 | ||
/* Serializes page_id assignment in switch_mm()/get_mmu_context(). */
static DEFINE_SPINLOCK(mmu_context_lock);
51533b61 MS |
179 | /* Called in schedule() just before actually doing the switch_to. */ |
180 | void | |
181 | switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
182 | struct task_struct *tsk) | |
183 | { | |
52d82ef1 JN |
184 | if (prev != next) { |
185 | int cpu = smp_processor_id(); | |
186 | ||
187 | /* Make sure there is a MMU context. */ | |
188 | spin_lock(&mmu_context_lock); | |
189 | get_mmu_context(next); | |
b9d65c04 | 190 | cpumask_set_cpu(cpu, mm_cpumask(next)); |
52d82ef1 JN |
191 | spin_unlock(&mmu_context_lock); |
192 | ||
193 | /* | |
3ad2f3fb | 194 | * Remember the pgd for the fault handlers. Keep a separate |
52d82ef1 JN |
195 | * copy of it because current and active_mm might be invalid |
196 | * at points where * there's still a need to derefer the pgd. | |
197 | */ | |
198 | per_cpu(current_pgd, cpu) = next->pgd; | |
199 | ||
200 | /* Switch context in the MMU. */ | |
201 | if (tsk && task_thread_info(tsk)) { | |
202 | SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | | |
203 | task_thread_info(tsk)->tls); | |
204 | } else { | |
205 | SPEC_REG_WR(SPEC_REG_PID, next->context.page_id); | |
206 | } | |
207 | } | |
51533b61 MS |
208 | } |
209 |