]>
Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
61e85e36 JB |
2 | /* |
3 | * OpenRISC tlb.c | |
4 | * | |
5 | * Linux architectural port borrowing liberally from similar works of | |
6 | * others. All original copyrights apply as per the original source | |
7 | * declaration. | |
8 | * | |
9 | * Modifications for the OpenRISC architecture: | |
10 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | |
11 | * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se> | |
12 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | |
61e85e36 JB |
13 | */ |
14 | ||
15 | #include <linux/sched.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/errno.h> | |
18 | #include <linux/string.h> | |
19 | #include <linux/types.h> | |
20 | #include <linux/ptrace.h> | |
21 | #include <linux/mman.h> | |
22 | #include <linux/mm.h> | |
23 | #include <linux/init.h> | |
24 | ||
61e85e36 JB |
25 | #include <asm/tlbflush.h> |
26 | #include <asm/pgtable.h> | |
27 | #include <asm/mmu_context.h> | |
28 | #include <asm/spr_defs.h> | |
29 | ||
/* Sentinel stored in mm->context meaning "no MMU context allocated yet". */
#define NO_CONTEXT (-1)

/*
 * Number of TLB sets: 1 << NTS, where the NTS field is read from the
 * per-MMU configuration register.  The DTLB geometry comes from
 * SPR_DMMUCFGR and the ITLB geometry from SPR_IMMUCFGR — the original
 * code read the IMMU register for the DTLB count, which is wrong
 * whenever the two MMUs are configured with different set counts.
 */
#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
			    SPR_DMMUCFGR_NTS_OFF))
#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
			    SPR_IMMUCFGR_NTS_OFF))
/* Map a virtual address to its TLB set index (sets are a power of two). */
#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
38 | /* | |
39 | * Invalidate all TLB entries. | |
40 | * | |
41 | * This comes down to setting the 'valid' bit for all xTLBMR registers to 0. | |
42 | * Easiest way to accomplish this is to just zero out the xTLBMR register | |
43 | * completely. | |
44 | * | |
45 | */ | |
46 | ||
8e6d08e0 | 47 | void local_flush_tlb_all(void) |
61e85e36 JB |
48 | { |
49 | int i; | |
50 | unsigned long num_tlb_sets; | |
51 | ||
52 | /* Determine number of sets for IMMU. */ | |
53 | /* FIXME: Assumption is I & D nsets equal. */ | |
54 | num_tlb_sets = NUM_ITLB_SETS; | |
55 | ||
56 | for (i = 0; i < num_tlb_sets; i++) { | |
57 | mtspr_off(SPR_DTLBMR_BASE(0), i, 0); | |
58 | mtspr_off(SPR_ITLBMR_BASE(0), i, 0); | |
59 | } | |
60 | } | |
61 | ||
/* Does this MMU implement the xTLBEIR invalidate register? */
#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)

/*
 * Invalidate a single page. This is what the xTLBEIR register is for.
 *
 * There's no point in checking the vma for PAGE_EXEC to determine whether it's
 * the data or instruction TLB that should be flushed... that would take more
 * than the few instructions that the following compiles down to!
 *
 * The case where we don't have the xTLBEIR register really only works for
 * MMU's with a single way and is hard-coded that way.
 *
 * Note: the macro bodies deliberately carry no trailing semicolon, so
 * each expands to a single expression and is safe to use as an ordinary
 * statement (including directly before an "else").  Callers supply the
 * terminating semicolon themselves.
 */

#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
#define flush_dtlb_page_no_eir(addr) \
	mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0)

#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
#define flush_itlb_page_no_eir(addr) \
	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0)
83 | ||
8e6d08e0 | 84 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) |
61e85e36 JB |
85 | { |
86 | if (have_dtlbeir) | |
87 | flush_dtlb_page_eir(addr); | |
88 | else | |
89 | flush_dtlb_page_no_eir(addr); | |
90 | ||
91 | if (have_itlbeir) | |
92 | flush_itlb_page_eir(addr); | |
93 | else | |
94 | flush_itlb_page_no_eir(addr); | |
95 | } | |
96 | ||
8e6d08e0 SK |
97 | void local_flush_tlb_range(struct vm_area_struct *vma, |
98 | unsigned long start, unsigned long end) | |
61e85e36 JB |
99 | { |
100 | int addr; | |
101 | bool dtlbeir; | |
102 | bool itlbeir; | |
103 | ||
104 | dtlbeir = have_dtlbeir; | |
105 | itlbeir = have_itlbeir; | |
106 | ||
107 | for (addr = start; addr < end; addr += PAGE_SIZE) { | |
108 | if (dtlbeir) | |
109 | flush_dtlb_page_eir(addr); | |
110 | else | |
111 | flush_dtlb_page_no_eir(addr); | |
112 | ||
113 | if (itlbeir) | |
114 | flush_itlb_page_eir(addr); | |
115 | else | |
116 | flush_itlb_page_no_eir(addr); | |
117 | } | |
118 | } | |
119 | ||
/*
 * Invalidate the selected mm context only.
 *
 * FIXME: Due to some bug here, we're flushing everything for now.
 * This should be changed to loop over the mm and call flush_tlb_range.
 */
126 | ||
void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * The mm passed in here proved unreliable, so most of this function
	 * was scrapped.  Flushing everything — as several other
	 * architectures also do — is the safe fallback.
	 */
	local_flush_tlb_all();
}
135 | ||
136 | /* called in schedule() just before actually doing the switch_to */ | |
137 | ||
138 | void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
139 | struct task_struct *next_tsk) | |
140 | { | |
141 | /* remember the pgd for the fault handlers | |
142 | * this is similar to the pgd register in some other CPU's. | |
143 | * we need our own copy of it because current and active_mm | |
144 | * might be invalid at points where we still need to derefer | |
145 | * the pgd. | |
146 | */ | |
8e6d08e0 | 147 | current_pgd[smp_processor_id()] = next->pgd; |
61e85e36 JB |
148 | |
149 | /* We don't have context support implemented, so flush all | |
150 | * entries belonging to previous map | |
151 | */ | |
152 | ||
153 | if (prev != next) | |
8e6d08e0 | 154 | local_flush_tlb_mm(prev); |
61e85e36 JB |
155 | |
156 | } | |
157 | ||
158 | /* | |
159 | * Initialize the context related info for a new mm_struct | |
160 | * instance. | |
161 | */ | |
162 | ||
163 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |
164 | { | |
165 | mm->context = NO_CONTEXT; | |
166 | return 0; | |
167 | } | |
168 | ||
169 | /* called by __exit_mm to destroy the used MMU context if any before | |
170 | * destroying the mm itself. this is only called when the last user of the mm | |
171 | * drops it. | |
172 | */ | |
173 | ||
void destroy_context(struct mm_struct *mm)
{
	/* Drop any TLB entries still referring to this mm's mappings. */
	flush_tlb_mm(mm);
}
179 | ||
180 | /* called once during VM initialization, from init.c */ | |
181 | ||
182 | void __init tlb_init(void) | |
183 | { | |
184 | /* Do nothing... */ | |
185 | /* invalidate the entire TLB */ | |
186 | /* flush_tlb_all(); */ | |
187 | } |