/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *	Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

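/*
 * Per-CPU cache of the table indices of recently created user segment
 * table entries.  On a context switch, switch_stab() uses it to
 * invalidate just those entries instead of scanning the whole table.
 */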
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;

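	/*
	 * The segment table is one HW_PAGE_SIZE (4kB) page: 32 primary
	 * groups of eight 16-byte entries.  A group's byte offset within
	 * the table is (hash << 7) and its first global entry number is
	 * (hash << 3); the primary hash is the low 5 bits of the ESID,
	 * the secondary hash is their complement.
	 */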
	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
				/* Order the VSID update before the
				 * ESID update that validates the entry */
				asm volatile("eieio" : : : "memory");
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry; pick a victim using round-robin
	 * selection, searching all entries in both groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment */
		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	/* Advance the round-robin pointer past the victim */
	get_paca()->stab_rr = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */

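	/*
	 * The victim is replaced in place, so ordering matters: the old
	 * ESID word is zeroed first so the entry is never valid with a
	 * stale VSID, and the new VSID must be visible (eieio) before
	 * the new ESID word marks the entry valid again.
	 */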
	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->vsid_data = vsid_data;
	asm volatile("eieio" : : : "memory");	/* Order update */
	castout_ste->esid_data = esid_data;

	/* Flush the victim's old translation from the lookaside buffer */
	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm.
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (is_kernel_addr(ea)) {
		vsid = get_kernel_vsid(ea);
	} else {
		if ((ea >= TASK_SIZE_USER64) || !mm)
			return 1;

		vsid = get_vsid(mm->context.id, ea);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

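	/*
	 * Remember user entries in the per-CPU cache so the next context
	 * switch can invalidate just those.  On overflow, leave
	 * stab_cache_ptr past NR_STAB_CACHE_ENTRIES as a flag telling
	 * switch_stab() to flush the whole table instead.
	 */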
	if (!is_kernel_addr(ea)) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES + 1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync" : : : "memory");
	}

	return 0;
}

int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *)get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset = __get_cpu_var(stab_cache_ptr);
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

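	/*
	 * If the per-CPU cache did not overflow, invalidate only the
	 * cached user entries; otherwise walk the whole table and
	 * invalidate every user entry, sparing the bolted first
	 * (kernel) entry.
	 */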
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0;	/* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;
			ea = ste->esid_data & ESID_MASK;
			if (!is_kernel_addr(ea)) {
				ste->esid_data = 0;
			}
		}
	}

	/* Flush any segment translations still cached by the hardware */
	asm volatile("sync; slbia; sync" : : : "memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

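	/*
	 * Enter the segments for the program counter, the stack and the
	 * mmap base; the GET_ESID() comparisons skip a segment that has
	 * already been entered, so nothing is allocated twice.
	 */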
	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs.  These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void __init stabs_alloc(void)
{
	int cpu;

	/* CPUs with an SLB don't use a software segment table at all */
	if (cpu_has_feature(CPU_FTR_SLB))
		return;

	for_each_possible_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue; /* stab for CPU 0 is statically allocated */

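		/*
		 * Allocate one page-aligned hardware page below 256MB
		 * (1 << SID_SHIFT) so the table itself lives inside the
		 * bolted first kernel segment.
		 */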
		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
					 1 << SID_SHIFT);
		newstab = (unsigned long)__va(newstab);

		memset((void *)newstab, 0, HW_PAGE_SIZE);

		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = virt_to_abs(newstab);
		printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
		       "virtual, 0x%lx absolute\n",
		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
	}
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
	unsigned long stabreal;

	asm volatile("isync; slbia; isync" : : : "memory");
	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

	/* Order update */
	asm volatile("sync" : : : "memory");

	/* Set the ASR: real address of the segment table, low bit = valid */
	stabreal = get_paca()->stab_real | 0x1ul;

#ifdef CONFIG_PPC_ISERIES
	/* On iSeries the ASR is set through a hypervisor call */
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		HvCall1(HvCallBaseSetASR, stabreal);
		return;
	}
#endif /* CONFIG_PPC_ISERIES */

	mtspr(SPRN_ASR, stabreal);
}