/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004,2005 by Thiemo Seufer
 * Copyright (C) 2005 Maciej W. Rozycki
 */

#include <stdarg.h>

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/smp.h>
#include <asm/war.h>

/* #define DEBUG_TLB */

static __init int __attribute__((unused)) r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __init int __attribute__((unused)) r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __init int __attribute__((unused)) bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static __init int __attribute__((unused)) r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

/*
 * A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple, supports only a
 * subset of instructions, and does not try to hide pipeline effects
 * like branch delay slots.
 */

enum fields
{
	RS = 0x001,
	RT = 0x002,
	RD = 0x004,
	RE = 0x008,
	SIMM = 0x010,
	UIMM = 0x020,
	BIMM = 0x040,
	JIMM = 0x080,
	FUNC = 0x100,
};

#define OP_MASK		0x3f
#define OP_SH		26
#define RS_MASK		0x1f
#define RS_SH		21
#define RT_MASK		0x1f
#define RT_SH		16
#define RD_MASK		0x1f
#define RD_SH		11
#define RE_MASK		0x1f
#define RE_SH		6
#define IMM_MASK	0xffff
#define IMM_SH		0
#define JIMM_MASK	0x3ffffff
#define JIMM_SH		0
#define FUNC_MASK	0x3f
#define FUNC_SH		0
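
/*
 * For reference (not in the original source): the shifts and masks above
 * follow the standard MIPS instruction word layout,
 *
 *	op[31:26] rs[25:21] rt[20:16] rd[15:11] re[10:6] func[5:0]
 *
 * with the 16-bit immediate in bits 15:0 and the 26-bit jump index in
 * bits 25:0.
 */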

enum opcode {
	insn_invalid,
	insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
	insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
	insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
	insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
	insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
	insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
	insn_tlbwr, insn_xor, insn_xori
};

struct insn {
	enum opcode opcode;
	u32 match;
	enum fields fields;
};

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH						\
	 | (b) << RS_SH						\
	 | (c) << RT_SH						\
	 | (d) << RD_SH						\
	 | (e) << RE_SH						\
	 | (f) << FUNC_SH)

static __initdata struct insn insn_table[] = {
	{ insn_addiu, M(addiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_addu, M(spec_op,0,0,0,0,addu_op), RS | RT | RD },
	{ insn_and, M(spec_op,0,0,0,0,and_op), RS | RT | RD },
	{ insn_andi, M(andi_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_beq, M(beq_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_beql, M(beql_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_bgez, M(bcond_op,0,bgez_op,0,0,0), RS | BIMM },
	{ insn_bgezl, M(bcond_op,0,bgezl_op,0,0,0), RS | BIMM },
	{ insn_bltz, M(bcond_op,0,bltz_op,0,0,0), RS | BIMM },
	{ insn_bltzl, M(bcond_op,0,bltzl_op,0,0,0), RS | BIMM },
	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 },
	{ insn_j, M(j_op,0,0,0,0,0), JIMM },
	{ insn_jal, M(jal_op,0,0,0,0,0), JIMM },
	{ insn_jr, M(spec_op,0,0,0,0,jr_op), RS },
	{ insn_ld, M(ld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_ll, M(ll_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
	{ insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_scd, M(scd_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_sd, M(sd_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_sll, M(spec_op,0,0,0,0,sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op,0,0,0,0,sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op,0,0,0,0,srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op,0,0,0,0,subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_tlbp, M(cop0_op,cop_op,0,0,0,tlbp_op), 0 },
	{ insn_tlbwi, M(cop0_op,cop_op,0,0,0,tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op,cop_op,0,0,0,tlbwr_op), 0 },
	{ insn_xor, M(spec_op,0,0,0,0,xor_op), RS | RT | RD },
	{ insn_xori, M(xori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_invalid, 0, 0 }
};

#undef M

static __init u32 build_rs(u32 arg)
{
	if (arg & ~RS_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

static __init u32 build_rt(u32 arg)
{
	if (arg & ~RT_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}

static __init u32 build_rd(u32 arg)
{
	if (arg & ~RD_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

static __init u32 build_re(u32 arg)
{
	if (arg & ~RE_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}

static __init u32 build_simm(s32 arg)
{
	if (arg > 0x7fff || arg < -0x8000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & 0xffff;
}

static __init u32 build_uimm(u32 arg)
{
	if (arg & ~IMM_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & IMM_MASK;
}

static __init u32 build_bimm(s32 arg)
{
	if (arg > 0x1ffff || arg < -0x20000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	if (arg & 0x3)
		printk(KERN_WARNING "Invalid TLB synthesizer branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
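
/*
 * Worked example (illustrative, not from the original source): the
 * relocation code later in this file passes build_bimm() a byte offset
 * relative to the instruction that follows the branch.  A target 8 bytes
 * past that point encodes as 8 >> 2 = 2 in the low 15 bits, with the
 * sign bit (bit 15) clear.
 */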

static __init u32 build_jimm(u32 arg)
{
	if (arg & ~((JIMM_MASK) << 2))
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}

static __init u32 build_func(u32 arg)
{
	if (arg & ~FUNC_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & FUNC_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __init build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip)
		panic("Unsupported TLB synthesizer instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT) op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD) op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE) op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}
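
/*
 * Illustrative sketch (the helper below is not part of the original
 * source): assembling a single "addu" by hand shows how the pieces
 * above fit together.  The insn_table entry for insn_addu supplies the
 * fixed opcode/function bits through its match field, and build_insn()
 * fills in the RS, RT and RD fields from the variadic arguments, in
 * that order.
 */
static void __init __attribute__((unused)) example_build_addu(u32 **buf)
{
	/* Emits 0x035bd021, i.e. "addu $26, $26, $27" (rs=26, rt=27, rd=26). */
	build_insn(buf, insn_addu, 26, 27, 26);
}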

#define I_u1u2u3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}

#define I_u2u1u3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}

#define I_u3u1u2(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, c, a);		\
	}

#define I_u1u2s3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}

#define I_u2s3u1(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		signed int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, c, a, b);		\
	}

#define I_u2u1s3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}

#define I_u1u2(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}

#define I_u1s2(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		signed int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}

#define I_u1(op)						\
	static inline void i##op(u32 **buf, unsigned int a)	\
	{							\
		build_insn(buf, insn##op, a);			\
	}

#define I_0(op)							\
	static inline void i##op(u32 **buf)			\
	{							\
		build_insn(buf, insn##op);			\
	}

I_u2u1s3(_addiu);
I_u3u1u2(_addu);
I_u2u1u3(_andi);
I_u3u1u2(_and);
I_u1u2s3(_beq);
I_u1u2s3(_beql);
I_u1s2(_bgez);
I_u1s2(_bgezl);
I_u1s2(_bltz);
I_u1s2(_bltzl);
I_u1u2s3(_bne);
I_u1u2(_dmfc0);
I_u1u2(_dmtc0);
I_u2u1s3(_daddiu);
I_u3u1u2(_daddu);
I_u2u1u3(_dsll);
I_u2u1u3(_dsll32);
I_u2u1u3(_dsra);
I_u2u1u3(_dsrl);
I_u3u1u2(_dsubu);
I_0(_eret);
I_u1(_j);
I_u1(_jal);
I_u1(_jr);
I_u2s3u1(_ld);
I_u2s3u1(_ll);
I_u2s3u1(_lld);
I_u1s2(_lui);
I_u2s3u1(_lw);
I_u1u2(_mfc0);
I_u1u2(_mtc0);
I_u2u1u3(_ori);
I_0(_rfe);
I_u2s3u1(_sc);
I_u2s3u1(_scd);
I_u2s3u1(_sd);
I_u2u1u3(_sll);
I_u2u1u3(_sra);
I_u2u1u3(_srl);
I_u3u1u2(_subu);
I_u2s3u1(_sw);
I_0(_tlbp);
I_0(_tlbwi);
I_0(_tlbwr);
I_u3u1u2(_xor);
I_u2u1u3(_xori);
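
/*
 * Illustrative sketch (the helper below is not part of the original
 * source): the generated wrappers take their operands in "destination
 * first" assembler order, e.g. i_ori(buf, rt, rs, imm) emits
 * "ori rt, rs, imm" even though RS is encoded before RT in the
 * instruction word.
 */
static void __init __attribute__((unused)) example_wrapper_order(u32 **buf)
{
	i_ori(buf, 26, 27, 0x40);	/* ori $26, $27, 0x40 */
	i_sll(buf, 26, 26, 2);		/* sll $26, $26, 2 */
}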

/*
 * handling labels
 */

enum label_id {
	label_invalid,
	label_second_part,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
};

struct label {
	u32 *addr;
	enum label_id lab;
};

static __init void build_label(struct label **lab, u32 *addr,
			       enum label_id l)
{
	(*lab)->addr = addr;
	(*lab)->lab = l;
	(*lab)++;
}

#define L_LA(lb)						\
	static inline void l##lb(struct label **lab, u32 *addr) \
	{							\
		build_label(lab, addr, label##lb);		\
	}

L_LA(_second_part)
L_LA(_leave)
L_LA(_vmalloc)
L_LA(_vmalloc_done)
L_LA(_tlbw_hazard)
L_LA(_split)
L_LA(_nopage_tlbl)
L_LA(_nopage_tlbs)
L_LA(_nopage_tlbm)
L_LA(_smp_pgtable_change)
L_LA(_r3000_write_probe_fail)

/* convenience macros for instructions */
#ifdef CONFIG_64BIT
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_lld(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_scd(buf, rs, rt, off)
#else
# define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_ll(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_sc(buf, rs, rt, off)
#endif

#define i_b(buf, off) i_beq(buf, 0, 0, off)
#define i_beqz(buf, rs, off) i_beq(buf, rs, 0, off)
#define i_beqzl(buf, rs, off) i_beql(buf, rs, 0, off)
#define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off)
#define i_bnezl(buf, rs, off) i_bnel(buf, rs, 0, off)
#define i_move(buf, a, b) i_ADDU(buf, a, 0, b)
#define i_nop(buf) i_sll(buf, 0, 0, 0)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)

#ifdef CONFIG_64BIT
static __init int __attribute__((unused)) in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
	return (((addr) & 0xffffffff00000000) == 0xffffffff00000000);
}

static __init int __attribute__((unused)) rel_highest(long val)
{
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
}

static __init int __attribute__((unused)) rel_higher(long val)
{
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
}
#endif

static __init int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static __init int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
#ifdef CONFIG_64BIT
	if (!in_compat_space_p(addr)) {
		i_lui(buf, rs, rel_highest(addr));
		if (rel_higher(addr))
			i_daddiu(buf, rs, rs, rel_higher(addr));
		if (rel_hi(addr)) {
			i_dsll(buf, rs, rs, 16);
			i_daddiu(buf, rs, rs, rel_hi(addr));
			i_dsll(buf, rs, rs, 16);
		} else
			i_dsll32(buf, rs, rs, 0);
	} else
#endif
		i_lui(buf, rs, rel_hi(addr));
}

static __init void __attribute__((unused)) i_LA(u32 **buf, unsigned int rs,
						long addr)
{
	i_LA_mostly(buf, rs, addr);
	if (rel_lo(addr))
		i_ADDIU(buf, rs, rs, rel_lo(addr));
}
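
/*
 * Worked example (illustrative, assuming a 32-bit kernel and a KSEG0
 * address): for addr == 0x80018000, rel_hi(addr) is 0x8002 sign-extended
 * to -0x7ffe and rel_lo(addr) is -0x8000, so i_LA() emits
 * "lui reg, 0x8002; addiu reg, reg, -0x8000", which reassembles the
 * original address once the addiu sign extension is accounted for.
 */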

/*
 * handle relocations
 */

struct reloc {
	u32 *addr;
	unsigned int type;
	enum label_id lab;
};

static __init void r_mips_pc16(struct reloc **rel, u32 *addr,
			       enum label_id l)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = l;
	(*rel)++;
}

static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported TLB synthesizer relocation %d",
		      rel->type);
	}
}

static __init void resolve_relocs(struct reloc *rel, struct label *lab)
{
	struct label *l;

	for (; rel->lab != label_invalid; rel++)
		for (l = lab; l->lab != label_invalid; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}

static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
			       long off)
{
	for (; rel->lab != label_invalid; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}

static __init void move_labels(struct label *lab, u32 *first, u32 *end,
			       long off)
{
	for (; lab->lab != label_invalid; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}

static __init void copy_handler(struct reloc *rel, struct label *lab,
				u32 *first, u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	move_relocs(rel, first, end, off);
	move_labels(lab, first, end, off);
}

static __init int __attribute__((unused)) insn_has_bdelay(struct reloc *rel,
							  u32 *addr)
{
	for (; rel->lab != label_invalid; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}

/* convenience functions for labeled branches */
static void __attribute__((unused)) il_bltz(u32 **p, struct reloc **r,
					    unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bltz(p, reg, 0);
}

static void __attribute__((unused)) il_b(u32 **p, struct reloc **r,
					 enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_b(p, 0);
}

static void il_beqz(u32 **p, struct reloc **r, unsigned int reg,
		    enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqz(p, reg, 0);
}

static void __attribute__((unused))
il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqzl(p, reg, 0);
}

static void il_bnez(u32 **p, struct reloc **r, unsigned int reg,
		    enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bnez(p, reg, 0);
}

static void il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
		     enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgezl(p, reg, 0);
}
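
/*
 * Illustrative sketch (the helper below is not part of the original
 * source): a forward branch is emitted with a zero offset and a pending
 * R_MIPS_PC16 relocation; once the target address is known the label is
 * recorded, and resolve_relocs() patches the branch offset in place.
 */
static void __attribute__((unused))
example_labeled_branch(u32 **p, struct label **l, struct reloc **r)
{
	il_beqz(p, r, 26, label_leave);		/* target not yet known */
	i_nop(p);				/* branch delay slot */
	/* ... more instructions ... */
	l_leave(l, *p);				/* the branch target */
}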

/* The only general purpose registers allowed in TLB handlers. */
#define K0 26
#define K1 27

/* Some CP0 registers */
#define C0_INDEX	0
#define C0_ENTRYLO0	2
#define C0_ENTRYLO1	3
#define C0_CONTEXT	4
#define C0_BADVADDR	8
#define C0_ENTRYHI	10
#define C0_EPC		14
#define C0_XCONTEXT	20

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static __initdata u32 tlb_handler[128];

/* simply assume worst case size for labels and relocs */
static __initdata struct label labels[128];
static __initdata struct reloc relocs[128];

/*
 * The R3000 TLB handler is simple.
 */
static void __init build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	i_mfc0(&p, K0, C0_BADVADDR);
	i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */
	i_lw(&p, K1, rel_lo(pgdc), K1);
	i_srl(&p, K0, K0, 22); /* load delay */
	i_sll(&p, K0, K0, 2);
	i_addu(&p, K1, K1, K0);
	i_mfc0(&p, K0, C0_CONTEXT);
	i_lw(&p, K1, 0, K1); /* cp0 delay */
	i_andi(&p, K0, K0, 0xffc); /* load delay */
	i_addu(&p, K1, K1, K0);
	i_lw(&p, K0, 0, K1);
	i_nop(&p); /* load delay */
	i_mtc0(&p, K0, C0_ENTRYLO0);
	i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	i_tlbwr(&p); /* cp0 delay */
	i_jr(&p, K1);
	i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	printk("Synthesized TLB refill handler (%u instructions).\n",
	       (unsigned int)(p - tlb_handler));
#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - tlb_handler); i++)
			printk("%08x\n", tlb_handler[i]);
	}
#endif

	memcpy((void *)CAC_BASE, tlb_handler, 0x80);
	flush_icache_range(CAC_BASE, CAC_BASE + 0x80);
}

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into
 * the other one. To keep things simple, we first assume linear
 * space, then we relocate it to the final handler layout as needed.
 */
static __initdata u32 final_handler[64];

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_data.cputype) {
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		i_nop(p);
		i_tlbp(p);
		break;

	default:
		i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static __init void build_tlb_write_entry(u32 **p, struct label **l,
					 struct reloc **r,
					 enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = i_tlbwr; break;
	case tlb_indexed: tlbw = i_tlbwi; break;
	}

	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		i_nop(p);
		break;

	case CPU_R4300:
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_AU1000:
	case CPU_AU1100:
	case CPU_AU1500:
	case CPU_AU1550:
	case CPU_AU1200:
		i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_4KC:
	case CPU_SB1:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
		tlbw(p);
		break;

	case CPU_NEVADA:
		i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		i_nop(p);
		i_nop(p);
		i_nop(p);
		i_nop(p);
		tlbw(p);
		break;

	case CPU_4KEC:
	case CPU_24K:
		i_ehb(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		tlbw(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		i_nop(p);
		i_nop(p);
		tlbw(p);
		i_nop(p);
		i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
		i_nop(p);
		i_nop(p);
		tlbw(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static __init void
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	i_dmfc0(p, tmp, C0_BADVADDR);
	il_bltz(p, r, tmp, label_vmalloc);
	/* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_BUILD_ELF64
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	i_dmfc0(p, ptr, C0_CONTEXT);
	i_dsrl(p, ptr, ptr, 23);
	i_LA_mostly(p, tmp, pgdc);
	i_daddu(p, ptr, ptr, tmp);
	i_dmfc0(p, tmp, C0_BADVADDR);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
# else
	/*
	 * 64 bit SMP running in compat space has the lower part of
	 * &pgd_current[smp_processor_id()] stored in CONTEXT.
	 */
	if (!in_compat_space_p(pgdc))
		panic("Invalid page directory address!");

	i_dmfc0(p, ptr, C0_CONTEXT);
	i_dsra(p, ptr, ptr, 23);
	i_ld(p, ptr, 0, ptr);
# endif
#else
	i_LA_mostly(p, ptr, pgdc);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#endif

	l_vmalloc_done(l, *p);
	i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); /* get pgd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static __init void
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

	l_vmalloc(l, *p);
	i_LA(p, ptr, VMALLOC_START);
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(swpd));
	} else {
		i_LA_mostly(p, ptr, swpd);
		il_b(p, r, label_vmalloc_done);
		i_daddiu(p, ptr, ptr, rel_lo(swpd));
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static __init void __attribute__((unused))
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
	i_mfc0(p, ptr, C0_CONTEXT);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 23);
	i_addu(p, ptr, tmp, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
#endif
	i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	i_sll(p, tmp, tmp, PGD_T_LOG2);
	i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static __init void build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1);
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_data.cputype) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		i_SRL(p, ctx, ctx, shift);
	i_andi(p, ctx, ctx, mask);
}
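
/*
 * Worked example (illustrative, assuming 4 KB pages and 32-bit PTEs, so
 * PTE_T_LOG2 == 2): shift is 1 and mask is 0xff8, which turns the
 * BadVPN2 field that the hardware deposits at bit 4 of c0_context into
 * the byte offset of an even/odd PTE pair within the page table.
 */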

static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_data.cputype) {
	case CPU_NEVADA:
		i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static __init void build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		i_ld(p, tmp, 0, ptep); /* get even pte */
		i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	i_LW(p, tmp, 0, ptep); /* get even pte */
	i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO0);
	i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO1);
	i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

static void __init build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct label *l = labels;
	struct reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	l_leave(&l, p);
	i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In the worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
#ifdef CONFIG_32BIT
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#ifdef CONFIG_32BIT
	f = final_handler;
	/* Simplest case, just copy the handler. */
	copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		l_split(&l, final_handler);
		il_b(&f, &r, label_split);
		if (insn_has_bdelay(relocs, split))
			i_nop(&f);
		else {
			copy_handler(relocs, labels, split, split + 1, f);
			move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB refill handler (%u instructions).\n",
	       final_len);

#ifdef DEBUG_TLB
	{
		int i;

		f = final_handler;
#ifdef CONFIG_64BIT
		if (final_len > 32)
			final_len = 64;
		else
			f = final_handler + 32;
#endif /* CONFIG_64BIT */
		for (i = 0; i < final_len; i++)
			printk("%08x\n", f[i]);
	}
#endif

	memcpy((void *)CAC_BASE, final_handler, 0x100);
	flush_icache_range(CAC_BASE, CAC_BASE + 0x100);
}

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

#define __tlb_handler_align \
	__attribute__((__aligned__(1 << CONFIG_MIPS_L1_CACHE_SHIFT)))

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 __tlb_handler_align handle_tlbl[FASTPATH_SIZE];
u32 __tlb_handler_align handle_tlbs[FASTPATH_SIZE];
u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE];

static void __init
iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_lld(p, pte, 0, ptr);
	else
# endif
		i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_ld(p, pte, 0, ptr);
	else
# endif
		i_LW(p, pte, 0, ptr);
#endif
}

static void __init
iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_scd(p, pte, 0, ptr);
	else
# endif
		i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no i_nop needed */
		i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no i_nop needed */
		i_lw(p, pte, 0, ptr);
	} else
		i_nop(p);
# else
	i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_sd(p, pte, 0, ptr);
	else
# endif
		i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __init
build_pte_present(u32 **p, struct label **l, struct reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __init
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_writable(u32 **p, struct label **l, struct reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __init
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_WRITE);
	il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __init
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	i_tlbwi(p);
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __init
build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
			     unsigned int pte, unsigned int tmp)
{
	i_mfc0(p, tmp, C0_INDEX);
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* branch delay */
	i_tlbwi(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
	l_r3000_write_probe_fail(l, *p);
	i_tlbwr(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

static void __init
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	i_mfc0(p, pte, C0_BADVADDR);
	i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, pte, pte, 22); /* load delay */
	i_sll(p, pte, pte, 2);
	i_addu(p, ptr, ptr, pte);
	i_mfc0(p, pte, C0_CONTEXT);
	i_lw(p, ptr, 0, ptr); /* cp0 delay */
	i_andi(p, pte, pte, 0xffc); /* load delay */
	i_addu(p, ptr, ptr, pte);
	i_lw(p, pte, 0, ptr);
	i_tlbp(p); /* load delay */
}

static void __init build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB load handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbl));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbl); i++)
			printk("%08x\n", handle_tlbl[i]);
	}
#endif

	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + FASTPATH_SIZE * sizeof(u32));
}

static void __init build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB store handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbs));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbs); i++)
			printk("%08x\n", handle_tlbs[i]);
	}
#endif

	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + FASTPATH_SIZE * sizeof(u32));
}

static void __init build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbm));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbm); i++)
			printk("%08x\n", handle_tlbm[i]);
	}
#endif

	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + FASTPATH_SIZE * sizeof(u32));
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __init
build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
				   struct reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	i_MFC0(p, pte, C0_BADVADDR);
	i_LW(p, ptr, 0, ptr);
	i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	build_tlb_probe_entry(p);
}

static void __init
build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
				   struct reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	i_ori(p, ptr, ptr, sizeof(pte_t));
	i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	l_leave(l, *p);
	i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

static void __init build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB load handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbl));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbl); i++)
			printk("%08x\n", handle_tlbl[i]);
	}
#endif

	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + FASTPATH_SIZE * sizeof(u32));
}

static void __init build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB store handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbs));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbs); i++)
			printk("%08x\n", handle_tlbs[i]);
	}
#endif

	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + FASTPATH_SIZE * sizeof(u32));
}

static void __init build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbm));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbm); i++)
			printk("%08x\n", handle_tlbm[i]);
	}
#endif

	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + FASTPATH_SIZE * sizeof(u32));
}

void __init build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_data.cputype) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}