Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * arch/sh64/kernel/traps.c | |
7 | * | |
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | |
9 | * Copyright (C) 2003, 2004 Paul Mundt | |
10 | * Copyright (C) 2003, 2004 Richard Curnow | |
11 | * | |
12 | */ | |
13 | ||
14 | /* | |
15 | * 'Traps.c' handles hardware traps and faults after we have saved some | |
16 | * state in 'entry.S'. | |
17 | */ | |
18 | #include <linux/sched.h> | |
19 | #include <linux/kernel.h> | |
20 | #include <linux/string.h> | |
21 | #include <linux/errno.h> | |
22 | #include <linux/ptrace.h> | |
23 | #include <linux/timer.h> | |
24 | #include <linux/mm.h> | |
25 | #include <linux/smp.h> | |
1da177e4 LT |
26 | #include <linux/init.h> |
27 | #include <linux/delay.h> | |
28 | #include <linux/spinlock.h> | |
29 | #include <linux/kallsyms.h> | |
30 | #include <linux/interrupt.h> | |
31 | #include <linux/sysctl.h> | |
32 | #include <linux/module.h> | |
1da177e4 LT |
33 | #include <asm/system.h> |
34 | #include <asm/uaccess.h> | |
35 | #include <asm/io.h> | |
36 | #include <asm/atomic.h> | |
37 | #include <asm/processor.h> | |
38 | #include <asm/pgtable.h> | |
39 | ||
40 | #undef DEBUG_EXCEPTION | |
41 | #ifdef DEBUG_EXCEPTION | |
42 | /* implemented in ../lib/dbg.c */ | |
43 | extern void show_excp_regs(char *fname, int trapnr, int signr, | |
44 | struct pt_regs *regs); | |
45 | #else | |
46 | #define show_excp_regs(a, b, c, d) | |
47 | #endif | |
48 | ||
49 | static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, | |
50 | unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk); | |
51 | ||
52 | #define DO_ERROR(trapnr, signr, str, name, tsk) \ | |
53 | asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \ | |
54 | { \ | |
55 | do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \ | |
56 | } | |
57 | ||
58 | DEFINE_SPINLOCK(die_lock); | |
59 | ||
60 | void die(const char * str, struct pt_regs * regs, long err) | |
61 | { | |
62 | console_verbose(); | |
63 | spin_lock_irq(&die_lock); | |
64 | printk("%s: %lx\n", str, (err & 0xffffff)); | |
65 | show_regs(regs); | |
66 | spin_unlock_irq(&die_lock); | |
67 | do_exit(SIGSEGV); | |
68 | } | |
69 | ||
70 | static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) | |
71 | { | |
72 | if (!user_mode(regs)) | |
73 | die(str, regs, err); | |
74 | } | |
75 | ||
76 | static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err) | |
77 | { | |
78 | if (!user_mode(regs)) { | |
79 | const struct exception_table_entry *fixup; | |
80 | fixup = search_exception_tables(regs->pc); | |
81 | if (fixup) { | |
82 | regs->pc = fixup->fixup; | |
83 | return; | |
84 | } | |
85 | die(str, regs, err); | |
86 | } | |
87 | } | |
88 | ||
89 | DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current) | |
90 | DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current) | |
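/* Each DO_ERROR() line above expands to an asmlinkage handler named
 * do_<name> - here do_illegal_slot_inst() and do_address_error_exec() -
 * which simply forwards the trap number, signal and description string to
 * do_unhandled_exception().  The trailing 'tsk' macro argument is unused;
 * the macro body always passes 'current'. */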
91 | ||
92 | ||
93 | /* Implement misaligned load/store handling for kernel (and optionally for user | |
94 | mode too). Limitation : only SHmedia mode code is handled - there is no | |
95 | handling at all for misaligned accesses occurring in SHcompact code yet. */ | |
96 | ||
97 | static int misaligned_fixup(struct pt_regs *regs); | |
98 | ||
99 | asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs) | |
100 | { | |
101 | if (misaligned_fixup(regs) < 0) { | |
102 | do_unhandled_exception(7, SIGSEGV, "address error(load)", | |
103 | "do_address_error_load", | |
104 | error_code, regs, current); | |
105 | } | |
106 | return; | |
107 | } | |
108 | ||
109 | asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs) | |
110 | { | |
111 | if (misaligned_fixup(regs) < 0) { | |
112 | do_unhandled_exception(8, SIGSEGV, "address error(store)", | |
113 | "do_address_error_store", | |
114 | error_code, regs, current); | |
115 | } | |
116 | return; | |
117 | } | |
118 | ||
119 | #if defined(CONFIG_SH64_ID2815_WORKAROUND) | |
120 | ||
121 | #define OPCODE_INVALID 0 | |
122 | #define OPCODE_USER_VALID 1 | |
123 | #define OPCODE_PRIV_VALID 2 | |
124 | ||
125 | /* getcon/putcon - requires checking which control register is referenced. */ | |
126 | #define OPCODE_CTRL_REG 3 | |
127 | ||
128 | /* Table of valid opcodes for SHmedia mode. | |
129 | Form a 10-bit value by concatenating the major/minor opcodes i.e. | |
130 | opcode[31:26,19:16]. The 6 MSBs of this value index into the following | |
131 | array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to | |
132 | LSBs==4'b0000 etc). */ | |
133 | static unsigned long shmedia_opcode_table[64] = { | |
134 | 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015, | |
135 | 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000, | |
136 | 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000, | |
137 | 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000, | |
138 | 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, | |
139 | 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, | |
140 | 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, | |
141 | 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000 | |
142 | }; | |
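/* Worked lookup, matching the decode performed in do_reserved_inst() below:
 *
 *     major = (opcode >> 26) & 0x3f;                      6-bit major opcode
 *     minor = (opcode >> 16) & 0xf;                       4-bit minor opcode
 *     state = (shmedia_opcode_table[major] >> (minor << 1)) & 0x3;
 *
 * i.e. each table entry packs sixteen 2-bit states, one per minor opcode,
 * and 'state' is one of OPCODE_INVALID, OPCODE_USER_VALID,
 * OPCODE_PRIV_VALID or OPCODE_CTRL_REG defined above. */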
143 | ||
144 | void do_reserved_inst(unsigned long error_code, struct pt_regs *regs) | |
145 | { | |
146 | /* Workaround SH5-101 cut2 silicon defect #2815 : | |
147 | in some situations, inter-mode branches from SHcompact -> SHmedia | |
148 | which should take ITLBMISS or EXECPROT exceptions at the target | |
149 | falsely take RESINST at the target instead. */ | |
150 | ||
151 | unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */ | |
152 | unsigned long pc, aligned_pc; | |
153 | int get_user_error; | |
154 | int trapnr = 12; | |
155 | int signr = SIGILL; | |
156 | char *exception_name = "reserved_instruction"; | |
157 | ||
158 | pc = regs->pc; | |
159 | if ((pc & 3) == 1) { | |
160 | /* SHmedia : check for defect. This requires executable vmas | |
161 | to be readable too. */ | |
162 | aligned_pc = pc & ~3; | |
163 | if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { | |
164 | get_user_error = -EFAULT; | |
165 | } else { | |
166 | get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); | |
167 | } | |
168 | if (get_user_error >= 0) { | |
169 | unsigned long index, shift; | |
170 | unsigned long major, minor, combined; | |
171 | unsigned long reserved_field; | |
172 | reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */ | |
173 | major = (opcode >> 26) & 0x3f; | |
174 | minor = (opcode >> 16) & 0xf; | |
175 | combined = (major << 4) | minor; | |
176 | index = major; | |
177 | shift = minor << 1; | |
178 | if (reserved_field == 0) { | |
179 | int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3; | |
180 | switch (opcode_state) { | |
181 | case OPCODE_INVALID: | |
182 | /* Trap. */ | |
183 | break; | |
184 | case OPCODE_USER_VALID: | |
185 | /* Restart the instruction : the branch to the instruction will now be from an RTE | |
186 | not from SHcompact so the silicon defect won't be triggered. */ | |
187 | return; | |
188 | case OPCODE_PRIV_VALID: | |
189 | if (!user_mode(regs)) { | |
190 | /* Should only ever get here if a module has | |
191 | SHcompact code inside it. If so, the same fix up is needed. */ | |
192 | return; /* same reason */ | |
193 | } | |
194 | /* Otherwise, user mode trying to execute a privileged instruction - | |
195 | fall through to trap. */ | |
196 | break; | |
197 | case OPCODE_CTRL_REG: | |
198 | /* If in privileged mode, return as above. */ | |
199 | if (!user_mode(regs)) return; | |
200 | /* In user mode ... */ | |
201 | if (combined == 0x9f) { /* GETCON */ | |
202 | unsigned long regno = (opcode >> 20) & 0x3f; | |
203 | if (regno >= 62) { | |
204 | return; | |
205 | } | |
206 | /* Otherwise, reserved or privileged control register, => trap */ | |
207 | } else if (combined == 0x1bf) { /* PUTCON */ | |
208 | unsigned long regno = (opcode >> 4) & 0x3f; | |
209 | if (regno >= 62) { | |
210 | return; | |
211 | } | |
212 | /* Otherwise, reserved or privileged control register, => trap */ | |
213 | } else { | |
214 | /* Trap */ | |
215 | } | |
216 | break; | |
217 | default: | |
218 | /* Fall through to trap. */ | |
219 | break; | |
220 | } | |
221 | } | |
222 | /* fall through to normal resinst processing */ | |
223 | } else { | |
224 | /* Error trying to read opcode. This typically means a | |
225 | real fault, not a RESINST any more. So change the | |
226 | codes. */ | |
227 | trapnr = 87; | |
228 | exception_name = "address error (exec)"; | |
229 | signr = SIGSEGV; | |
230 | } | |
231 | } | |
232 | ||
233 | do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current); | |
234 | } | |
235 | ||
236 | #else /* CONFIG_SH64_ID2815_WORKAROUND */ | |
237 | ||
238 | /* If the workaround isn't needed, this is just a straightforward reserved | |
239 | instruction */ | |
240 | DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current) | |
241 | ||
242 | #endif /* CONFIG_SH64_ID2815_WORKAROUND */ | |
243 | ||
1da177e4 LT |
244 | /* Called with interrupts disabled */ |
245 | asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs) | |
246 | { | |
1da177e4 LT |
247 | show_excp_regs(__FUNCTION__, -1, -1, regs); |
248 | die_if_kernel("exception", regs, ex); | |
249 | } | |
250 | ||
251 | int do_unknown_trapa(unsigned long scId, struct pt_regs *regs) | |
252 | { | |
253 | /* Syscall debug */ | |
254 | printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId); | |
255 | ||
256 | die_if_kernel("unknown trapa", regs, scId); | |
257 | ||
258 | return -ENOSYS; | |
259 | } | |
260 | ||
261 | void show_stack(struct task_struct *tsk, unsigned long *sp) | |
262 | { | |
263 | #ifdef CONFIG_KALLSYMS | |
264 | extern void sh64_unwind(struct pt_regs *regs); | |
265 | struct pt_regs *regs; | |
266 | ||
267 | regs = tsk ? tsk->thread.kregs : NULL; | |
268 | ||
269 | sh64_unwind(regs); | |
270 | #else | |
271 | printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n"); | |
272 | #endif | |
273 | } | |
274 | ||
275 | void show_task(unsigned long *sp) | |
276 | { | |
277 | show_stack(NULL, sp); | |
278 | } | |
279 | ||
280 | void dump_stack(void) | |
281 | { | |
282 | show_task(NULL); | |
283 | } | |
284 | /* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */ | |
285 | EXPORT_SYMBOL(dump_stack); | |
286 | ||
287 | static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, | |
288 | unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk) | |
289 | { | |
290 | show_excp_regs(fn_name, trapnr, signr, regs); | |
291 | tsk->thread.error_code = error_code; | |
292 | tsk->thread.trap_no = trapnr; | |
293 | ||
294 | if (user_mode(regs)) | |
295 | force_sig(signr, tsk); | |
296 | ||
297 | die_if_no_fixup(str, regs, error_code); | |
298 | } | |
299 | ||
300 | static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode) | |
301 | { | |
302 | int get_user_error; | |
303 | unsigned long aligned_pc; | |
304 | unsigned long opcode; | |
305 | ||
306 | if ((pc & 3) == 1) { | |
307 | /* SHmedia */ | |
308 | aligned_pc = pc & ~3; | |
309 | if (from_user_mode) { | |
310 | if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { | |
311 | get_user_error = -EFAULT; | |
312 | } else { | |
313 | get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); | |
314 | *result_opcode = opcode; | |
315 | } | |
316 | return get_user_error; | |
317 | } else { | |
318 | /* If the fault was in the kernel, we can either read | |
319 | * this directly, or if not, we fault. | |
320 | */ | |
321 | *result_opcode = *(unsigned long *) aligned_pc; | |
322 | return 0; | |
323 | } | |
324 | } else if ((pc & 1) == 0) { | |
325 | /* SHcompact */ | |
326 | /* TODO : provide handling for this. We don't really support | |
327 | user-mode SHcompact yet, and for a kernel fault, this would | |
328 | have to come from a module built for SHcompact. */ | |
329 | return -EFAULT; | |
330 | } else { | |
331 | /* misaligned */ | |
332 | return -EFAULT; | |
333 | } | |
334 | } | |
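/* The (pc & 3) tests above rely on SH-5 encoding the ISA mode in the low
 * bits of the PC: an SHmedia target has bit 0 set (instructions are 32 bits
 * wide and word aligned, hence the pc & ~3 before the fetch), while an
 * SHcompact target has bit 0 clear.  Any other combination is treated as a
 * corrupt PC and faults. */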
335 | ||
336 | static int address_is_sign_extended(__u64 a) | |
337 | { | |
338 | __u64 b; | |
339 | #if (NEFF == 32) | |
340 | b = (__u64)(__s64)(__s32)(a & 0xffffffffUL); | |
341 | return (b == a) ? 1 : 0; | |
342 | #else | |
343 | #error "Sign extend check only works for NEFF==32" | |
344 | #endif | |
345 | } | |
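/* A couple of concrete values for the NEFF == 32 case: an address counts as
 * sign extended when bits 63:32 merely replicate bit 31, so
 *
 *     address_is_sign_extended(0x000000007fffffffULL) == 1
 *     address_is_sign_extended(0xffffffff80000000ULL) == 1
 *     address_is_sign_extended(0x0000000100000000ULL) == 0
 *
 * the last value is rejected and makes generate_and_check_address() fail. */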
346 | ||
347 | static int generate_and_check_address(struct pt_regs *regs, | |
348 | __u32 opcode, | |
349 | int displacement_not_indexed, | |
350 | int width_shift, | |
351 | __u64 *address) | |
352 | { | |
353 | /* return -1 for fault, 0 for OK */ | |
354 | ||
355 | __u64 base_address, addr; | |
356 | int basereg; | |
357 | ||
358 | basereg = (opcode >> 20) & 0x3f; | |
359 | base_address = regs->regs[basereg]; | |
360 | if (displacement_not_indexed) { | |
361 | __s64 displacement; | |
362 | displacement = (opcode >> 10) & 0x3ff; | |
363 | displacement = ((displacement << 54) >> 54); /* sign extend */ | |
364 | addr = (__u64)((__s64)base_address + (displacement << width_shift)); | |
365 | } else { | |
366 | __u64 offset; | |
367 | int offsetreg; | |
368 | offsetreg = (opcode >> 10) & 0x3f; | |
369 | offset = regs->regs[offsetreg]; | |
370 | addr = base_address + offset; | |
371 | } | |
372 | ||
373 | /* Check sign extended */ | |
374 | if (!address_is_sign_extended(addr)) { | |
375 | return -1; | |
376 | } | |
377 | ||
378 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
379 | /* Check accessible. For misaligned access in the kernel, assume the | |
380 | address is always accessible (and if not, just fault when the | |
381 | load/store gets done.) */ | |
382 | if (user_mode(regs)) { | |
383 | if (addr >= TASK_SIZE) { | |
384 | return -1; | |
385 | } | |
386 | /* Do access_ok check later - it depends on whether it's a load or a store. */ | |
387 | } | |
388 | #endif | |
389 | ||
390 | *address = addr; | |
391 | return 0; | |
392 | } | |
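/* Example of the displacement path above: a 10-bit displacement field of
 * 0x3ff sign-extends (via the <<54 / >>54 pair) to -1, and the result is
 * scaled by the access width, so for a 32-bit load/store (width_shift == 2)
 * the effective address works out as base_address - 4. */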
393 | ||
394 | /* Default value as for sh */ | |
395 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
396 | static int user_mode_unaligned_fixup_count = 10; | |
397 | static int user_mode_unaligned_fixup_enable = 1; | |
398 | #endif | |
399 | ||
400 | static int kernel_mode_unaligned_fixup_count = 32; | |
401 | ||
402 | static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) | |
403 | { | |
404 | unsigned short x; | |
405 | unsigned char *p, *q; | |
406 | p = (unsigned char *) (int) address; | |
407 | q = (unsigned char *) &x; | |
408 | q[0] = p[0]; | |
409 | q[1] = p[1]; | |
410 | ||
411 | if (do_sign_extend) { | |
412 | *result = (__u64)(__s64) *(short *) &x; | |
413 | } else { | |
414 | *result = (__u64) x; | |
415 | } | |
416 | } | |
417 | ||
418 | static void misaligned_kernel_word_store(__u64 address, __u64 value) | |
419 | { | |
420 | unsigned short x; | |
421 | unsigned char *p, *q; | |
422 | p = (unsigned char *) (int) address; | |
423 | q = (unsigned char *) &x; | |
424 | ||
425 | x = (__u16) value; | |
426 | p[0] = q[0]; | |
427 | p[1] = q[1]; | |
428 | } | |
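/* The two helpers above move the 16-bit datum one byte at a time on
 * purpose: byte accesses can never be misaligned, so they cannot
 * re-trigger the address error that is being fixed up. */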
429 | ||
430 | static int misaligned_load(struct pt_regs *regs, | |
431 | __u32 opcode, | |
432 | int displacement_not_indexed, | |
433 | int width_shift, | |
434 | int do_sign_extend) | |
435 | { | |
436 | /* Return -1 for a fault, 0 for OK */ | |
437 | int error; | |
438 | int destreg; | |
439 | __u64 address; | |
440 | ||
441 | error = generate_and_check_address(regs, opcode, | |
442 | displacement_not_indexed, width_shift, &address); | |
443 | if (error < 0) { | |
444 | return error; | |
445 | } | |
446 | ||
447 | destreg = (opcode >> 4) & 0x3f; | |
448 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
449 | if (user_mode(regs)) { | |
450 | __u64 buffer; | |
451 | ||
452 | if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) { | |
453 | return -1; | |
454 | } | |
455 | ||
456 | if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) { | |
457 | return -1; /* fault */ | |
458 | } | |
459 | switch (width_shift) { | |
460 | case 1: | |
461 | if (do_sign_extend) { | |
462 | regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer; | |
463 | } else { | |
464 | regs->regs[destreg] = (__u64) *(__u16 *) &buffer; | |
465 | } | |
466 | break; | |
467 | case 2: | |
468 | regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer; | |
469 | break; | |
470 | case 3: | |
471 | regs->regs[destreg] = buffer; | |
472 | break; | |
473 | default: | |
474 | printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", | |
475 | width_shift, (unsigned long) regs->pc); | |
476 | break; | |
477 | } | |
478 | } else | |
479 | #endif | |
480 | { | |
481 | /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ | |
482 | __u64 lo, hi; | |
483 | ||
484 | switch (width_shift) { | |
485 | case 1: | |
486 | misaligned_kernel_word_load(address, do_sign_extend, ®s->regs[destreg]); | |
487 | break; | |
488 | case 2: | |
489 | asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address)); | |
490 | asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address)); | |
491 | regs->regs[destreg] = lo | hi; | |
492 | break; | |
493 | case 3: | |
494 | asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address)); | |
495 | asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address)); | |
496 | regs->regs[destreg] = lo | hi; | |
497 | break; | |
498 | ||
499 | default: | |
500 | printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", | |
501 | width_shift, (unsigned long) regs->pc); | |
502 | break; | |
503 | } | |
504 | } | |
505 | ||
506 | return 0; | |
507 | ||
508 | } | |
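/* The kernel-mode path above leans on the SHmedia ldlo/ldhi pairs, which
 * behave much like the classic lwl/lwr idiom: each instruction fetches the
 * bytes of the misaligned datum that fall within one naturally aligned word
 * and shifts them into place, so OR-ing the two partial results
 * reconstructs the full value without issuing a misaligned access.  The
 * store path below uses stlo/sthi the same way. */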
509 | ||
510 | static int misaligned_store(struct pt_regs *regs, | |
511 | __u32 opcode, | |
512 | int displacement_not_indexed, | |
513 | int width_shift) | |
514 | { | |
515 | /* Return -1 for a fault, 0 for OK */ | |
516 | int error; | |
517 | int srcreg; | |
518 | __u64 address; | |
519 | ||
520 | error = generate_and_check_address(regs, opcode, | |
521 | displacement_not_indexed, width_shift, &address); | |
522 | if (error < 0) { | |
523 | return error; | |
524 | } | |
525 | ||
526 | srcreg = (opcode >> 4) & 0x3f; | |
527 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
528 | if (user_mode(regs)) { | |
529 | __u64 buffer; | |
530 | ||
531 | if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { | |
532 | return -1; | |
533 | } | |
534 | ||
535 | switch (width_shift) { | |
536 | case 1: | |
537 | *(__u16 *) &buffer = (__u16) regs->regs[srcreg]; | |
538 | break; | |
539 | case 2: | |
540 | *(__u32 *) &buffer = (__u32) regs->regs[srcreg]; | |
541 | break; | |
542 | case 3: | |
543 | buffer = regs->regs[srcreg]; | |
544 | break; | |
545 | default: | |
546 | printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", | |
547 | width_shift, (unsigned long) regs->pc); | |
548 | break; | |
549 | } | |
550 | ||
551 | if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { | |
552 | return -1; /* fault */ | |
553 | } | |
554 | } else | |
555 | #endif | |
556 | { | |
557 | /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ | |
558 | __u64 val = regs->regs[srcreg]; | |
559 | ||
560 | switch (width_shift) { | |
561 | case 1: | |
562 | misaligned_kernel_word_store(address, val); | |
563 | break; | |
564 | case 2: | |
565 | asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address)); | |
566 | asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address)); | |
567 | break; | |
568 | case 3: | |
569 | asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address)); | |
570 | asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address)); | |
571 | break; | |
572 | ||
573 | default: | |
574 | printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", | |
575 | width_shift, (unsigned long) regs->pc); | |
576 | break; | |
577 | } | |
578 | } | |
579 | ||
580 | return 0; | |
581 | ||
582 | } | |
583 | ||
584 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
585 | /* Never need to fix up misaligned FPU accesses within the kernel since that's a real | |
586 | error. */ | |
587 | static int misaligned_fpu_load(struct pt_regs *regs, | |
588 | __u32 opcode, | |
589 | int displacement_not_indexed, | |
590 | int width_shift, | |
591 | int do_paired_load) | |
592 | { | |
593 | /* Return -1 for a fault, 0 for OK */ | |
594 | int error; | |
595 | int destreg; | |
596 | __u64 address; | |
597 | ||
598 | error = generate_and_check_address(regs, opcode, | |
599 | displacement_not_indexed, width_shift, &address); | |
600 | if (error < 0) { | |
601 | return error; | |
602 | } | |
603 | ||
604 | destreg = (opcode >> 4) & 0x3f; | |
605 | if (user_mode(regs)) { | |
606 | __u64 buffer; | |
607 | __u32 buflo, bufhi; | |
608 | ||
609 | if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) { | |
610 | return -1; | |
611 | } | |
612 | ||
613 | if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) { | |
614 | return -1; /* fault */ | |
615 | } | |
616 | /* 'current' may be the current owner of the FPU state, so | |
617 | context switch the registers into memory so they can be | |
618 | indexed by register number. */ | |
619 | if (last_task_used_math == current) { | |
256b22ca | 620 | enable_fpu(); |
332fd57b | 621 | save_fpu(current, regs); |
256b22ca | 622 | disable_fpu(); |
1da177e4 LT |
623 | last_task_used_math = NULL; |
624 | regs->sr |= SR_FD; | |
625 | } | |
626 | ||
627 | buflo = *(__u32*) &buffer; | |
628 | bufhi = *(1 + (__u32*) &buffer); | |
629 | ||
630 | switch (width_shift) { | |
631 | case 2: | |
632 | current->thread.fpu.hard.fp_regs[destreg] = buflo; | |
633 | break; | |
634 | case 3: | |
635 | if (do_paired_load) { | |
636 | current->thread.fpu.hard.fp_regs[destreg] = buflo; | |
637 | current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; | |
638 | } else { | |
639 | #if defined(CONFIG_LITTLE_ENDIAN) | |
640 | current->thread.fpu.hard.fp_regs[destreg] = bufhi; | |
641 | current->thread.fpu.hard.fp_regs[destreg+1] = buflo; | |
642 | #else | |
643 | current->thread.fpu.hard.fp_regs[destreg] = buflo; | |
644 | current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; | |
645 | #endif | |
646 | } | |
647 | break; | |
648 | default: | |
649 | printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n", | |
650 | width_shift, (unsigned long) regs->pc); | |
651 | break; | |
652 | } | |
653 | return 0; | |
654 | } else { | |
655 | die ("Misaligned FPU load inside kernel", regs, 0); | |
656 | return -1; | |
657 | } | |
658 | ||
659 | ||
660 | } | |
661 | ||
662 | static int misaligned_fpu_store(struct pt_regs *regs, | |
663 | __u32 opcode, | |
664 | int displacement_not_indexed, | |
665 | int width_shift, | |
666 | int do_paired_load) | |
667 | { | |
668 | /* Return -1 for a fault, 0 for OK */ | |
669 | int error; | |
670 | int srcreg; | |
671 | __u64 address; | |
672 | ||
673 | error = generate_and_check_address(regs, opcode, | |
674 | displacement_not_indexed, width_shift, &address); | |
675 | if (error < 0) { | |
676 | return error; | |
677 | } | |
678 | ||
679 | srcreg = (opcode >> 4) & 0x3f; | |
680 | if (user_mode(regs)) { | |
681 | __u64 buffer; | |
682 | /* Initialise these to NaNs. */ | |
683 | __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; | |
684 | ||
685 | if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { | |
686 | return -1; | |
687 | } | |
688 | ||
689 | /* 'current' may be the current owner of the FPU state, so | |
690 | context switch the registers into memory so they can be | |
691 | indexed by register number. */ | |
692 | if (last_task_used_math == current) { | |
256b22ca | 693 | enable_fpu(); |
332fd57b | 694 | save_fpu(current, regs); |
256b22ca | 695 | disable_fpu(); |
1da177e4 LT |
696 | last_task_used_math = NULL; |
697 | regs->sr |= SR_FD; | |
698 | } | |
699 | ||
700 | switch (width_shift) { | |
701 | case 2: | |
702 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | |
703 | break; | |
704 | case 3: | |
705 | if (do_paired_load) { | |
706 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | |
707 | bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; | |
708 | } else { | |
709 | #if defined(CONFIG_LITTLE_ENDIAN) | |
710 | bufhi = current->thread.fpu.hard.fp_regs[srcreg]; | |
711 | buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; | |
712 | #else | |
713 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | |
714 | bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; | |
715 | #endif | |
716 | } | |
717 | break; | |
718 | default: | |
719 | printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", | |
720 | width_shift, (unsigned long) regs->pc); | |
721 | break; | |
722 | } | |
723 | ||
724 | *(__u32*) &buffer = buflo; | |
725 | *(1 + (__u32*) &buffer) = bufhi; | |
726 | if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { | |
727 | return -1; /* fault */ | |
728 | } | |
729 | return 0; | |
730 | } else { | |
731 | die ("Misaligned FPU store inside kernel", regs, 0); | |
732 | return -1; | |
733 | } | |
734 | } | |
735 | #endif | |
736 | ||
737 | static int misaligned_fixup(struct pt_regs *regs) | |
738 | { | |
739 | unsigned long opcode; | |
740 | int error; | |
741 | int major, minor; | |
742 | ||
743 | #if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
744 | /* Never fixup user mode misaligned accesses without this option enabled. */ | |
745 | return -1; | |
746 | #else | |
747 | if (!user_mode_unaligned_fixup_enable) return -1; | |
748 | #endif | |
749 | ||
750 | error = read_opcode(regs->pc, &opcode, user_mode(regs)); | |
751 | if (error < 0) { | |
752 | return error; | |
753 | } | |
754 | major = (opcode >> 26) & 0x3f; | |
755 | minor = (opcode >> 16) & 0xf; | |
756 | ||
757 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
758 | if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) { | |
759 | --user_mode_unaligned_fixup_count; | |
760 | /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */ | |
761 | printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", | |
19c5870c | 762 | current->comm, task_pid_nr(current), (__u32)regs->pc, opcode); |
1da177e4 LT |
763 | } else |
764 | #endif | |
765 | if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) { | |
766 | --kernel_mode_unaligned_fixup_count; | |
767 | if (in_interrupt()) { | |
768 | printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n", | |
769 | (__u32)regs->pc, opcode); | |
770 | } else { | |
771 | printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", | |
19c5870c | 772 | current->comm, task_pid_nr(current), (__u32)regs->pc, opcode); |
1da177e4 LT |
773 | } |
774 | } | |
775 | ||
776 | ||
777 | switch (major) { | |
778 | case (0x84>>2): /* LD.W */ | |
779 | error = misaligned_load(regs, opcode, 1, 1, 1); | |
780 | break; | |
781 | case (0xb0>>2): /* LD.UW */ | |
782 | error = misaligned_load(regs, opcode, 1, 1, 0); | |
783 | break; | |
784 | case (0x88>>2): /* LD.L */ | |
785 | error = misaligned_load(regs, opcode, 1, 2, 1); | |
786 | break; | |
787 | case (0x8c>>2): /* LD.Q */ | |
788 | error = misaligned_load(regs, opcode, 1, 3, 0); | |
789 | break; | |
790 | ||
791 | case (0xa4>>2): /* ST.W */ | |
792 | error = misaligned_store(regs, opcode, 1, 1); | |
793 | break; | |
794 | case (0xa8>>2): /* ST.L */ | |
795 | error = misaligned_store(regs, opcode, 1, 2); | |
796 | break; | |
797 | case (0xac>>2): /* ST.Q */ | |
798 | error = misaligned_store(regs, opcode, 1, 3); | |
799 | break; | |
800 | ||
801 | case (0x40>>2): /* indexed loads */ | |
802 | switch (minor) { | |
803 | case 0x1: /* LDX.W */ | |
804 | error = misaligned_load(regs, opcode, 0, 1, 1); | |
805 | break; | |
806 | case 0x5: /* LDX.UW */ | |
807 | error = misaligned_load(regs, opcode, 0, 1, 0); | |
808 | break; | |
809 | case 0x2: /* LDX.L */ | |
810 | error = misaligned_load(regs, opcode, 0, 2, 1); | |
811 | break; | |
812 | case 0x3: /* LDX.Q */ | |
813 | error = misaligned_load(regs, opcode, 0, 3, 0); | |
814 | break; | |
815 | default: | |
816 | error = -1; | |
817 | break; | |
818 | } | |
819 | break; | |
820 | ||
821 | case (0x60>>2): /* indexed stores */ | |
822 | switch (minor) { | |
823 | case 0x1: /* STX.W */ | |
824 | error = misaligned_store(regs, opcode, 0, 1); | |
825 | break; | |
826 | case 0x2: /* STX.L */ | |
827 | error = misaligned_store(regs, opcode, 0, 2); | |
828 | break; | |
829 | case 0x3: /* STX.Q */ | |
830 | error = misaligned_store(regs, opcode, 0, 3); | |
831 | break; | |
832 | default: | |
833 | error = -1; | |
834 | break; | |
835 | } | |
836 | break; | |
837 | ||
838 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) | |
839 | case (0x94>>2): /* FLD.S */ | |
840 | error = misaligned_fpu_load(regs, opcode, 1, 2, 0); | |
841 | break; | |
842 | case (0x98>>2): /* FLD.P */ | |
843 | error = misaligned_fpu_load(regs, opcode, 1, 3, 1); | |
844 | break; | |
845 | case (0x9c>>2): /* FLD.D */ | |
846 | error = misaligned_fpu_load(regs, opcode, 1, 3, 0); | |
847 | break; | |
848 | case (0x1c>>2): /* floating indexed loads */ | |
849 | switch (minor) { | |
850 | case 0x8: /* FLDX.S */ | |
851 | error = misaligned_fpu_load(regs, opcode, 0, 2, 0); | |
852 | break; | |
853 | case 0xd: /* FLDX.P */ | |
854 | error = misaligned_fpu_load(regs, opcode, 0, 3, 1); | |
855 | break; | |
856 | case 0x9: /* FLDX.D */ | |
857 | error = misaligned_fpu_load(regs, opcode, 0, 3, 0); | |
858 | break; | |
859 | default: | |
860 | error = -1; | |
861 | break; | |
862 | } | |
863 | break; | |
864 | case (0xb4>>2): /* FST.S */ | |
865 | error = misaligned_fpu_store(regs, opcode, 1, 2, 0); | |
866 | break; | |
867 | case (0xb8>>2): /* FST.P */ | |
868 | error = misaligned_fpu_store(regs, opcode, 1, 3, 1); | |
869 | break; | |
870 | case (0xbc>>2): /* FST.D */ | |
871 | error = misaligned_fpu_store(regs, opcode, 1, 3, 0); | |
872 | break; | |
873 | case (0x3c>>2): /* floating indexed stores */ | |
874 | switch (minor) { | |
875 | case 0x8: /* FSTX.S */ | |
876 | error = misaligned_fpu_store(regs, opcode, 0, 2, 0); | |
877 | break; | |
878 | case 0xd: /* FSTX.P */ | |
879 | error = misaligned_fpu_store(regs, opcode, 0, 3, 1); | |
880 | break; | |
881 | case 0x9: /* FSTX.D */ | |
882 | error = misaligned_fpu_store(regs, opcode, 0, 3, 0); | |
883 | break; | |
884 | default: | |
885 | error = -1; | |
886 | break; | |
887 | } | |
888 | break; | |
889 | #endif | |
890 | ||
891 | default: | |
892 | /* Fault */ | |
893 | error = -1; | |
894 | break; | |
895 | } | |
896 | ||
897 | if (error < 0) { | |
898 | return error; | |
899 | } else { | |
900 | regs->pc += 4; /* Skip the instruction that's just been emulated */ | |
901 | return 0; | |
902 | } | |
903 | ||
904 | } | |
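/* The case labels above are written as (0xNN>>2) so they can be read
 * against the instruction's top byte; e.g. LD.W has major opcode
 * 0x84>>2 == 0x21, and the indexed forms are then distinguished by the
 * 4-bit minor opcode.  Anything not listed falls through to the default
 * case and is reported as an address error by the caller. */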
905 | ||
906 | static ctl_table unaligned_table[] = { | |
e3c6449d EB |
907 | { |
908 | .ctl_name = CTL_UNNUMBERED, | |
909 | .procname = "kernel_reports", | |
910 | .data = &kernel_mode_unaligned_fixup_count, | |
911 | .maxlen = sizeof(int), | |
912 | .mode = 0644, | |
913 | .proc_handler = &proc_dointvec | |
914 | }, | |
1da177e4 | 915 | #if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) |
e3c6449d EB |
916 | { |
917 | .ctl_name = CTL_UNNUMBERED, | |
918 | .procname = "user_reports", | |
919 | .data = &user_mode_unaligned_fixup_count, | |
920 | .maxlen = sizeof(int), | |
921 | .mode = 0644, | |
922 | .proc_handler = &proc_dointvec | |
923 | }, | |
924 | { | |
925 | .ctl_name = CTL_UNNUMBERED, | |
926 | .procname = "user_enable", | |
927 | .data = &user_mode_unaligned_fixup_enable, | |
928 | .maxlen = sizeof(int), | |
929 | .mode = 0644, | |
930 | .proc_handler = &proc_dointvec}, | |
1da177e4 | 931 | #endif |
e3c6449d | 932 | {} |
1da177e4 LT |
933 | }; |
934 | ||
935 | static ctl_table unaligned_root[] = { | |
e3c6449d EB |
936 | { |
937 | .ctl_name = CTL_UNNUMBERED, | |
938 | .procname = "unaligned_fixup", | |
939 | .mode = 0555, | |
940 | .child = unaligned_table | |
941 | }, | |
942 | {} | |
1da177e4 LT |
943 | }; |
944 | ||
945 | static ctl_table sh64_root[] = { | |
e3c6449d EB |
946 | { |
947 | .ctl_name = CTL_UNNUMBERED, | |
948 | .procname = "sh64", | |
949 | .mode = 0555, | |
950 | .child = unaligned_root | |
951 | }, | |
952 | {} | |
1da177e4 LT |
953 | }; |
954 | static struct ctl_table_header *sysctl_header; | |
955 | static int __init init_sysctl(void) | |
956 | { | |
0b4d4147 | 957 | sysctl_header = register_sysctl_table(sh64_root); |
1da177e4 LT |
958 | return 0; |
959 | } | |
960 | ||
961 | __initcall(init_sysctl); | |
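/* Once register_sysctl_table() has run, the tables above surface three
 * integer tunables under /proc/sys:
 *
 *     /proc/sys/sh64/unaligned_fixup/kernel_reports
 *     /proc/sys/sh64/unaligned_fixup/user_reports   (CONFIG_SH64_USER_MISALIGNED_FIXUP)
 *     /proc/sys/sh64/unaligned_fixup/user_enable    (CONFIG_SH64_USER_MISALIGNED_FIXUP)
 *
 * e.g. "echo 0 > /proc/sys/sh64/unaligned_fixup/user_enable" turns the
 * user-mode fixup off, while the *_reports counts limit how many
 * "Fixing up unaligned ..." messages are printed before the warnings
 * go quiet. */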
962 | ||
963 | ||
964 | asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs) | |
965 | { | |
966 | u64 peek_real_address_q(u64 addr); | |
967 | u64 poke_real_address_q(u64 addr, u64 val); | |
968 | unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010; | |
969 | unsigned long long exp_cause; | |
970 | /* It's not worth ioremapping the debug module registers for the amount | |
971 | of access we make to them - just go direct to their physical | |
972 | addresses. */ | |
973 | exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY); | |
974 | if (exp_cause & ~4) { | |
975 | printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n", | |
976 | (unsigned long)(exp_cause & 0xffffffff)); | |
977 | } | |
978 | show_state(); | |
979 | /* Clear all DEBUGINT causes */ | |
980 | poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); | |
981 | } |