]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - arch/mips/kernel/unaligned.c
MIPS: Always clear FCSR cause bits after emulation
[mirror_ubuntu-hirsute-kernel.git] / arch / mips / kernel / unaligned.c
CommitLineData
1da177e4
LT
1/*
2 * Handle unaligned accesses by emulation.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
9d8e5736 10 * Copyright (C) 2014 Imagination Technologies Ltd.
1da177e4
LT
11 *
12 * This file contains exception handler for address error exception with the
13 * special capability to execute faulting instructions in software. The
14 * handler does not try to handle the case when the program counter points
15 * to an address not aligned to a word boundary.
16 *
17 * Putting data to unaligned addresses is a bad practice even on Intel where
18 * only the performance is affected. Much worse is that such code is non-
19 * portable. Due to several programs that die on MIPS due to alignment
20 * problems I decided to implement this handler anyway though I originally
21 * didn't intend to do this at all for user code.
22 *
23 * For now I enable fixing of address errors by default to make life easier.
24 * I however intend to disable this somewhen in the future when the alignment
70342287 25 * problems with user programs have been fixed. For programmers this is the
1da177e4
LT
26 * right way to go.
27 *
28 * Fixing address errors is a per process option. The option is inherited
70342287 29 * across fork(2) and execve(2) calls. If you really want to use the
1da177e4
LT
30 * option in your user programs - I discourage the use of the software
31 * emulation strongly - use the following code in your userland stuff:
32 *
33 * #include <sys/sysmips.h>
34 *
35 * ...
36 * sysmips(MIPS_FIXADE, x);
37 * ...
38 *
39 * The argument x is 0 for disabling software emulation, enabled otherwise.
40 *
41 * Below a little program to play around with this feature.
42 *
43 * #include <stdio.h>
44 * #include <sys/sysmips.h>
45 *
46 * struct foo {
70342287 47 * unsigned char bar[8];
1da177e4
LT
48 * };
49 *
50 * main(int argc, char *argv[])
51 * {
70342287
RB
52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53 * unsigned int *p = (unsigned int *) (x.bar + 3);
54 * int i;
1da177e4 55 *
70342287
RB
56 * if (argc > 1)
57 * sysmips(MIPS_FIXADE, atoi(argv[1]));
1da177e4 58 *
70342287 59 * printf("*p = %08lx\n", *p);
1da177e4 60 *
70342287 61 * *p = 0xdeadface;
1da177e4 62 *
70342287
RB
63 * for(i = 0; i <= 7; i++)
64 * printf("%02x ", x.bar[i]);
65 * printf("\n");
1da177e4
LT
66 * }
67 *
68 * Coprocessor loads are not supported; I think this case is unimportant
69 * in the practice.
70 *
71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
70342287
RB
72 * exception for the R6000.
73 * A store crossing a page boundary might be executed only partially.
74 * Undo the partial store in this case.
1da177e4 75 */
c3fc5cd5 76#include <linux/context_tracking.h>
1da177e4 77#include <linux/mm.h>
1da177e4
LT
78#include <linux/signal.h>
79#include <linux/smp.h>
e8edc6e0 80#include <linux/sched.h>
6312e0ee 81#include <linux/debugfs.h>
7f788d2d
DZ
82#include <linux/perf_event.h>
83
1da177e4
LT
84#include <asm/asm.h>
85#include <asm/branch.h>
86#include <asm/byteorder.h>
69f3a7de 87#include <asm/cop2.h>
102cedc3
LY
88#include <asm/fpu.h>
89#include <asm/fpu_emulator.h>
1da177e4
LT
90#include <asm/inst.h>
91#include <asm/uaccess.h>
34c2f668
LY
92#include <asm/fpu.h>
93#include <asm/fpu_emulator.h>
1da177e4 94
/*
 * Stringification helpers: STR(PTR) expands the PTR macro (".word" or
 * ".dword" depending on bitness) and turns the result into a string
 * literal for splicing into the inline-asm __ex_table entries below.
 */
#define STR(x)	__STR(x)
#define __STR(x)  #x
/*
 * Policy for handling an emulated unaligned access:
 *   QUIET  - fix it up silently,
 *   SIGNAL - send the offending task a signal instead of emulating,
 *   SHOW   - emulate but log the access (via show_registers()).
 */
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
/* Counters/knob exported through debugfs elsewhere in the kernel. */
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
/* Without debugfs the action is compiled down to the quiet default. */
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
1da177e4 110
/*
 * Unaligned load/store emulation primitives.
 *
 * Each macro performs the access one piece at a time so no individual
 * hardware access is misaligned, and reports success/failure through
 * `res` (0 on success, -EFAULT if any constituent access faulted; the
 * fault is caught through a local .fixup stub registered in __ex_table).
 * The user_lb()/user_lbu()/user_lwl()/... helpers select the EVA or
 * classic form of the access instruction as appropriate.
 *
 * Two variants exist per operation:
 *  - pre-R6 cores use the lwl/lwr (and 64-bit ldl/ldr, swl/swr,
 *    sdl/sdr) unaligned-access instructions;
 *  - MIPS R6 removed those instructions, so the R6 variants assemble
 *    the value byte by byte with lb/lbu/sb plus shifts.
 * The whole set is duplicated for big- and little-endian byte order.
 */
#ifdef __BIG_ENDIAN
/* Load a signed halfword: high byte first, merged with sll/or. */
#define LoadHW(addr, value, res)                            \
		__asm__ __volatile__ (".set\tnoat\n"        \
			"1:\t"user_lb("%0", "0(%2)")"\n"    \
			"2:\t"user_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\t.set\tat\n\t"                  \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

#ifndef CONFIG_CPU_MIPSR6
/* Load a word using the lwl/lwr unaligned-access pair. */
#define LoadW(addr, value, res)                             \
		__asm__ __volatile__ (                      \
			"1:\t"user_lwl("%0", "(%2)")"\n"    \
			"2:\t"user_lwr("%0", "3(%2)")"\n\t" \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#else
/* MIPSR6 has no lwl instruction */
#define LoadW(addr, value, res)                             \
		__asm__ __volatile__ (                      \
			".set\tpush\n"                      \
			".set\tnoat\n\t"                    \
			"1:"user_lb("%0", "0(%2)")"\n\t"    \
			"2:"user_lbu("$1", "1(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"user_lbu("$1", "2(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"user_lbu("$1", "3(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			".set\tpop\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#endif /* CONFIG_CPU_MIPSR6 */

/* Load an unsigned (zero-extended) halfword. */
#define LoadHWU(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"user_lbu("%0", "0(%2)")"\n"   \
			"2:\t"user_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".set\tat\n\t"                      \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

#ifndef CONFIG_CPU_MIPSR6
/* Load an unsigned word; dsll/dsrl pair zero-extends to 64 bits. */
#define LoadWU(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			"1:\t"user_lwl("%0", "(%2)")"\n"    \
			"2:\t"user_lwr("%0", "3(%2)")"\n\t" \
			"dsll\t%0, %0, 32\n\t"              \
			"dsrl\t%0, %0, 32\n\t"              \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

/* Load a doubleword using the ldl/ldr unaligned-access pair. */
#define LoadDW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			"1:\tldl\t%0, (%2)\n"               \
			"2:\tldr\t%0, 7(%2)\n\t"            \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#else
/* MIPSR6 has not lwl and ldl instructions */
#define LoadWU(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"user_lbu("%0", "0(%2)")"\n\t"   \
			"2:"user_lbu("$1", "1(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"user_lbu("$1", "2(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"user_lbu("$1", "3(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			".set\tpop\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

/* R6 doubleword load: eight byte loads merged MSB-first. */
#define LoadDW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:lb\t%0, 0(%2)\n\t"               \
			"2:lbu\t $1, 1(%2)\n\t"             \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"3:lbu\t$1, 2(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"4:lbu\t$1, 3(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"5:lbu\t$1, 4(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"6:lbu\t$1, 5(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"7:lbu\t$1, 6(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"8:lbu\t$1, 7(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			".set\tpop\n\t"                     \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#endif /* CONFIG_CPU_MIPSR6 */


/* Store a halfword: low byte at offset 1, high byte at offset 0. */
#define StoreHW(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"user_sb("%1", "1(%2)")"\n"    \
			"srl\t$1, %1, 0x8\n"                \
			"2:\t"user_sb("$1", "0(%2)")"\n"    \
			".set\tat\n\t"                      \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));

#ifndef CONFIG_CPU_MIPSR6
/* Store a word using the swl/swr unaligned-access pair. */
#define StoreW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			"1:\t"user_swl("%1", "(%2)")"\n"    \
			"2:\t"user_swr("%1", "3(%2)")"\n\t" \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));

/* Store a doubleword using the sdl/sdr unaligned-access pair. */
#define StoreDW(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			"1:\tsdl\t%1,(%2)\n"                \
			"2:\tsdr\t%1, 7(%2)\n\t"            \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));
#else
/* MIPSR6 has no swl and sdl instructions */
#define StoreW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"user_sb("%1", "3(%2)")"\n\t"    \
			"srl\t$1, %1, 0x8\n\t"              \
			"2:"user_sb("$1", "2(%2)")"\n\t"    \
			"srl\t$1, $1, 0x8\n\t"              \
			"3:"user_sb("$1", "1(%2)")"\n\t"    \
			"srl\t$1, $1, 0x8\n\t"              \
			"4:"user_sb("$1", "0(%2)")"\n\t"    \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (res)                       \
			: "r" (value), "r" (addr), "i" (-EFAULT) \
			: "memory");

/* R6 doubleword store: eight byte stores, LSB at highest offset. */
#define StoreDW(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:sb\t%1, 7(%2)\n\t"               \
			"dsrl\t$1, %1, 0x8\n\t"             \
			"2:sb\t$1, 6(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"3:sb\t$1, 5(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"4:sb\t$1, 4(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"5:sb\t$1, 3(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"6:sb\t$1, 2(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"7:sb\t$1, 1(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"8:sb\t$1, 0(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (res)                       \
			: "r" (value), "r" (addr), "i" (-EFAULT) \
			: "memory");
#endif /* CONFIG_CPU_MIPSR6 */

#else /* __BIG_ENDIAN */

/* Little-endian LoadHW: sign byte lives at offset 1. */
#define LoadHW(addr, value, res)                            \
		__asm__ __volatile__ (".set\tnoat\n"        \
			"1:\t"user_lb("%0", "1(%2)")"\n"    \
			"2:\t"user_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\t.set\tat\n\t"                  \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

#ifndef CONFIG_CPU_MIPSR6
/* Little-endian word load via lwl/lwr. */
#define LoadW(addr, value, res)                             \
		__asm__ __volatile__ (                      \
			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
			"2:\t"user_lwr("%0", "(%2)")"\n\t"  \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#else
/* MIPSR6 has no lwl instruction */
#define LoadW(addr, value, res)                             \
		__asm__ __volatile__ (                      \
			".set\tpush\n"                      \
			".set\tnoat\n\t"                    \
			"1:"user_lb("%0", "3(%2)")"\n\t"    \
			"2:"user_lbu("$1", "2(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"user_lbu("$1", "1(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"user_lbu("$1", "0(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			".set\tpop\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#endif /* CONFIG_CPU_MIPSR6 */


/* Little-endian unsigned halfword load. */
#define LoadHWU(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"user_lbu("%0", "1(%2)")"\n"   \
			"2:\t"user_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".set\tat\n\t"                      \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

#ifndef CONFIG_CPU_MIPSR6
/* Little-endian unsigned word load; zero-extended with dsll/dsrl. */
#define LoadWU(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
			"2:\t"user_lwr("%0", "(%2)")"\n\t"  \
			"dsll\t%0, %0, 32\n\t"              \
			"dsrl\t%0, %0, 32\n\t"              \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

/* Little-endian doubleword load via ldl/ldr. */
#define LoadDW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			"1:\tldl\t%0, 7(%2)\n"              \
			"2:\tldr\t%0, (%2)\n\t"             \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#else
/* MIPSR6 has not lwl and ldl instructions */
#define LoadWU(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"user_lbu("%0", "3(%2)")"\n\t"   \
			"2:"user_lbu("$1", "2(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"user_lbu("$1", "1(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"user_lbu("$1", "0(%2)")"\n\t"   \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			".set\tpop\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));

/* R6 little-endian doubleword load: bytes merged MSB-first from offset 7. */
#define LoadDW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:lb\t%0, 7(%2)\n\t"               \
			"2:lbu\t$1, 6(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"3:lbu\t$1, 5(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"4:lbu\t$1, 4(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"5:lbu\t$1, 3(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"6:lbu\t$1, 2(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"7:lbu\t$1, 1(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"8:lbu\t$1, 0(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			".set\tpop\n\t"                     \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));
#endif /* CONFIG_CPU_MIPSR6 */

/* Little-endian halfword store: low byte at offset 0. */
#define StoreHW(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"user_sb("%1", "0(%2)")"\n"    \
			"srl\t$1,%1, 0x8\n"                 \
			"2:\t"user_sb("$1", "1(%2)")"\n"    \
			".set\tat\n\t"                      \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));
#ifndef CONFIG_CPU_MIPSR6
/* Little-endian word store via swl/swr. */
#define StoreW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			"1:\t"user_swl("%1", "3(%2)")"\n"   \
			"2:\t"user_swr("%1", "(%2)")"\n\t"  \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));

/* Little-endian doubleword store via sdl/sdr. */
#define StoreDW(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			"1:\tsdl\t%1, 7(%2)\n"              \
			"2:\tsdr\t%1, (%2)\n\t"             \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));
#else
/* MIPSR6 has no swl and sdl instructions */
#define StoreW(addr, value, res)                            \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"user_sb("%1", "0(%2)")"\n\t"    \
			"srl\t$1, %1, 0x8\n\t"              \
			"2:"user_sb("$1", "1(%2)")"\n\t"    \
			"srl\t$1, $1, 0x8\n\t"              \
			"3:"user_sb("$1", "2(%2)")"\n\t"    \
			"srl\t$1, $1, 0x8\n\t"              \
			"4:"user_sb("$1", "3(%2)")"\n\t"    \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (res)                       \
			: "r" (value), "r" (addr), "i" (-EFAULT) \
			: "memory");

/* R6 little-endian doubleword store: byte stores from offset 0 upward. */
#define StoreDW(addr, value, res)                           \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:sb\t%1, 0(%2)\n\t"               \
			"dsrl\t$1, %1, 0x8\n\t"             \
			"2:sb\t$1, 1(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"3:sb\t$1, 2(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"4:sb\t$1, 3(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"5:sb\t$1, 4(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"6:sb\t$1, 5(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"7:sb\t$1, 6(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"8:sb\t$1, 7(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (res)                       \
			: "r" (value), "r" (addr), "i" (-EFAULT) \
			: "memory");
#endif /* CONFIG_CPU_MIPSR6 */
#endif
803
/*
 * emulate_load_store_insn() - emulate a classic-encoding MIPS load/store
 * that raised an address error exception.
 * @regs: trap frame of the faulting context.
 * @addr: the (unaligned) data address that faulted.
 * @pc:   address of the faulting instruction.
 *
 * Decodes the instruction at @pc and, for the supported opcodes, performs
 * the access with the Load*/Store* byte-wise macros above, then advances
 * the EPC past the instruction (compute_return_epc() also resolves a
 * branch delay slot, which is why origpc/orig31 are saved for rollback
 * on the fault path).  Unsupported or nonsensical opcodes raise SIGBUS
 * or SIGILL; a kernel-mode fault without a fixup handler dies.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;
	unsigned long origpc;	/* saved for rollback if emulation faults */
	unsigned long orig31;	/* ditto for $ra, clobbered by branch likely */
	void __user *fault_addr = NULL;
#ifdef CONFIG_EVA
	mm_segment_t seg;
#endif
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of
	 * interest.
	 */
#ifdef CONFIG_EVA
	case spec3_op:
		/*
		 * we can land here only from kernel accessing user memory,
		 * so we need to "switch" the address limit to user space, so
		 * address check can work properly.
		 */
		seg = get_fs();
		set_fs(USER_DS);
		switch (insn.spec3_format.func) {
		case lhe_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lwe_op:
			if (!access_ok(VERIFY_READ, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lhue_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHWU(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case she_op:
			if (!access_ok(VERIFY_WRITE, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreHW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		case swe_op:
			if (!access_ok(VERIFY_WRITE, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		default:
			set_fs(seg);
			goto sigill;
		}
		set_fs(seg);
		break;
#endif
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		/* EPC is advanced before the store so rt is read correctly
		 * even when the instruction sits in a branch delay slot. */
		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/* FP accesses are punted to the full FPU emulator. */
		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		lose_fpu(1);	/* Save FPU state for the emulator. */
		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		/* NOTE(review): later upstream kernels also clear the FCSR
		 * cause bits here after emulation - confirm against the
		 * "Always clear FCSR cause bits after emulation" fix. */
		process_fpemu_return(res, fault_addr);

		if (res == 0)
			break;
		return;

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
1147
/*
 * Recode table from 16-bit register notation to 32-bit GPR.
 * MIPS16e/microMIPS 3-bit register fields name $16, $17, $2..$7.
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/*
 * Recode table from 16-bit STORE register notation to 32-bit GPR.
 * Entry 0 maps to $0, so an encoded 0 stores the zero register.
 */
const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
1153
74338805
DD
1154static void emulate_load_store_microMIPS(struct pt_regs *regs,
1155 void __user *addr)
34c2f668
LY
1156{
1157 unsigned long value;
1158 unsigned int res;
1159 int i;
1160 unsigned int reg = 0, rvar;
1161 unsigned long orig31;
1162 u16 __user *pc16;
1163 u16 halfword;
1164 unsigned int word;
1165 unsigned long origpc, contpc;
1166 union mips_instruction insn;
1167 struct mm_decoded_insn mminsn;
1168 void __user *fault_addr = NULL;
1169
1170 origpc = regs->cp0_epc;
1171 orig31 = regs->regs[31];
1172
1173 mminsn.micro_mips_mode = 1;
1174
1175 /*
1176 * This load never faults.
1177 */
1178 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1179 __get_user(halfword, pc16);
1180 pc16++;
1181 contpc = regs->cp0_epc + 2;
1182 word = ((unsigned int)halfword << 16);
1183 mminsn.pc_inc = 2;
1184
1185 if (!mm_insn_16bit(halfword)) {
1186 __get_user(halfword, pc16);
1187 pc16++;
1188 contpc = regs->cp0_epc + 4;
1189 mminsn.pc_inc = 4;
1190 word |= halfword;
1191 }
1192 mminsn.insn = word;
1193
1194 if (get_user(halfword, pc16))
1195 goto fault;
1196 mminsn.next_pc_inc = 2;
1197 word = ((unsigned int)halfword << 16);
1198
1199 if (!mm_insn_16bit(halfword)) {
1200 pc16++;
1201 if (get_user(halfword, pc16))
1202 goto fault;
1203 mminsn.next_pc_inc = 4;
1204 word |= halfword;
1205 }
1206 mminsn.next_insn = word;
1207
1208 insn = (union mips_instruction)(mminsn.insn);
1209 if (mm_isBranchInstr(regs, mminsn, &contpc))
1210 insn = (union mips_instruction)(mminsn.next_insn);
1211
1212 /* Parse instruction to find what to do */
1213
1214 switch (insn.mm_i_format.opcode) {
1215
1216 case mm_pool32a_op:
1217 switch (insn.mm_x_format.func) {
1218 case mm_lwxs_op:
1219 reg = insn.mm_x_format.rd;
1220 goto loadW;
1221 }
1222
1223 goto sigbus;
1224
1225 case mm_pool32b_op:
1226 switch (insn.mm_m_format.func) {
1227 case mm_lwp_func:
1228 reg = insn.mm_m_format.rd;
1229 if (reg == 31)
1230 goto sigbus;
1231
1232 if (!access_ok(VERIFY_READ, addr, 8))
1233 goto sigbus;
1234
1235 LoadW(addr, value, res);
1236 if (res)
1237 goto fault;
1238 regs->regs[reg] = value;
1239 addr += 4;
1240 LoadW(addr, value, res);
1241 if (res)
1242 goto fault;
1243 regs->regs[reg + 1] = value;
1244 goto success;
1245
1246 case mm_swp_func:
1247 reg = insn.mm_m_format.rd;
1248 if (reg == 31)
1249 goto sigbus;
1250
1251 if (!access_ok(VERIFY_WRITE, addr, 8))
1252 goto sigbus;
1253
1254 value = regs->regs[reg];
1255 StoreW(addr, value, res);
1256 if (res)
1257 goto fault;
1258 addr += 4;
1259 value = regs->regs[reg + 1];
1260 StoreW(addr, value, res);
1261 if (res)
1262 goto fault;
1263 goto success;
1264
1265 case mm_ldp_func:
1266#ifdef CONFIG_64BIT
1267 reg = insn.mm_m_format.rd;
1268 if (reg == 31)
1269 goto sigbus;
1270
1271 if (!access_ok(VERIFY_READ, addr, 16))
1272 goto sigbus;
1273
1274 LoadDW(addr, value, res);
1275 if (res)
1276 goto fault;
1277 regs->regs[reg] = value;
1278 addr += 8;
1279 LoadDW(addr, value, res);
1280 if (res)
1281 goto fault;
1282 regs->regs[reg + 1] = value;
1283 goto success;
1284#endif /* CONFIG_64BIT */
1285
1286 goto sigill;
1287
1288 case mm_sdp_func:
1289#ifdef CONFIG_64BIT
1290 reg = insn.mm_m_format.rd;
1291 if (reg == 31)
1292 goto sigbus;
1293
1294 if (!access_ok(VERIFY_WRITE, addr, 16))
1295 goto sigbus;
1296
1297 value = regs->regs[reg];
1298 StoreDW(addr, value, res);
1299 if (res)
1300 goto fault;
1301 addr += 8;
1302 value = regs->regs[reg + 1];
1303 StoreDW(addr, value, res);
1304 if (res)
1305 goto fault;
1306 goto success;
1307#endif /* CONFIG_64BIT */
1308
1309 goto sigill;
1310
1311 case mm_lwm32_func:
1312 reg = insn.mm_m_format.rd;
1313 rvar = reg & 0xf;
1314 if ((rvar > 9) || !reg)
1315 goto sigill;
1316 if (reg & 0x10) {
1317 if (!access_ok
1318 (VERIFY_READ, addr, 4 * (rvar + 1)))
1319 goto sigbus;
1320 } else {
1321 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1322 goto sigbus;
1323 }
1324 if (rvar == 9)
1325 rvar = 8;
1326 for (i = 16; rvar; rvar--, i++) {
1327 LoadW(addr, value, res);
1328 if (res)
1329 goto fault;
1330 addr += 4;
1331 regs->regs[i] = value;
1332 }
1333 if ((reg & 0xf) == 9) {
1334 LoadW(addr, value, res);
1335 if (res)
1336 goto fault;
1337 addr += 4;
1338 regs->regs[30] = value;
1339 }
1340 if (reg & 0x10) {
1341 LoadW(addr, value, res);
1342 if (res)
1343 goto fault;
1344 regs->regs[31] = value;
1345 }
1346 goto success;
1347
1348 case mm_swm32_func:
1349 reg = insn.mm_m_format.rd;
1350 rvar = reg & 0xf;
1351 if ((rvar > 9) || !reg)
1352 goto sigill;
1353 if (reg & 0x10) {
1354 if (!access_ok
1355 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
1356 goto sigbus;
1357 } else {
1358 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1359 goto sigbus;
1360 }
1361 if (rvar == 9)
1362 rvar = 8;
1363 for (i = 16; rvar; rvar--, i++) {
1364 value = regs->regs[i];
1365 StoreW(addr, value, res);
1366 if (res)
1367 goto fault;
1368 addr += 4;
1369 }
1370 if ((reg & 0xf) == 9) {
1371 value = regs->regs[30];
1372 StoreW(addr, value, res);
1373 if (res)
1374 goto fault;
1375 addr += 4;
1376 }
1377 if (reg & 0x10) {
1378 value = regs->regs[31];
1379 StoreW(addr, value, res);
1380 if (res)
1381 goto fault;
1382 }
1383 goto success;
1384
1385 case mm_ldm_func:
1386#ifdef CONFIG_64BIT
1387 reg = insn.mm_m_format.rd;
1388 rvar = reg & 0xf;
1389 if ((rvar > 9) || !reg)
1390 goto sigill;
1391 if (reg & 0x10) {
1392 if (!access_ok
1393 (VERIFY_READ, addr, 8 * (rvar + 1)))
1394 goto sigbus;
1395 } else {
1396 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1397 goto sigbus;
1398 }
1399 if (rvar == 9)
1400 rvar = 8;
1401
1402 for (i = 16; rvar; rvar--, i++) {
1403 LoadDW(addr, value, res);
1404 if (res)
1405 goto fault;
1406 addr += 4;
1407 regs->regs[i] = value;
1408 }
1409 if ((reg & 0xf) == 9) {
1410 LoadDW(addr, value, res);
1411 if (res)
1412 goto fault;
1413 addr += 8;
1414 regs->regs[30] = value;
1415 }
1416 if (reg & 0x10) {
1417 LoadDW(addr, value, res);
1418 if (res)
1419 goto fault;
1420 regs->regs[31] = value;
1421 }
1422 goto success;
1423#endif /* CONFIG_64BIT */
1424
1425 goto sigill;
1426
1427 case mm_sdm_func:
1428#ifdef CONFIG_64BIT
1429 reg = insn.mm_m_format.rd;
1430 rvar = reg & 0xf;
1431 if ((rvar > 9) || !reg)
1432 goto sigill;
1433 if (reg & 0x10) {
1434 if (!access_ok
1435 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1436 goto sigbus;
1437 } else {
1438 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1439 goto sigbus;
1440 }
1441 if (rvar == 9)
1442 rvar = 8;
1443
1444 for (i = 16; rvar; rvar--, i++) {
1445 value = regs->regs[i];
1446 StoreDW(addr, value, res);
1447 if (res)
1448 goto fault;
1449 addr += 8;
1450 }
1451 if ((reg & 0xf) == 9) {
1452 value = regs->regs[30];
1453 StoreDW(addr, value, res);
1454 if (res)
1455 goto fault;
1456 addr += 8;
1457 }
1458 if (reg & 0x10) {
1459 value = regs->regs[31];
1460 StoreDW(addr, value, res);
1461 if (res)
1462 goto fault;
1463 }
1464 goto success;
1465#endif /* CONFIG_64BIT */
1466
1467 goto sigill;
1468
1469 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1470 }
1471
1472 goto sigbus;
1473
1474 case mm_pool32c_op:
1475 switch (insn.mm_m_format.func) {
1476 case mm_lwu_func:
1477 reg = insn.mm_m_format.rd;
1478 goto loadWU;
1479 }
1480
1481 /* LL,SC,LLD,SCD are not serviced */
1482 goto sigbus;
1483
1484 case mm_pool32f_op:
1485 switch (insn.mm_x_format.func) {
1486 case mm_lwxc1_func:
1487 case mm_swxc1_func:
1488 case mm_ldxc1_func:
1489 case mm_sdxc1_func:
1490 goto fpu_emul;
1491 }
1492
1493 goto sigbus;
1494
1495 case mm_ldc132_op:
1496 case mm_sdc132_op:
1497 case mm_lwc132_op:
1498 case mm_swc132_op:
1499fpu_emul:
1500 /* roll back jump/branch */
1501 regs->cp0_epc = origpc;
1502 regs->regs[31] = orig31;
1503
1504 die_if_kernel("Unaligned FP access in kernel code", regs);
1505 BUG_ON(!used_math());
1506 BUG_ON(!is_fpu_owner());
1507
1508 lose_fpu(1); /* save the FPU state for the emulator */
1509 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1510 &fault_addr);
1511 own_fpu(1); /* restore FPU state */
1512
1513 /* If something went wrong, signal */
1514 process_fpemu_return(res, fault_addr);
1515
1516 if (res == 0)
1517 goto success;
1518 return;
1519
1520 case mm_lh32_op:
1521 reg = insn.mm_i_format.rt;
1522 goto loadHW;
1523
1524 case mm_lhu32_op:
1525 reg = insn.mm_i_format.rt;
1526 goto loadHWU;
1527
1528 case mm_lw32_op:
1529 reg = insn.mm_i_format.rt;
1530 goto loadW;
1531
1532 case mm_sh32_op:
1533 reg = insn.mm_i_format.rt;
1534 goto storeHW;
1535
1536 case mm_sw32_op:
1537 reg = insn.mm_i_format.rt;
1538 goto storeW;
1539
1540 case mm_ld32_op:
1541 reg = insn.mm_i_format.rt;
1542 goto loadDW;
1543
1544 case mm_sd32_op:
1545 reg = insn.mm_i_format.rt;
1546 goto storeDW;
1547
1548 case mm_pool16c_op:
1549 switch (insn.mm16_m_format.func) {
1550 case mm_lwm16_op:
1551 reg = insn.mm16_m_format.rlist;
1552 rvar = reg + 1;
1553 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1554 goto sigbus;
1555
1556 for (i = 16; rvar; rvar--, i++) {
1557 LoadW(addr, value, res);
1558 if (res)
1559 goto fault;
1560 addr += 4;
1561 regs->regs[i] = value;
1562 }
1563 LoadW(addr, value, res);
1564 if (res)
1565 goto fault;
1566 regs->regs[31] = value;
1567
1568 goto success;
1569
1570 case mm_swm16_op:
1571 reg = insn.mm16_m_format.rlist;
1572 rvar = reg + 1;
1573 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1574 goto sigbus;
1575
1576 for (i = 16; rvar; rvar--, i++) {
1577 value = regs->regs[i];
1578 StoreW(addr, value, res);
1579 if (res)
1580 goto fault;
1581 addr += 4;
1582 }
1583 value = regs->regs[31];
1584 StoreW(addr, value, res);
1585 if (res)
1586 goto fault;
1587
1588 goto success;
1589
1590 }
1591
1592 goto sigbus;
1593
1594 case mm_lhu16_op:
1595 reg = reg16to32[insn.mm16_rb_format.rt];
1596 goto loadHWU;
1597
1598 case mm_lw16_op:
1599 reg = reg16to32[insn.mm16_rb_format.rt];
1600 goto loadW;
1601
1602 case mm_sh16_op:
1603 reg = reg16to32st[insn.mm16_rb_format.rt];
1604 goto storeHW;
1605
1606 case mm_sw16_op:
1607 reg = reg16to32st[insn.mm16_rb_format.rt];
1608 goto storeW;
1609
1610 case mm_lwsp16_op:
1611 reg = insn.mm16_r5_format.rt;
1612 goto loadW;
1613
1614 case mm_swsp16_op:
1615 reg = insn.mm16_r5_format.rt;
1616 goto storeW;
1617
1618 case mm_lwgp16_op:
1619 reg = reg16to32[insn.mm16_r3_format.rt];
1620 goto loadW;
1621
1622 default:
1623 goto sigill;
1624 }
1625
1626loadHW:
1627 if (!access_ok(VERIFY_READ, addr, 2))
1628 goto sigbus;
1629
1630 LoadHW(addr, value, res);
1631 if (res)
1632 goto fault;
1633 regs->regs[reg] = value;
1634 goto success;
1635
1636loadHWU:
1637 if (!access_ok(VERIFY_READ, addr, 2))
1638 goto sigbus;
1639
1640 LoadHWU(addr, value, res);
1641 if (res)
1642 goto fault;
1643 regs->regs[reg] = value;
1644 goto success;
1645
1646loadW:
1647 if (!access_ok(VERIFY_READ, addr, 4))
1648 goto sigbus;
1649
1650 LoadW(addr, value, res);
1651 if (res)
1652 goto fault;
1653 regs->regs[reg] = value;
1654 goto success;
1655
1656loadWU:
1657#ifdef CONFIG_64BIT
1658 /*
1659 * A 32-bit kernel might be running on a 64-bit processor. But
1660 * if we're on a 32-bit processor and an i-cache incoherency
1661 * or race makes us see a 64-bit instruction here the sdl/sdr
1662 * would blow up, so for now we don't handle unaligned 64-bit
1663 * instructions on 32-bit kernels.
1664 */
1665 if (!access_ok(VERIFY_READ, addr, 4))
1666 goto sigbus;
1667
1668 LoadWU(addr, value, res);
1669 if (res)
1670 goto fault;
1671 regs->regs[reg] = value;
1672 goto success;
1673#endif /* CONFIG_64BIT */
1674
1675 /* Cannot handle 64-bit instructions in 32-bit kernel */
1676 goto sigill;
1677
1678loadDW:
1679#ifdef CONFIG_64BIT
1680 /*
1681 * A 32-bit kernel might be running on a 64-bit processor. But
1682 * if we're on a 32-bit processor and an i-cache incoherency
1683 * or race makes us see a 64-bit instruction here the sdl/sdr
1684 * would blow up, so for now we don't handle unaligned 64-bit
1685 * instructions on 32-bit kernels.
1686 */
1687 if (!access_ok(VERIFY_READ, addr, 8))
1688 goto sigbus;
1689
1690 LoadDW(addr, value, res);
1691 if (res)
1692 goto fault;
1693 regs->regs[reg] = value;
1694 goto success;
1695#endif /* CONFIG_64BIT */
1696
1697 /* Cannot handle 64-bit instructions in 32-bit kernel */
1698 goto sigill;
1699
1700storeHW:
1701 if (!access_ok(VERIFY_WRITE, addr, 2))
1702 goto sigbus;
1703
1704 value = regs->regs[reg];
1705 StoreHW(addr, value, res);
1706 if (res)
1707 goto fault;
1708 goto success;
1709
1710storeW:
1711 if (!access_ok(VERIFY_WRITE, addr, 4))
1712 goto sigbus;
1713
1714 value = regs->regs[reg];
1715 StoreW(addr, value, res);
1716 if (res)
1717 goto fault;
1718 goto success;
1719
1720storeDW:
1721#ifdef CONFIG_64BIT
1722 /*
1723 * A 32-bit kernel might be running on a 64-bit processor. But
1724 * if we're on a 32-bit processor and an i-cache incoherency
1725 * or race makes us see a 64-bit instruction here the sdl/sdr
1726 * would blow up, so for now we don't handle unaligned 64-bit
1727 * instructions on 32-bit kernels.
1728 */
1729 if (!access_ok(VERIFY_WRITE, addr, 8))
1730 goto sigbus;
1731
1732 value = regs->regs[reg];
1733 StoreDW(addr, value, res);
1734 if (res)
1735 goto fault;
1736 goto success;
1737#endif /* CONFIG_64BIT */
1738
1739 /* Cannot handle 64-bit instructions in 32-bit kernel */
1740 goto sigill;
1741
1742success:
1743 regs->cp0_epc = contpc; /* advance or branch */
1744
1745#ifdef CONFIG_DEBUG_FS
1746 unaligned_instructions++;
1747#endif
1748 return;
1749
1750fault:
1751 /* roll back jump/branch */
1752 regs->cp0_epc = origpc;
1753 regs->regs[31] = orig31;
1da177e4
LT
1754 /* Did we have an exception handler installed? */
1755 if (fixup_exception(regs))
7f18f151 1756 return;
1da177e4 1757
49a89efb 1758 die_if_kernel("Unhandled kernel unaligned access", regs);
a6d5ff04 1759 force_sig(SIGSEGV, current);
1da177e4 1760
7f18f151 1761 return;
1da177e4
LT
1762
1763sigbus:
1764 die_if_kernel("Unhandled kernel unaligned access", regs);
a6d5ff04 1765 force_sig(SIGBUS, current);
1da177e4 1766
7f18f151 1767 return;
1da177e4
LT
1768
1769sigill:
34c2f668
LY
1770 die_if_kernel
1771 ("Unhandled kernel unaligned access or invalid instruction", regs);
a6d5ff04 1772 force_sig(SIGILL, current);
1da177e4
LT
1773}
1774
451b001b
SH
/*
 * Emulate an unaligned MIPS16e load/store whose data address is @addr.
 *
 * The faulting instruction is re-fetched from EPC (skipping a leading
 * EXTEND prefix, or the jump when the access sat in a delay slot),
 * decoded in two passes (first pick the GPR, then perform the access
 * with the LoadX/StoreX helpers), and EPC is advanced via
 * MIPS16e_compute_return_epc().  On a fault the PC and $31 are rolled
 * back before a signal is raised.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;

	/* Saved so the fault path can undo any branch emulation. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	/* Keep the original word: EPC advance needs the un-skipped insn. */
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/* First pass: work out which GPR the instruction names. */
	switch (mips16inst.ri.opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_i8_op:
		/* Only SWRASP within the I8 group touches memory here. */
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/* Second pass: perform the access itself. */
	switch (mips16inst.ri.opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* Byte accesses can never be misaligned. */
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		/* For stores EPC is advanced before the register is read. */
		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
fc192e50 2019
1da177e4
LT
2020asmlinkage void do_ade(struct pt_regs *regs)
2021{
c3fc5cd5 2022 enum ctx_state prev_state;
fe00f943 2023 unsigned int __user *pc;
1da177e4 2024 mm_segment_t seg;
1da177e4 2025
c3fc5cd5 2026 prev_state = exception_enter();
7f788d2d 2027 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
a8b0ca17 2028 1, regs, regs->cp0_badvaddr);
1da177e4
LT
2029 /*
2030 * Did we catch a fault trying to load an instruction?
1da177e4 2031 */
34c2f668 2032 if (regs->cp0_badvaddr == regs->cp0_epc)
1da177e4
LT
2033 goto sigbus;
2034
293c5bd1 2035 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1da177e4 2036 goto sigbus;
6312e0ee
AN
2037 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2038 goto sigbus;
1da177e4
LT
2039
2040 /*
2041 * Do branch emulation only if we didn't forward the exception.
2042 * This is all so but ugly ...
2043 */
34c2f668
LY
2044
2045 /*
2046 * Are we running in microMIPS mode?
2047 */
2048 if (get_isa16_mode(regs->cp0_epc)) {
2049 /*
2050 * Did we catch a fault trying to load an instruction in
2051 * 16-bit mode?
2052 */
2053 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2054 goto sigbus;
2055 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2056 show_registers(regs);
2057
2058 if (cpu_has_mmips) {
2059 seg = get_fs();
2060 if (!user_mode(regs))
2061 set_fs(KERNEL_DS);
2062 emulate_load_store_microMIPS(regs,
2063 (void __user *)regs->cp0_badvaddr);
2064 set_fs(seg);
2065
2066 return;
2067 }
2068
451b001b
SH
2069 if (cpu_has_mips16) {
2070 seg = get_fs();
2071 if (!user_mode(regs))
2072 set_fs(KERNEL_DS);
2073 emulate_load_store_MIPS16e(regs,
2074 (void __user *)regs->cp0_badvaddr);
2075 set_fs(seg);
2076
2077 return;
2078 }
2079
34c2f668
LY
2080 goto sigbus;
2081 }
2082
2083 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2084 show_registers(regs);
2085 pc = (unsigned int __user *)exception_epc(regs);
2086
1da177e4
LT
2087 seg = get_fs();
2088 if (!user_mode(regs))
2089 set_fs(KERNEL_DS);
7f18f151 2090 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1da177e4
LT
2091 set_fs(seg);
2092
2093 return;
2094
2095sigbus:
2096 die_if_kernel("Kernel unaligned instruction access", regs);
2097 force_sig(SIGBUS, current);
2098
2099 /*
2100 * XXX On return from the signal handler we should advance the epc
2101 */
c3fc5cd5 2102 exception_exit(prev_state);
1da177e4 2103}
6312e0ee
AN
2104
2105#ifdef CONFIG_DEBUG_FS
2106extern struct dentry *mips_debugfs_dir;
2107static int __init debugfs_unaligned(void)
2108{
2109 struct dentry *d;
2110
2111 if (!mips_debugfs_dir)
2112 return -ENODEV;
2113 d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
2114 mips_debugfs_dir, &unaligned_instructions);
b517531c
Z
2115 if (!d)
2116 return -ENOMEM;
6312e0ee
AN
2117 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2118 mips_debugfs_dir, &unaligned_action);
b517531c
Z
2119 if (!d)
2120 return -ENOMEM;
6312e0ee
AN
2121 return 0;
2122}
2123__initcall(debugfs_unaligned);
2124#endif