/* align.c - handle alignment exceptions for the Power PC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *    PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *    PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *    64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *                    <benh@kernel.crashing.org>
 *    Merge ppc32 and ppc64 implementations
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#define IS_XFORM(inst)	(((inst) >> 26) == 31)
#define IS_DSFORM(inst)	(((inst) >> 26) >= 56)

#define INVALID	{ 0, 0 }

/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define F	4	/* to/from fp regs */
#define U	8	/* update index register */
#define M	0x10	/* multiple load/store */
#define SW	0x20	/* byte swap */
#define S	0x40	/* single-precision fp or... */
#define SX	0x40	/* ... byte count in XER */
#define HARD	0x80	/* string, stwcx. */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */
#define SPLT	0x80	/* VSX SPLAT load */

/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */

#define SWAP(a, b)	(t = (a), (a) = (b), (b) = t)
/*
 * The PowerPC stores certain bits of the instruction that caused the
 * alignment exception in the DSISR register.  This array maps those
 * bits to information about the operand length and what the
 * instruction would do.
 */
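/*
 * A note on reading the table: the binary string in each entry's comment
 * (e.g. "00 0 0111" for lmw) is that entry's index written in binary,
 * most-significant bit first -- the same 7-bit value that fix_alignment()
 * assembles from the DSISR before indexing aligninfo[].
 */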
static struct aligninfo aligninfo[128] = {
	{ 4, LD },		/* 00 0 0000: lwz / lwarx */
	INVALID,		/* 00 0 0001 */
	{ 4, ST },		/* 00 0 0010: stw */
	INVALID,		/* 00 0 0011 */
	{ 2, LD },		/* 00 0 0100: lhz */
	{ 2, LD+SE },		/* 00 0 0101: lha */
	{ 2, ST },		/* 00 0 0110: sth */
	{ 4, LD+M },		/* 00 0 0111: lmw */
	{ 4, LD+F+S },		/* 00 0 1000: lfs */
	{ 8, LD+F },		/* 00 0 1001: lfd */
	{ 4, ST+F+S },		/* 00 0 1010: stfs */
	{ 8, ST+F },		/* 00 0 1011: stfd */
	INVALID,		/* 00 0 1100 */
	{ 8, LD },		/* 00 0 1101: ld/ldu/lwa */
	INVALID,		/* 00 0 1110 */
	{ 8, ST },		/* 00 0 1111: std/stdu */
	{ 4, LD+U },		/* 00 1 0000: lwzu */
	INVALID,		/* 00 1 0001 */
	{ 4, ST+U },		/* 00 1 0010: stwu */
	INVALID,		/* 00 1 0011 */
	{ 2, LD+U },		/* 00 1 0100: lhzu */
	{ 2, LD+SE+U },		/* 00 1 0101: lhau */
	{ 2, ST+U },		/* 00 1 0110: sthu */
	{ 4, ST+M },		/* 00 1 0111: stmw */
	{ 4, LD+F+S+U },	/* 00 1 1000: lfsu */
	{ 8, LD+F+U },		/* 00 1 1001: lfdu */
	{ 4, ST+F+S+U },	/* 00 1 1010: stfsu */
	{ 8, ST+F+U },		/* 00 1 1011: stfdu */
	{ 16, LD+F },		/* 00 1 1100: lfdp */
	INVALID,		/* 00 1 1101 */
	{ 16, ST+F },		/* 00 1 1110: stfdp */
	INVALID,		/* 00 1 1111 */
	{ 8, LD },		/* 01 0 0000: ldx */
	INVALID,		/* 01 0 0001 */
	{ 8, ST },		/* 01 0 0010: stdx */
	INVALID,		/* 01 0 0011 */
	INVALID,		/* 01 0 0100 */
	{ 4, LD+SE },		/* 01 0 0101: lwax */
	INVALID,		/* 01 0 0110 */
	INVALID,		/* 01 0 0111 */
	{ 4, LD+M+HARD+SX },	/* 01 0 1000: lswx */
	{ 4, LD+M+HARD },	/* 01 0 1001: lswi */
	{ 4, ST+M+HARD+SX },	/* 01 0 1010: stswx */
	{ 4, ST+M+HARD },	/* 01 0 1011: stswi */
	INVALID,		/* 01 0 1100 */
	{ 8, LD+U },		/* 01 0 1101: ldu */
	INVALID,		/* 01 0 1110 */
	{ 8, ST+U },		/* 01 0 1111: stdu */
	{ 8, LD+U },		/* 01 1 0000: ldux */
	INVALID,		/* 01 1 0001 */
	{ 8, ST+U },		/* 01 1 0010: stdux */
	INVALID,		/* 01 1 0011 */
	INVALID,		/* 01 1 0100 */
	{ 4, LD+SE+U },		/* 01 1 0101: lwaux */
	INVALID,		/* 01 1 0110 */
	INVALID,		/* 01 1 0111 */
	INVALID,		/* 01 1 1000 */
	INVALID,		/* 01 1 1001 */
	INVALID,		/* 01 1 1010 */
	INVALID,		/* 01 1 1011 */
	INVALID,		/* 01 1 1100 */
	INVALID,		/* 01 1 1101 */
	INVALID,		/* 01 1 1110 */
	INVALID,		/* 01 1 1111 */
	INVALID,		/* 10 0 0000 */
	INVALID,		/* 10 0 0001 */
	INVALID,		/* 10 0 0010: stwcx. */
	INVALID,		/* 10 0 0011 */
	INVALID,		/* 10 0 0100 */
	INVALID,		/* 10 0 0101 */
	INVALID,		/* 10 0 0110 */
	INVALID,		/* 10 0 0111 */
	{ 4, LD+SW },		/* 10 0 1000: lwbrx */
	INVALID,		/* 10 0 1001 */
	{ 4, ST+SW },		/* 10 0 1010: stwbrx */
	INVALID,		/* 10 0 1011 */
	{ 2, LD+SW },		/* 10 0 1100: lhbrx */
	{ 4, LD+SE },		/* 10 0 1101: lwa */
	{ 2, ST+SW },		/* 10 0 1110: sthbrx */
	INVALID,		/* 10 0 1111 */
	INVALID,		/* 10 1 0000 */
	INVALID,		/* 10 1 0001 */
	INVALID,		/* 10 1 0010 */
	INVALID,		/* 10 1 0011 */
	INVALID,		/* 10 1 0100 */
	INVALID,		/* 10 1 0101 */
	INVALID,		/* 10 1 0110 */
	INVALID,		/* 10 1 0111 */
	INVALID,		/* 10 1 1000 */
	INVALID,		/* 10 1 1001 */
	INVALID,		/* 10 1 1010 */
	INVALID,		/* 10 1 1011 */
	INVALID,		/* 10 1 1100 */
	INVALID,		/* 10 1 1101 */
	INVALID,		/* 10 1 1110 */
	{ 0, ST+HARD },		/* 10 1 1111: dcbz */
	{ 4, LD },		/* 11 0 0000: lwzx */
	INVALID,		/* 11 0 0001 */
	{ 4, ST },		/* 11 0 0010: stwx */
	INVALID,		/* 11 0 0011 */
	{ 2, LD },		/* 11 0 0100: lhzx */
	{ 2, LD+SE },		/* 11 0 0101: lhax */
	{ 2, ST },		/* 11 0 0110: sthx */
	INVALID,		/* 11 0 0111 */
	{ 4, LD+F+S },		/* 11 0 1000: lfsx */
	{ 8, LD+F },		/* 11 0 1001: lfdx */
	{ 4, ST+F+S },		/* 11 0 1010: stfsx */
	{ 8, ST+F },		/* 11 0 1011: stfdx */
	{ 16, LD+F },		/* 11 0 1100: lfdpx */
	{ 4, LD+F+SE },		/* 11 0 1101: lfiwax */
	{ 16, ST+F },		/* 11 0 1110: stfdpx */
	{ 4, ST+F },		/* 11 0 1111: stfiwx */
	{ 4, LD+U },		/* 11 1 0000: lwzux */
	INVALID,		/* 11 1 0001 */
	{ 4, ST+U },		/* 11 1 0010: stwux */
	INVALID,		/* 11 1 0011 */
	{ 2, LD+U },		/* 11 1 0100: lhzux */
	{ 2, LD+SE+U },		/* 11 1 0101: lhaux */
	{ 2, ST+U },		/* 11 1 0110: sthux */
	INVALID,		/* 11 1 0111 */
	{ 4, LD+F+S+U },	/* 11 1 1000: lfsux */
	{ 8, LD+F+U },		/* 11 1 1001: lfdux */
	{ 4, ST+F+S+U },	/* 11 1 1010: stfsux */
	{ 8, ST+F+U },		/* 11 1 1011: stfdux */
	INVALID,		/* 11 1 1100 */
	INVALID,		/* 11 1 1101 */
	INVALID,		/* 11 1 1110 */
	INVALID,		/* 11 1 1111 */
};
/*
 * Create a DSISR value from the instruction
 */
static inline unsigned make_dsisr(unsigned instr)
{
	unsigned dsisr;

	/* bits  6:15 --> 22:31 */
	dsisr = (instr & 0x03ff0000) >> 16;

	if (IS_XFORM(instr)) {
		/* bits 29:30 --> 15:16 */
		dsisr |= (instr & 0x00000006) << 14;
		/* bit     25 -->    17 */
		dsisr |= (instr & 0x00000040) << 8;
		/* bits 21:24 --> 18:21 */
		dsisr |= (instr & 0x00000780) << 3;
	} else {
		/* bit      5 -->    17 */
		dsisr |= (instr & 0x04000000) >> 12;
		/* bits  1: 4 --> 18:21 */
		dsisr |= (instr & 0x78000000) >> 17;
		/* bits 30:31 --> 12:13 */
		if (IS_DSFORM(instr))
			dsisr |= (instr & 0x00000003) << 18;
	}

	return dsisr;
}
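/*
 * Note: the value built here mirrors what hardware reports in the DSISR
 * on CPUs that do populate it, so the rest of fix_alignment() can decode
 * both the same way.  For example, the "bits 6:15 --> 22:31" move above
 * drops RT/RS and RA into the low ten bits, which fix_alignment() later
 * picks apart as reg = (dsisr >> 5) & 0x1f and areg = dsisr & 0x1f.
 */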
/*
 * The dcbz (data cache block zero) instruction
 * gives an alignment fault if used on non-cacheable
 * memory.  We handle the fault mainly for the
 * case when we are running with the cache disabled
 * for debugging.
 */
static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
{
	long __user *p;
	int i, size;

#ifdef __powerpc64__
	size = ppc64_caches.dline_size;
#else
	size = L1_CACHE_BYTES;
#endif
	p = (long __user *) (regs->dar & -size);
	if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
		return -EFAULT;
	for (i = 0; i < size / sizeof(long); ++i)
		if (__put_user_inatomic(0, p+i))
			return -EFAULT;
	return 1;
}
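/*
 * Worked example (illustrative values): with a 32-byte cache block and
 * regs->dar == 0x1234, p = 0x1234 & -32 = 0x1220, and the loop above
 * stores zero to the 32 / sizeof(long) longs making up that block.
 */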
/*
 * Emulate load & store multiple instructions
 * On 64-bit machines, these instructions only affect/use the
 * bottom 4 bytes of each register, and the loads clear the
 * top 4 bytes of the affected register.
 */
#ifdef CONFIG_PPC64
#define REG_BYTE(rp, i)		*((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i)		*((u8 *)(rp) + (i))
#endif
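/*
 * Example of the 64-bit REG_BYTE variant above: REG_BYTE(rp, 5) expands to
 * *((u8 *)((rp) + 1) + 1 + 4), i.e. byte 5 of the second 8-byte GPR slot,
 * which is the second byte of that register's low 4 bytes -- consistent
 * with the comment above about only the bottom 4 bytes being used.
 */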
#define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
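/*
 * SWIZ_PTR illustration: with swiz == 7 (set below in fix_alignment() for
 * "PowerPC little endian" mode), SWIZ_PTR(p) touches byte p ^ 7, so the
 * byte at offset 3 within an 8-byte unit is actually transferred at
 * offset 4; with swiz == 0 it degenerates to a plain byte pointer.
 */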
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
			    unsigned int reg, unsigned int nb,
			    unsigned int flags, unsigned int instr,
			    unsigned long swiz)
{
	unsigned long *rptr;
	unsigned int nb0, i, bswiz;
	unsigned long p;
	/*
	 * We do not try to emulate 8-byte multiples as they aren't really
	 * available in our operating environments and we don't try to
	 * emulate multiple operations in kernel land as they should never
	 * be used/generated there, at least not on unaligned boundaries.
	 */
	if (unlikely((nb > 4) || !user_mode(regs)))
		return 0;
	/* lmw, stmw, lswi/x, stswi/x */
	nb0 = 0;
	if (flags & HARD) {
		if (flags & SX) {
			/* lswx/stswx: the byte count comes from XER */
			nb = regs->xer & 127;
		} else {
			/* lswi/stswi: the byte count is encoded in the instruction */
			unsigned long pc = regs->nip ^ (swiz & 4);

			if (__get_user_inatomic(instr,
						(unsigned int __user *)pc))
				return -EFAULT;
			if (swiz == 0 && (flags & SW))
				instr = cpu_to_le32(instr);
			nb = (instr >> 11) & 0x1f;
		}
		/* string ops may spill past r31 and wrap around to r0 */
		if (nb + reg * 4 > 128) {
			nb0 = nb + reg * 4 - 128;
			nb = 128 - reg * 4;
		}
	} else {
		/* lmw/stmw transfer registers reg..31 */
		nb = (32 - reg) * 4;
	}
	if (!access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), addr, nb+nb0))
		return -EFAULT;	/* bad address */

	rptr = &regs->gpr[reg];
	p = (unsigned long) addr;
	bswiz = (flags & SW) ? 3 : 0;

	if (!(flags & ST)) {
		/*
		 * This zeroes the top 4 bytes of the affected registers
		 * in 64-bit mode, and also zeroes out any remaining
		 * bytes of the last register for lsw*.
		 */
		memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
		if (nb0 > 0)
			memset(&regs->gpr[0], 0,
			       ((nb0 + 3) / 4) * sizeof(unsigned long));
		for (i = 0; i < nb; ++i, ++p)
			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			for (i = 0; i < nb0; ++i, ++p)
				if (__get_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	} else {
		for (i = 0; i < nb; ++i, ++p)
			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			for (i = 0; i < nb0; ++i, ++p)
				if (__put_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	}
	return 1;
}
/*
 * Emulate floating-point pair loads and stores.
 * Only POWER6 has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
			   unsigned int flags)
{
	char *ptr = (char *) &current->thread.TS_FPR(reg);
	int i, ret;

	if (reg & 1)
		return 0;	/* invalid form: FRS/FRT must be even */
	if (!(flags & SW)) {
		/* not byte-swapped - easy */
		if (!(flags & ST))
			ret = __copy_from_user(ptr, addr, 16);
		else
			ret = __copy_to_user(addr, ptr, 16);
	} else {
		/* each FPR value is byte-swapped separately */
		ret = 0;
		for (i = 0; i < 16; ++i) {
			if (!(flags & ST))
				ret |= __get_user(ptr[i^7], addr + i);
			else
				ret |= __put_user(ptr[i^7], addr + i);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
#ifdef CONFIG_SPE

static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */

	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};
#define EVLHHESPLAT	0x04
#define EVLHHOUSPLAT	0x06
#define EVLHHOSSPLAT	0x07
#define EVLWWSPLAT	0x0C
#define EVLWHSPLAT	0x0E
/*
 * Emulate SPE loads and stores.
 * Only Book-E has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       unsigned int instr)
{
	int t, ret;
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags;
	instr = (instr >> 1) & 0x1f;

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);
	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (flags & ST) {
		data.ll = 0;
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		}
	} else {
		temp.ll = data.ll = 0;
		ret = 0;
		p = addr;

		/* the nb cases deliberately fall through */
		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(temp.v[0], p++);
			ret |= __get_user_inatomic(temp.v[1], p++);
			ret |= __get_user_inatomic(temp.v[2], p++);
			ret |= __get_user_inatomic(temp.v[3], p++);
		case 4:
			ret |= __get_user_inatomic(temp.v[4], p++);
			ret |= __get_user_inatomic(temp.v[5], p++);
		case 2:
			ret |= __get_user_inatomic(temp.v[6], p++);
			ret |= __get_user_inatomic(temp.v[7], p++);
			if (unlikely(ret))
				return -EFAULT;
		}
		switch (instr) {
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			data.ll = temp.ll;
		}
	}
	/*
	 * Byte-swap the data: E8 reverses the whole doubleword, E4 each
	 * word, otherwise each halfword.
	 */
	switch (flags & 0xf0) {
	case E8:
		SWAP(data.v[0], data.v[7]);
		SWAP(data.v[1], data.v[6]);
		SWAP(data.v[2], data.v[5]);
		SWAP(data.v[3], data.v[4]);
		break;
	case E4:
		SWAP(data.v[0], data.v[3]);
		SWAP(data.v[1], data.v[2]);
		SWAP(data.v[4], data.v[7]);
		SWAP(data.v[5], data.v[6]);
		break;
	/* It's half word endian */
	default:
		SWAP(data.v[0], data.v[1]);
		SWAP(data.v[2], data.v[3]);
		SWAP(data.v[4], data.v[5]);
		SWAP(data.v[6], data.v[7]);
		break;
	}
	/* Sign-extend loaded halfwords if requested */
	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}
	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = addr;

		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], p++);
			ret |= __put_user_inatomic(data.v[1], p++);
			ret |= __put_user_inatomic(data.v[2], p++);
			ret |= __put_user_inatomic(data.v[3], p++);
		case 4:
			ret |= __put_user_inatomic(data.v[4], p++);
			ret |= __put_user_inatomic(data.v[5], p++);
		case 2:
			ret |= __put_user_inatomic(data.v[6], p++);
			ret |= __put_user_inatomic(data.v[7], p++);
		}
		if (unlikely(ret))
			return -EFAULT;
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;
}
#endif /* CONFIG_SPE */
/*
 * Emulate VSX instructions...
 */
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
		       unsigned int areg, struct pt_regs *regs,
		       unsigned int flags, unsigned int length)
{
	char *ptr = (char *) &current->thread.TS_FPR(reg);
	int ret = 0;

	flush_vsx_to_thread(current);

	if (flags & ST)
		ret = __copy_to_user(addr, ptr, length);
	else {
		/* a splat load duplicates the data into both halves */
		if (flags & SPLT) {
			ret = __copy_from_user(ptr, addr, length);
			ptr += length;
		}
		ret |= __copy_from_user(ptr, addr, length);
	}

	if (flags & U)
		regs->gpr[areg] = regs->dar;

	if (ret)
		return -EFAULT;
	return 1;
}
/*
 * Called on alignment exception.  Attempts to fix it up.
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 */

int fix_alignment(struct pt_regs *regs)
{
	unsigned int instr, nb, flags, instruction = 0;
	unsigned int reg, areg;
	unsigned int dsisr;
	unsigned char __user *addr;
	unsigned long p, swiz;
	int ret, t;
	union {
		u64 ll;
		double dd;
		unsigned char v[8];
		struct {
			unsigned hi32;
			int	 low32;
		} x32;
		struct {
			unsigned char hi48[6];
			short	 low16;
		} x16;
	} data;
	/*
	 * We require a complete register set, if not, then our assembly
	 * is broken.
	 */
	CHECK_FULL_REGS(regs);

	dsisr = regs->dsisr;
	/* Some processors don't provide us with a DSISR we can use here,
	 * let's make one up from the instruction
	 */
	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
		unsigned long pc = regs->nip;

		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
			pc ^= 4;
		if (unlikely(__get_user_inatomic(instr,
						 (unsigned int __user *)pc)))
			return -EFAULT;
		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
			instr = cpu_to_le32(instr);
		dsisr = make_dsisr(instr);
		instruction = instr;
	}
	/* extract the operation and registers from the dsisr */
	reg = (dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = dsisr & 0x1f;		/* register to update */
#ifdef CONFIG_SPE
	if ((instr >> 26) == 0x4)
		return emulate_spe(regs, reg, instr);
#endif

	instr = (dsisr >> 10) & 0x7f;
	instr |= (dsisr >> 13) & 0x60;
	/* Lookup the operation in our table */
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;
	/* Byteswap little endian loads and stores */
	swiz = 0;
	if (regs->msr & MSR_LE) {
		flags ^= SW;
		/*
		 * So-called "PowerPC little endian" mode works by
		 * swizzling addresses rather than by actually doing
		 * any byte-swapping.  To emulate this, we XOR each
		 * byte address with 7.  We also byte-swap, because
		 * the processor's address swizzling depends on the
		 * operand size (it xors the address with 7 for bytes,
		 * 6 for halfwords, 4 for words, 0 for doublewords) but
		 * we will xor with 7 and load/store each byte separately.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			swiz = 7;
	}
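	/*
	 * For example: with swiz == 7, a word operand at effective address
	 * 0x1000 is emulated as byte accesses to 0x1007, 0x1006, 0x1005 and
	 * 0x1004, and the extra byte swap puts them back in order --
	 * equivalent to the hardware's xor-with-4 swizzle for word operands.
	 */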
	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;
	if ((instruction & 0xfc00003e) == 0x7c000018) {
		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
		reg |= (instruction & 0x1) << 5;
		/* Simple inline decoder instead of a table */
		if (instruction & 0x200)
			nb = 16;
		else if (instruction & 0x080)
			nb = 8;
		else
			nb = 4;
		flags = 0;
		if (instruction & 0x100)
			flags |= ST;
		if (instruction & 0x040)
			flags |= U;
		/* splat load needs a special decoder */
		if ((instruction & 0x400) == 0) {
			flags |= SPLT;
			nb = 8;
		}
		return emulate_vsx(addr, reg, areg, regs, flags, nb);
	}
	/* A size of 0 indicates an instruction we don't support, with
	 * the exception of DCBZ which is handled as a special case here
	 */
	if (instr == DCBZ)
		return emulate_dcbz(regs, addr);
	if (unlikely(nb == 0))
		return 0;
	/* Load/Store Multiple instructions are handled in their own
	 * function
	 */
	if (flags & M)
		return emulate_multiple(regs, addr, reg, nb,
					flags, instr, swiz);
	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;
	/* Force the fprs into the save area so we can reference them */
	if (flags & F) {
		/* userland only */
		if (unlikely(!user_mode(regs)))
			return 0;
		flush_fp_to_thread(current);
	}
	/* Special case for 16-byte FP loads and stores */
	if (nb == 16)
		return emulate_fp_pair(addr, reg, flags);
	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (!(flags & ST)) {
		data.ll = 0;
		ret = 0;
		p = (unsigned long) addr;

		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
		case 4:
			ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
		case 2:
			ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
			if (unlikely(ret))
				return -EFAULT;
		}
	} else if (flags & F) {
		data.dd = current->thread.TS_FPR(reg);
		if (flags & S) {
			/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
			preempt_disable();
			enable_kernel_fp();
			cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
			preempt_enable();
#else
			return 0;
#endif
		}
	} else
		data.ll = regs->gpr[reg];
	if (flags & SW) {
		switch (nb) {
		case 8:
			SWAP(data.v[0], data.v[7]);
			SWAP(data.v[1], data.v[6]);
			SWAP(data.v[2], data.v[5]);
			SWAP(data.v[3], data.v[4]);
			break;
		case 4:
			SWAP(data.v[4], data.v[7]);
			SWAP(data.v[5], data.v[6]);
			break;
		case 2:
			SWAP(data.v[6], data.v[7]);
			break;
		}
	}
	/* Perform other misc operations like sign extension
	 * or floating point single precision conversion
	 */
	switch (flags & ~(U|SW)) {
	case LD+SE:	/* sign extending integer loads */
	case LD+F+SE:	/* sign extend for lfiwax */
		if (nb == 2)
			data.ll = data.x16.low16;
		else	/* nb must be 4 */
			data.ll = data.x32.low32;
		break;

	/* Single-precision FP load requires conversion... */
	case LD+F+S:
#ifdef CONFIG_PPC_FPU
		preempt_disable();
		enable_kernel_fp();
		cvt_fd((float *)&data.v[4], &data.dd, &current->thread);
		preempt_enable();
#else
		return 0;
#endif
		break;
	}
	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = (unsigned long) addr;

		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
		case 4:
			ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
		case 2:
			ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
		}
		if (unlikely(ret))
			return -EFAULT;
	} else if (flags & F)
		current->thread.TS_FPR(reg) = data.dd;
	else
		regs->gpr[reg] = data.ll;
	/* Update RA as needed */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}