/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/gdbstub.h"
#include "helper.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "internals.h"

/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
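/* (In C, division by zero and LLONG_MIN / -1 are undefined behaviour, while
 * the architecture defines UDIV/SDIV to return 0 and LLONG_MIN respectively
 * for those inputs, so both cases must be checked explicitly.)
 */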
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}

uint64_t HELPER(clz64)(uint64_t x)
{
    return clz64(x);
}

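/* CLS ("count leading sign bits") counts how many bits immediately below the
 * sign bit are copies of it, which is exactly what clrsb32/clrsb64 compute.
 */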
uint64_t HELPER(cls64)(uint64_t x)
{
    return clrsb64(x);
}

uint32_t HELPER(cls32)(uint32_t x)
{
    return clrsb32(x);
}

uint32_t HELPER(clz32)(uint32_t x)
{
    return clz32(x);
}

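/* Reverse the bit order of a 64-bit value. For example, for an input of
 * 0x0000000000000001ULL:
 *   byte swap   -> 0x0100000000000000
 *   nibble swap -> 0x1000000000000000
 *   bit swap    -> 0x8000000000000000
 * i.e. bit 0 of the input ends up as bit 63 of the result.
 */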
uint64_t HELPER(rbit64)(uint64_t x)
{
    /* assign the correct byte position */
    x = bswap64(x);

    /* assign the correct nibble position */
    x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);

    /* assign the correct bit position */
    x = ((x & 0x8888888888888888ULL) >> 3)
      | ((x & 0x4444444444444444ULL) >> 1)
      | ((x & 0x2222222222222222ULL) << 1)
      | ((x & 0x1111111111111111ULL) << 3);

    return x;
}

/* Convert a softfloat float_relation_* result (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
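/* (The mapping matches the FCMP/FCMPE flag encoding:
 *   equal     -> Z C   (0110)
 *   less      -> N     (1000)
 *   greater   -> C     (0010)
 *   unordered -> C V   (0011)
 * )
 */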
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}

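/* (The cmpes/cmped variants use signalling compares, which raise Invalid
 * Operation for any NaN operand, as FCMPE requires; the quiet cmps/cmpd
 * variants only do so for signalling NaNs, as FCMP requires.)
 */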
uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}

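/* FMULX differs from an ordinary multiply only for 0 * infinity, which
 * returns +/-2.0 (sign = sign(a) XOR sign(b)) instead of the default NaN.
 */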
float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}

float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}

uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct vfp.regs[] element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
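            /* For example, with rn == 31 and index == 20 we get
             * elt == (62 + 2) % 64 == 0 and bitidx == 32, i.e. the
             * byte is bits [39:32] of V0: the table has wrapped
             * around from V31 to V0.
             */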
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8);

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}

/* Helper function for 64 bit polynomial multiply case:
 * perform PolynomialMult(op1, op2) and return either the top or
 * bottom half of the 128 bit result.
 */
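/* (Polynomial multiplication is carry-less: partial products are combined
 * with XOR rather than addition, so e.g. PolynomialMult(0x3, 0x3) == 0x5.)
 */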
uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    for (bitnum = 0; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 << bitnum;
        }
    }
    return res;
}

uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    /* bit 0 of op1 can't influence the high 64 bits at all */
    for (bitnum = 1; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 >> (64 - bitnum);
        }
    }
    return res;
}

/* 64bit/double versions of the neon float compare functions */
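/* (Each returns an all-ones or all-zeroes mask, hence the negation of the
 * 0/1 result of the softfloat predicate; CGE and CGT swap their operands
 * because a >= b is the same as b <= a.)
 */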
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}

/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
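/* (FRECPS computes 2 - a * b and FRSQRTS computes (3 - a * b) / 2; negating
 * a first lets both be expressed as a single fused muladd below.)
 */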
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)

float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}

float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}

float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}

float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}

/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
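/* (The _s8 variant below sign-extends each byte lane in place using the
 * branch-free (b ^ 0x80) - 0x80 identity; bit 15 of each lane is pre-set so
 * the subtraction can never borrow into the next lane, and the final XOR
 * with 0x8000 removes that guard bit again or turns it into the sign bits.
 * E.g. lane 0xff: 0x00ff -> 0x007f -> 0x807f -> 0x7fff -> 0xffff == -1.)
 */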
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}

uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}

uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}

uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}

/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
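/* (The result keeps the sign, inverts the exponent field and zeroes the
 * fraction; a zero exponent (zero or denormal input) maps to the maximum
 * finite exponent instead, since the inverted value would be the
 * infinity/NaN encoding.)
 */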
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    }

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}

float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    }

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}

float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
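    /* (This "round to odd" keeps enough information that a subsequent
     * narrowing of the result cannot be thrown off by double rounding.)
     */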
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    r = float32_maybe_silence_nan(r);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}

/* Handle a CPU exception. */
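/* (Exceptions are taken to EL1 here; the vector slot is chosen as in the
 * ARMv8 vector table: base for the same EL using SP_EL0, +0x200 for the
 * same EL using SP_ELx, +0x400 for a lower EL in AArch64 and +0x600 for a
 * lower EL in AArch32, with +0x80 for IRQ and +0x100 for FIQ within each.)
 */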
void aarch64_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong addr = env->cp15.c12_vbar;
    int i;

    if (arm_current_pl(env) == 0) {
        if (env->aarch64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_pl(env));
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
                      env->exception.syndrome);
    }

    env->cp15.esr_el1 = env->exception.syndrome;
    env->cp15.far_el1 = env->exception.vaddress;

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el1);
        break;
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
        break;
    case EXCP_IRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[0] = pstate_read(env);
        env->sp_el[arm_current_pl(env)] = env->xregs[31];
        env->xregs[31] = env->sp_el[1];
        env->elr_el1 = env->pc;
    } else {
        env->banked_spsr[0] = cpsr_read(env);
        if (!env->thumb) {
            env->cp15.esr_el1 |= 1 << 25;
        }
        env->elr_el1 = env->regs[15];

        for (i = 0; i < 15; i++) {
            env->xregs[i] = env->regs[i];
        }

        env->condexec_bits = 0;
    }

    pstate_write(env, PSTATE_DAIF | PSTATE_MODE_EL1h);
    env->aarch64 = 1;

    env->pc = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}