/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "internals.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* For crc32 */

/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}
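
/* Note (informational): the checks above give the architected results
 * rather than relying on the host's '/' operator, e.g. udiv64(x, 0) and
 * sdiv64(x, 0) return 0, and sdiv64(LLONG_MIN, -1) returns LLONG_MIN,
 * which would otherwise be undefined behaviour in C.
 */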

uint64_t HELPER(clz64)(uint64_t x)
{
    return clz64(x);
}

uint64_t HELPER(cls64)(uint64_t x)
{
    return clrsb64(x);
}

uint32_t HELPER(cls32)(uint32_t x)
{
    return clrsb32(x);
}

uint32_t HELPER(clz32)(uint32_t x)
{
    return clz32(x);
}

uint64_t HELPER(rbit64)(uint64_t x)
{
    /* assign the correct byte position */
    x = bswap64(x);

    /* assign the correct nibble position */
    x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);

    /* assign the correct bit position */
    x = ((x & 0x8888888888888888ULL) >> 3)
      | ((x & 0x4444444444444444ULL) >> 1)
      | ((x & 0x2222222222222222ULL) << 1)
      | ((x & 0x1111111111111111ULL) << 3);

    return x;
}
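
/* Worked example (illustrative): rbit64(0x0000000000000001ULL)
 *   after bswap64:      0x0100000000000000
 *   after nibble swap:  0x1000000000000000
 *   after bit swap:     0x8000000000000000
 * i.e. bit 0 of the input becomes bit 63 of the result, as A64 RBIT
 * requires.
 */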

/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}

uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}
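
/* Note (informational): combined with float_rel_to_flags() above, the
 * guest-visible NZCV results of FCMP/FCMPE are:
 *   equal      -> 0110 (Z, C)
 *   less than  -> 1000 (N)
 *   greater    -> 0010 (C)
 *   unordered  -> 0011 (C, V)
 * The cmpes/cmped variants use the signalling compare, so quiet NaN
 * operands also raise the Invalid Operation cumulative flag.
 */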

float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}

float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}
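
/* Illustrative examples of the FMULX special case above (which an
 * ordinary multiply would turn into a default NaN):
 *   mulx(+0.0f, -Inf) == -2.0f   (0xc0000000)
 *   mulx(-0.0f, -Inf) == +2.0f   (0x40000000)
 * i.e. the result is always 2.0 with sign = sign(A) XOR sign(B).
 */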

uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct vfp.regs[] element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8);

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}
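
/* Worked example (illustrative): for numregs = 2 and rn = 31, an index
 * byte of 20 selects byte 4 of the second table register, which is V0
 * because the table wraps around from V31:
 *   elt    = (31 * 2 + (20 >> 3)) % 64 = 64 % 64 = 0  (low half of V0)
 *   bitidx = (20 & 7) * 8 = 32
 * Any index byte >= 16 * numregs leaves the corresponding result byte
 * as it was in 'result' (zero for TBL, the old destination for TBX).
 */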

/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}

/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)

float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}

float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}

float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}

float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}
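
/* Note (informational): since 'a' is negated before the muladd, the
 * non-special-case results above are fused(2 - a * b) for the recpsf
 * helpers and fused((3 - a * b) / 2) for the rsqrtsf helpers, i.e. the
 * Newton-Raphson step terms used by FRECPS and FRSQRTS.
 */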

/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}
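
/* Worked example (illustrative) of the sign-extension trick above for a
 * single lane holding the byte 0x80 (-128):
 *   0x0080 ^ 0x0080 = 0x0000;  0x0000 | 0x8000 = 0x8000;
 *   (0x8000 - 0x0080) ^ 0x8000 = 0x7f80 ^ 0x8000 = 0xff80 = -128 in 16 bits.
 * The same arithmetic happens in all four 16-bit lanes at once.
 */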

uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}

uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}

uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}

/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    }

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}

float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    }

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}
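
/* Worked example (illustrative): FRECPX keeps the sign, clears the
 * fraction and bitwise-inverts the biased exponent. For double
 * precision, 1.5 has exponent field 0x3ff, so frecpx(1.5) is
 * (~0x3ff & 0x7ff) << 52 = 0x400 << 52, i.e. 2.0. Zero and denormal
 * inputs (exponent field 0) map to the near-maximal exponent 0x7fe.
 */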

float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    r = float32_maybe_silence_nan(r);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}
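
/* Note (informational): this round-to-odd behaviour (force the result
 * LSB to 1 whenever the truncating conversion was inexact) is what
 * FCVTXN requires; it allows a double -> half conversion to be done in
 * two steps without introducing a double-rounding error.
 */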

/* 64-bit versions of the CRC helpers. Note that although the operation
 * (and the prototypes of crc32c() and crc32()) means that only the bottom
 * 32 bits of the accumulator and result are used, we pass and return
 * uint64_t for convenience of the generated code. Unlike the 32-bit
 * instruction set versions, val may genuinely have 64 bits of data in it.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
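
/* Note (informational): the A64 CRC32* instructions operate on a raw,
 * unconditioned accumulator, so the XORs above undo the one's-complement
 * conditioning applied by zlib's crc32() (accumulator and output) and by
 * crc32c() (output only), as noted in the comments in each helper. The
 * guest then applies its own initial and final inversion to obtain the
 * standard CRC of a complete message.
 */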

#if !defined(CONFIG_USER_ONLY)

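
/* Informational: the offsets applied to 'addr' below follow the ARMv8
 * vector table layout. Within the table at VBAR_ELx:
 *   +0x000  exception from the current EL while using SP_EL0
 *   +0x200  exception from the current EL while using SP_ELx
 *   +0x400  exception from a lower EL that was using AArch64
 *   +0x600  exception from a lower EL that was using AArch32
 * and within each group, +0x000 synchronous, +0x080 IRQ, +0x100 FIQ
 * (+0x180 SError, which is not generated here).
 */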
/* Handle a CPU exception. */
void aarch64_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = arm_excp_target_el(cs, cs->exception_index);
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    int i;

    if (arm_current_el(env) < new_el) {
        if (env->aarch64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env));
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[0] = cpsr_read(env);
        if (!env->thumb) {
            env->cp15.esr_el[new_el] |= 1 << 25;
        }
        env->elr_el[new_el] = env->regs[15];

        for (i = 0; i < 15; i++) {
            env->xregs[i] = env->regs[i];
        }

        env->condexec_bits = 0;
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
#endif