]> git.proxmox.com Git - mirror_qemu.git/blame - target/hppa/op_helper.c
Merge tag 'pull-maintainer-may24-160524-2' of https://gitlab.com/stsquad/qemu into...
[mirror_qemu.git] / target / hppa / op_helper.c
CommitLineData
61766fe9
RH
1/*
2 * Helpers for HPPA instructions.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
d6ea4236 9 * version 2.1 of the License, or (at your option) any later version.
61766fe9
RH
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
cd617484 21#include "qemu/log.h"
61766fe9
RH
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "exec/helper-proto.h"
96d6407f 25#include "exec/cpu_ldst.h"
49c29d6c 26#include "qemu/timer.h"
23c3d569 27#include "trace.h"
61766fe9 28
/*
 * Raise exception EXCP immediately, with no unwind of guest state.
 * Longjmps back to the cpu loop; does not return.
 */
G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
36
/*
 * Raise exception EXCP from a point inside a translated block: restore
 * guest state from host return address RA before delivering it.
 * Longjmps back to the cpu loop; does not return.
 */
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit_restore(cs, ra);
}
44
25460fc5
RH
45static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
46 uint32_t val, uint32_t mask, uintptr_t ra)
96d6407f 47{
3b916140 48 int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
25460fc5 49 uint32_t old, new, cmp, *haddr;
9f54dc1c
RH
50 void *vaddr;
51
52 vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
53 if (vaddr == NULL) {
54 cpu_loop_exit_atomic(env_cpu(env), ra);
55 }
56 haddr = (uint32_t *)((uintptr_t)vaddr & -4);
57 mask = addr & 1 ? 0x00ffffffu : 0xffffff00u;
96d6407f 58
96d6407f
RH
59 old = *haddr;
60 while (1) {
9f54dc1c 61 new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
d73415a3 62 cmp = qatomic_cmpxchg(haddr, old, new);
96d6407f
RH
63 if (cmp == old) {
64 return;
65 }
66 old = cmp;
67 }
96d6407f
RH
68}
69
25460fc5
RH
70static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
71 uint64_t val, uint64_t mask,
72 int size, uintptr_t ra)
73{
74#ifdef CONFIG_ATOMIC64
3b916140 75 int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
25460fc5
RH
76 uint64_t old, new, cmp, *haddr;
77 void *vaddr;
78
79 vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
80 if (vaddr == NULL) {
81 cpu_loop_exit_atomic(env_cpu(env), ra);
82 }
83 haddr = (uint64_t *)((uintptr_t)vaddr & -8);
84
85 old = *haddr;
86 while (1) {
87 new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
88 cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
89 if (cmp == old) {
90 return;
91 }
92 old = cmp;
93 }
94#else
95 cpu_loop_exit_atomic(env_cpu(env), ra);
96#endif
97}
98
/*
 * Store bytes, "begin" case: store the low-order (4 - (ADDR & 3))
 * bytes of VAL starting at ADDR, up to the next word boundary.
 * When PARALLEL, the 3-byte case must be performed atomically.
 * RA is the host return address for exception unwinding.
 */
static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        /* Last byte of the word: store 1 byte.  */
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 2:
        /* Store the low 2 bytes.  */
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 1:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            /* Byte then halfword; ordering matters for fault behavior.  */
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    default:
        /* Word aligned: store all 4 bytes.  */
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    }
}
123
/*
 * Store doubleword bytes, "begin" case: store the low-order
 * (8 - (ADDR & 7)) bytes of VAL starting at ADDR, up to the next
 * doubleword boundary.  When PARALLEL, the 3/5/6/7-byte cases must
 * be performed atomically.  RA is the host return address for
 * exception unwinding.
 */
static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 6:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 5:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    case 3:
        /* The 5 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 2:
        /* The 6 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
        } else {
            cpu_stw_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 2, val, ra);
        }
        break;
    case 1:
        /* The 7 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 48, ra);
            cpu_stw_data_ra(env, addr + 1, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 3, val, ra);
        }
        break;
    default:
        /* Doubleword aligned: store all 8 bytes.  */
        cpu_stq_data_ra(env, addr, val, ra);
        break;
    }
}
179
/* stby,b for serial (non-MTTCG) execution.  */
void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_b(env, addr, val, false, GETPC());
}

/* stby,b for parallel execution: partial stores must appear atomic.  */
void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_b(env, addr, val, true, GETPC());
}

/* stdby,b for serial (non-MTTCG) execution.  */
void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_b(env, addr, val, false, GETPC());
}

/* stdby,b for parallel execution: partial stores must appear atomic.  */
void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_b(env, addr, val, true, GETPC());
}
201
/*
 * Store bytes, "end" case: store the high-order (ADDR & 3) bytes of
 * VAL into the bytes ending just below ADDR.  An aligned ADDR stores
 * nothing, but still checks write permission and dirties the line.
 * When PARALLEL, the 3-byte case must be performed atomically.
 */
static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
        } else {
            /* Halfword then byte; ordering matters for fault behavior.  */
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 2:
        /* Store the high 2 bytes.  */
        cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        break;
    case 1:
        /* Store the highest byte.  */
        cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty. */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}
228
/*
 * Store doubleword bytes, "end" case: store the high-order (ADDR & 7)
 * bytes of VAL into the bytes ending just below ADDR.  An aligned
 * ADDR stores nothing, but still checks write permission and dirties
 * the line.  When PARALLEL, the 3/5/6/7-byte cases must be atomic.
 */
static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        /* The 7 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 7, val,
                                0xffffffffffffff00ull, 7, ra);
        } else {
            cpu_stl_data_ra(env, addr - 7, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 6:
        /* The 6 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 6, val,
                                0xffffffffffff0000ull, 6, ra);
        } else {
            cpu_stl_data_ra(env, addr - 6, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        }
        break;
    case 5:
        /* The 5 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 5, val,
                                0xffffffffff000000ull, 5, ra);
        } else {
            cpu_stl_data_ra(env, addr - 5, val >> 32, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        }
        break;
    case 4:
        /* Store the high 4 bytes.  */
        cpu_stl_data_ra(env, addr - 4, val >> 32, ra);
        break;
    case 3:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val >> 32, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 48, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 40, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 48, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 56, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty. */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}
289
/* stby,e for serial (non-MTTCG) execution.  */
void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_e(env, addr, val, false, GETPC());
}

/* stby,e for parallel execution: partial stores must appear atomic.  */
void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_e(env, addr, val, true, GETPC());
}

/* stdby,e for serial (non-MTTCG) execution.  */
void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_e(env, addr, val, false, GETPC());
}

/* stdby,e for parallel execution: partial stores must appear atomic.  */
void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_e(env, addr, val, true, GETPC());
}
311
/*
 * Diagnostic for ldcw/ldcd: the architecture leaves the operation
 * undefined when the address is not 16-byte aligned.  Only logs a
 * guest-error message; no exception is raised.
 */
void HELPER(ldc_check)(target_ulong addr)
{
    if (unlikely(addr & 0xf)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Undefined ldc to unaligned address mod 16: "
                      TARGET_FMT_lx "\n", addr);
    }
}
320
/*
 * Implement the PROBE instruction: return nonzero iff an access of
 * type WANT (read/write prot bits) to ADDR would succeed at privilege
 * LEVEL.  May raise a non-access TLB miss so software can install a
 * translation before re-checking.
 */
target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
                           uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
    /* User-mode: defer to the host page-table emulation.  */
    return page_check_range(addr, 1, want);
#else
    int prot, excp, mmu_idx;
    hwaddr phys;

    trace_hppa_tlb_probe(addr, level, want);
    /* Fail if the requested privilege level is higher than current. */
    if (level < (env->iaoq_f & 3)) {
        return 0;
    }

    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot);
    if (excp >= 0) {
        /* No translation: record the faulting address and deliver a
           *non-access* TLB miss where applicable.  */
        cpu_restore_state(env_cpu(env), GETPC());
        hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        helper_excp(env, excp);
    }
    /* Translation exists: succeed iff any wanted permission is granted.  */
    return (want & prot) != 0;
#endif
}
349
/* Read the CR16 interval timer.  */
target_ulong HELPER(read_interval_timer)(void)
{
#ifdef CONFIG_USER_ONLY
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks. */
    return cpu_get_host_ticks();
#else
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the cr16,
       present it with a well-timed clock fixed at 250MHz. */
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
#endif
}
0843563f
RH
363
/*
 * Parallel halfword add, signed saturation: each of the four 16-bit
 * lanes of R1 and R2 is added as a signed value and clamped to
 * [INT16_MIN, INT16_MAX].
 */
uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t result = 0;

    for (int shift = 48; shift >= 0; shift -= 16) {
        int32_t sum = (int32_t)(int16_t)(r1 >> shift)
                    + (int32_t)(int16_t)(r2 >> shift);

        if (sum > INT16_MAX) {
            sum = INT16_MAX;
        } else if (sum < INT16_MIN) {
            sum = INT16_MIN;
        }
        result |= (uint64_t)(uint16_t)sum << shift;
    }
    return result;
}
379
/*
 * Parallel halfword add, unsigned saturation: unsigned lanes of R1
 * plus signed lanes of R2, clamped to [0, UINT16_MAX].
 */
uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2)
{
    uint64_t result = 0;

    for (int shift = 48; shift >= 0; shift -= 16) {
        int32_t sum = (int32_t)(uint16_t)(r1 >> shift)
                    + (int32_t)(int16_t)(r2 >> shift);

        if (sum > UINT16_MAX) {
            sum = UINT16_MAX;
        } else if (sum < 0) {
            sum = 0;
        }
        result |= (uint64_t)(uint16_t)sum << shift;
    }
    return result;
}
10c9e58d 395
/*
 * Parallel halfword average: per 16-bit lane, (a + b) / 2 with the
 * discarded low bit OR-ed back in (round-to-odd).
 */
uint64_t HELPER(havg)(uint64_t r1, uint64_t r2)
{
    uint64_t result = 0;

    for (int shift = 48; shift >= 0; shift -= 16) {
        uint32_t sum = (uint32_t)(uint16_t)(r1 >> shift)
                     + (uint32_t)(uint16_t)(r2 >> shift);
        uint32_t avg = (sum >> 1) | (sum & 1);

        result |= (uint64_t)(uint16_t)avg << shift;
    }
    return result;
}
409
/*
 * Parallel halfword subtract, signed saturation: per 16-bit lane,
 * signed a - b clamped to [INT16_MIN, INT16_MAX].
 */
uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t result = 0;

    for (int shift = 48; shift >= 0; shift -= 16) {
        int32_t diff = (int32_t)(int16_t)(r1 >> shift)
                     - (int32_t)(int16_t)(r2 >> shift);

        if (diff > INT16_MAX) {
            diff = INT16_MAX;
        } else if (diff < INT16_MIN) {
            diff = INT16_MIN;
        }
        result |= (uint64_t)(uint16_t)diff << shift;
    }
    return result;
}
425
/*
 * Parallel halfword subtract, unsigned saturation: unsigned lanes of
 * R1 minus signed lanes of R2, clamped to [0, UINT16_MAX].
 */
uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2)
{
    uint64_t result = 0;

    for (int shift = 48; shift >= 0; shift -= 16) {
        int32_t diff = (int32_t)(uint16_t)(r1 >> shift)
                     - (int32_t)(int16_t)(r2 >> shift);

        if (diff > UINT16_MAX) {
            diff = UINT16_MAX;
        } else if (diff < 0) {
            diff = 0;
        }
        result |= (uint64_t)(uint16_t)diff << shift;
    }
    return result;
}
/*
 * Parallel halfword shift-left-and-add, signed saturation: per 16-bit
 * lane, (a << SH) + b clamped to [INT16_MIN, INT16_MAX].  SH is the
 * same small shift amount for every lane.
 */
uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t result = 0;

    for (int shift = 48; shift >= 0; shift -= 16) {
        int32_t a = (int16_t)(r1 >> shift);
        int32_t b = (int16_t)(r2 >> shift);
        /* Multiply instead of '<<' to stay well-defined for negative a.  */
        int32_t v = a * (1 << sh) + b;

        if (v > INT16_MAX) {
            v = INT16_MAX;
        } else if (v < INT16_MIN) {
            v = INT16_MIN;
        }
        result |= (uint64_t)(uint16_t)v << shift;
    }
    return result;
}
457
/*
 * Parallel halfword shift-right-and-add, signed saturation: per 16-bit
 * lane, (a >> SH) + b (arithmetic shift) clamped to
 * [INT16_MIN, INT16_MAX].  SH is the same shift amount for every lane.
 */
uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t result = 0;

    for (int shift = 48; shift >= 0; shift -= 16) {
        int32_t a = (int16_t)(r1 >> shift);
        int32_t b = (int16_t)(r2 >> shift);
        int32_t v = (a >> sh) + b;

        if (v > INT16_MAX) {
            v = INT16_MAX;
        } else if (v < INT16_MIN) {
            v = INT16_MIN;
        }
        result |= (uint64_t)(uint16_t)v << shift;
    }
    return result;
}