/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif

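/* Illustrative sketch (not part of the original header): the tswap*
 * helpers above convert a value between host and target byte order and
 * collapse to no-ops when both sides share the same endianness.  The
 * helper name below is hypothetical.
 */
static inline uint32_t example_target_to_host32(uint32_t target_value)
{
    /* tswap32() byte-swaps only when BSWAP_NEEDED is defined. */
    return tswap32(target_value);
}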

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian! */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

typedef union {
    floatx80 d;
    struct {
        uint64_t lower;
        uint16_t upper;
    } l;
} CPU_LDoubleU;

typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;

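/* Illustrative sketch (not part of the original header): CPU_DoubleU gives
 * endian-correct access to the two 32-bit halves of a float64, which is
 * what the ldfq/stfq helpers below rely on.  The helper name is
 * hypothetical.
 */
static inline uint32_t example_float64_high_word(float64 d)
{
    CPU_DoubleU u;

    u.d = d;
    return u.l.upper;   /* most significant 32 bits on any host */
}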

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting: bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

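/* Illustrative sketch (not part of the original header): the _p accessors
 * selected above always use the *target* byte order, whatever the host
 * endianness.  The helper name is hypothetical.
 */
static inline uint32_t example_increment_target_word(void *host_ptr)
{
    uint32_t v = ldl_p(host_ptr);   /* 32-bit load, target byte order */

    stl_p(host_ptr, v + 1);         /* 32-bit store, target byte order */
    return v;
}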

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
})
#endif

#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

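/* Illustrative sketch (not part of the original header): converting between
 * guest and host addresses in user-mode emulation.  The helper names are
 * hypothetical.
 */
static inline void *example_guest_to_host(abi_ulong guest_addr)
{
    return g2h(guest_addr);         /* adds GUEST_BASE */
}

static inline abi_ulong example_host_to_guest(void *host_ptr)
{
    /* h2g() subtracts GUEST_BASE and asserts h2g_valid() first. */
    return h2g(host_ptr);
}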

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

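/* Illustrative sketch (not part of the original header): TARGET_PAGE_MASK
 * rounds an address down to its page, TARGET_PAGE_ALIGN rounds it up to the
 * next page boundary.  The helper name is hypothetical.
 */
static inline int example_same_target_page(target_ulong a, target_ulong b)
{
    /* TARGET_PAGE_MASK clears the offset bits within a page. */
    return (a & TARGET_PAGE_MASK) == (b & TARGET_PAGE_MASK);
}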

/* same as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED 0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

#define CPU_DUMP_CODE 0x00010000

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump. */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices. */
#define CPU_INTERRUPT_HARD 0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change. */
#define CPU_INTERRUPT_EXITTB 0x0004

/* Halt the CPU. */
#define CPU_INTERRUPT_HALT 0x0020

/* Debug event pending. */
#define CPU_INTERRUPT_DEBUG 0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines. */
#define CPU_INTERRUPT_TGT_EXT_0 0x0008
#define CPU_INTERRUPT_TGT_EXT_1 0x0010
#define CPU_INTERRUPT_TGT_EXT_2 0x0040
#define CPU_INTERRUPT_TGT_EXT_3 0x0200
#define CPU_INTERRUPT_TGT_EXT_4 0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger. */
#define CPU_INTERRUPT_TGT_INT_0 0x0100
#define CPU_INTERRUPT_TGT_INT_1 0x0400
#define CPU_INTERRUPT_TGT_INT_2 0x0800

/* First unused bit: 0x2000. */

/* The set of all bits that should be masked when single-stepping. */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)

#ifndef CONFIG_USER_ONLY
typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

static inline void cpu_interrupt(CPUState *s, int mask)
{
    cpu_interrupt_handler(s, mask);
}
#else /* USER_ONLY */
void cpu_interrupt(CPUState *env, int mask);
#endif /* USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask);

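/* Illustrative sketch (not part of the original header): a device model
 * would typically assert and later clear an external interrupt like this.
 * The helper names are hypothetical.
 */
static inline void example_raise_hard_irq(CPUState *env)
{
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}

static inline void example_lower_hard_irq(CPUState *env)
{
    cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
}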

void cpu_exit(CPUState *s);

bool qemu_cpu_has_work(CPUState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ 0x01
#define BP_MEM_WRITE 0x02
#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT 0x08
#define BP_GDB 0x10
#define BP_CPU 0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);

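/* Illustrative sketch (not part of the original header): installing a
 * debugger breakpoint at a guest pc and removing it again by reference.
 * The helper name is hypothetical.
 */
static inline int example_probe_breakpoint(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;
    int ret = cpu_breakpoint_insert(env, pc, BP_GDB, &bp);

    if (ret == 0) {
        cpu_breakpoint_remove_by_ref(env, bp);
    }
    return ret;
}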

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
int cpu_is_stopped(CPUState *env);
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

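/* Illustrative sketch (not part of the original header): enabling two log
 * categories at once, as a -d option string would via cpu_str_to_log_mask().
 * The helper name is hypothetical.
 */
static inline void example_enable_exec_logging(void)
{
    cpu_set_log(CPU_LOG_EXEC | CPU_LOG_INT);
}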

#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK (1 << 0)

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    uint32_t flags;
    char idstr[256];
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;
#endif
} RAMBlock;

typedef struct RAMList {
    uint8_t *phys_dirty;
    QLIST_HEAD(ram, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;

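/* Illustrative sketch (not part of the original header): walking the global
 * RAM block list declared above.  The helper name is hypothetical.
 */
static inline ram_addr_t example_total_ram_length(void)
{
    RAMBlock *block;
    ram_addr_t total = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }
    return total;
}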

extern const char *mem_path;
extern int mem_prealloc;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)

#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}

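/* Illustrative sketch (not part of the original header): marking a page
 * dirty for the VGA bitmap only when it is not already marked.  The helper
 * name is hypothetical.
 */
static inline void example_mark_vga_dirty(ram_addr_t addr)
{
    if (!cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
        cpu_physical_memory_set_dirty_flags(addr, VGA_DIRTY_FLAG);
    }
}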

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size);

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size);

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#endif /* CPU_ALL_H */