/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
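
/* Editorial sketch (not part of the original header): how callers use
 * the tswap helpers.  "guest_word" is a hypothetical 32-bit value read
 * from guest memory in guest byte order:
 *
 *     uint32_t host_val = tswap32(guest_word);  // guest order -> host order
 *     tswap32s(&guest_word);                    // in-place variant
 *
 * When host and target endianness agree these compile to no-ops, so
 * callers never need endianness #ifdefs of their own.
 */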

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: the arm FPA format is awkward: the two 32-bit words of a
   double are stored in big-endian order! */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
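
/* Illustrative sketch (an editorial addition, assumed usage):
 * CPU_DoubleU gives endian-correct access to the halves of a float64.
 *
 *     CPU_DoubleU u;
 *     u.d = val;                   // some float64 value
 *     uint32_t hi = u.l.upper;     // most-significant 32 bits
 *     uint32_t lo = u.l.lower;     // least-significant 32 bits
 *
 * The #ifdef above orders the struct members so that .upper and .lower
 * always name the same numeric halves regardless of host layout.
 */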

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
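
/* Editorial decoding of the scheme above, using names defined in this
 * file: ldsw_le_p(p) is a load (ld) of a signed (s) 16-bit (w) value in
 * little-endian (le) byte order through a host pointer; stfq_be_p(p, v)
 * stores (st) a float (f) 64-bit (q) value big-endian (be); and
 * lduw_raw(p), defined further down, is an unsigned (u) 16-bit load
 * through the raw access type.
 */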
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system-wide setting: bad. */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}
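
/* Editorial note: the float accessors above pun through a union rather
 * than casting between float and integer pointers; punning through a
 * union keeps the byte-level conversion well defined and avoids the
 * strict-aliasing problems a pointer cast would invite.
 */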

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *)ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *)ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#else
#define GUEST_BASE 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
})
#endif

#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})
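
/* Illustrative sketch (an editorial addition): translating addresses in
 * user-mode emulation.  "guest_addr" here is a hypothetical abi_ulong
 * taken from a guest syscall argument.
 *
 *     void *host_ptr = g2h(guest_addr);   // host = guest + GUEST_BASE
 *     abi_ulong back = h2g(host_ptr);     // asserts the result fits the
 *                                         // target address space
 */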

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
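
/* Worked example (editorial): assuming TARGET_PAGE_BITS == 12 (4 KiB
 * pages), TARGET_PAGE_SIZE is 0x1000 and TARGET_PAGE_MASK is ~0xfff,
 * so (0x12345 & TARGET_PAGE_MASK) == 0x12000 and
 * TARGET_PAGE_ALIGN(0x12345) == 0x13000.
 */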

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED  0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif
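
/* Illustrative sketch (editorial; the negative-on-failure return
 * convention is an assumption here): guarding a guest buffer in
 * user-mode emulation before touching it.
 *
 *     if (page_check_range(addr, len, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -EFAULT;   // hypothetical error path
 *     }
 */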

CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;

#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10 /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
#define CPU_INTERRUPT_INIT   0x400 /* INIT pending. */
#define CPU_INTERRUPT_SIPI   0x800 /* SIPI pending. */
#define CPU_INTERRUPT_MCE    0x1000 /* (x86 only) MCE pending. */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

void cpu_exit(CPUState *s);

int qemu_cpu_has_work(CPUState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
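
/* Illustrative sketch (editorial; the zero-on-success convention is an
 * assumption here): a debugger stub inserting a breakpoint it owns and
 * later clearing all of its breakpoints at once via the flags mask.
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *         // ... debug session ...
 *     }
 *     cpu_breakpoint_remove_all(env, BP_GDB);
 */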

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
int cpu_is_stopped(CPUState *env);
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
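
/* Illustrative sketch (editorial; assumed usage): wiring a command-line
 * option to the logging interface.  The item names come from
 * cpu_log_items[], which is defined elsewhere.
 *
 *     int mask = cpu_str_to_log_mask(optarg);   // e.g. "in_asm,int"
 *     if (!mask) {
 *         // unknown item: list cpu_log_items[] and reject the option
 *     }
 *     cpu_set_log(mask);
 */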

#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    QLIST_ENTRY(RAMBlock) next;
} RAMBlock;

typedef struct RAMList {
    uint8_t *phys_dirty;
    QLIST_HEAD(ram, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;

extern const char *mem_path;
extern int mem_prealloc;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags. The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO        (1 << 5)
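
/* Editorial reconstruction of the fast path the comment above refers
 * to (a sketch, not the exact softmmu code): because all three flags
 * live in low bits that are zero for a valid, aligned RAM mapping, a
 * single compare against the cached TLB entry handles the common case:
 *
 *     if ((addr & (TARGET_PAGE_MASK | (size - 1))) == tlb_addr) {
 *         // direct RAM access
 *     } else {
 *         // slow path: unaligned, invalid, not-dirty, or MMIO
 *     }
 */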

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}
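
/* Illustrative sketch (editorial; assumed usage): a per-client scan of
 * the dirty bitmap, here for a hypothetical display refresh.  Each
 * client tests and then clears only its own flag, leaving the other
 * clients' bits intact.
 *
 *     if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
 *         redraw_page(addr);    // hypothetical client action
 *         cpu_physical_memory_mask_dirty_range(addr, TARGET_PAGE_SIZE,
 *                                              VGA_DIRTY_FLAG);
 *     }
 */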

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc);

#endif /* CPU_ALL_H */