/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H
22
#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"
/*
 * Host <-> target byte swapping.
 *
 * BSWAP_NEEDED is set when exactly one of the host (WORDS_BIGENDIAN)
 * and target (TARGET_WORDS_BIGENDIAN) is big endian.  In that case the
 * tswap helpers byte-swap; otherwise they are identity operations.
 */
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

/* In-place variants: swap the value behind the pointer. */
static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Same endianness on both sides: all tswaps are no-ops. */
static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

/* target_long-sized variants, selected by TARGET_LONG_SIZE. */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
118
0ca9d380
AJ
119typedef union {
120 float32 f;
121 uint32_t l;
122} CPU_FloatU;
123
832ed0fa
FB
124/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
125 endian ! */
0ac4bd56 126typedef union {
53cd6637 127 float64 d;
9d60cac0
FB
128#if defined(WORDS_BIGENDIAN) \
129 || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
0ac4bd56 130 struct {
0ac4bd56 131 uint32_t upper;
832ed0fa 132 uint32_t lower;
0ac4bd56
FB
133 } l;
134#else
135 struct {
0ac4bd56 136 uint32_t lower;
832ed0fa 137 uint32_t upper;
0ac4bd56
FB
138 } l;
139#endif
140 uint64_t ll;
141} CPU_DoubleU;
142
#ifdef TARGET_SPARC
/* Access to a 128-bit float as four 32-bit or two 64-bit words, ordered
   by host endianness (with the arm-FPA exception, as for CPU_DoubleU). */
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */

/* Load an unsigned byte from host memory. */
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

/* Load a sign-extended byte from host memory. */
static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

/* Store the low byte of v to host memory. */
static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
223/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
224 kernel handles unaligned load/stores may give better results, but
225 it is a system wide setting : bad */
2df3b95d 226#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
5a9fdfec
FB
227
228/* conservative code for little endian unaligned accesses */
2df3b95d 229static inline int lduw_le_p(void *ptr)
5a9fdfec
FB
230{
231#ifdef __powerpc__
232 int val;
233 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
234 return val;
235#else
236 uint8_t *p = ptr;
237 return p[0] | (p[1] << 8);
238#endif
239}
240
2df3b95d 241static inline int ldsw_le_p(void *ptr)
5a9fdfec
FB
242{
243#ifdef __powerpc__
244 int val;
245 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
246 return (int16_t)val;
247#else
248 uint8_t *p = ptr;
249 return (int16_t)(p[0] | (p[1] << 8));
250#endif
251}
252
2df3b95d 253static inline int ldl_le_p(void *ptr)
5a9fdfec
FB
254{
255#ifdef __powerpc__
256 int val;
257 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
258 return val;
259#else
260 uint8_t *p = ptr;
261 return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
262#endif
263}
264
2df3b95d 265static inline uint64_t ldq_le_p(void *ptr)
5a9fdfec
FB
266{
267 uint8_t *p = ptr;
268 uint32_t v1, v2;
f0aca822
FB
269 v1 = ldl_le_p(p);
270 v2 = ldl_le_p(p + 4);
5a9fdfec
FB
271 return v1 | ((uint64_t)v2 << 32);
272}
273
2df3b95d 274static inline void stw_le_p(void *ptr, int v)
5a9fdfec
FB
275{
276#ifdef __powerpc__
277 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
278#else
279 uint8_t *p = ptr;
280 p[0] = v;
281 p[1] = v >> 8;
282#endif
283}
284
2df3b95d 285static inline void stl_le_p(void *ptr, int v)
5a9fdfec
FB
286{
287#ifdef __powerpc__
288 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
289#else
290 uint8_t *p = ptr;
291 p[0] = v;
292 p[1] = v >> 8;
293 p[2] = v >> 16;
294 p[3] = v >> 24;
295#endif
296}
297
2df3b95d 298static inline void stq_le_p(void *ptr, uint64_t v)
5a9fdfec
FB
299{
300 uint8_t *p = ptr;
f0aca822
FB
301 stl_le_p(p, (uint32_t)v);
302 stl_le_p(p + 4, v >> 32);
5a9fdfec
FB
303}
304
305/* float access */
306
2df3b95d 307static inline float32 ldfl_le_p(void *ptr)
5a9fdfec
FB
308{
309 union {
53cd6637 310 float32 f;
5a9fdfec
FB
311 uint32_t i;
312 } u;
2df3b95d 313 u.i = ldl_le_p(ptr);
5a9fdfec
FB
314 return u.f;
315}
316
2df3b95d 317static inline void stfl_le_p(void *ptr, float32 v)
5a9fdfec
FB
318{
319 union {
53cd6637 320 float32 f;
5a9fdfec
FB
321 uint32_t i;
322 } u;
323 u.f = v;
2df3b95d 324 stl_le_p(ptr, u.i);
5a9fdfec
FB
325}
326
2df3b95d 327static inline float64 ldfq_le_p(void *ptr)
5a9fdfec 328{
0ac4bd56 329 CPU_DoubleU u;
2df3b95d
FB
330 u.l.lower = ldl_le_p(ptr);
331 u.l.upper = ldl_le_p(ptr + 4);
5a9fdfec
FB
332 return u.d;
333}
334
2df3b95d 335static inline void stfq_le_p(void *ptr, float64 v)
5a9fdfec 336{
0ac4bd56 337 CPU_DoubleU u;
5a9fdfec 338 u.d = v;
2df3b95d
FB
339 stl_le_p(ptr, u.l.lower);
340 stl_le_p(ptr + 4, u.l.upper);
5a9fdfec
FB
341}
342
2df3b95d
FB
343#else
344
345static inline int lduw_le_p(void *ptr)
346{
347 return *(uint16_t *)ptr;
348}
349
350static inline int ldsw_le_p(void *ptr)
351{
352 return *(int16_t *)ptr;
353}
93ac68bc 354
2df3b95d
FB
355static inline int ldl_le_p(void *ptr)
356{
357 return *(uint32_t *)ptr;
358}
359
360static inline uint64_t ldq_le_p(void *ptr)
361{
362 return *(uint64_t *)ptr;
363}
364
365static inline void stw_le_p(void *ptr, int v)
366{
367 *(uint16_t *)ptr = v;
368}
369
370static inline void stl_le_p(void *ptr, int v)
371{
372 *(uint32_t *)ptr = v;
373}
374
375static inline void stq_le_p(void *ptr, uint64_t v)
376{
377 *(uint64_t *)ptr = v;
378}
379
380/* float access */
381
382static inline float32 ldfl_le_p(void *ptr)
383{
384 return *(float32 *)ptr;
385}
386
387static inline float64 ldfq_le_p(void *ptr)
388{
389 return *(float64 *)ptr;
390}
391
392static inline void stfl_le_p(void *ptr, float32 v)
393{
394 *(float32 *)ptr = v;
395}
396
397static inline void stfq_le_p(void *ptr, float64 v)
398{
399 *(float64 *)ptr = v;
400}
401#endif
402
403#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
404
405static inline int lduw_be_p(void *ptr)
93ac68bc 406{
83d73968
FB
407#if defined(__i386__)
408 int val;
409 asm volatile ("movzwl %1, %0\n"
410 "xchgb %b0, %h0\n"
411 : "=q" (val)
412 : "m" (*(uint16_t *)ptr));
413 return val;
414#else
93ac68bc 415 uint8_t *b = (uint8_t *) ptr;
83d73968
FB
416 return ((b[0] << 8) | b[1]);
417#endif
93ac68bc
FB
418}
419
2df3b95d 420static inline int ldsw_be_p(void *ptr)
93ac68bc 421{
83d73968
FB
422#if defined(__i386__)
423 int val;
424 asm volatile ("movzwl %1, %0\n"
425 "xchgb %b0, %h0\n"
426 : "=q" (val)
427 : "m" (*(uint16_t *)ptr));
428 return (int16_t)val;
429#else
430 uint8_t *b = (uint8_t *) ptr;
431 return (int16_t)((b[0] << 8) | b[1]);
432#endif
93ac68bc
FB
433}
434
2df3b95d 435static inline int ldl_be_p(void *ptr)
93ac68bc 436{
4f2ac237 437#if defined(__i386__) || defined(__x86_64__)
83d73968
FB
438 int val;
439 asm volatile ("movl %1, %0\n"
440 "bswap %0\n"
441 : "=r" (val)
442 : "m" (*(uint32_t *)ptr));
443 return val;
444#else
93ac68bc 445 uint8_t *b = (uint8_t *) ptr;
83d73968
FB
446 return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
447#endif
93ac68bc
FB
448}
449
2df3b95d 450static inline uint64_t ldq_be_p(void *ptr)
93ac68bc
FB
451{
452 uint32_t a,b;
2df3b95d
FB
453 a = ldl_be_p(ptr);
454 b = ldl_be_p(ptr+4);
93ac68bc
FB
455 return (((uint64_t)a<<32)|b);
456}
457
2df3b95d 458static inline void stw_be_p(void *ptr, int v)
93ac68bc 459{
83d73968
FB
460#if defined(__i386__)
461 asm volatile ("xchgb %b0, %h0\n"
462 "movw %w0, %1\n"
463 : "=q" (v)
464 : "m" (*(uint16_t *)ptr), "0" (v));
465#else
93ac68bc
FB
466 uint8_t *d = (uint8_t *) ptr;
467 d[0] = v >> 8;
468 d[1] = v;
83d73968 469#endif
93ac68bc
FB
470}
471
2df3b95d 472static inline void stl_be_p(void *ptr, int v)
93ac68bc 473{
4f2ac237 474#if defined(__i386__) || defined(__x86_64__)
83d73968
FB
475 asm volatile ("bswap %0\n"
476 "movl %0, %1\n"
477 : "=r" (v)
478 : "m" (*(uint32_t *)ptr), "0" (v));
479#else
93ac68bc
FB
480 uint8_t *d = (uint8_t *) ptr;
481 d[0] = v >> 24;
482 d[1] = v >> 16;
483 d[2] = v >> 8;
484 d[3] = v;
83d73968 485#endif
93ac68bc
FB
486}
487
2df3b95d 488static inline void stq_be_p(void *ptr, uint64_t v)
93ac68bc 489{
2df3b95d
FB
490 stl_be_p(ptr, v >> 32);
491 stl_be_p(ptr + 4, v);
0ac4bd56
FB
492}
493
494/* float access */
495
2df3b95d 496static inline float32 ldfl_be_p(void *ptr)
0ac4bd56
FB
497{
498 union {
53cd6637 499 float32 f;
0ac4bd56
FB
500 uint32_t i;
501 } u;
2df3b95d 502 u.i = ldl_be_p(ptr);
0ac4bd56
FB
503 return u.f;
504}
505
2df3b95d 506static inline void stfl_be_p(void *ptr, float32 v)
0ac4bd56
FB
507{
508 union {
53cd6637 509 float32 f;
0ac4bd56
FB
510 uint32_t i;
511 } u;
512 u.f = v;
2df3b95d 513 stl_be_p(ptr, u.i);
0ac4bd56
FB
514}
515
2df3b95d 516static inline float64 ldfq_be_p(void *ptr)
0ac4bd56
FB
517{
518 CPU_DoubleU u;
2df3b95d
FB
519 u.l.upper = ldl_be_p(ptr);
520 u.l.lower = ldl_be_p(ptr + 4);
0ac4bd56
FB
521 return u.d;
522}
523
2df3b95d 524static inline void stfq_be_p(void *ptr, float64 v)
0ac4bd56
FB
525{
526 CPU_DoubleU u;
527 u.d = v;
2df3b95d
FB
528 stl_be_p(ptr, u.l.upper);
529 stl_be_p(ptr + 4, u.l.lower);
93ac68bc
FB
530}
531
5a9fdfec
FB
532#else
533
2df3b95d 534static inline int lduw_be_p(void *ptr)
5a9fdfec
FB
535{
536 return *(uint16_t *)ptr;
537}
538
2df3b95d 539static inline int ldsw_be_p(void *ptr)
5a9fdfec
FB
540{
541 return *(int16_t *)ptr;
542}
543
2df3b95d 544static inline int ldl_be_p(void *ptr)
5a9fdfec
FB
545{
546 return *(uint32_t *)ptr;
547}
548
2df3b95d 549static inline uint64_t ldq_be_p(void *ptr)
5a9fdfec
FB
550{
551 return *(uint64_t *)ptr;
552}
553
2df3b95d 554static inline void stw_be_p(void *ptr, int v)
5a9fdfec
FB
555{
556 *(uint16_t *)ptr = v;
557}
558
2df3b95d 559static inline void stl_be_p(void *ptr, int v)
5a9fdfec
FB
560{
561 *(uint32_t *)ptr = v;
562}
563
2df3b95d 564static inline void stq_be_p(void *ptr, uint64_t v)
5a9fdfec
FB
565{
566 *(uint64_t *)ptr = v;
567}
568
569/* float access */
570
2df3b95d 571static inline float32 ldfl_be_p(void *ptr)
5a9fdfec 572{
53cd6637 573 return *(float32 *)ptr;
5a9fdfec
FB
574}
575
2df3b95d 576static inline float64 ldfq_be_p(void *ptr)
5a9fdfec 577{
53cd6637 578 return *(float64 *)ptr;
5a9fdfec
FB
579}
580
2df3b95d 581static inline void stfl_be_p(void *ptr, float32 v)
5a9fdfec 582{
53cd6637 583 *(float32 *)ptr = v;
5a9fdfec
FB
584}
585
2df3b95d 586static inline void stfq_be_p(void *ptr, float64 v)
5a9fdfec 587{
53cd6637 588 *(float64 *)ptr = v;
5a9fdfec 589}
2df3b95d
FB
590
591#endif

/* target CPU memory access functions: dispatch to the be/le accessor
   matching the target's endianness. */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
619
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)(x - GUEST_BASE))

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

/* raw host accesses, after load/store address translation */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
/* bug fix: the parameter was misspelled "vt" while the expansion used
   "v", so any use of stfq_kernel() failed to compile */
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
700
5a9fdfec
FB
701/* page related stuff */
702
03875444 703#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
5a9fdfec
FB
704#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
705#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
706
53a5960a 707/* ??? These should be the larger of unsigned long and target_ulong. */
83fb7adf
FB
708extern unsigned long qemu_real_host_page_size;
709extern unsigned long qemu_host_page_bits;
710extern unsigned long qemu_host_page_size;
711extern unsigned long qemu_host_page_mask;
5a9fdfec 712
83fb7adf 713#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
5a9fdfec
FB
714
715/* same as PROT_xxx */
716#define PAGE_READ 0x0001
717#define PAGE_WRITE 0x0002
718#define PAGE_EXEC 0x0004
719#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
720#define PAGE_VALID 0x0008
721/* original state of the write flag (used when tracking self-modifying
722 code */
5fafdf24 723#define PAGE_WRITE_ORG 0x0010
50a9569b 724#define PAGE_RESERVED 0x0020
5a9fdfec
FB
725
726void page_dump(FILE *f);
53a5960a
PB
727int page_get_flags(target_ulong address);
728void page_set_flags(target_ulong start, target_ulong end, int flags);
3d97b40b 729int page_check_range(target_ulong start, target_ulong len, int flags);
5a9fdfec 730
c5be9f08
TS
731CPUState *cpu_copy(CPUState *env);
732
5fafdf24 733void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
734 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
735 int flags);
76a66253
JM
736void cpu_dump_statistics (CPUState *env, FILE *f,
737 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
738 int flags);
7fe48483 739
a90b7318 740void cpu_abort(CPUState *env, const char *fmt, ...)
c3d2689d
AZ
741 __attribute__ ((__format__ (__printf__, 2, 3)))
742 __attribute__ ((__noreturn__));
f0aca822 743extern CPUState *first_cpu;
e2f22898 744extern CPUState *cpu_single_env;
9acbed06 745extern int code_copy_enabled;
5a9fdfec 746
9acbed06
FB
747#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
748#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
749#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
ef792f9d 750#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
98699967 751#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
ba3c64fb 752#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
3b21e03e 753#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
6658ffb8 754#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occured. */
0573fbfc 755#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */
474ea849 756#define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */
98699967 757
4690764b 758void cpu_interrupt(CPUState *s, int mask);
b54ad049 759void cpu_reset_interrupt(CPUState *env, int mask);
68a79315 760
6658ffb8
PB
761int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
762int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
2e12669a
FB
763int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
764int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
60897d36
EI
765
766#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
767#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
768#define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */
769
c33a346e 770void cpu_single_step(CPUState *env, int enabled);
d95dc32d 771void cpu_reset(CPUState *s);
4c3a88a2 772
13eb76e0
FB
773/* Return the physical page corresponding to a virtual one. Use it
774 only for debugging because no protection checks are done. Return -1
775 if no page found. */
9b3c35e0 776target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* logging categories, bit mask values for cpu_set_log() */
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

09683d35
FB
801/* IO ports API */
802
803/* NOTE: as these functions may be even used when there is an isa
804 brige on non x86 targets, we always defined them */
805#ifndef NO_CPU_IO_DEFS
806void cpu_outb(CPUState *env, int addr, int val);
807void cpu_outw(CPUState *env, int addr, int val);
808void cpu_outl(CPUState *env, int addr, int val);
809int cpu_inb(CPUState *env, int addr);
810int cpu_inw(CPUState *env, int addr);
811int cpu_inl(CPUState *env, int addr);
812#endif
813
00f82b8a
AJ
814/* address in the RAM (different from a physical address) */
815#ifdef USE_KQEMU
816typedef uint32_t ram_addr_t;
817#else
818typedef unsigned long ram_addr_t;
819#endif
820
33417e70
FB
821/* memory API */
822
00f82b8a 823extern ram_addr_t phys_ram_size;
edf75d59
FB
824extern int phys_ram_fd;
825extern uint8_t *phys_ram_base;
1ccde1cb 826extern uint8_t *phys_ram_dirty;
00f82b8a 827extern ram_addr_t ram_size;
edf75d59
FB
828
829/* physical memory access */
edf75d59
FB
830#define TLB_INVALID_MASK (1 << 3)
831#define IO_MEM_SHIFT 4
98699967 832#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))
edf75d59
FB
833
834#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
835#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
836#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
1ccde1cb 837#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
2a4188a3
FB
838/* acts like a ROM when read and like a device when written. As an
839 exception, the write memory callback gets the ram offset instead of
840 the physical address */
841#define IO_MEM_ROMD (1)
db7b5426 842#define IO_MEM_SUBPAGE (2)
4254fab8 843#define IO_MEM_SUBWIDTH (4)
edf75d59 844
7727994d
FB
845typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
846typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
33417e70 847
5fafdf24 848void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
849 ram_addr_t size,
850 ram_addr_t phys_offset);
851ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
852ram_addr_t qemu_ram_alloc(ram_addr_t);
e9a1ab19 853void qemu_ram_free(ram_addr_t addr);
33417e70
FB
854int cpu_register_io_memory(int io_index,
855 CPUReadMemoryFunc **mem_read,
7727994d
FB
856 CPUWriteMemoryFunc **mem_write,
857 void *opaque);
8926b517
FB
858CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
859CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
33417e70 860
2e12669a 861void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0 862 int len, int is_write);
5fafdf24 863static inline void cpu_physical_memory_read(target_phys_addr_t addr,
2e12669a 864 uint8_t *buf, int len)
8b1f24b0
FB
865{
866 cpu_physical_memory_rw(addr, buf, len, 0);
867}
5fafdf24 868static inline void cpu_physical_memory_write(target_phys_addr_t addr,
2e12669a 869 const uint8_t *buf, int len)
8b1f24b0
FB
870{
871 cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
872}
aab33094
FB
873uint32_t ldub_phys(target_phys_addr_t addr);
874uint32_t lduw_phys(target_phys_addr_t addr);
8df1cd07 875uint32_t ldl_phys(target_phys_addr_t addr);
aab33094 876uint64_t ldq_phys(target_phys_addr_t addr);
8df1cd07 877void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
bc98a7ef 878void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
aab33094
FB
879void stb_phys(target_phys_addr_t addr, uint32_t val);
880void stw_phys(target_phys_addr_t addr, uint32_t val);
8df1cd07 881void stl_phys(target_phys_addr_t addr, uint32_t val);
aab33094 882void stq_phys(target_phys_addr_t addr, uint64_t val);
8b1f24b0 883
5fafdf24 884void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa 885 const uint8_t *buf, int len);
5fafdf24 886int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
8b1f24b0 887 uint8_t *buf, int len, int is_write);
13eb76e0 888
04c504cc
FB
889#define VGA_DIRTY_FLAG 0x01
890#define CODE_DIRTY_FLAG 0x02
0a962c02 891
1ccde1cb 892/* read dirty bit (return 0 or 1) */
04c504cc 893static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
1ccde1cb 894{
0a962c02
FB
895 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
896}
897
5fafdf24 898static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
0a962c02
FB
899 int dirty_flags)
900{
901 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
1ccde1cb
FB
902}
903
04c504cc 904static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
1ccde1cb 905{
0a962c02 906 phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
1ccde1cb
FB
907}
908
04c504cc 909void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 910 int dirty_flags);
04c504cc 911void cpu_tlb_update_dirty(CPUState *env);
1ccde1cb 912
e3db7226
FB
913void dump_exec_info(FILE *f,
914 int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
915
/*******************************************/
/* host CPU ticks (if available) */

/* NOTE: `asm` is not available under strict -std= modes; the
   __asm__/__volatile__ alternate keywords are, so use them throughout. */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    __asm__ __volatile__ ("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbl;
    __asm__ __volatile__ ("mftbu %0" : "=r" (tbl));
    return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    __asm__ __volatile__ ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    __asm__ __volatile__ ("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    __asm__ __volatile__ ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    __asm__ __volatile__ ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    __asm__ __volatile__ ("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    __asm__ __volatile__ ("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    __asm__ __volatile__ ("rd %%tick,%1; srlx %1,32,%0"
                          : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
/* Read the host cycle counter for profiling timestamps. */
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;

extern int64_t dyngen_tb_count1;
extern int64_t dyngen_tb_count;
extern int64_t dyngen_op_count;
extern int64_t dyngen_old_op_count;
extern int64_t dyngen_tcg_del_op_count;
extern int dyngen_op_count_max;
extern int64_t dyngen_code_in_len;
extern int64_t dyngen_code_out_len;
extern int64_t dyngen_interm_time;
extern int64_t dyngen_code_time;
extern int64_t dyngen_restore_count;
extern int64_t dyngen_restore_time;
#endif

#endif /* CPU_ALL_H */