/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
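
/*
 * Illustrative sketch (not part of the original API): when host and target
 * endianness differ, BSWAP_NEEDED is defined above and tswap32() byte-swaps;
 * otherwise it is the identity:
 *
 *     uint32_t v = tswap32(0x11223344);
 *     // v == 0x44332211 when host/target endianness differ,
 *     // v == 0x11223344 when they match
 *
 * tswapl()/tswapls() simply follow the target long size selected above.
 */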
116 | ||
117 | typedef union { | |
118 | float32 f; | |
119 | uint32_t l; | |
120 | } CPU_FloatU; | |
121 | ||
/* NOTE: the arm FPA is horrible, as the two 32 bit words of a double
   are stored in big endian order! */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
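
/*
 * Usage sketch (illustrative only): CPU_DoubleU picks apart the two 32 bit
 * halves of a float64 without pointer type punning; the upper/lower naming
 * is the same regardless of the host layout selected above:
 *
 *     CPU_DoubleU u;
 *     u.d = val;                   // some float64
 *     uint32_t hi = u.l.upper;     // sign, exponent, top mantissa bits
 *     uint32_t lo = u.l.lower;     // low mantissa bits
 *     uint64_t raw = u.ll;         // the whole bit pattern
 */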
140 | ||
141 | #if defined(FLOATX80) | |
142 | typedef union { | |
143 | floatx80 d; | |
144 | struct { | |
145 | uint64_t lower; | |
146 | uint16_t upper; | |
147 | } l; | |
148 | } CPU_LDoubleU; | |
149 | #endif | |
150 | ||
151 | #if defined(CONFIG_SOFTFLOAT) | |
152 | typedef union { | |
153 | float128 q; | |
154 | #if defined(HOST_WORDS_BIGENDIAN) | |
155 | struct { | |
156 | uint32_t upmost; | |
157 | uint32_t upper; | |
158 | uint32_t lower; | |
159 | uint32_t lowest; | |
160 | } l; | |
161 | struct { | |
162 | uint64_t upper; | |
163 | uint64_t lower; | |
164 | } ll; | |
165 | #else | |
166 | struct { | |
167 | uint32_t lowest; | |
168 | uint32_t lower; | |
169 | uint32_t upper; | |
170 | uint32_t upmost; | |
171 | } l; | |
172 | struct { | |
173 | uint64_t lower; | |
174 | uint64_t upper; | |
175 | } ll; | |
176 | #endif | |
177 | } CPU_QuadU; | |
178 | #endif | |
179 | ||
180 | /* CPU memory access without any memory or io remapping */ | |
181 | ||
182 | /* | |
183 | * the generic syntax for the memory accesses is: | |
184 | * | |
185 | * load: ld{type}{sign}{size}{endian}_{access_type}(ptr) | |
186 | * | |
187 | * store: st{type}{size}{endian}_{access_type}(ptr, val) | |
188 | * | |
189 | * type is: | |
190 | * (empty): integer access | |
191 | * f : float access | |
192 | * | |
193 | * sign is: | |
194 | * (empty): for floats or 32 bit size | |
195 | * u : unsigned | |
196 | * s : signed | |
197 | * | |
198 | * size is: | |
199 | * b: 8 bits | |
200 | * w: 16 bits | |
201 | * l: 32 bits | |
202 | * q: 64 bits | |
203 | * | |
204 | * endian is: | |
205 | * (empty): target cpu endianness or 8 bit access | |
206 | * r : reversed target cpu endianness (not implemented yet) | |
 *   be   : big endian
 *   le   : little endian
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
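
/*
 * Decoding example (illustration of the syntax above): ldsw_be_p is "load,
 * signed, 16 bit (w), big endian, raw host pointer", and stl_le_p is
 * "store, 32 bit (l), little endian, raw host pointer":
 *
 *     uint8_t buf[4] = { 0x12, 0x34 };
 *     int v = ldsw_be_p(buf);        // v == 0x1234 on any host
 *
 *     stl_le_p(buf, 0x11223344);     // buf = { 0x44, 0x33, 0x22, 0x11 }
 */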
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting: bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
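
/*
 * Sketch (illustrative, assuming a big endian target): the generic _p
 * accessors resolve at compile time, so target code stays endian-neutral:
 *
 *     uint8_t insn[4];
 *     stl_p(insn, 0x11223344);              // stl_be_p: target byte order
 *     assert(ldl_p(insn) == 0x11223344);    // reads back the same value
 */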
626 | ||
627 | /* MMU memory access macros */ | |
628 | ||
629 | #if defined(CONFIG_USER_ONLY) | |
630 | #include <assert.h> | |
631 | #include "qemu-types.h" | |
632 | ||
633 | /* On some host systems the guest address space is reserved on the host. | |
634 | * This allows the guest address space to be offset to a convenient location. | |
635 | */ | |
636 | #if defined(CONFIG_USE_GUEST_BASE) | |
637 | extern unsigned long guest_base; | |
638 | extern int have_guest_base; | |
639 | extern unsigned long reserved_va; | |
640 | #define GUEST_BASE guest_base | |
641 | #define RESERVED_VA reserved_va | |
642 | #else | |
643 | #define GUEST_BASE 0ul | |
644 | #define RESERVED_VA 0ul | |
645 | #endif | |
646 | ||
647 | /* All direct uses of g2h and h2g need to go away for usermode softmmu. */ | |
648 | #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE)) | |
649 | ||
650 | #if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS | |
651 | #define h2g_valid(x) 1 | |
652 | #else | |
653 | #define h2g_valid(x) ({ \ | |
654 | unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ | |
655 | __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \ | |
656 | }) | |
657 | #endif | |
658 | ||
659 | #define h2g(x) ({ \ | |
660 | unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ | |
661 | /* Check if given address fits target address space */ \ | |
662 | assert(h2g_valid(x)); \ | |
663 | (abi_ulong)__ret; \ | |
664 | }) | |
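
/*
 * Example (user-mode only; the guest address is made up for illustration):
 * g2h() adds the optional guest_base offset, h2g() removes it again and
 * asserts that the result fits the target address space:
 *
 *     abi_ulong ga = 0x10000;
 *     void *hp = g2h(ga);          // host pointer backing the guest address
 *     assert(h2g(hp) == ga);       // round trip
 */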
665 | ||
666 | #define saddr(x) g2h(x) | |
667 | #define laddr(x) g2h(x) | |
668 | ||
669 | #else /* !CONFIG_USER_ONLY */ | |
670 | /* NOTE: we use double casts if pointers and target_ulong have | |
671 | different sizes */ | |
672 | #define saddr(x) (uint8_t *)(long)(x) | |
673 | #define laddr(x) (uint8_t *)(long)(x) | |
674 | #endif | |
675 | ||
676 | #define ldub_raw(p) ldub_p(laddr((p))) | |
677 | #define ldsb_raw(p) ldsb_p(laddr((p))) | |
678 | #define lduw_raw(p) lduw_p(laddr((p))) | |
679 | #define ldsw_raw(p) ldsw_p(laddr((p))) | |
680 | #define ldl_raw(p) ldl_p(laddr((p))) | |
681 | #define ldq_raw(p) ldq_p(laddr((p))) | |
682 | #define ldfl_raw(p) ldfl_p(laddr((p))) | |
683 | #define ldfq_raw(p) ldfq_p(laddr((p))) | |
684 | #define stb_raw(p, v) stb_p(saddr((p)), v) | |
685 | #define stw_raw(p, v) stw_p(saddr((p)), v) | |
686 | #define stl_raw(p, v) stl_p(saddr((p)), v) | |
687 | #define stq_raw(p, v) stq_p(saddr((p)), v) | |
688 | #define stfl_raw(p, v) stfl_p(saddr((p)), v) | |
689 | #define stfq_raw(p, v) stfq_p(saddr((p)), v) | |
690 | ||
691 | ||
692 | #if defined(CONFIG_USER_ONLY) | |
693 | ||
694 | /* if user mode, no other memory access functions */ | |
695 | #define ldub(p) ldub_raw(p) | |
696 | #define ldsb(p) ldsb_raw(p) | |
697 | #define lduw(p) lduw_raw(p) | |
698 | #define ldsw(p) ldsw_raw(p) | |
699 | #define ldl(p) ldl_raw(p) | |
700 | #define ldq(p) ldq_raw(p) | |
701 | #define ldfl(p) ldfl_raw(p) | |
702 | #define ldfq(p) ldfq_raw(p) | |
703 | #define stb(p, v) stb_raw(p, v) | |
704 | #define stw(p, v) stw_raw(p, v) | |
705 | #define stl(p, v) stl_raw(p, v) | |
706 | #define stq(p, v) stq_raw(p, v) | |
707 | #define stfl(p, v) stfl_raw(p, v) | |
708 | #define stfq(p, v) stfq_raw(p, v) | |
709 | ||
710 | #define ldub_code(p) ldub_raw(p) | |
711 | #define ldsb_code(p) ldsb_raw(p) | |
712 | #define lduw_code(p) lduw_raw(p) | |
713 | #define ldsw_code(p) ldsw_raw(p) | |
714 | #define ldl_code(p) ldl_raw(p) | |
715 | #define ldq_code(p) ldq_raw(p) | |
716 | ||
717 | #define ldub_kernel(p) ldub_raw(p) | |
718 | #define ldsb_kernel(p) ldsb_raw(p) | |
719 | #define lduw_kernel(p) lduw_raw(p) | |
720 | #define ldsw_kernel(p) ldsw_raw(p) | |
721 | #define ldl_kernel(p) ldl_raw(p) | |
722 | #define ldq_kernel(p) ldq_raw(p) | |
723 | #define ldfl_kernel(p) ldfl_raw(p) | |
724 | #define ldfq_kernel(p) ldfq_raw(p) | |
725 | #define stb_kernel(p, v) stb_raw(p, v) | |
726 | #define stw_kernel(p, v) stw_raw(p, v) | |
727 | #define stl_kernel(p, v) stl_raw(p, v) | |
728 | #define stq_kernel(p, v) stq_raw(p, v) | |
729 | #define stfl_kernel(p, v) stfl_raw(p, v) | |
#define stfq_kernel(p, v) stfq_raw(p, v)
731 | ||
732 | #endif /* defined(CONFIG_USER_ONLY) */ | |
733 | ||
734 | /* page related stuff */ | |
735 | ||
736 | #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) | |
737 | #define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) | |
738 | #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) | |
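
/*
 * Example (assuming TARGET_PAGE_BITS == 12, i.e. 4 KiB pages):
 *
 *     TARGET_PAGE_SIZE            == 0x1000
 *     TARGET_PAGE_MASK            == ~0xfff
 *     TARGET_PAGE_ALIGN(0x1234)   == 0x2000   (round up to page boundary)
 *     (0x1234 & TARGET_PAGE_MASK) == 0x1000   (round down)
 */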
739 | ||
740 | /* ??? These should be the larger of unsigned long and target_ulong. */ | |
741 | extern unsigned long qemu_real_host_page_size; | |
742 | extern unsigned long qemu_host_page_bits; | |
743 | extern unsigned long qemu_host_page_size; | |
744 | extern unsigned long qemu_host_page_mask; | |
745 | ||
746 | #define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask) | |
747 | ||
748 | /* same as PROT_xxx */ | |
749 | #define PAGE_READ 0x0001 | |
750 | #define PAGE_WRITE 0x0002 | |
751 | #define PAGE_EXEC 0x0004 | |
752 | #define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) | |
753 | #define PAGE_VALID 0x0008 | |
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

#define CPU_DUMP_CODE 0x00010000

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;

#define CPU_INTERRUPT_HARD   0x02   /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04   /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08   /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10   /* Fast interrupt pending.  */
#define CPU_INTERRUPT_HALT   0x20   /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40   /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80   /* Debug event occurred.  */
#define CPU_INTERRUPT_VIRQ   0x100  /* virtual interrupt pending.  */
#define CPU_INTERRUPT_NMI    0x200  /* NMI pending.  */
#define CPU_INTERRUPT_INIT   0x400  /* INIT pending.  */
#define CPU_INTERRUPT_SIPI   0x800  /* SIPI pending.  */
#define CPU_INTERRUPT_MCE    0x1000 /* (x86 only) MCE pending.  */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
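
/*
 * Usage sketch (illustrative): request a hardware interrupt, or withdraw it
 * again before it has been serviced:
 *
 *     cpu_interrupt(env, CPU_INTERRUPT_HARD);
 *     ...
 *     cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
 */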
804 | ||
805 | void cpu_exit(CPUState *s); | |
806 | ||
807 | int qemu_cpu_has_work(CPUState *env); | |
808 | ||
809 | /* Breakpoint/watchpoint flags */ | |
810 | #define BP_MEM_READ 0x01 | |
811 | #define BP_MEM_WRITE 0x02 | |
812 | #define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) | |
813 | #define BP_STOP_BEFORE_ACCESS 0x04 | |
814 | #define BP_WATCHPOINT_HIT 0x08 | |
815 | #define BP_GDB 0x10 | |
816 | #define BP_CPU 0x20 | |
817 | ||
818 | int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, | |
819 | CPUBreakpoint **breakpoint); | |
820 | int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags); | |
821 | void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint); | |
822 | void cpu_breakpoint_remove_all(CPUState *env, int mask); | |
823 | int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, | |
824 | int flags, CPUWatchpoint **watchpoint); | |
825 | int cpu_watchpoint_remove(CPUState *env, target_ulong addr, | |
826 | target_ulong len, int flags); | |
827 | void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint); | |
828 | void cpu_watchpoint_remove_all(CPUState *env, int mask); | |
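
/*
 * Usage sketch (pc and addr are hypothetical values): plant a GDB
 * breakpoint and a 4 byte write watchpoint, then remove them again:
 *
 *     CPUBreakpoint *bp;
 *     cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove_by_ref(env, bp);
 *     cpu_watchpoint_remove(env, addr, 4, BP_MEM_WRITE | BP_GDB);
 */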
829 | ||
830 | #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ | |
831 | #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ | |
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
833 | ||
834 | void cpu_single_step(CPUState *env, int enabled); | |
835 | void cpu_reset(CPUState *s); | |
836 | int cpu_is_stopped(CPUState *env); | |
837 | void run_on_cpu(CPUState *env, void (*func)(void *data), void *data); | |
838 | ||
839 | #define CPU_LOG_TB_OUT_ASM (1 << 0) | |
840 | #define CPU_LOG_TB_IN_ASM (1 << 1) | |
841 | #define CPU_LOG_TB_OP (1 << 2) | |
842 | #define CPU_LOG_TB_OP_OPT (1 << 3) | |
843 | #define CPU_LOG_INT (1 << 4) | |
844 | #define CPU_LOG_EXEC (1 << 5) | |
845 | #define CPU_LOG_PCALL (1 << 6) | |
846 | #define CPU_LOG_IOPORT (1 << 7) | |
847 | #define CPU_LOG_TB_CPU (1 << 8) | |
848 | #define CPU_LOG_RESET (1 << 9) | |
849 | ||
850 | /* define log items */ | |
851 | typedef struct CPULogItem { | |
852 | int mask; | |
853 | const char *name; | |
854 | const char *help; | |
855 | } CPULogItem; | |
856 | ||
857 | extern const CPULogItem cpu_log_items[]; | |
858 | ||
859 | void cpu_set_log(int log_flags); | |
860 | void cpu_set_log_filename(const char *filename); | |
861 | int cpu_str_to_log_mask(const char *str); | |
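
/*
 * Sketch: cpu_str_to_log_mask() turns a comma separated list of item names
 * (the strings in cpu_log_items[], as passed to the -d option) into a mask
 * for cpu_set_log().  Assuming the conventional item names:
 *
 *     cpu_set_log(cpu_str_to_log_mask("in_asm,int"));
 */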
862 | ||
863 | #if !defined(CONFIG_USER_ONLY) | |
864 | ||
865 | /* Return the physical page corresponding to a virtual one. Use it | |
866 | only for debugging because no protection checks are done. Return -1 | |
867 | if no page found. */ | |
868 | target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr); | |
869 | ||
870 | /* memory API */ | |
871 | ||
872 | extern int phys_ram_fd; | |
873 | extern ram_addr_t ram_size; | |
874 | ||
875 | /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ | |
876 | #define RAM_PREALLOC_MASK (1 << 0) | |
877 | ||
878 | typedef struct RAMBlock { | |
879 | uint8_t *host; | |
880 | ram_addr_t offset; | |
881 | ram_addr_t length; | |
882 | uint32_t flags; | |
883 | char idstr[256]; | |
884 | QLIST_ENTRY(RAMBlock) next; | |
885 | #if defined(__linux__) && !defined(TARGET_S390X) | |
886 | int fd; | |
887 | #endif | |
888 | } RAMBlock; | |
889 | ||
890 | typedef struct RAMList { | |
891 | uint8_t *phys_dirty; | |
892 | QLIST_HEAD(ram, RAMBlock) blocks; | |
893 | } RAMList; | |
894 | extern RAMList ram_list; | |
895 | ||
896 | extern const char *mem_path; | |
897 | extern int mem_prealloc; | |
898 | ||
899 | /* physical memory access */ | |
900 | ||
/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available.  */
904 | ||
905 | #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT)) | |
906 | ||
907 | /* Flags stored in the low bits of the TLB virtual address. These are | |
908 | defined so that fast path ram access is all zeros. */ | |
909 | /* Zero if TLB entry is valid. */ | |
910 | #define TLB_INVALID_MASK (1 << 3) | |
911 | /* Set if TLB entry references a clean RAM page. The iotlb entry will | |
912 | contain the page physical address. */ | |
913 | #define TLB_NOTDIRTY (1 << 4) | |
914 | /* Set if TLB entry is an IO callback. */ | |
915 | #define TLB_MMIO (1 << 5) | |
916 | ||
917 | #define VGA_DIRTY_FLAG 0x01 | |
918 | #define CODE_DIRTY_FLAG 0x02 | |
919 | #define MIGRATION_DIRTY_FLAG 0x08 | |
920 | ||
921 | /* read dirty bit (return 0 or 1) */ | |
922 | static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) | |
923 | { | |
924 | return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff; | |
925 | } | |
926 | ||
927 | static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr) | |
928 | { | |
929 | return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS]; | |
930 | } | |
931 | ||
932 | static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, | |
933 | int dirty_flags) | |
934 | { | |
935 | return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags; | |
936 | } | |
937 | ||
938 | static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) | |
939 | { | |
940 | ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff; | |
941 | } | |
942 | ||
943 | static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr, | |
944 | int dirty_flags) | |
945 | { | |
946 | return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags; | |
947 | } | |
948 | ||
949 | static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start, | |
950 | int length, | |
951 | int dirty_flags) | |
952 | { | |
953 | int i, mask, len; | |
954 | uint8_t *p; | |
955 | ||
956 | len = length >> TARGET_PAGE_BITS; | |
957 | mask = ~dirty_flags; | |
958 | p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS); | |
959 | for (i = 0; i < len; i++) { | |
960 | p[i] &= mask; | |
961 | } | |
962 | } | |
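
/*
 * Example (illustrative): clear only the VGA dirty bit over one page,
 * leaving the code and migration dirty bits untouched:
 *
 *     cpu_physical_memory_mask_dirty_range(addr, TARGET_PAGE_SIZE,
 *                                          VGA_DIRTY_FLAG);
 */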
963 | ||
964 | void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, | |
965 | int dirty_flags); | |
966 | void cpu_tlb_update_dirty(CPUState *env); | |
967 | ||
968 | int cpu_physical_memory_set_dirty_tracking(int enable); | |
969 | ||
970 | int cpu_physical_memory_get_dirty_tracking(void); | |
971 | ||
972 | int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, | |
973 | target_phys_addr_t end_addr); | |
974 | ||
975 | int cpu_physical_log_start(target_phys_addr_t start_addr, | |
976 | ram_addr_t size); | |
977 | ||
978 | int cpu_physical_log_stop(target_phys_addr_t start_addr, | |
979 | ram_addr_t size); | |
980 | ||
981 | void dump_exec_info(FILE *f, fprintf_function cpu_fprintf); | |
982 | #endif /* !CONFIG_USER_ONLY */ | |
983 | ||
984 | int cpu_memory_rw_debug(CPUState *env, target_ulong addr, | |
985 | uint8_t *buf, int len, int is_write); | |
986 | ||
987 | #endif /* CPU_ALL_H */ |