/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
20 | #ifndef CPU_ALL_H | |
21 | #define CPU_ALL_H | |
22 | ||
23 | #if defined(__arm__) || defined(__sparc__) | |
24 | #define WORDS_ALIGNED | |
25 | #endif | |
26 | ||
27 | /* some important defines: | |
28 | * | |
29 | * WORDS_ALIGNED : if defined, the host cpu can only make word aligned | |
30 | * memory accesses. | |
31 | * | |
32 | * WORDS_BIGENDIAN : if defined, the host cpu is big endian and | |
33 | * otherwise little endian. | |
34 | * | |
35 | * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet)) | |
36 | * | |
37 | * TARGET_WORDS_BIGENDIAN : same for target cpu | |
38 | */ | |
39 | ||
40 | #include "bswap.h" | |
41 | ||
42 | #if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) | |
43 | #define BSWAP_NEEDED | |
44 | #endif | |
45 | ||
/* tswapNN(): convert an NN-bit value between target and host byte
   order.  When BSWAP_NEEDED is set (host/target endianness differ)
   these byte-swap; otherwise they are identity functions.  The
   tswapNNs() variants swap in place through a pointer. */
#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
108 | ||
/* tswapl/tswapls/bswaptls operate on target_ulong-sized values, so
   they dispatch on the target's long size (32 or 64 bit). */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
118 | ||
119 | /* NOTE: arm FPA is horrible as double 32 bit words are stored in big | |
120 | endian ! */ | |
121 | typedef union { | |
122 | float64 d; | |
123 | #if defined(WORDS_BIGENDIAN) \ | |
124 | || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) | |
125 | struct { | |
126 | uint32_t upper; | |
127 | uint32_t lower; | |
128 | } l; | |
129 | #else | |
130 | struct { | |
131 | uint32_t lower; | |
132 | uint32_t upper; | |
133 | } l; | |
134 | #endif | |
135 | uint64_t ll; | |
136 | } CPU_DoubleU; | |
137 | ||
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */

/* Load an unsigned byte from host memory. */
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

/* Load a sign-extended byte from host memory. */
static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

/* Store the low byte of v to host memory. */
static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
187 | ||
188 | /* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the | |
189 | kernel handles unaligned load/stores may give better results, but | |
190 | it is a system wide setting : bad */ | |
191 | #if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)) | |
192 | ||
193 | /* conservative code for little endian unaligned accesses */ | |
194 | static inline int lduw_p(void *ptr) | |
195 | { | |
196 | #ifdef __powerpc__ | |
197 | int val; | |
198 | __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); | |
199 | return val; | |
200 | #else | |
201 | uint8_t *p = ptr; | |
202 | return p[0] | (p[1] << 8); | |
203 | #endif | |
204 | } | |
205 | ||
206 | static inline int ldsw_p(void *ptr) | |
207 | { | |
208 | #ifdef __powerpc__ | |
209 | int val; | |
210 | __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); | |
211 | return (int16_t)val; | |
212 | #else | |
213 | uint8_t *p = ptr; | |
214 | return (int16_t)(p[0] | (p[1] << 8)); | |
215 | #endif | |
216 | } | |
217 | ||
218 | static inline int ldl_p(void *ptr) | |
219 | { | |
220 | #ifdef __powerpc__ | |
221 | int val; | |
222 | __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr)); | |
223 | return val; | |
224 | #else | |
225 | uint8_t *p = ptr; | |
226 | return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); | |
227 | #endif | |
228 | } | |
229 | ||
230 | static inline uint64_t ldq_p(void *ptr) | |
231 | { | |
232 | uint8_t *p = ptr; | |
233 | uint32_t v1, v2; | |
234 | v1 = ldl_p(p); | |
235 | v2 = ldl_p(p + 4); | |
236 | return v1 | ((uint64_t)v2 << 32); | |
237 | } | |
238 | ||
239 | static inline void stw_p(void *ptr, int v) | |
240 | { | |
241 | #ifdef __powerpc__ | |
242 | __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr)); | |
243 | #else | |
244 | uint8_t *p = ptr; | |
245 | p[0] = v; | |
246 | p[1] = v >> 8; | |
247 | #endif | |
248 | } | |
249 | ||
250 | static inline void stl_p(void *ptr, int v) | |
251 | { | |
252 | #ifdef __powerpc__ | |
253 | __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr)); | |
254 | #else | |
255 | uint8_t *p = ptr; | |
256 | p[0] = v; | |
257 | p[1] = v >> 8; | |
258 | p[2] = v >> 16; | |
259 | p[3] = v >> 24; | |
260 | #endif | |
261 | } | |
262 | ||
263 | static inline void stq_p(void *ptr, uint64_t v) | |
264 | { | |
265 | uint8_t *p = ptr; | |
266 | stl_p(p, (uint32_t)v); | |
267 | stl_p(p + 4, v >> 32); | |
268 | } | |
269 | ||
270 | /* float access */ | |
271 | ||
272 | static inline float32 ldfl_p(void *ptr) | |
273 | { | |
274 | union { | |
275 | float32 f; | |
276 | uint32_t i; | |
277 | } u; | |
278 | u.i = ldl_p(ptr); | |
279 | return u.f; | |
280 | } | |
281 | ||
282 | static inline void stfl_p(void *ptr, float32 v) | |
283 | { | |
284 | union { | |
285 | float32 f; | |
286 | uint32_t i; | |
287 | } u; | |
288 | u.f = v; | |
289 | stl_p(ptr, u.i); | |
290 | } | |
291 | ||
292 | static inline float64 ldfq_p(void *ptr) | |
293 | { | |
294 | CPU_DoubleU u; | |
295 | u.l.lower = ldl_p(ptr); | |
296 | u.l.upper = ldl_p(ptr + 4); | |
297 | return u.d; | |
298 | } | |
299 | ||
300 | static inline void stfq_p(void *ptr, float64 v) | |
301 | { | |
302 | CPU_DoubleU u; | |
303 | u.d = v; | |
304 | stl_p(ptr, u.l.lower); | |
305 | stl_p(ptr + 4, u.l.upper); | |
306 | } | |
307 | ||
308 | #elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)) | |
309 | ||
310 | static inline int lduw_p(void *ptr) | |
311 | { | |
312 | #if defined(__i386__) | |
313 | int val; | |
314 | asm volatile ("movzwl %1, %0\n" | |
315 | "xchgb %b0, %h0\n" | |
316 | : "=q" (val) | |
317 | : "m" (*(uint16_t *)ptr)); | |
318 | return val; | |
319 | #else | |
320 | uint8_t *b = (uint8_t *) ptr; | |
321 | return ((b[0] << 8) | b[1]); | |
322 | #endif | |
323 | } | |
324 | ||
325 | static inline int ldsw_p(void *ptr) | |
326 | { | |
327 | #if defined(__i386__) | |
328 | int val; | |
329 | asm volatile ("movzwl %1, %0\n" | |
330 | "xchgb %b0, %h0\n" | |
331 | : "=q" (val) | |
332 | : "m" (*(uint16_t *)ptr)); | |
333 | return (int16_t)val; | |
334 | #else | |
335 | uint8_t *b = (uint8_t *) ptr; | |
336 | return (int16_t)((b[0] << 8) | b[1]); | |
337 | #endif | |
338 | } | |
339 | ||
340 | static inline int ldl_p(void *ptr) | |
341 | { | |
342 | #if defined(__i386__) || defined(__x86_64__) | |
343 | int val; | |
344 | asm volatile ("movl %1, %0\n" | |
345 | "bswap %0\n" | |
346 | : "=r" (val) | |
347 | : "m" (*(uint32_t *)ptr)); | |
348 | return val; | |
349 | #else | |
350 | uint8_t *b = (uint8_t *) ptr; | |
351 | return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]; | |
352 | #endif | |
353 | } | |
354 | ||
355 | static inline uint64_t ldq_p(void *ptr) | |
356 | { | |
357 | uint32_t a,b; | |
358 | a = ldl_p(ptr); | |
359 | b = ldl_p(ptr+4); | |
360 | return (((uint64_t)a<<32)|b); | |
361 | } | |
362 | ||
363 | static inline void stw_p(void *ptr, int v) | |
364 | { | |
365 | #if defined(__i386__) | |
366 | asm volatile ("xchgb %b0, %h0\n" | |
367 | "movw %w0, %1\n" | |
368 | : "=q" (v) | |
369 | : "m" (*(uint16_t *)ptr), "0" (v)); | |
370 | #else | |
371 | uint8_t *d = (uint8_t *) ptr; | |
372 | d[0] = v >> 8; | |
373 | d[1] = v; | |
374 | #endif | |
375 | } | |
376 | ||
377 | static inline void stl_p(void *ptr, int v) | |
378 | { | |
379 | #if defined(__i386__) || defined(__x86_64__) | |
380 | asm volatile ("bswap %0\n" | |
381 | "movl %0, %1\n" | |
382 | : "=r" (v) | |
383 | : "m" (*(uint32_t *)ptr), "0" (v)); | |
384 | #else | |
385 | uint8_t *d = (uint8_t *) ptr; | |
386 | d[0] = v >> 24; | |
387 | d[1] = v >> 16; | |
388 | d[2] = v >> 8; | |
389 | d[3] = v; | |
390 | #endif | |
391 | } | |
392 | ||
393 | static inline void stq_p(void *ptr, uint64_t v) | |
394 | { | |
395 | stl_p(ptr, v >> 32); | |
396 | stl_p(ptr + 4, v); | |
397 | } | |
398 | ||
399 | /* float access */ | |
400 | ||
401 | static inline float32 ldfl_p(void *ptr) | |
402 | { | |
403 | union { | |
404 | float32 f; | |
405 | uint32_t i; | |
406 | } u; | |
407 | u.i = ldl_p(ptr); | |
408 | return u.f; | |
409 | } | |
410 | ||
411 | static inline void stfl_p(void *ptr, float32 v) | |
412 | { | |
413 | union { | |
414 | float32 f; | |
415 | uint32_t i; | |
416 | } u; | |
417 | u.f = v; | |
418 | stl_p(ptr, u.i); | |
419 | } | |
420 | ||
421 | static inline float64 ldfq_p(void *ptr) | |
422 | { | |
423 | CPU_DoubleU u; | |
424 | u.l.upper = ldl_p(ptr); | |
425 | u.l.lower = ldl_p(ptr + 4); | |
426 | return u.d; | |
427 | } | |
428 | ||
429 | static inline void stfq_p(void *ptr, float64 v) | |
430 | { | |
431 | CPU_DoubleU u; | |
432 | u.d = v; | |
433 | stl_p(ptr, u.l.upper); | |
434 | stl_p(ptr + 4, u.l.lower); | |
435 | } | |
436 | ||
437 | #else | |
438 | ||
439 | static inline int lduw_p(void *ptr) | |
440 | { | |
441 | return *(uint16_t *)ptr; | |
442 | } | |
443 | ||
444 | static inline int ldsw_p(void *ptr) | |
445 | { | |
446 | return *(int16_t *)ptr; | |
447 | } | |
448 | ||
449 | static inline int ldl_p(void *ptr) | |
450 | { | |
451 | return *(uint32_t *)ptr; | |
452 | } | |
453 | ||
454 | static inline uint64_t ldq_p(void *ptr) | |
455 | { | |
456 | return *(uint64_t *)ptr; | |
457 | } | |
458 | ||
459 | static inline void stw_p(void *ptr, int v) | |
460 | { | |
461 | *(uint16_t *)ptr = v; | |
462 | } | |
463 | ||
464 | static inline void stl_p(void *ptr, int v) | |
465 | { | |
466 | *(uint32_t *)ptr = v; | |
467 | } | |
468 | ||
469 | static inline void stq_p(void *ptr, uint64_t v) | |
470 | { | |
471 | *(uint64_t *)ptr = v; | |
472 | } | |
473 | ||
474 | /* float access */ | |
475 | ||
476 | static inline float32 ldfl_p(void *ptr) | |
477 | { | |
478 | return *(float32 *)ptr; | |
479 | } | |
480 | ||
481 | static inline float64 ldfq_p(void *ptr) | |
482 | { | |
483 | return *(float64 *)ptr; | |
484 | } | |
485 | ||
486 | static inline void stfl_p(void *ptr, float32 v) | |
487 | { | |
488 | *(float32 *)ptr = v; | |
489 | } | |
490 | ||
491 | static inline void stfq_p(void *ptr, float64 v) | |
492 | { | |
493 | *(float64 *)ptr = v; | |
494 | } | |
495 | #endif | |
496 | ||
/* MMU memory access macros */

/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define ldub_raw(p) ldub_p((uint8_t *)(long)(p))
#define ldsb_raw(p) ldsb_p((uint8_t *)(long)(p))
#define lduw_raw(p) lduw_p((uint8_t *)(long)(p))
#define ldsw_raw(p) ldsw_p((uint8_t *)(long)(p))
#define ldl_raw(p) ldl_p((uint8_t *)(long)(p))
#define ldq_raw(p) ldq_p((uint8_t *)(long)(p))
#define ldfl_raw(p) ldfl_p((uint8_t *)(long)(p))
#define ldfq_raw(p) ldfq_p((uint8_t *)(long)(p))
#define stb_raw(p, v) stb_p((uint8_t *)(long)(p), v)
#define stw_raw(p, v) stw_p((uint8_t *)(long)(p), v)
#define stl_raw(p, v) stl_p((uint8_t *)(long)(p), v)
#define stq_raw(p, v) stq_p((uint8_t *)(long)(p), v)
#define stfl_raw(p, v) stfl_p((uint8_t *)(long)(p), v)
#define stfq_raw(p, v) stfq_p((uint8_t *)(long)(p), v)
515 | ||
516 | ||
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
/* bug fix: parameter was misspelled 'vt' while the expansion used 'v',
   so any use of stfq_kernel() failed to compile */
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
556 | ||
/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* host page geometry, computed at startup (host pages may be larger
   than target pages) */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010

void page_dump(FILE *f);
int page_get_flags(unsigned long address);
void page_set_flags(unsigned long start, unsigned long end, int flags);
void page_unprotect_range(uint8_t *data, unsigned long data_size);
584 | ||
585 | #define SINGLE_CPU_DEFINES | |
586 | #ifdef SINGLE_CPU_DEFINES | |
587 | ||
588 | #if defined(TARGET_I386) | |
589 | ||
590 | #define CPUState CPUX86State | |
591 | #define cpu_init cpu_x86_init | |
592 | #define cpu_exec cpu_x86_exec | |
593 | #define cpu_gen_code cpu_x86_gen_code | |
594 | #define cpu_signal_handler cpu_x86_signal_handler | |
595 | ||
596 | #elif defined(TARGET_ARM) | |
597 | ||
598 | #define CPUState CPUARMState | |
599 | #define cpu_init cpu_arm_init | |
600 | #define cpu_exec cpu_arm_exec | |
601 | #define cpu_gen_code cpu_arm_gen_code | |
602 | #define cpu_signal_handler cpu_arm_signal_handler | |
603 | ||
604 | #elif defined(TARGET_SPARC) | |
605 | ||
606 | #define CPUState CPUSPARCState | |
607 | #define cpu_init cpu_sparc_init | |
608 | #define cpu_exec cpu_sparc_exec | |
609 | #define cpu_gen_code cpu_sparc_gen_code | |
610 | #define cpu_signal_handler cpu_sparc_signal_handler | |
611 | ||
612 | #elif defined(TARGET_PPC) | |
613 | ||
614 | #define CPUState CPUPPCState | |
615 | #define cpu_init cpu_ppc_init | |
616 | #define cpu_exec cpu_ppc_exec | |
617 | #define cpu_gen_code cpu_ppc_gen_code | |
618 | #define cpu_signal_handler cpu_ppc_signal_handler | |
619 | ||
620 | #elif defined(TARGET_MIPS) | |
621 | #define CPUState CPUMIPSState | |
622 | #define cpu_init cpu_mips_init | |
623 | #define cpu_exec cpu_mips_exec | |
624 | #define cpu_gen_code cpu_mips_gen_code | |
625 | #define cpu_signal_handler cpu_mips_signal_handler | |
626 | ||
627 | #else | |
628 | ||
629 | #error unsupported target CPU | |
630 | ||
631 | #endif | |
632 | ||
633 | #endif /* SINGLE_CPU_DEFINES */ | |
634 | ||
635 | void cpu_dump_state(CPUState *env, FILE *f, | |
636 | int (*cpu_fprintf)(FILE *f, const char *fmt, ...), | |
637 | int flags); | |
638 | ||
639 | void cpu_abort(CPUState *env, const char *fmt, ...); | |
640 | extern CPUState *cpu_single_env; | |
641 | extern int code_copy_enabled; | |
642 | ||
643 | #define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */ | |
644 | #define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */ | |
645 | #define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */ | |
646 | #define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */ | |
647 | void cpu_interrupt(CPUState *s, int mask); | |
648 | void cpu_reset_interrupt(CPUState *env, int mask); | |
649 | ||
650 | int cpu_breakpoint_insert(CPUState *env, target_ulong pc); | |
651 | int cpu_breakpoint_remove(CPUState *env, target_ulong pc); | |
652 | void cpu_single_step(CPUState *env, int enabled); | |
653 | void cpu_reset(CPUState *s); | |
654 | ||
655 | /* Return the physical page corresponding to a virtual one. Use it | |
656 | only for debugging because no protection checks are done. Return -1 | |
657 | if no page found. */ | |
658 | target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr); | |
659 | ||
660 | #define CPU_LOG_TB_OUT_ASM (1 << 0) | |
661 | #define CPU_LOG_TB_IN_ASM (1 << 1) | |
662 | #define CPU_LOG_TB_OP (1 << 2) | |
663 | #define CPU_LOG_TB_OP_OPT (1 << 3) | |
664 | #define CPU_LOG_INT (1 << 4) | |
665 | #define CPU_LOG_EXEC (1 << 5) | |
666 | #define CPU_LOG_PCALL (1 << 6) | |
667 | #define CPU_LOG_IOPORT (1 << 7) | |
668 | #define CPU_LOG_TB_CPU (1 << 8) | |
669 | ||
670 | /* define log items */ | |
671 | typedef struct CPULogItem { | |
672 | int mask; | |
673 | const char *name; | |
674 | const char *help; | |
675 | } CPULogItem; | |
676 | ||
677 | extern CPULogItem cpu_log_items[]; | |
678 | ||
679 | void cpu_set_log(int log_flags); | |
680 | void cpu_set_log_filename(const char *filename); | |
681 | int cpu_str_to_log_mask(const char *str); | |
682 | ||
683 | /* IO ports API */ | |
684 | ||
685 | /* NOTE: as these functions may be even used when there is an isa | |
686 | brige on non x86 targets, we always defined them */ | |
687 | #ifndef NO_CPU_IO_DEFS | |
688 | void cpu_outb(CPUState *env, int addr, int val); | |
689 | void cpu_outw(CPUState *env, int addr, int val); | |
690 | void cpu_outl(CPUState *env, int addr, int val); | |
691 | int cpu_inb(CPUState *env, int addr); | |
692 | int cpu_inw(CPUState *env, int addr); | |
693 | int cpu_inl(CPUState *env, int addr); | |
694 | #endif | |
695 | ||
696 | /* memory API */ | |
697 | ||
698 | extern int phys_ram_size; | |
699 | extern int phys_ram_fd; | |
700 | extern uint8_t *phys_ram_base; | |
701 | extern uint8_t *phys_ram_dirty; | |
702 | ||
703 | /* physical memory access */ | |
704 | #define IO_MEM_NB_ENTRIES 256 | |
705 | #define TLB_INVALID_MASK (1 << 3) | |
706 | #define IO_MEM_SHIFT 4 | |
707 | ||
708 | #define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */ | |
709 | #define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */ | |
710 | #define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT) | |
711 | #define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */ | |
712 | ||
713 | typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value); | |
714 | typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr); | |
715 | ||
716 | void cpu_register_physical_memory(target_phys_addr_t start_addr, | |
717 | unsigned long size, | |
718 | unsigned long phys_offset); | |
719 | int cpu_register_io_memory(int io_index, | |
720 | CPUReadMemoryFunc **mem_read, | |
721 | CPUWriteMemoryFunc **mem_write, | |
722 | void *opaque); | |
723 | CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index); | |
724 | CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index); | |
725 | ||
726 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | |
727 | int len, int is_write); | |
728 | static inline void cpu_physical_memory_read(target_phys_addr_t addr, | |
729 | uint8_t *buf, int len) | |
730 | { | |
731 | cpu_physical_memory_rw(addr, buf, len, 0); | |
732 | } | |
733 | static inline void cpu_physical_memory_write(target_phys_addr_t addr, | |
734 | const uint8_t *buf, int len) | |
735 | { | |
736 | cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1); | |
737 | } | |
738 | uint32_t ldub_phys(target_phys_addr_t addr); | |
739 | uint32_t lduw_phys(target_phys_addr_t addr); | |
740 | uint32_t ldl_phys(target_phys_addr_t addr); | |
741 | uint64_t ldq_phys(target_phys_addr_t addr); | |
742 | void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val); | |
743 | void stb_phys(target_phys_addr_t addr, uint32_t val); | |
744 | void stw_phys(target_phys_addr_t addr, uint32_t val); | |
745 | void stl_phys(target_phys_addr_t addr, uint32_t val); | |
746 | void stq_phys(target_phys_addr_t addr, uint64_t val); | |
747 | ||
748 | int cpu_memory_rw_debug(CPUState *env, target_ulong addr, | |
749 | uint8_t *buf, int len, int is_write); | |
750 | ||
751 | #define VGA_DIRTY_FLAG 0x01 | |
752 | #define CODE_DIRTY_FLAG 0x02 | |
753 | ||
754 | /* read dirty bit (return 0 or 1) */ | |
755 | static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) | |
756 | { | |
757 | return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff; | |
758 | } | |
759 | ||
760 | static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, | |
761 | int dirty_flags) | |
762 | { | |
763 | return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags; | |
764 | } | |
765 | ||
766 | static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) | |
767 | { | |
768 | phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff; | |
769 | } | |
770 | ||
771 | void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, | |
772 | int dirty_flags); | |
773 | void cpu_tlb_update_dirty(CPUState *env); | |
774 | ||
775 | void dump_exec_info(FILE *f, | |
776 | int (*cpu_fprintf)(FILE *f, const char *fmt, ...)); | |
777 | ||
778 | #endif /* CPU_ALL_H */ |