/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"
#include "softfloat.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
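
/* Illustrative sketch (not part of the original source): tswap32() converts
   a value between host and target byte order, so it byte-swaps only when
   BSWAP_NEEDED is defined and is otherwise the identity. Either way it is
   its own inverse:

       uint32_t host_val = 0x12345678;
       uint32_t tgt_val = tswap32(host_val);  // 0x78563412 if BSWAP_NEEDED,
                                              // 0x12345678 otherwise
       // applying tswap32() again always restores host_val

   tswapl()/tswapls() are the same operations sized to the target "long"
   (target_ulong), selected by TARGET_LONG_SIZE above. */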

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: the ARM FPA format is awkward: the two 32-bit halves of a double
   are stored most-significant word first, even on a little-endian CPU! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
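
/* Usage sketch (illustrative): CPU_DoubleU lets code pick apart a float64
   through a union instead of pointer casts. On typical hosts, `upper` holds
   the most significant 32 bits of the 64-bit pattern:

       CPU_DoubleU u;
       u.ll = 0x3ff0000000000000ULL;   // IEEE 754 bit pattern of 1.0
       // u.l.upper == 0x3ff00000 and u.l.lower == 0 on typical hosts;
       // the ARM FPA case in the #if above is the word-swapped exception
*/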

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
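
/* For example, reading a few names against the grammar above: ldsw_raw(p)
   is a signed 16-bit load in target endianness from host memory, and
   stfq_kernel(p, v) is a 64-bit float store through the soft MMU in kernel
   mode. The *_p() helpers defined below are the host-pointer primitives the
   raw/user/kernel accessors are built from; a small sketch of their effect:

       uint8_t buf[4];
       stl_le_p(buf, 0x11223344);   // buf[] = {0x44, 0x33, 0x22, 0x11}
       int v = ldl_be_p(buf);       // same bytes read big endian: 0x44332211
*/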
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system-wide setting: bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)(x - GUEST_BASE))

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
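
/* Sketch of the mapping arithmetic, assuming the hypothetical non-zero
   GUEST_BASE of 0x20000000 shown commented out above:

       void *hp = g2h(0x1000);      // host pointer 0x20001000
       target_ulong ga = h2g(hp);   // back to guest address 0x1000

   With the default GUEST_BASE of 0, both macros reduce to plain casts. */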

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
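
/* Worked example, assuming TARGET_PAGE_BITS == 12 (4 KiB pages; the real
   value is target-specific):

       TARGET_PAGE_SIZE          -> 0x1000
       TARGET_PAGE_MASK          -> ~0xfff
       TARGET_PAGE_ALIGN(0x1234) -> 0x2000  (round up to the next page)
       0x1234 & TARGET_PAGE_MASK -> 0x1000  (round down to the page start)
*/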

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED 0x0020
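
/* Usage sketch (illustrative, error handling omitted): testing whether a
   guest page is mapped and writable before patching it:

       int flags = page_get_flags(addr);
       if ((flags & (PAGE_VALID | PAGE_WRITE)) == (PAGE_VALID | PAGE_WRITE)) {
           // safe to write to the page
       }
*/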

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics (CPUState *env, FILE *f,
                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                          int flags);

void cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)))
    __attribute__ ((__noreturn__));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int code_copy_enabled;

#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
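
/* Typical pattern (sketch): a device model asserts a hardware interrupt on
   a CPU and deasserts it once the source has been serviced:

       cpu_interrupt(env, CPU_INTERRUPT_HARD);
       // ... later, when the interrupt line drops ...
       cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
*/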

int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);

#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM (1 << 1)
#define CPU_LOG_TB_OP (1 << 2)
#define CPU_LOG_TB_OP_OPT (1 << 3)
#define CPU_LOG_INT (1 << 4)
#define CPU_LOG_EXEC (1 << 5)
#define CPU_LOG_PCALL (1 << 6)
#define CPU_LOG_IOPORT (1 << 7)
#define CPU_LOG_TB_CPU (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may be used even when there is an ISA
   bridge on non-x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* address in the RAM (different from a physical address) */
#ifdef USE_KQEMU
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif

/* memory API */

extern ram_addr_t phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;

/* physical memory access */
#define TLB_INVALID_MASK (1 << 3)
#define IO_MEM_SHIFT 4
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
/* acts like a ROM when read and like a device when written. As an
   exception, the write memory callback gets the ram offset instead of
   the physical address */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
#define IO_MEM_SUBWIDTH (4)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset);
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(ram_addr_t);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
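
/* Registration sketch for a hypothetical device (everything named mydev_*
   is invented for illustration). The read/write tables conventionally hold
   three handlers, indexed by access size: 0 = byte, 1 = word, 2 = long;
   passing 0 as io_index asks for a fresh slot to be allocated:

       static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
       {
           return 0xff;   // stub: all reads return 0xff
       }
       static void mydev_write(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
       {
           // stub: ignore writes
       }
       static CPUReadMemoryFunc *mydev_reads[3] = {
           mydev_read, mydev_read, mydev_read,
       };
       static CPUWriteMemoryFunc *mydev_writes[3] = {
           mydev_write, mydev_write, mydev_write,
       };

       int io = cpu_register_io_memory(0, mydev_reads, mydev_writes, NULL);
       cpu_register_physical_memory(0xf0000000, TARGET_PAGE_SIZE, io);
*/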

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
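
/* Usage sketch: a display device can poll the VGA dirty bit to find
   framebuffer pages touched since the last refresh, then clear it:

       if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
           // redraw the lines backed by this page
       }
       cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG);
*/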

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/*******************************************/
/* host CPU ticks (if available) */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbl;
    asm volatile("mftbu %0" : "=r" (tbl));
    return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    /* the original used an out-of-range "%1" operand here; with a single
       output operand the template must reference "%0" */
    __asm__ __volatile__("rdhwr %0, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;

extern int64_t dyngen_tb_count1;
extern int64_t dyngen_tb_count;
extern int64_t dyngen_op_count;
extern int64_t dyngen_old_op_count;
extern int64_t dyngen_tcg_del_op_count;
extern int dyngen_op_count_max;
extern int64_t dyngen_code_in_len;
extern int64_t dyngen_code_out_len;
extern int64_t dyngen_interm_time;
extern int64_t dyngen_code_time;
extern int64_t dyngen_restore_count;
extern int64_t dyngen_restore_time;
#endif

#endif /* CPU_ALL_H */