/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

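/* Convert the guest instructions executed since the last call into
 * nanoseconds, fold the result into diff_clk, and sleep off the excess
 * once the guest clock is more than VM_CLOCK_ADVANCE ahead of the host.
 * An interrupted nanosleep() only subtracts the time actually slept,
 * so any remaining debt carries over to the next call. */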
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

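/* Warn when the guest falls behind real time, at most once every
 * MAX_DELAY_PRINT_RATE ns and at most MAX_NB_PRINTS times in total.
 * The reporting threshold follows the observed lag with hysteresis
 * (THRESHOLD_REDUCE) so a steady delay is reported once, not on
 * every call. */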
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

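/* Seed the SyncClocks state from the current guest/host clock difference
 * and the current instruction counter, and record the extreme values seen
 * so far in the global max_delay/max_advance counters. */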
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

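/* tcg_qemu_tb_exec() returns the address of the last TB that ran, with
 * the reason for stopping encoded in the low TB_EXIT_MASK bits: index 0
 * or 1 for an exit through one of the TB's goto_tb jump slots, while
 * values above TB_EXIT_IDX1 mean the TB was not entered at all, so the
 * guest PC has to be re-synchronized below. */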
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

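/* Note: the TB created below lives only for this call: it is generated
 * with a cycle budget of max_cycles, executed once, then invalidated
 * and freed again before returning. */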
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

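/* TB lookup is two-level: tb_find_fast() probes the per-CPU virtual-pc
 * jump cache, and on a miss this slow path walks the hash chain keyed
 * by the physical address of the code, moving any match to the head of
 * the chain. If no TB matches, one is translated on the spot. */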
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

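/* The (pc, cs_base, flags) triple summarizes the part of the CPU state
 * the translator depends on; a cached TB may only be reused while all
 * three still match. */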
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

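/* The main loop is two nested for(;;) loops: the outer one is the
 * sigsetjmp() target that fields exceptions raised via cpu_loop_exit(),
 * while the inner one services pending interrupts, looks up (and chains)
 * TBs and runs them. The function returns the exception index that ended
 * the loop, e.g. EXCP_DEBUG or EXCP_INTERRUPT. */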
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#elif defined(TARGET_TRICORE)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_TRICORE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }

#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
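                    /* The icount budget is kept in two parts: up to 0xffff
                     * instructions in the u16.low decrementer, which the
                     * generated code tests directly, and the remainder in
                     * icount_extra. The case below tops the decrementer up
                     * from icount_extra, or drains the last instructions
                     * through cpu_exec_nocache(). */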
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_TRICORE)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}