From: Richard Henderson
Date: Wed, 13 Sep 2023 22:46:45 +0000 (-0700)
Subject: accel/tcg: Remove cpu_neg()
X-Git-Tag: v8.2.0~136^2~30
X-Git-Url: https://git.proxmox.com/?a=commitdiff_plain;h=a953b5fa153fc384d2631cda8213efe983501609;p=mirror_qemu.git

accel/tcg: Remove cpu_neg()

Now that CPUNegativeOffsetState is part of CPUState, we can reference
it directly.

Reviewed-by: Philippe Mathieu-Daudé
Signed-off-by: Richard Henderson
---

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index de60fdb612..3a18dd84ef 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -73,7 +73,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
         return;
     }
 
-    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
     sc->last_cpu_icount = cpu_icount;
 
@@ -124,7 +124,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
     sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
     sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
     sc->last_cpu_icount
-        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     if (sc->diff_clk < max_delay) {
         max_delay = sc->diff_clk;
     }
@@ -717,7 +717,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
         if (replay_has_exception()
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
             /* Execute just one insn to trigger exception pending in the log */
             cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                 | CF_LAST_IO | CF_NOIRQ | 1;
@@ -807,7 +807,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
@@ -898,7 +898,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     if (unlikely(qatomic_read(&cpu->exit_request))
         || (icount_enabled()
             && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
@@ -923,7 +923,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }
 
     *last_tb = NULL;
-    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
+    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
     if (insns_left < 0) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit
@@ -942,7 +942,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
         icount_update(cpu);
         /* Refill decrementer and continue execution. */
         insns_left = MIN(0xffff, cpu->icount_budget);
-        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+        cpu->neg.icount_decr.u16.low = insns_left;
         cpu->icount_extra = cpu->icount_budget - insns_left;
 
         /*
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
index 0af643b217..b25685fb71 100644
--- a/accel/tcg/tcg-accel-ops-icount.c
+++ b/accel/tcg/tcg-accel-ops-icount.c
@@ -111,14 +111,14 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
      * each vCPU execution. However u16.high can be raised
      * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
      */
-    g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
+    g_assert(cpu->neg.icount_decr.u16.low == 0);
     g_assert(cpu->icount_extra == 0);
 
     replay_mutex_lock();
 
     cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
     insns_left = MIN(0xffff, cpu->icount_budget);
-    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+    cpu->neg.icount_decr.u16.low = insns_left;
     cpu->icount_extra = cpu->icount_budget - insns_left;
 
     if (cpu->icount_budget == 0) {
@@ -138,7 +138,7 @@ void icount_process_data(CPUState *cpu)
     icount_update(cpu);
 
     /* Reset the counters */
-    cpu_neg(cpu)->icount_decr.u16.low = 0;
+    cpu->neg.icount_decr.u16.low = 0;
     cpu->icount_extra = 0;
     cpu->icount_budget = 0;
 
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 3973591508..d885cc1d3c 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -91,7 +91,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
     } else {
-        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
     }
 }
 
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 098d99b5d4..ed0c7ef7ce 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -214,7 +214,7 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
          * Reset the cycle counter to the start of the block and
          * shift if to the number of actually executed instructions.
          */
-        cpu_neg(cpu)->icount_decr.u16.low += insns_left;
+        cpu->neg.icount_decr.u16.low += insns_left;
     }
 
     cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
@@ -623,7 +623,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cc = CPU_GET_CLASS(cpu);
     if (cc->tcg_ops->io_recompile_replay_branch
         && cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
-        cpu_neg(cpu)->icount_decr.u16.low++;
+        cpu->neg.icount_decr.u16.low++;
         n = 2;
     }
 
@@ -779,7 +779,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
     cpu->interrupt_request |= mask;
-    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }
 
 #endif /* CONFIG_USER_ONLY */
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 25cd63e1b8..114ec70359 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -471,17 +471,6 @@ static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
     return &env_cpu(env)->neg;
 }
 
-/**
- * cpu_neg(cpu)
- * @cpu: The generic CPUState
- *
- * Return the CPUNegativeOffsetState associated with the cpu.
- */
-static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
-{
-    return &cpu->neg;
-}
-
 /**
  * env_tlb(env)
  * @env: The architecture environment
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index b2f5cd4c2a..2e4d337805 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -71,7 +71,7 @@ G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
  */
 static inline bool cpu_loop_exit_requested(CPUState *cpu)
 {
-    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
 }
 
 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
diff --git a/softmmu/icount.c b/softmmu/icount.c
index 956d15e343..4527bfbd6e 100644
--- a/softmmu/icount.c
+++ b/softmmu/icount.c
@@ -75,7 +75,7 @@ static void icount_enable_adaptive(void)
 static int64_t icount_get_executed(CPUState *cpu)
 {
     return (cpu->icount_budget -
-            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
+            (cpu->neg.icount_decr.u16.low + cpu->icount_extra));
 }
 
 /*
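
Note (not part of the patch): the change is purely mechanical. The removed cpu_neg() helper in include/exec/cpu-all.h simply returned &cpu->neg, so every call site can dereference the embedded field directly. The sketch below illustrates the two access styles with simplified stand-in structs (the *Sketch types and cpu_neg_sketch() are hypothetical, not the real QEMU definitions, which carry more fields and a u32/u16 union in IcountDecr).

/* gcc -o neg-sketch neg-sketch.c && ./neg-sketch */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t u32;                        /* stand-in for IcountDecr */
} IcountDecrSketch;

typedef struct {
    IcountDecrSketch icount_decr;        /* stand-in for CPUNegativeOffsetState */
} CPUNegativeOffsetStateSketch;

typedef struct {
    CPUNegativeOffsetStateSketch neg;    /* now embedded directly in CPUState */
    int64_t icount_extra;
} CPUStateSketch;

/* Old style: an accessor that returns a pointer to the embedded struct,
 * as the removed cpu_neg() did. */
static inline CPUNegativeOffsetStateSketch *cpu_neg_sketch(CPUStateSketch *cpu)
{
    return &cpu->neg;
}

int main(void)
{
    CPUStateSketch state = { .neg.icount_decr.u32 = 0xffff, .icount_extra = 0 };
    CPUStateSketch *cpu = &state;

    /* Before: go through the accessor. */
    uint32_t via_helper = cpu_neg_sketch(cpu)->icount_decr.u32;

    /* After: reference the embedded field directly, as the patch does. */
    uint32_t direct = cpu->neg.icount_decr.u32;

    printf("via helper: %u, direct: %u\n", via_helper, direct);
    return 0;
}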