#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
+#include "exec/tb-flush.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
}
}
-void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
-{
- CPUState *cpu;
- size_t full = 0, part = 0, elide = 0;
-
- CPU_FOREACH(cpu) {
- full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
- part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
- elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
- }
- *pfull = full;
- *ppart = part;
- *pelide = elide;
-}
-
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
uint16_t asked = data.host_int;
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
}
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
flags &= ~TLB_NOTDIRTY;
}
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
flags &= ~TLB_NOTDIRTY;
}
/* Handle clean RAM pages. */
if (flags & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ notdirty_write(env_cpu(env), addr, size, full, retaddr);
}
}
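The four hunks above apply the same fix at different call sites: notdirty_write() was previously told the access touched a single byte regardless of the real store width, so a multi-byte write into a clean (notdirty) page could under-report how much memory was dirtied. The first three call sites may be reached with size == 0 (probe-style accesses) and fall back to 1 byte; the last one always has a real access size and passes it through unchanged. A minimal sketch of that convention, with a hypothetical helper name that is not part of this patch:

/*
 * Hypothetical helper (illustrative only): dirty tracking must cover
 * at least one byte, even for size == 0 probe accesses.
 */
static inline int notdirty_access_size(int size)
{
    return size == 0 ? 1 : size;
}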
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
type, ra, mr, mr_offset);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
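For reference, a byte-at-a-time sketch of the big-endian accumulation the comment above describes (illustrative only; the real int_ld_mmio_beN dispatches reads through the MemoryRegion rather than walking a byte array):

#include <stdint.h>

/* Fold @size bytes into @ret_be in big-endian order: earlier
 * (lower-addressed) bytes end up more significant. */
static uint64_t ld_beN_sketch(uint64_t ret_be, const uint8_t *bytes,
                              unsigned size)
{
    for (unsigned i = 0; i < size; i++) {
        ret_be = (ret_be << 8) | bytes[i];
    }
    return ret_be;
}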
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset);
b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return int128_make128(b, a);
}
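Note the argument order in the return above: int128_make128() takes (lo, hi). Since this is a big-endian load, "a" accumulates the first size - 8 (most significant) bytes and "b" the final 8 bytes, so b supplies the low half of the result and a the high half.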
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Store @size bytes at @addr, which is memory-mapped i/o.
* The bytes to store are extracted in little-endian order from @val_le;
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
ra, mr, mr_offset);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
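And the mirror-image sketch for stores (illustrative only; the real int_st_mmio_leN dispatches writes through the MemoryRegion rather than filling a byte array): bytes are peeled off the low end of @val_le, and the shifted-down remainder is returned for callers that split one access across two calls.

#include <stdint.h>

/* Extract @size bytes from @val_le in little-endian order and
 * return the leftover (shifted-down) bytes. */
static uint64_t st_leN_sketch(uint64_t val_le, uint8_t *out, unsigned size)
{
    for (unsigned i = 0; i < size; i++) {
        out[i] = val_le & 0xff;
        val_le >>= 8;
    }
    return val_le;
}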
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
mmu_idx, ra, mr, mr_offset);
ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
size - 8, mmu_idx, ra, mr, mr_offset + 8);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
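Here the little-endian layout determines the split: int128_getlo() yields the 8 least-significant bytes, which belong at the lower address @addr, and int128_gethi() the remainder at @addr + 8; the leftover bytes returned by the second call are what the caller gets back.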
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
- if (!HAVE_ATOMIC128_RW) {
+ if (!HAVE_CMPXCHG128) {
cpu_loop_exit_atomic(cpu, ra);
}
return store_whole_le16(p->haddr, p->size, val_le);
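The guard changes because store_whole_le16() performs a partial store into an aligned 16-byte unit as a compare-and-swap loop, so what it actually depends on is 16-byte cmpxchg support, not plain 16-byte atomic loads and stores. A sketch of that shape of loop, using compiler builtins rather than QEMU's wrappers (illustrative only, not QEMU's implementation):

#include <stdbool.h>

/* Atomically replace the bytes selected by @mask inside the aligned
 * 16-byte unit at @p with the corresponding bytes of @val, leaving
 * the other bytes untouched. */
static void store_masked16_sketch(__uint128_t *p, __uint128_t val,
                                  __uint128_t mask)
{
    __uint128_t old = __atomic_load_n(p, __ATOMIC_RELAXED);
    __uint128_t upd;

    do {
        upd = (old & ~mask) | (val & mask);
    } while (!__atomic_compare_exchange_n(p, &old, upd, false,
                                          __ATOMIC_SEQ_CST,
                                          __ATOMIC_SEQ_CST));
}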