F: docs/sphinx/dbus*
F: docs/sphinx/fakedbusdoc.py
F: tests/qtest/dbus*
+F: scripts/xml-preprocess*
Seccomp
M: Daniel P. Berrangé <berrange@redhat.com>
@if test -f meson-private/coredata.dat; then \
./config.status --skip-meson; \
else \
- ./config.status && touch build.ninja.stamp; \
+ ./config.status; \
fi
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
# works), but otherwise never needs to be updated
+
meson-private/coredata.dat: meson.stamp
meson.stamp: config-host.mak
@touch meson.stamp
-# 3. ensure generated build files are up-to-date
+# 3. ensure meson-generated build files are up-to-date
ifneq ($(NINJA),)
Makefile.ninja: build.ninja
endif
ifneq ($(MESON),)
-# A separate rule is needed for Makefile dependencies to avoid -n
+# The path to meson always points to pyvenv/bin/meson, but the absolute
+# paths could change. In that case, force a regeneration of build.ninja.
+# Note that this invocation of $(NINJA), just like when Make rebuilds
+# Makefiles, does not include -n.
build.ninja: build.ninja.stamp
$(build-files):
build.ninja.stamp: meson.stamp $(build-files)
- $(MESON) setup --reconfigure $(SRC_PATH) && touch $@
+ @if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \
+ $(NINJA) build.ninja; \
+ else \
+ echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \
+ $(MESON) setup --reconfigure $(SRC_PATH); \
+ fi && echo "$(MESON)" > $@
Makefile.mtest: build.ninja scripts/mtest2make.py
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
"kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
kvm_arch_vcpu_id(cpu));
}
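+    /*
+     * Cache the stats fd for the lifetime of the vCPU.  Together with
+     * pread(), this lets the stats be queried from any thread, so the
+     * queries below no longer need run_on_cpu().
+     */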
+ cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+
err:
return ret;
}
/* Read stats header */
kvm_stats_header = &descriptors->kvm_stats_header;
- ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
+ ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
if (ret != sizeof(*kvm_stats_header)) {
error_setg(errp, "KVM stats: failed to read stats header: "
"expected %zu actual %zu",
}
static void query_stats(StatsResultList **result, StatsTarget target,
- strList *names, int stats_fd, Error **errp)
+ strList *names, int stats_fd, CPUState *cpu,
+ Error **errp)
{
struct kvm_stats_desc *kvm_stats_desc;
struct kvm_stats_header *kvm_stats_header;
break;
case STATS_TARGET_VCPU:
add_stats_entry(result, STATS_PROVIDER_KVM,
- current_cpu->parent_obj.canonical_path,
+ cpu->parent_obj.canonical_path,
stats_list);
break;
default:
add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}
-static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
+static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
- StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
- int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+ int stats_fd = cpu->kvm_vcpu_stats_fd;
Error *local_err = NULL;
if (stats_fd == -1) {
return;
}
query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
- kvm_stats_args->names, stats_fd, kvm_stats_args->errp);
- close(stats_fd);
+ kvm_stats_args->names, stats_fd, cpu,
+ kvm_stats_args->errp);
}
-static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
+static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
- StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
- int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+ int stats_fd = cpu->kvm_vcpu_stats_fd;
Error *local_err = NULL;
if (stats_fd == -1) {
}
query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
kvm_stats_args->errp);
- close(stats_fd);
}
static void query_stats_cb(StatsResultList **result, StatsTarget target,
error_setg_errno(errp, errno, "KVM stats: ioctl failed");
return;
}
- query_stats(result, target, names, stats_fd, errp);
+ query_stats(result, target, names, stats_fd, NULL, errp);
close(stats_fd);
break;
}
if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
continue;
}
- run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
+ query_stats_vcpu(cpu, &stats_args);
}
break;
}
if (first_cpu) {
stats_args.result.schema = result;
stats_args.errp = errp;
- run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
+ query_stats_schema_vcpu(first_cpu, &stats_args);
}
}
{
}
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
+void tlb_set_dirty(CPUState *cpu, vaddr vaddr)
{
}
{
}
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
g_assert_not_reached();
}
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
/* Handled by hardware accelerator. */
}
struct tb_desc {
- target_ulong pc;
- target_ulong cs_base;
+ vaddr pc;
+ uint64_t cs_base;
CPUArchState *env;
tb_page_addr_t page_addr0;
uint32_t flags;
return true;
} else {
tb_page_addr_t phys_page1;
- target_ulong virt_page1;
+ vaddr virt_page1;
/*
* We know that the first page matched, and an otherwise valid TB
return false;
}
-static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags,
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
+ uint64_t cs_base, uint32_t flags,
uint32_t cflags)
{
tb_page_addr_t phys_pc;
}
/* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
- target_ulong cs_base,
- uint32_t flags, uint32_t cflags)
+static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
+ uint64_t cs_base, uint32_t flags,
+ uint32_t cflags)
{
TranslationBlock *tb;
CPUJumpCache *jc;
return tb;
}
-static void log_cpu_exec(target_ulong pc, CPUState *cpu,
+static void log_cpu_exec(vaddr pc, CPUState *cpu,
const TranslationBlock *tb)
{
if (qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [%08" PRIx64
- "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
+ "/%" VADDR_PRIx "/%08x/%08x] %s\n",
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));
}
}
-static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
+static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
uint32_t *cflags)
{
CPUBreakpoint *bp;
return false;
}
-static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
uint32_t *cflags)
{
return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
- target_ulong cs_base, pc;
+ vaddr pc;
+ uint64_t cs_base;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
cc->set_pc(cpu, last_tb->pc);
}
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
- target_ulong pc = log_pc(cpu, last_tb);
+ vaddr pc = log_pc(cpu, last_tb);
if (qemu_log_in_addr_range(pc)) {
- qemu_log("Stopped execution of TB chain before %p ["
- TARGET_FMT_lx "] %s\n",
+ qemu_log("Stopped execution of TB chain before %p [%"
+ VADDR_PRIx "] %s\n",
last_tb->tc.ptr, pc, lookup_symbol(pc));
}
}
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
- target_ulong cs_base, pc;
+ vaddr pc;
+ uint64_t cs_base;
uint32_t flags, cflags;
int tb_exit;
}
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
- target_ulong pc,
- TranslationBlock **last_tb, int *tb_exit)
+ vaddr pc, TranslationBlock **last_tb,
+ int *tb_exit)
{
int32_t insns_left;
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb;
- target_ulong cs_base, pc;
+ vaddr pc;
+ uint64_t cs_base;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
desc->window_max_entries = max_entries;
}
-static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
+static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
CPUJumpCache *jc = cpu->tb_jmp_cache;
int i, i0;
}
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
- target_ulong page, target_ulong mask)
+ vaddr page, vaddr mask)
{
page &= mask;
mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
page == (tlb_entry->addr_code & mask));
}
-static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
- target_ulong page)
+static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
{
return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}
/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
- target_ulong page,
- target_ulong mask)
+ vaddr page,
+ vaddr mask)
{
if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
memset(tlb_entry, -1, sizeof(*tlb_entry));
return false;
}
-static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
- target_ulong page)
+static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
{
return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}
/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
- target_ulong page,
- target_ulong mask)
+ vaddr page,
+ vaddr mask)
{
CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
int k;
}
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
- target_ulong page)
+ vaddr page)
{
tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}
-static void tlb_flush_page_locked(CPUArchState *env, int midx,
- target_ulong page)
+static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
{
- target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
- target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
+ vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
+ vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;
/* Check if we need to flush due to large pages. */
if ((page & lp_mask) == lp_addr) {
- tlb_debug("forcing full flush midx %d ("
- TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ tlb_debug("forcing full flush midx %d (%"
+ VADDR_PRIx "/%" VADDR_PRIx ")\n",
midx, lp_addr, lp_mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
} else {
* at @addr from the tlbs indicated by @idxmap from @cpu.
*/
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
CPUArchState *env = cpu->env_ptr;
assert_cpu_is_self(cpu);
- tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
+ tlb_debug("page addr: %" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
run_on_cpu_data data)
{
- target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
- target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
+ vaddr addr_and_idxmap = data.target_ptr;
+ vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
typedef struct {
- target_ulong addr;
+ vaddr addr;
uint16_t idxmap;
} TLBFlushPageByMMUIdxData;
g_free(d);
}
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
- tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
+ tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
}
}
-void tlb_flush_page(CPUState *cpu, target_ulong addr)
+void tlb_flush_page(CPUState *cpu, vaddr addr)
{
tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
uint16_t idxmap)
{
- tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
+ tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}
-void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
- tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
+ tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
}
}
-void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
static void tlb_flush_range_locked(CPUArchState *env, int midx,
- target_ulong addr, target_ulong len,
+ vaddr addr, vaddr len,
unsigned bits)
{
CPUTLBDesc *d = &env_tlb(env)->d[midx];
CPUTLBDescFast *f = &env_tlb(env)->f[midx];
- target_ulong mask = MAKE_64BIT_MASK(0, bits);
+ vaddr mask = MAKE_64BIT_MASK(0, bits);
/*
* If @bits is smaller than the tlb size, there may be multiple entries
*/
if (mask < f->mask || len > f->mask) {
tlb_debug("forcing full flush midx %d ("
- TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+ "%" VADDR_PRIx "/%" VADDR_PRIx "+%" VADDR_PRIx ")\n",
midx, addr, mask, len);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return;
*/
if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
tlb_debug("forcing full flush midx %d ("
- TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ "%" VADDR_PRIx "/%" VADDR_PRIx ")\n",
midx, d->large_page_addr, d->large_page_mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return;
}
- for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
- target_ulong page = addr + i;
+ for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
+ vaddr page = addr + i;
CPUTLBEntry *entry = tlb_entry(env, midx, page);
if (tlb_flush_entry_mask_locked(entry, page, mask)) {
}
typedef struct {
- target_ulong addr;
- target_ulong len;
+ vaddr addr;
+ vaddr len;
uint16_t idxmap;
uint16_t bits;
} TLBFlushRangeData;
assert_cpu_is_self(cpu);
- tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+ tlb_debug("range: %" VADDR_PRIx "/%u+%" VADDR_PRIx " mmu_map:0x%x\n",
d.addr, d.bits, d.len, d.idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock);
* overlap the flushed pages, which includes the previous.
*/
d.addr -= TARGET_PAGE_SIZE;
- for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+ for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
tb_jmp_cache_clear_page(cpu, d.addr);
d.addr += TARGET_PAGE_SIZE;
}
g_free(d);
}
-void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
- target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
unsigned bits)
{
TLBFlushRangeData d;
}
}
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits)
{
tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
- target_ulong addr, target_ulong len,
+ vaddr addr, vaddr len,
uint16_t idxmap, unsigned bits)
{
TLBFlushRangeData d;
}
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
- target_ulong addr,
- uint16_t idxmap, unsigned bits)
+ vaddr addr, uint16_t idxmap,
+ unsigned bits)
{
tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
idxmap, bits);
}
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
- target_ulong addr,
- target_ulong len,
+ vaddr addr,
+ vaddr len,
uint16_t idxmap,
unsigned bits)
{
}
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap,
unsigned bits)
{
/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
- target_ulong vaddr)
+ vaddr addr)
{
- if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
- tlb_entry->addr_write = vaddr;
+ if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
+ tlb_entry->addr_write = addr;
}
}
/* update the TLB corresponding to virtual page vaddr
so that it is no longer dirty */
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
+void tlb_set_dirty(CPUState *cpu, vaddr addr)
{
CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
- vaddr &= TARGET_PAGE_MASK;
+ addr &= TARGET_PAGE_MASK;
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
+ tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
}
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
int k;
for (k = 0; k < CPU_VTLB_SIZE; k++) {
- tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
+ tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
}
}
qemu_spin_unlock(&env_tlb(env)->c.lock);
/* Our TLB does not support large pages, so remember the area covered by
large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
- target_ulong vaddr, target_ulong size)
+ vaddr addr, uint64_t size)
{
- target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
- target_ulong lp_mask = ~(size - 1);
+ vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
+ vaddr lp_mask = ~(size - 1);
- if (lp_addr == (target_ulong)-1) {
+ if (lp_addr == (vaddr)-1) {
/* No previous large page. */
- lp_addr = vaddr;
+ lp_addr = addr;
} else {
/* Extend the existing region to include the new page.
This is a compromise between unnecessary flushes and
the cost of maintaining a full variable size TLB. */
lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
- while (((lp_addr ^ vaddr) & lp_mask) != 0) {
+ while (((lp_addr ^ addr) & lp_mask) != 0) {
lp_mask <<= 1;
}
}
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
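+/*
+ * Set the comparator and, if needed, the slow-path flags for one access
+ * type of a TLB entry.  Fast-path flags are folded into the comparator
+ * address; remaining slow-path flags set TLB_FORCE_SLOW on the comparator
+ * and are stored in the CPUTLBEntryFull for the slow path to consult.
+ * When the access type is not enabled, the comparator is set to -1 so it
+ * can never match.
+ */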
+static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
+ target_ulong address, int flags,
+ MMUAccessType access_type, bool enable)
+{
+ if (enable) {
+ address |= flags & TLB_FLAGS_MASK;
+ flags &= TLB_SLOW_FLAGS_MASK;
+ if (flags) {
+ address |= TLB_FORCE_SLOW;
+ }
+ } else {
+ address = -1;
+ flags = 0;
+ }
+ ent->addr_idx[access_type] = address;
+ full->slow_flags[access_type] = flags;
+}
+
/*
* Add a new TLB entry. At most one entry for a given virtual address
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
* critical section.
*/
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
- target_ulong vaddr, CPUTLBEntryFull *full)
+ vaddr addr, CPUTLBEntryFull *full)
{
CPUArchState *env = cpu->env_ptr;
CPUTLB *tlb = env_tlb(env);
CPUTLBDesc *desc = &tlb->d[mmu_idx];
MemoryRegionSection *section;
- unsigned int index;
- target_ulong address;
- target_ulong write_address;
+ unsigned int index, read_flags, write_flags;
uintptr_t addend;
CPUTLBEntry *te, tn;
hwaddr iotlb, xlat, sz, paddr_page;
- target_ulong vaddr_page;
+ vaddr addr_page;
int asidx, wp_flags, prot;
bool is_ram, is_romd;
sz = TARGET_PAGE_SIZE;
} else {
sz = (hwaddr)1 << full->lg_page_size;
- tlb_add_large_page(env, mmu_idx, vaddr, sz);
+ tlb_add_large_page(env, mmu_idx, addr, sz);
}
- vaddr_page = vaddr & TARGET_PAGE_MASK;
+ addr_page = addr & TARGET_PAGE_MASK;
paddr_page = full->phys_addr & TARGET_PAGE_MASK;
prot = full->prot;
&xlat, &sz, full->attrs, &prot);
assert(sz >= TARGET_PAGE_SIZE);
- tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx
+ tlb_debug("vaddr=%" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
" prot=%x idx=%d\n",
- vaddr, full->phys_addr, prot, mmu_idx);
+ addr, full->phys_addr, prot, mmu_idx);
- address = vaddr_page;
+ read_flags = 0;
if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */
- address |= TLB_INVALID_MASK;
+ read_flags |= TLB_INVALID_MASK;
}
if (full->attrs.byte_swap) {
- address |= TLB_BSWAP;
+ read_flags |= TLB_BSWAP;
}
is_ram = memory_region_is_ram(section->mr);
addend = 0;
}
- write_address = address;
+ write_flags = read_flags;
if (is_ram) {
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
/*
*/
if (prot & PAGE_WRITE) {
if (section->readonly) {
- write_address |= TLB_DISCARD_WRITE;
+ write_flags |= TLB_DISCARD_WRITE;
} else if (cpu_physical_memory_is_clean(iotlb)) {
- write_address |= TLB_NOTDIRTY;
+ write_flags |= TLB_NOTDIRTY;
}
}
} else {
* Reads to romd devices go through the ram_ptr found above,
* but of course reads to I/O must go through MMIO.
*/
- write_address |= TLB_MMIO;
+ write_flags |= TLB_MMIO;
if (!is_romd) {
- address = write_address;
+ read_flags = write_flags;
}
}
- wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
+ wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
TARGET_PAGE_SIZE);
- index = tlb_index(env, mmu_idx, vaddr_page);
- te = tlb_entry(env, mmu_idx, vaddr_page);
+ index = tlb_index(env, mmu_idx, addr_page);
+ te = tlb_entry(env, mmu_idx, addr_page);
/*
* Hold the TLB lock for the rest of the function. We could acquire/release
tlb->c.dirty |= 1 << mmu_idx;
/* Make sure there's no cached translation for the new page. */
- tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
+ tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);
/*
* Only evict the old entry to the victim tlb if it's for a
* different page; otherwise just overwrite the stale data.
*/
- if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
+ if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
CPUTLBEntry *tv = &desc->vtable[vidx];
* TARGET_PAGE_BITS, and either
* + the ram_addr_t of the page base of the target RAM (RAM)
* + the offset within section->mr of the page base (I/O, ROMD)
- * We subtract the vaddr_page (which is page aligned and thus won't
+ * We subtract addr_page (which is page aligned and thus won't
* disturb the low bits) to give an offset which can be added to the
* (non-page-aligned) vaddr of the eventual memory access to get
* the MemoryRegion offset for the access. Note that the vaddr we
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/
desc->fulltlb[index] = *full;
- desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
- desc->fulltlb[index].phys_addr = paddr_page;
+ full = &desc->fulltlb[index];
+ full->xlat_section = iotlb - addr_page;
+ full->phys_addr = paddr_page;
/* Now calculate the new entry */
- tn.addend = addend - vaddr_page;
- if (prot & PAGE_READ) {
- tn.addr_read = address;
- if (wp_flags & BP_MEM_READ) {
- tn.addr_read |= TLB_WATCHPOINT;
- }
- } else {
- tn.addr_read = -1;
- }
+ tn.addend = addend - addr_page;
- if (prot & PAGE_EXEC) {
- tn.addr_code = address;
- } else {
- tn.addr_code = -1;
+ tlb_set_compare(full, &tn, addr_page, read_flags,
+ MMU_INST_FETCH, prot & PAGE_EXEC);
+
+ if (wp_flags & BP_MEM_READ) {
+ read_flags |= TLB_WATCHPOINT;
}
+ tlb_set_compare(full, &tn, addr_page, read_flags,
+ MMU_DATA_LOAD, prot & PAGE_READ);
- tn.addr_write = -1;
- if (prot & PAGE_WRITE) {
- tn.addr_write = write_address;
- if (prot & PAGE_WRITE_INV) {
- tn.addr_write |= TLB_INVALID_MASK;
- }
- if (wp_flags & BP_MEM_WRITE) {
- tn.addr_write |= TLB_WATCHPOINT;
- }
+ if (prot & PAGE_WRITE_INV) {
+ write_flags |= TLB_INVALID_MASK;
+ }
+ if (wp_flags & BP_MEM_WRITE) {
+ write_flags |= TLB_WATCHPOINT;
}
+ tlb_set_compare(full, &tn, addr_page, write_flags,
+ MMU_DATA_STORE, prot & PAGE_WRITE);
copy_tlb_helper_locked(te, &tn);
tlb_n_used_entries_inc(env, mmu_idx);
qemu_spin_unlock(&tlb->c.lock);
}
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs, int prot,
- int mmu_idx, target_ulong size)
+ int mmu_idx, uint64_t size)
{
CPUTLBEntryFull full = {
.phys_addr = paddr,
};
assert(is_power_of_2(size));
- tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+ tlb_set_page_full(cpu, mmu_idx, addr, &full);
}
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, target_ulong size)
+ int mmu_idx, uint64_t size)
{
- tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
+ tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
prot, mmu_idx, size);
}
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
* be discarded and looked up again (e.g. via tlb_entry()).
*/
-static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
+static void tlb_fill(CPUState *cpu, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
bool ok;
}
static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, target_ulong addr, uintptr_t retaddr,
+ int mmu_idx, vaddr addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
{
CPUState *cpu = env_cpu(env);
}
static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, uint64_t val, target_ulong addr,
+ int mmu_idx, uint64_t val, vaddr addr,
uintptr_t retaddr, MemOp op)
{
CPUState *cpu = env_cpu(env);
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
- MMUAccessType access_type, target_ulong page)
+ MMUAccessType access_type, vaddr page)
{
size_t vidx;
assert_cpu_is_self(env_cpu(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
- target_ulong cmp = tlb_read_idx(vtlb, access_type);
+ uint64_t cmp = tlb_read_idx(vtlb, access_type);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
}
}
-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
void **phost, CPUTLBEntryFull **pfull,
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_read_idx(entry, access_type);
- target_ulong page_addr = addr & TARGET_PAGE_MASK;
- int flags = TLB_FLAGS_MASK;
+ uint64_t tlb_addr = tlb_read_idx(entry, access_type);
+ vaddr page_addr = addr & TARGET_PAGE_MASK;
+ int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+ CPUTLBEntryFull *full;
if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
}
flags &= tlb_addr;
- *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
return flags;
}
-int probe_access_full(CPUArchState *env, target_ulong addr, int size,
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr)
return flags;
}
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
return flags;
}
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
CPUTLBEntryFull *full;
* NOTE: This function will trigger an exception if the page is
* not executable.
*/
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
CPUTLBEntryFull *full;
* from the same thread (which a mem callback will be) this is safe.
*/
-bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data)
{
CPUArchState *env = cpu->env_ptr;
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
uintptr_t index = tlb_index(env, mmu_idx, addr);
- target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+ uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
if (likely(tlb_hit(tlb_addr, addr))) {
/* We must have an iotlb entry for MMIO */
typedef struct MMULookupPageData {
CPUTLBEntryFull *full;
void *haddr;
- target_ulong addr;
+ vaddr addr;
int flags;
int size;
} MMULookupPageData;
static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
- target_ulong addr = data->addr;
+ vaddr addr = data->addr;
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_read_idx(entry, access_type);
+ uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false;
+ CPUTLBEntryFull *full;
+ int flags;
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
- data->flags = tlb_addr & TLB_FLAGS_MASK;
- data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
+ flags |= full->slow_flags[access_type];
+
+ data->full = full;
+ data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */
data->haddr = (void *)((uintptr_t)addr + entry->addend);
MMUAccessType access_type, uintptr_t ra)
{
CPUTLBEntryFull *full = data->full;
- target_ulong addr = data->addr;
+ vaddr addr = data->addr;
int flags = data->flags;
int size = data->size;
* Resolve the translation for the page(s) beginning at @addr, for MemOp.size
* bytes. Return true if the lookup crosses a page boundary.
*/
-static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
unsigned a_bits;
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, int size, uintptr_t retaddr)
+static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+ int size, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
- target_ulong tlb_addr;
+ vaddr tlb_addr;
void *hostaddr;
CPUTLBEntryFull *full;
*/
goto stop_the_world;
}
- /* Collect TLB_WATCHPOINT for read. */
+ /* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read;
/* Notice an IO access or a needs-MMU-lookup access */
notdirty_write(env_cpu(env), addr, size, full, retaddr);
}
- if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
- cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs,
- BP_MEM_READ | BP_MEM_WRITE, retaddr);
+ if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
+ int wp_flags = 0;
+
+ if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
+ wp_flags |= BP_MEM_WRITE;
+ }
+ if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
+ wp_flags |= BP_MEM_READ;
+ }
+ if (wp_flags) {
+ cpu_check_watchpoint(env_cpu(env), addr, size,
+ full->attrs, wp_flags, retaddr);
+ }
}
return hostaddr;
MMUAccessType type, uintptr_t ra)
{
CPUTLBEntryFull *full = p->full;
- target_ulong addr = p->addr;
+ vaddr addr = p->addr;
int i, size = p->size;
QEMU_IOTHREAD_LOCK_GUARD();
return ret;
}
-static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
bool crosspage;
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage);
return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
-static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
uint16_t ret;
uint8_t a, b;
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
-static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
bool crosspage;
uint32_t ret;
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
-static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
bool crosspage;
uint64_t ret;
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
}
-static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr,
+static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
Int128 ret;
int first;
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
/* Perform the load host endian. */
uint64_t val_le, int mmu_idx, uintptr_t ra)
{
CPUTLBEntryFull *full = p->full;
- target_ulong addr = p->addr;
+ vaddr addr = p->addr;
int i, size = p->size;
QEMU_IOTHREAD_LOCK_GUARD();
bool crosspage;
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
}
-static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
uint8_t a, b;
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
do_st2_mmu(env, addr, val, oi, retaddr);
}
-static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
do_st4_mmu(env, addr, val, oi, retaddr);
}
-static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
do_st8_mmu(env, addr, val, oi, retaddr);
}
-static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
uint64_t a, b;
int first;
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
/* Swap to host endian if necessary, then store. */
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
-TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags,
+TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
+ uint64_t cs_base, uint32_t flags,
int cflags);
void page_init(void);
void tb_htable_init(void);
uintptr_t host_pc);
/* Return the current PC from CPU, which may be cached in TB. */
-static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
+static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
if (tb_cflags(tb) & CF_PCREL) {
return cpu->cc->get_pc(cpu);
extern bool one_insn_per_tb;
+/**
+ * tcg_req_mo:
+ * @type: TCGBar
+ *
+ * Filter @type to the barrier that is required for the guest
+ * memory ordering vs the host memory ordering. A non-zero
+ * result indicates that some barrier is required.
+ *
+ * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
+ * guest requires strict ordering.
+ *
+ * This is a macro so that it's constant even without optimization.
+ */
+#ifdef TCG_GUEST_DEFAULT_MO
+# define tcg_req_mo(type) \
+ ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
+#else
+# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
+#endif
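+
+/*
+ * Worked example (illustrative, based on the current definitions): an
+ * x86 guest defines TCG_GUEST_DEFAULT_MO as (TCG_MO_ALL & ~TCG_MO_ST_LD).
+ * On an AArch64 host (TCG_TARGET_DEFAULT_MO == 0),
+ * tcg_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD) reduces to TCG_MO_LD_LD, so a
+ * host barrier is issued before the load; on an x86 host the same
+ * expression reduces to 0 and the barrier is elided.
+ */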
+
+/**
+ * cpu_req_mo:
+ * @type: TCGBar
+ *
+ * If tcg_req_mo indicates a barrier for @type is required
+ * for the guest memory model, issue a host memory barrier.
+ */
+#define cpu_req_mo(type) \
+ do { \
+ if (tcg_req_mo(type)) { \
+ smp_mb(); \
+ } \
+ } while (0)
+
#endif /* ACCEL_TCG_INTERNAL_H */
return human_readable_text_from_str(buf);
}
-#ifdef CONFIG_PROFILER
-
-int64_t dev_time;
-
-HumanReadableText *qmp_x_query_profile(Error **errp)
-{
- g_autoptr(GString) buf = g_string_new("");
- static int64_t last_cpu_exec_time;
- int64_t cpu_exec_time;
- int64_t delta;
-
- cpu_exec_time = tcg_cpu_exec_time();
- delta = cpu_exec_time - last_cpu_exec_time;
-
- g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
- dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
- g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
- delta, delta / (double)NANOSECONDS_PER_SECOND);
- last_cpu_exec_time = cpu_exec_time;
- dev_time = 0;
-
- return human_readable_text_from_str(buf);
-}
-#else
-HumanReadableText *qmp_x_query_profile(Error **errp)
-{
- error_setg(errp, "Internal profiler not compiled");
- return NULL;
-}
-#endif
-
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
-static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
{
- target_ulong tmp;
+ vaddr tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{
- target_ulong tmp;
+ vaddr tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK));
#else
/* In user-mode we can get better hashing because we do not have a TLB */
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{
return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}
#endif /* CONFIG_SOFTMMU */
static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc,
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
uint32_t flags, uint64_t flags2, uint32_t cf_mask)
{
return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
struct rcu_head rcu;
struct {
TranslationBlock *tb;
- target_ulong pc;
+ vaddr pc;
} array[TB_JMP_CACHE_SIZE];
};
/* Call with mmap_lock held. */
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
{
- target_ulong addr;
+ vaddr addr;
int flags;
assert_memory_lock();
int tcg_cpus_exec(CPUState *cpu)
{
int ret;
-#ifdef CONFIG_PROFILER
- int64_t ti;
-#endif
assert(tcg_enabled());
-#ifdef CONFIG_PROFILER
- ti = profile_getclock();
-#endif
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
-#ifdef CONFIG_PROFILER
- qatomic_set(&tcg_ctx->prof.cpu_exec_time,
- tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
-#endif
return ret;
}
* they can set the appropriate CONFIG flags in ${target}-softmmu.mak
*
* Once a guest architecture has been converted to the new primitives
- * there are two remaining limitations to check.
- *
- * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
- * - The host must have a stronger memory order than the guest
- *
- * It may be possible in future to support strong guests on weak hosts
- * but that will require tagging all load/stores in a guest with their
- * implicit memory order requirements which would likely slow things
- * down a lot.
+ * there is one remaining limitation to check:
+ * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
*/
-static bool check_tcg_memory_orders_compatible(void)
-{
-#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
- return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
-#else
- return false;
-#endif
-}
-
static bool default_mttcg_enabled(void)
{
if (icount_enabled() || TCG_OVERSIZED_GUEST) {
return false;
- } else {
+ }
#ifdef TARGET_SUPPORTS_MTTCG
- return check_tcg_memory_orders_compatible();
+# ifndef TCG_GUEST_DEFAULT_MO
+# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
+# endif
+ return true;
#else
- return false;
+ return false;
#endif
- }
}
static void tcg_accel_instance_init(Object *obj)
warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results");
#endif
- if (!check_tcg_memory_orders_compatible()) {
- warn_report("Guest expects a stronger memory ordering "
- "than the host provides");
- error_printf("This may cause strange/hard to debug errors\n");
- }
s->mttcg_enabled = true;
}
} else if (strcmp(value, "single") == 0) {
uintptr_t host_pc)
{
uint64_t data[TARGET_INSN_START_WORDS];
-#ifdef CONFIG_PROFILER
- TCGProfile *prof = &tcg_ctx->prof;
- int64_t ti = profile_getclock();
-#endif
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) {
}
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
-
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->restore_time,
- prof->restore_time + profile_getclock() - ti);
- qatomic_set(&prof->restore_count, prof->restore_count + 1);
-#endif
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
* Return the size of the generated code, or negative on error.
*/
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
- target_ulong pc, void *host_pc,
+ vaddr pc, void *host_pc,
int *max_insns, int64_t *ti)
{
int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
-#ifdef CONFIG_PROFILER
- qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
- qatomic_set(&tcg_ctx->prof.interm_time,
- tcg_ctx->prof.interm_time + profile_getclock() - *ti);
- *ti = profile_getclock();
-#endif
-
return tcg_gen_code(tcg_ctx, tb, pc);
}
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
- target_ulong pc, target_ulong cs_base,
+ vaddr pc, uint64_t cs_base,
uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
tb_page_addr_t phys_pc;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
-#ifdef CONFIG_PROFILER
- TCGProfile *prof = &tcg_ctx->prof;
-#endif
int64_t ti;
void *host_pc;
tb_overflow:
-#ifdef CONFIG_PROFILER
- /* includes aborted translations because of exceptions */
- qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
- ti = profile_getclock();
-#endif
-
trace_translate_block(tb, pc, tb->tc.ptr);
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
*/
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
- qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
- qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
- qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
-#endif
-
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock();
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu->env_ptr;
- target_ulong pc, cs_base;
+ vaddr pc;
+ uint64_t cs_base;
tb_page_addr_t addr;
uint32_t flags;
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
- target_ulong pc = log_pc(cpu, tb);
+ vaddr pc = log_pc(cpu, tb);
if (qemu_log_in_addr_range(pc)) {
- qemu_log("cpu_io_recompile: rewound execution of TB to "
- TARGET_FMT_lx "\n", pc);
+ qemu_log("cpu_io_recompile: rewound execution of TB to %"
+ VADDR_PRIx "\n", pc);
}
}
}
}
-bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
{
/* Suppress goto_tb if requested. */
if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
}
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- target_ulong pc, void *host_pc,
- const TranslatorOps *ops, DisasContextBase *db)
+ vaddr pc, void *host_pc, const TranslatorOps *ops,
+ DisasContextBase *db)
{
uint32_t cflags = tb_cflags(tb);
TCGOp *icount_start_insn;
}
static void *translator_access(CPUArchState *env, DisasContextBase *db,
- target_ulong pc, size_t len)
+ vaddr pc, size_t len)
{
void *host;
- target_ulong base, end;
+ vaddr base, end;
TranslationBlock *tb;
tb = db->tb;
return current_tb_invalidated ? 2 : 1;
}
-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
int fault_size, MMUAccessType access_type,
bool nonfault, uintptr_t ra)
{
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
return flags;
}
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
int flags;
return size ? g2h(env_cpu(env), addr) : NULL;
}
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
int flags;
/* The softmmu versions of these helpers are in cputlb.c. */
-static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
+static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
int a_bits = get_alignment_bits(mop);
uint8_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_8);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr);
clear_helper_retaddr();
uint16_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr();
uint32_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr();
uint64_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr();
Int128 ret;
tcg_debug_assert((mop & MO_SIZE) == MO_128);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop);
clear_helper_retaddr();
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_8);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_128);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
/*
* Do not allow unaligned operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, int size, uintptr_t retaddr)
+static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+ int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
#include "qemu/timer.h"
#include "qemu/dbus.h"
+#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h>
+#endif
+
+#include "ui/dbus.h"
#include "ui/dbus-display1.h"
#define AUDIO_CAP "dbus"
static gboolean
dbus_audio_register_listener(AudioState *s,
GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
GUnixFDList *fd_list,
+#endif
GVariant *arg_listener,
bool out)
{
return DBUS_METHOD_INVOCATION_HANDLED;
}
+#ifdef G_OS_WIN32
+ if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) {
+ return DBUS_METHOD_INVOCATION_HANDLED;
+ }
+#else
fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err);
if (err) {
g_dbus_method_invocation_return_error(invocation,
err->message);
return DBUS_METHOD_INVOCATION_HANDLED;
}
+#endif
socket = g_socket_new_from_fd(fd, &err);
if (err) {
DBUS_DISPLAY_ERROR_FAILED,
"Couldn't make a socket: %s",
err->message);
+#ifdef G_OS_WIN32
+ closesocket(fd);
+#else
+ close(fd);
+#endif
return DBUS_METHOD_INVOCATION_HANDLED;
}
socket_conn = g_socket_connection_factory_create_connection(socket);
if (out) {
qemu_dbus_display1_audio_complete_register_out_listener(
- da->iface, invocation, NULL);
+ da->iface, invocation
+#ifdef G_OS_UNIX
+ , NULL
+#endif
+ );
} else {
qemu_dbus_display1_audio_complete_register_in_listener(
- da->iface, invocation, NULL);
+ da->iface, invocation
+#ifdef G_OS_UNIX
+ , NULL
+#endif
+ );
}
listener_conn =
static gboolean
dbus_audio_register_out_listener(AudioState *s,
GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
GUnixFDList *fd_list,
+#endif
GVariant *arg_listener)
{
return dbus_audio_register_listener(s, invocation,
- fd_list, arg_listener, true);
+#ifdef G_OS_UNIX
+ fd_list,
+#endif
+ arg_listener, true);
}
static gboolean
dbus_audio_register_in_listener(AudioState *s,
GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
GUnixFDList *fd_list,
+#endif
GVariant *arg_listener)
{
return dbus_audio_register_listener(s, invocation,
- fd_list, arg_listener, false);
+#ifdef G_OS_UNIX
+ fd_list,
+#endif
+ arg_listener, false);
}
static void
backend->conf.max_auth_key_len = VHOST_USER_MAX_AUTH_KEY_LEN;
}
-static int64_t cryptodev_vhost_user_sym_create_session(
+static int64_t cryptodev_vhost_user_crypto_create_session(
CryptoDevBackend *backend,
- CryptoDevBackendSymSessionInfo *sess_info,
+ CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index, Error **errp)
{
CryptoDevBackendClient *cc =
void *opaque)
{
uint32_t op_code = sess_info->op_code;
- CryptoDevBackendSymSessionInfo *sym_sess_info;
int64_t ret;
Error *local_error = NULL;
int status;
switch (op_code) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
+ case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
- sym_sess_info = &sess_info->u.sym_sess_info;
- ret = cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
+ ret = cryptodev_vhost_user_crypto_create_session(backend, sess_info,
queue_index, &local_error);
break;
entry = g_new0(StatsResult, 1);
entry->provider = STATS_PROVIDER_CRYPTODEV;
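+    /* object_get_canonical_path() already returns a newly allocated string */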
- entry->qom_path = g_strdup(object_get_canonical_path(obj));
+ entry->qom_path = object_get_canonical_path(obj);
entry->stats = stats_list;
QAPI_LIST_PREPEND(*stats_results, entry);
uint8_t wce;
/* don't support live migration */
- if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
+ if (flags != VHOST_SET_CONFIG_TYPE_FRONTEND) {
return -EINVAL;
}
}
break;
+ case EXCP_SYSCALL:
+ /* doesn't do anything */
+ break;
+
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
}
}
- dwMode |= ENABLE_LINE_INPUT;
+ dwMode |= ENABLE_LINE_INPUT | ENABLE_VIRTUAL_TERMINAL_INPUT;
if (is_console) {
/* set the terminal in raw mode */
if ! test -e "$source_path/.git"
then
- git_submodules_action="ignore"
+ git_submodules_action="validate"
fi
# test for any invalid configuration combinations
if test "$?" -ne 0 ; then
error_exit "meson setup failed"
fi
+ echo "$meson" > build.ninja.stamp
else
if test -f meson-private/cmd_line.txt; then
# Adjust old command line options that were removed
int fd;
/* don't support live migration */
- if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
+ if (flags != VHOST_SET_CONFIG_TYPE_FRONTEND) {
return -1;
}
}
#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(target_ulong addr)
+void tb_invalidate_phys_addr(hwaddr addr)
{
mmap_lock();
tb_invalidate_phys_page(addr);
``size``
- ``b`` : 8 bits
- ``w`` : 16 bits
+ - ``24`` : 24 bits
- ``l`` : 32 bits
- ``q`` : 64 bits
Regexes for git grep
- ``\<ld[us]\?[bwlq]\(_[hbl]e\)\?_p\>``
- ``\<st[bwlq]\(_[hbl]e\)\?_p\>``
+ - ``\<st24\(_[hbl]e\)\?_p\>``
- ``\<ldn_\([hbl]e\)?_p\>``
- ``\<stn_\([hbl]e\)?_p\>``
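+
+As a sketch of how the new 24-bit store could be used, assuming
+``st24_le_p`` follows the same signature pattern as the other ``st*_p``
+helpers (``void st24_le_p(void *ptr, uint32_t val)``)::
+
+    uint8_t buf[3];
+    st24_le_p(buf, 0xc0ffee);  /* stores bytes 0xee, 0xff, 0xc0 */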
devices/ccid.rst
devices/cxl.rst
devices/ivshmem.rst
+ devices/keyboard.rst
devices/net.rst
devices/nvme.rst
devices/usb.rst
--- /dev/null
+.. _keyboard:
+
+Sparc32 keyboard
+----------------
+SUN Type 4, 5 and 5c keyboards have dip switches to choose the language layout
+of the keyboard. Solaris issues an ioctl to query the value of the dip
+switches and uses that value to select the keyboard layout. The SUN BIOS, like
+the one in the file ss5.bin, also uses this value to support at least some
+keyboard layouts. However, the OpenBIOS provided with QEMU is hardcoded to
+always use a US keyboard layout.
+
+With the escc.chnA-sunkbd-layout driver property it is possible to select the
+keyboard layout. Example:
+
+-global escc.chnA-sunkbd-layout=de
+
+Depending on the type of keyboard, the keyboard can have 5 or 6 dip switches
+to select the keyboard layout, giving up to 64 different layouts. Not all
+combinations are supported by Solaris, and even fewer by the Sun OpenBoot
+BIOS.
+
+The dip switch settings can be given as a hexadecimal number, a decimal
+number or, in some cases, as a language string. Examples:
+
+-global escc.chnA-sunkbd-layout=0x2b
+
+-global escc.chnA-sunkbd-layout=43
+
+-global escc.chnA-sunkbd-layout=sv
+
+The above 3 examples all select a Swedish keyboard layout. Table 3-15 at
+https://docs.oracle.com/cd/E19683-01/806-6642/new-43/index.html explains which
+keytable file is used for different dip switch settings. The information
+in that table can be summarized in this table:
+
+.. list-table:: Language selection values for escc.chnA-sunkbd-layout
+ :widths: 10 10 10
+ :header-rows: 1
+
+ * - Hexadecimal value
+ - Decimal value
+ - Language code
+ * - 0x21
+ - 33
+ - en-us
+ * - 0x23
+ - 35
+ - fr
+ * - 0x24
+ - 36
+ - da
+ * - 0x25
+ - 37
+ - de
+ * - 0x26
+ - 38
+ - it
+ * - 0x27
+ - 39
+ - nl
+ * - 0x28
+ - 40
+ - no
+ * - 0x29
+ - 41
+ - pt
+ * - 0x2a
+ - 42
+ - es
+ * - 0x2b
+ - 43
+ - sv
+ * - 0x2c
+ - 44
+ - fr-ch
+ * - 0x2d
+ - 45
+ - de-ch
+ * - 0x2e
+ - 46
+ - en-gb
+ * - 0x2f
+ - 47
+ - ko
+ * - 0x30
+ - 48
+ - tw
+ * - 0x31
+ - 49
+ - ja
+ * - 0x32
+ - 50
+ - fr-ca
+ * - 0x33
+ - 51
+ - hu
+ * - 0x34
+ - 52
+ - pl
+ * - 0x35
+ - 53
+ - cz
+ * - 0x36
+ - 54
+ - ru
+ * - 0x37
+ - 55
+ - lv
+ * - 0x38
+ - 56
+ - tr
+ * - 0x39
+ - 57
+ - gr
+ * - 0x3a
+ - 58
+ - ar
+ * - 0x3b
+ - 59
+ - lt
+ * - 0x3c
+ - 60
+ - nl-be
+ * - 0x3c
+ - 60
+ - be
+
+Not all dip switch values have a corresponding language code, and both "be" and
+"nl-be" correspond to the same dip switch value. By default, if no value is
+given to escc.chnA-sunkbd-layout, 0x21 (en-us) is used.
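+
+For example, to start a SPARCstation 5 guest with a German keyboard layout
+(the machine type and disk image shown are illustrative)::
+
+  qemu-system-sparc -M SS-5 -global escc.chnA-sunkbd-layout=de disk.img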
the minimum memory page size (CAP.MPSMIN). The default value (``0``)
has this property inherit the ``mdts`` value.
+Flexible Data Placement
+-----------------------
+
+The device may be configured to support TP4146 ("Flexible Data Placement") by
+enabling it (``fdp=on``) on the subsystem::
+
+ -device nvme-subsys,id=nvme-subsys-0,nqn=subsys0,fdp=on,fdp.nruh=16
+
+The subsystem emulates a single Endurance Group, on which Flexible Data
+Placement is supported. Also note that the device emulation deviates slightly
+from the specification by always enabling the "FDP Mode" feature on the
+controller if the subsystem is configured for Flexible Data Placement.
+
+Enabling Flexible Data Placement on the subsystem enables the following
+parameters:
+
+``fdp.nrg`` (default: ``1``)
+ Set the number of Reclaim Groups.
+
+``fdp.nruh`` (default: ``0``)
+ Set the number of Reclaim Unit Handles. This is a mandatory parameter and
+ must be non-zero.
+
+``fdp.runs`` (default: ``96M``)
+ Set the Reclaim Unit Nominal Size. Defaults to 96 MiB.
+
+Namespaces within this subsystem may request Reclaim Unit Handles::
+
+ -device nvme-ns,drive=nvm-1,fdp.ruhs=RUHLIST
+
+The ``RUHLIST`` is a semicolon-separated list (e.g. ``0;1;2;3``) and may
+include ranges (e.g. ``0;8-15``). If no reclaim unit handle list is specified,
+the controller will assign the controller-specified reclaim unit handle to
+placement handle identifier 0.
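+
+For example, the following hypothetical fragment attaches a namespace that
+claims reclaim unit handles 0 and 8 through 15 (the drive id is illustrative;
+remember to quote the semicolons when passing this on a shell command line)::
+
+  -device nvme-subsys,id=nvme-subsys-0,nqn=subsys0,fdp=on,fdp.nruh=16
+  -device nvme-ns,drive=nvm-1,subsys=nvme-subsys-0,fdp.ruhs=0;8-15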
+
Metadata
--------
.. code-block:: console
- echo 0000:01:00.1 > /sys/bus/pci/drivers/nvme/bind
\ No newline at end of file
+ echo 0000:01:00.1 > /sys/bus/pci/drivers/nvme/bind
- Non Volatile RAM M48T02/M48T08
- Slave I/O: timers, interrupt controllers, Zilog serial ports,
- keyboard and power/reset logic
+ :ref:`keyboard` and power/reset logic
- ESP SCSI controller with hard disk and CD-ROM support
};
}
-static inline void float16_unpack_raw(FloatParts64 *p, float16 f)
+static void QEMU_FLATTEN float16_unpack_raw(FloatParts64 *p, float16 f)
{
unpack_raw64(p, &float16_params, f);
}
-static inline void bfloat16_unpack_raw(FloatParts64 *p, bfloat16 f)
+static void QEMU_FLATTEN bfloat16_unpack_raw(FloatParts64 *p, bfloat16 f)
{
unpack_raw64(p, &bfloat16_params, f);
}
-static inline void float32_unpack_raw(FloatParts64 *p, float32 f)
+static void QEMU_FLATTEN float32_unpack_raw(FloatParts64 *p, float32 f)
{
unpack_raw64(p, &float32_params, f);
}
-static inline void float64_unpack_raw(FloatParts64 *p, float64 f)
+static void QEMU_FLATTEN float64_unpack_raw(FloatParts64 *p, float64 f)
{
unpack_raw64(p, &float64_params, f);
}
-static void floatx80_unpack_raw(FloatParts128 *p, floatx80 f)
+static void QEMU_FLATTEN floatx80_unpack_raw(FloatParts128 *p, floatx80 f)
{
*p = (FloatParts128) {
.cls = float_class_unclassified,
};
}
-static void float128_unpack_raw(FloatParts128 *p, float128 f)
+static void QEMU_FLATTEN float128_unpack_raw(FloatParts128 *p, float128 f)
{
const int f_size = float128_params.frac_size - 64;
const int e_size = float128_params.exp_size;
return ret;
}
-static inline float16 float16_pack_raw(const FloatParts64 *p)
+static float16 QEMU_FLATTEN float16_pack_raw(const FloatParts64 *p)
{
return make_float16(pack_raw64(p, &float16_params));
}
-static inline bfloat16 bfloat16_pack_raw(const FloatParts64 *p)
+static bfloat16 QEMU_FLATTEN bfloat16_pack_raw(const FloatParts64 *p)
{
return pack_raw64(p, &bfloat16_params);
}
-static inline float32 float32_pack_raw(const FloatParts64 *p)
+static float32 QEMU_FLATTEN float32_pack_raw(const FloatParts64 *p)
{
return make_float32(pack_raw64(p, &float32_params));
}
-static inline float64 float64_pack_raw(const FloatParts64 *p)
+static float64 QEMU_FLATTEN float64_pack_raw(const FloatParts64 *p)
{
return make_float64(pack_raw64(p, &float64_params));
}
-static float128 float128_pack_raw(const FloatParts128 *p)
+static float128 QEMU_FLATTEN float128_pack_raw(const FloatParts128 *p)
{
const int f_size = float128_params.frac_size - 64;
const int e_size = float128_params.exp_size;
Show host USB devices.
ERST
-#if defined(CONFIG_TCG)
- {
- .name = "profile",
- .args_type = "",
- .params = "",
- .help = "show profiling information",
- .cmd_info_hrt = qmp_x_query_profile,
- },
-#endif
-
-SRST
- ``info profile``
- Show profiling information.
-ERST
-
{
.name = "capture",
.args_type = "",
}
/* ACPI PM1aCNT */
-static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val)
-{
- ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE);
-
- if (val & ACPI_BITMASK_SLEEP_ENABLE) {
- /* change suspend type */
- uint16_t sus_typ = (val >> 10) & 7;
- switch (sus_typ) {
- case 0: /* soft power off */
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
- break;
- case 1:
- qemu_system_suspend_request();
- break;
- default:
- if (sus_typ == ar->pm1.cnt.s4_val) { /* S4 request */
- qapi_event_send_suspend_disk();
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
- }
- break;
- }
- }
-}
-
void acpi_pm1_cnt_update(ACPIREGS *ar,
bool sci_enable, bool sci_disable)
{
static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width)
{
ACPIREGS *ar = opaque;
- return ar->pm1.cnt.cnt;
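+    /* allow byte-wide reads: shift the 16-bit value down to the accessed byte */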
+ return ar->pm1.cnt.cnt >> addr * 8;
}
static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
unsigned width)
{
- acpi_pm1_cnt_write(opaque, val);
+ ACPIREGS *ar = opaque;
+
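+    /* a byte write at offset 1 replaces only the high byte of the register */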
+ if (addr == 1) {
+ val = val << 8 | (ar->pm1.cnt.cnt & 0xff);
+ }
+ ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE);
+
+ if (val & ACPI_BITMASK_SLEEP_ENABLE) {
+ /* change suspend type */
+ uint16_t sus_typ = (val >> 10) & 7;
+ switch (sus_typ) {
+ case 0: /* soft power off */
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ break;
+ case 1:
+ qemu_system_suspend_request();
+ break;
+ default:
+ if (sus_typ == ar->pm1.cnt.s4_val) { /* S4 request */
+ qapi_event_send_suspend_disk();
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ }
+ break;
+ }
+ }
}
static const MemoryRegionOps acpi_pm_cnt_ops = {
mc->possible_cpu_arch_ids = sbsa_ref_possible_cpu_arch_ids;
mc->cpu_index_to_instance_props = sbsa_ref_cpu_index_to_props;
mc->get_default_cpu_node_id = sbsa_ref_get_default_cpu_node_id;
+ /* platform instead of architectural choice */
+ mc->cpu_cluster_has_numa_boundary = true;
}
static const TypeInfo sbsa_ref_info = {
mc->smp_props.clusters_supported = true;
mc->auto_enable_numa_with_memhp = true;
mc->auto_enable_numa_with_memdev = true;
+ /* platform instead of architectural choice */
+ mc->cpu_cluster_has_numa_boundary = true;
mc->default_ram_id = "mach-virt.ram";
mc->default_nic = "virtio-net-pci";
-specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c'))
+system_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c'))
specific_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c'))
#include "qemu/main-loop.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
-#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
#include "block/aio.h"
ret = vhost_dev_set_config(&s->dev, &blkcfg->wce,
offsetof(struct virtio_blk_config, wce),
sizeof(blkcfg->wce),
- VHOST_SET_CONFIG_TYPE_MASTER);
+ VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("set device config space failed");
return;
#include "qemu/module.h"
#include "hw/char/escc.h"
#include "ui/console.h"
+
+#include "qemu/cutils.h"
#include "trace.h"
/*
#define R_MISC1I 14
#define R_EXTINT 15
+static uint8_t sunkbd_layout_dip_switch(const char *sunkbd_layout);
static void handle_kbd_command(ESCCChannelState *s, int val);
static int serial_can_receive(void *opaque);
static void serial_receive_byte(ESCCChannelState *s, int ch);
.event = sunkbd_handle_event,
};
+static uint8_t sunkbd_layout_dip_switch(const char *kbd_layout)
+{
+ /* Return the value of the dip-switches in a SUN Type 5 keyboard */
+ static uint8_t ret = 0xff;
+
+ if ((ret == 0xff) && kbd_layout) {
+ int i;
+ struct layout_values {
+ const char *lang;
+ uint8_t dip;
+ } languages[] =
+ /*
+ * Dip values from table 3-16 Layouts for Type 4, 5 and 5c Keyboards
+ */
+ {
+ {"en-us", 0x21}, /* U.S.A. (US5.kt) */
+ /* 0x22 is some other US (US_UNIX5.kt) */
+ {"fr", 0x23}, /* France (France5.kt) */
+ {"da", 0x24}, /* Denmark (Denmark5.kt) */
+ {"de", 0x25}, /* Germany (Germany5.kt) */
+ {"it", 0x26}, /* Italy (Italy5.kt) */
+ {"nl", 0x27}, /* The Netherlands (Netherland5.kt) */
+ {"no", 0x28}, /* Norway (Norway.kt) */
+ {"pt", 0x29}, /* Portugal (Portugal5.kt) */
+ {"es", 0x2a}, /* Spain (Spain5.kt) */
+ {"sv", 0x2b}, /* Sweden (Sweden5.kt) */
+ {"fr-ch", 0x2c}, /* Switzerland/French (Switzer_Fr5.kt) */
+ {"de-ch", 0x2d}, /* Switzerland/German (Switzer_Ge5.kt) */
+ {"en-gb", 0x2e}, /* Great Britain (UK5.kt) */
+ {"ko", 0x2f}, /* Korea (Korea5.kt) */
+ {"tw", 0x30}, /* Taiwan (Taiwan5.kt) */
+ {"ja", 0x31}, /* Japan (Japan5.kt) */
+ {"fr-ca", 0x32}, /* Canada/French (Canada_Fr5.kt) */
+ {"hu", 0x33}, /* Hungary (Hungary5.kt) */
+ {"pl", 0x34}, /* Poland (Poland5.kt) */
+ {"cz", 0x35}, /* Czech (Czech5.kt) */
+ {"ru", 0x36}, /* Russia (Russia5.kt) */
+ {"lv", 0x37}, /* Latvia (Latvia5.kt) */
+ {"tr", 0x38}, /* Turkey-Q5 (TurkeyQ5.kt) */
+ {"gr", 0x39}, /* Greece (Greece5.kt) */
+ {"ar", 0x3a}, /* Arabic (Arabic5.kt) */
+ {"lt", 0x3b}, /* Lithuania (Lithuania5.kt) */
+ {"nl-be", 0x3c}, /* Belgium (Belgian5.kt) */
+ {"be", 0x3c}, /* Belgium (Belgian5.kt) */
+ };
+
+ for (i = 0;
+ i < sizeof(languages) / sizeof(struct layout_values);
+ i++) {
+ if (!strcmp(kbd_layout, languages[i].lang)) {
+ ret = languages[i].dip;
+ return ret;
+ }
+ }
+
+ /* Found no known language code */
+ if ((kbd_layout[0] >= '0') && (kbd_layout[0] <= '9')) {
+ unsigned int tmp;
+
+            /* As a fallback we also accept a numeric dip switch value */
+ if (!qemu_strtoui(kbd_layout, NULL, 0, &tmp)) {
+ ret = tmp;
+ }
+ }
+ }
+
+ if (ret == 0xff) {
+ /* Final fallback if keyboard_layout was not set or recognized */
+ ret = 0x21; /* en-us layout */
+ }
+ return ret;
+}
+
static void handle_kbd_command(ESCCChannelState *s, int val)
{
trace_escc_kbd_command(val);
case 0xf:
clear_queue(s);
put_queue(s, 0xfe);
- put_queue(s, 0x21); /* en-us layout */
+ put_queue(s, sunkbd_layout_dip_switch(s->sunkbd_layout));
break;
default:
break;
DEFINE_PROP_UINT32("chnAtype", ESCCState, chn[1].type, 0),
DEFINE_PROP_CHR("chrB", ESCCState, chn[0].chr),
DEFINE_PROP_CHR("chrA", ESCCState, chn[1].chr),
+ DEFINE_PROP_STRING("chnA-sunkbd-layout", ESCCState, chn[1].sunkbd_layout),
DEFINE_PROP_END_OF_LIST(),
};
g_string_free(s, true);
}
+static void validate_cpu_cluster_to_numa_boundary(MachineState *ms)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ NumaState *state = ms->numa_state;
+ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+ const CPUArchId *cpus = possible_cpus->cpus;
+ int i, j;
+
+ if (state->num_nodes <= 1 || possible_cpus->len <= 1) {
+ return;
+ }
+
+ /*
+     * The Linux scheduling domain can't be parsed when multiple CPUs in
+     * one cluster have been associated with different NUMA nodes. However,
+ * it's fine to associate one NUMA node with CPUs in different clusters.
+ */
+ for (i = 0; i < possible_cpus->len; i++) {
+ for (j = i + 1; j < possible_cpus->len; j++) {
+ if (cpus[i].props.has_socket_id &&
+ cpus[i].props.has_cluster_id &&
+ cpus[i].props.has_node_id &&
+ cpus[j].props.has_socket_id &&
+ cpus[j].props.has_cluster_id &&
+ cpus[j].props.has_node_id &&
+ cpus[i].props.socket_id == cpus[j].props.socket_id &&
+ cpus[i].props.cluster_id == cpus[j].props.cluster_id &&
+ cpus[i].props.node_id != cpus[j].props.node_id) {
+ warn_report("CPU-%d and CPU-%d in socket-%" PRId64 "-cluster-%" PRId64
+ " have been associated with node-%" PRId64 " and node-%" PRId64
+ " respectively. It can cause OSes like Linux to"
+ " misbehave", i, j, cpus[i].props.socket_id,
+ cpus[i].props.cluster_id, cpus[i].props.node_id,
+ cpus[j].props.node_id);
+ }
+ }
+ }
+}
+
MemoryRegion *machine_consume_memdev(MachineState *machine,
HostMemoryBackend *backend)
{
numa_complete_configuration(machine);
if (machine->numa_state->num_nodes) {
machine_numa_finish_cpu_init(machine);
+ if (machine_class->cpu_cluster_has_numa_boundary) {
+ validate_cpu_cluster_to_numa_boundary(machine);
+ }
}
}
static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
- return 0;
+ CXLDeviceState *cxl_dstate = opaque;
+
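+    /* the register file is exposed as overlapping 8/16/32/64-bit views */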
+ switch (size) {
+ case 1:
+ return cxl_dstate->dev_reg_state[offset];
+ case 2:
+ return cxl_dstate->dev_reg_state16[offset / size];
+ case 4:
+ return cxl_dstate->dev_reg_state32[offset / size];
+ case 8:
+ return cxl_dstate->dev_reg_state64[offset / size];
+ default:
+ g_assert_not_reached();
+ }
}
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
&cxl_dstate->memory_device);
}
-static void device_reg_init_common(CXLDeviceState *cxl_dstate) { }
+void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
+ bool available)
+{
+ if (available) {
+ cxl_dstate->event_status |= (1 << log_type);
+ } else {
+ cxl_dstate->event_status &= ~(1 << log_type);
+ }
+
+ ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
+ EVENT_STATUS, cxl_dstate->event_status);
+}
+
+static void device_reg_init_common(CXLDeviceState *cxl_dstate)
+{
+ CXLEventLogType log;
+
+ for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
+ cxl_event_set_status(cxl_dstate, log, false);
+ }
+}
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);
- cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1);
+ cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
device_reg_init_common(cxl_dstate);
- cxl_device_cap_init(cxl_dstate, MAILBOX, 2);
+ cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
mailbox_reg_init_common(cxl_dstate);
- cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000);
+ cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
memdev_reg_init_common(cxl_dstate);
cxl_initialize_mailbox(cxl_dstate);
--- /dev/null
+/*
+ * CXL Event processing
+ *
+ * Copyright(C) 2023 Intel Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bswap.h"
+#include "qemu/error-report.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_events.h"
+
+/* Artificial limit on the number of events a log can hold */
+#define CXL_TEST_EVENT_OVERFLOW 8
+
+static void reset_overflow(CXLEventLog *log)
+{
+ log->overflow_err_count = 0;
+ log->first_overflow_timestamp = 0;
+ log->last_overflow_timestamp = 0;
+}
+
+void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num)
+{
+ CXLEventLog *log;
+ int i;
+
+ for (i = 0; i < CXL_EVENT_TYPE_MAX; i++) {
+ log = &cxlds->event_logs[i];
+ log->next_handle = 1;
+ log->overflow_err_count = 0;
+ log->first_overflow_timestamp = 0;
+ log->last_overflow_timestamp = 0;
+ log->irq_enabled = false;
+ log->irq_vec = start_msg_num++;
+ qemu_mutex_init(&log->lock);
+ QSIMPLEQ_INIT(&log->events);
+ }
+
+ /* Override -- Dynamic Capacity uses the same vector as info */
+ cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP].irq_vec =
+ cxlds->event_logs[CXL_EVENT_TYPE_INFO].irq_vec;
+}
+
+static CXLEvent *cxl_event_get_head(CXLEventLog *log)
+{
+ return QSIMPLEQ_FIRST(&log->events);
+}
+
+static CXLEvent *cxl_event_get_next(CXLEvent *entry)
+{
+ return QSIMPLEQ_NEXT(entry, node);
+}
+
+static int cxl_event_count(CXLEventLog *log)
+{
+ CXLEvent *event;
+ int rc = 0;
+
+ QSIMPLEQ_FOREACH(event, &log->events, node) {
+ rc++;
+ }
+
+ return rc;
+}
+
+static bool cxl_event_empty(CXLEventLog *log)
+{
+ return QSIMPLEQ_EMPTY(&log->events);
+}
+
+static void cxl_event_delete_head(CXLDeviceState *cxlds,
+ CXLEventLogType log_type,
+ CXLEventLog *log)
+{
+ CXLEvent *entry = cxl_event_get_head(log);
+
+ reset_overflow(log);
+ QSIMPLEQ_REMOVE_HEAD(&log->events, node);
+ if (cxl_event_empty(log)) {
+ cxl_event_set_status(cxlds, log_type, false);
+ }
+ g_free(entry);
+}
+
+/*
+ * return true if an interrupt should be generated as a result
+ * of inserting this event.
+ */
+bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
+ CXLEventRecordRaw *event)
+{
+ uint64_t time;
+ CXLEventLog *log;
+ CXLEvent *entry;
+
+ if (log_type >= CXL_EVENT_TYPE_MAX) {
+ return false;
+ }
+
+ time = cxl_device_get_timestamp(cxlds);
+
+ log = &cxlds->event_logs[log_type];
+
+ QEMU_LOCK_GUARD(&log->lock);
+
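+    /* log full: track the overflow window rather than queuing the event */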
+ if (cxl_event_count(log) >= CXL_TEST_EVENT_OVERFLOW) {
+ if (log->overflow_err_count == 0) {
+ log->first_overflow_timestamp = time;
+ }
+ log->overflow_err_count++;
+ log->last_overflow_timestamp = time;
+ return false;
+ }
+
+ entry = g_new0(CXLEvent, 1);
+
+ memcpy(&entry->data, event, sizeof(*event));
+
+ entry->data.hdr.handle = cpu_to_le16(log->next_handle);
+ log->next_handle++;
+ /* 0 handle is never valid */
+ if (log->next_handle == 0) {
+ log->next_handle++;
+ }
+ entry->data.hdr.timestamp = cpu_to_le64(time);
+
+ QSIMPLEQ_INSERT_TAIL(&log->events, entry, node);
+ cxl_event_set_status(cxlds, log_type, true);
+
+ /* Count went from 0 to 1 */
+ return cxl_event_count(log) == 1;
+}
+
+CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
+ uint8_t log_type, int max_recs,
+ uint16_t *len)
+{
+ CXLEventLog *log;
+ CXLEvent *entry;
+ uint16_t nr;
+
+ if (log_type >= CXL_EVENT_TYPE_MAX) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ log = &cxlds->event_logs[log_type];
+
+ QEMU_LOCK_GUARD(&log->lock);
+
+ entry = cxl_event_get_head(log);
+ for (nr = 0; entry && nr < max_recs; nr++) {
+ memcpy(&pl->records[nr], &entry->data, CXL_EVENT_RECORD_SIZE);
+ entry = cxl_event_get_next(entry);
+ }
+
+ if (!cxl_event_empty(log)) {
+ pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
+ }
+
+ if (log->overflow_err_count) {
+ pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
+ pl->overflow_err_count = cpu_to_le16(log->overflow_err_count);
+ pl->first_overflow_timestamp = cpu_to_le64(log->first_overflow_timestamp);
+ pl->last_overflow_timestamp = cpu_to_le64(log->last_overflow_timestamp);
+ }
+
+ pl->record_count = cpu_to_le16(nr);
+ *len = CXL_EVENT_PAYLOAD_HDR_SIZE + (CXL_EVENT_RECORD_SIZE * nr);
+
+ return CXL_MBOX_SUCCESS;
+}
+
+CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds, CXLClearEventPayload *pl)
+{
+ CXLEventLog *log;
+ uint8_t log_type;
+ CXLEvent *entry;
+ int nr;
+
+ log_type = pl->event_log;
+
+ if (log_type >= CXL_EVENT_TYPE_MAX) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ log = &cxlds->event_logs[log_type];
+
+ QEMU_LOCK_GUARD(&log->lock);
+ /*
+     * Must iterate the queue twice.
+ * "The device shall verify the event record handles specified in the input
+ * payload are in temporal order. If the device detects an older event
+ * record that will not be cleared when Clear Event Records is executed,
+ * the device shall return the Invalid Handle return code and shall not
+ * clear any of the specified event records."
+ * -- CXL 3.0 8.2.9.2.3
+ */
+ entry = cxl_event_get_head(log);
+ for (nr = 0; entry && nr < pl->nr_recs; nr++) {
+ uint16_t handle = pl->handle[nr];
+
+ /* NOTE: Both handles are little endian. */
+ if (handle == 0 || entry->data.hdr.handle != handle) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ entry = cxl_event_get_next(entry);
+ }
+
+ entry = cxl_event_get_head(log);
+ for (nr = 0; entry && nr < pl->nr_recs; nr++) {
+ cxl_event_delete_head(cxlds, log_type, log);
+ entry = cxl_event_get_head(log);
+ }
+
+ return CXL_MBOX_SUCCESS;
+}
+
+void cxl_event_irq_assert(CXLType3Dev *ct3d)
+{
+ CXLDeviceState *cxlds = &ct3d->cxl_dstate;
+ PCIDevice *pdev = &ct3d->parent_obj;
+ int i;
+
+ for (i = 0; i < CXL_EVENT_TYPE_MAX; i++) {
+ CXLEventLog *log = &cxlds->event_logs[i];
+
+ if (!log->irq_enabled || cxl_event_empty(log)) {
+ continue;
+ }
+
+        /* Notify via MSI/MSI-X; legacy IRQ is not supported */
+ if (msix_enabled(pdev)) {
+ msix_notify(pdev, log->irq_vec);
+ } else if (msi_enabled(pdev)) {
+ msi_notify(pdev, log->irq_vec);
+ }
+ }
+}
#include "qemu/osdep.h"
#include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_events.h"
#include "hw/pci/pci.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#define GET_PARTITION_INFO 0x0
#define GET_LSA 0x2
#define SET_LSA 0x3
+ MEDIA_AND_POISON = 0x43,
+ #define GET_POISON_LIST 0x0
+ #define INJECT_POISON 0x1
+ #define CLEAR_POISON 0x2
};
-/* 8.2.8.4.5.1 Command Return Codes */
-typedef enum {
- CXL_MBOX_SUCCESS = 0x0,
- CXL_MBOX_BG_STARTED = 0x1,
- CXL_MBOX_INVALID_INPUT = 0x2,
- CXL_MBOX_UNSUPPORTED = 0x3,
- CXL_MBOX_INTERNAL_ERROR = 0x4,
- CXL_MBOX_RETRY_REQUIRED = 0x5,
- CXL_MBOX_BUSY = 0x6,
- CXL_MBOX_MEDIA_DISABLED = 0x7,
- CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8,
- CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9,
- CXL_MBOX_FW_AUTH_FAILED = 0xa,
- CXL_MBOX_FW_INVALID_SLOT = 0xb,
- CXL_MBOX_FW_ROLLEDBACK = 0xc,
- CXL_MBOX_FW_REST_REQD = 0xd,
- CXL_MBOX_INVALID_HANDLE = 0xe,
- CXL_MBOX_INVALID_PA = 0xf,
- CXL_MBOX_INJECT_POISON_LIMIT = 0x10,
- CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11,
- CXL_MBOX_ABORTED = 0x12,
- CXL_MBOX_INVALID_SECURITY_STATE = 0x13,
- CXL_MBOX_INCORRECT_PASSPHRASE = 0x14,
- CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
- CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
- CXL_MBOX_MAX = 0x17
-} CXLRetCode;
-
struct cxl_cmd;
typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd,
CXLDeviceState *cxl_dstate, uint16_t *len);
uint8_t *payload;
};
-#define DEFINE_MAILBOX_HANDLER_ZEROED(name, size) \
- uint16_t __zero##name = size; \
- static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \
- CXLDeviceState *cxl_dstate, uint16_t *len) \
- { \
- *len = __zero##name; \
- memset(cmd->payload, 0, *len); \
- return CXL_MBOX_SUCCESS; \
+static CXLRetCode cmd_events_get_records(struct cxl_cmd *cmd,
+ CXLDeviceState *cxlds,
+ uint16_t *len)
+{
+ CXLGetEventPayload *pl;
+ uint8_t log_type;
+ int max_recs;
+
+ if (cmd->in < sizeof(log_type)) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ log_type = *((uint8_t *)cmd->payload);
+
+ pl = (CXLGetEventPayload *)cmd->payload;
+ memset(pl, 0, sizeof(*pl));
+
+ max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
+ CXL_EVENT_RECORD_SIZE;
+ if (max_recs > 0xFFFF) {
+ max_recs = 0xFFFF;
+ }
+
+ return cxl_event_get_records(cxlds, pl, log_type, max_recs, len);
+}
+
+static CXLRetCode cmd_events_clear_records(struct cxl_cmd *cmd,
+ CXLDeviceState *cxlds,
+ uint16_t *len)
+{
+ CXLClearEventPayload *pl;
+
+ pl = (CXLClearEventPayload *)cmd->payload;
+ *len = 0;
+ return cxl_event_clear_records(cxlds, pl);
+}
+
+static CXLRetCode cmd_events_get_interrupt_policy(struct cxl_cmd *cmd,
+ CXLDeviceState *cxlds,
+ uint16_t *len)
+{
+ CXLEventInterruptPolicy *policy;
+ CXLEventLog *log;
+
+ policy = (CXLEventInterruptPolicy *)cmd->payload;
+ memset(policy, 0, sizeof(*policy));
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
+ if (log->irq_enabled) {
+ policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
+ }
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
+ if (log->irq_enabled) {
+ policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
+ }
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
+ if (log->irq_enabled) {
+ policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
+ }
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
+ if (log->irq_enabled) {
+ policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
+ }
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
+ if (log->irq_enabled) {
+ /* Dynamic Capacity borrows the same vector as info */
+ policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
+ }
+
+ *len = sizeof(*policy);
+ return CXL_MBOX_SUCCESS;
+}
+
+static CXLRetCode cmd_events_set_interrupt_policy(struct cxl_cmd *cmd,
+ CXLDeviceState *cxlds,
+ uint16_t *len)
+{
+ CXLEventInterruptPolicy *policy;
+ CXLEventLog *log;
+
+ if (*len < CXL_EVENT_INT_SETTING_MIN_LEN) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
}
-#define DEFINE_MAILBOX_HANDLER_NOP(name) \
- static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \
- CXLDeviceState *cxl_dstate, uint16_t *len) \
- { \
- return CXL_MBOX_SUCCESS; \
+
+ policy = (CXLEventInterruptPolicy *)cmd->payload;
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
+ log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
+ CXL_INT_MSI_MSIX;
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
+ log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
+ CXL_INT_MSI_MSIX;
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
+ log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
+ CXL_INT_MSI_MSIX;
+
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
+ log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
+ CXL_INT_MSI_MSIX;
+
+ /* DCD is optional */
+ if (*len < sizeof(*policy)) {
+ return CXL_MBOX_SUCCESS;
}
-DEFINE_MAILBOX_HANDLER_ZEROED(events_get_records, 0x20);
-DEFINE_MAILBOX_HANDLER_NOP(events_clear_records);
-DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4);
-DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy);
+ log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
+ log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
+ CXL_INT_MSI_MSIX;
+
+ *len = sizeof(*policy);
+ return CXL_MBOX_SUCCESS;
+}
/* 8.2.9.2.1 */
static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd,
stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
+ /* 256 poison records */
+ st24_le_p(id->poison_list_max_mer, 256);
+ /* No limit - so limited by main poison record limit */
+ stw_le_p(&id->inject_poison_limit, 0);
*len = sizeof(*id);
return CXL_MBOX_SUCCESS;
return CXL_MBOX_SUCCESS;
}
+/*
+ * This is very inefficient, but good enough for now!
+ * Also the payload will always fit, so no need to handle the MORE flag and
+ * make this stateful. We may want to allow longer poison lists to aid
+ * testing of that kernel functionality.
+ */
+static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd,
+ CXLDeviceState *cxl_dstate,
+ uint16_t *len)
+{
+ struct get_poison_list_pl {
+ uint64_t pa;
+ uint64_t length;
+ } QEMU_PACKED;
+
+ struct get_poison_list_out_pl {
+ uint8_t flags;
+ uint8_t rsvd1;
+ uint64_t overflow_timestamp;
+ uint16_t count;
+ uint8_t rsvd2[0x14];
+ struct {
+ uint64_t addr;
+ uint32_t length;
+ uint32_t resv;
+ } QEMU_PACKED records[];
+ } QEMU_PACKED;
+
+ struct get_poison_list_pl *in = (void *)cmd->payload;
+ struct get_poison_list_out_pl *out = (void *)cmd->payload;
+ CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
+ uint16_t record_count = 0, i = 0;
+ uint64_t query_start, query_length;
+ CXLPoisonList *poison_list = &ct3d->poison_list;
+ CXLPoison *ent;
+ uint16_t out_pl_len;
+
+ query_start = ldq_le_p(&in->pa);
+    /* 64 byte alignment required */
+ if (query_start & 0x3f) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
+
+ QLIST_FOREACH(ent, poison_list, node) {
+ /* Check for no overlap */
+ if (ent->start >= query_start + query_length ||
+ ent->start + ent->length <= query_start) {
+ continue;
+ }
+ record_count++;
+ }
+ out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
+ assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
+
+ memset(out, 0, out_pl_len);
+ QLIST_FOREACH(ent, poison_list, node) {
+ uint64_t start, stop;
+
+ /* Check for no overlap */
+ if (ent->start >= query_start + query_length ||
+ ent->start + ent->length <= query_start) {
+ continue;
+ }
+
+ /* Deal with overlap */
+ start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
+ stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
+ query_start + query_length);
+ stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
+ stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
+ i++;
+ }
+ if (ct3d->poison_list_overflowed) {
+ out->flags = (1 << 1);
+ stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
+ }
+ stw_le_p(&out->count, record_count);
+ *len = out_pl_len;
+ return CXL_MBOX_SUCCESS;
+}
+
+static CXLRetCode cmd_media_inject_poison(struct cxl_cmd *cmd,
+ CXLDeviceState *cxl_dstate,
+ uint16_t *len_unused)
+{
+ CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
+ CXLPoisonList *poison_list = &ct3d->poison_list;
+ CXLPoison *ent;
+ struct inject_poison_pl {
+ uint64_t dpa;
+ };
+ struct inject_poison_pl *in = (void *)cmd->payload;
+ uint64_t dpa = ldq_le_p(&in->dpa);
+ CXLPoison *p;
+
+ QLIST_FOREACH(ent, poison_list, node) {
+ if (dpa >= ent->start &&
+ dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
+ return CXL_MBOX_SUCCESS;
+ }
+ }
+
+ if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
+ return CXL_MBOX_INJECT_POISON_LIMIT;
+ }
+ p = g_new0(CXLPoison, 1);
+
+ p->length = CXL_CACHE_LINE_SIZE;
+ p->start = dpa;
+ p->type = CXL_POISON_TYPE_INJECTED;
+
+ /*
+ * Possible todo: Merge with existing entry if next to it and if same type
+ */
+ QLIST_INSERT_HEAD(poison_list, p, node);
+ ct3d->poison_list_cnt++;
+
+ return CXL_MBOX_SUCCESS;
+}
+
+static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd,
+ CXLDeviceState *cxl_dstate,
+ uint16_t *len_unused)
+{
+ CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
+ CXLPoisonList *poison_list = &ct3d->poison_list;
+ CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
+ struct clear_poison_pl {
+ uint64_t dpa;
+ uint8_t data[64];
+ };
+ CXLPoison *ent;
+ uint64_t dpa;
+
+ struct clear_poison_pl *in = (void *)cmd->payload;
+
+ dpa = ldq_le_p(&in->dpa);
+ if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) {
+ return CXL_MBOX_INVALID_PA;
+ }
+
+ /* Clearing a region with no poison is not an error so always do so */
+ if (cvc->set_cacheline) {
+ if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
+ return CXL_MBOX_INTERNAL_ERROR;
+ }
+ }
+
+ QLIST_FOREACH(ent, poison_list, node) {
+ /*
+         * Test for containment within an entry. This is simpler than the
+         * general case since we clear exactly 64 bytes and entries are
+         * 64-byte aligned.
+ */
+ if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
+ break;
+ }
+ }
+ if (!ent) {
+ return CXL_MBOX_SUCCESS;
+ }
+
+ QLIST_REMOVE(ent, node);
+ ct3d->poison_list_cnt--;
+
+ if (dpa > ent->start) {
+ CXLPoison *frag;
+ /* Cannot overflow as replacing existing entry */
+
+ frag = g_new0(CXLPoison, 1);
+
+ frag->start = ent->start;
+ frag->length = dpa - ent->start;
+ frag->type = ent->type;
+
+ QLIST_INSERT_HEAD(poison_list, frag, node);
+ ct3d->poison_list_cnt++;
+ }
+
+ if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
+ CXLPoison *frag;
+
+ if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
+ cxl_set_poison_list_overflowed(ct3d);
+ } else {
+ frag = g_new0(CXLPoison, 1);
+
+ frag->start = dpa + CXL_CACHE_LINE_SIZE;
+ frag->length = ent->start + ent->length - frag->start;
+ frag->type = ent->type;
+ QLIST_INSERT_HEAD(poison_list, frag, node);
+ ct3d->poison_list_cnt++;
+ }
+ }
+    /* Any fragments have been added; free the original entry */
+ g_free(ent);
+
+ return CXL_MBOX_SUCCESS;
+}
+
#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
[EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
[EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
- cmd_events_get_interrupt_policy, 0, 0 },
+ cmd_events_get_interrupt_policy, 0, 0 },
[EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
- cmd_events_set_interrupt_policy, 4, IMMEDIATE_CONFIG_CHANGE },
+ cmd_events_set_interrupt_policy,
+ ~0, IMMEDIATE_CONFIG_CHANGE },
[FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
cmd_firmware_update_get_info, 0, 0 },
[TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
[CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
[CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
+ [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
+ cmd_media_get_poison_list, 16, 0 },
+ [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
+ cmd_media_inject_poison, 8, 0 },
+ [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
+ cmd_media_clear_poison, 72, 0 },
};
void cxl_process_mailbox(CXLDeviceState *cxl_dstate)
'cxl-mailbox-utils.c',
'cxl-host.c',
'cxl-cdat.c',
+ 'cxl-events.c',
),
if_false: files(
'cxl-host-stubs.c',
ret = vhost_dev_set_config(&g->vhost->dev, config_data,
0, sizeof(struct virtio_gpu_config),
- VHOST_SET_CONFIG_TYPE_MASTER);
+ VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("vhost-user-gpu: set device config space failed");
return;
void *pdata = NULL;
res->dmabuf_fd = -1;
- if (res->iov_cnt == 1) {
+ if (res->iov_cnt == 1 &&
+ res->iov[0].iov_len < 4096) {
pdata = res->iov[0].iov_base;
} else {
virtio_gpu_create_udmabuf(res);
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
+#include "ui/egl-helpers.h"
+
#include <virglrenderer.h>
-static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;
+#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
+static void *
+virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
+{
+ return qemu_egl_display;
+}
+#endif
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_set_scanout ss;
- struct virgl_renderer_resource_info info;
int ret;
VIRTIO_GPU_FILL_CMD(ss);
}
g->parent_obj.enable = 1;
- memset(&info, 0, sizeof(info));
-
if (ss.resource_id && ss.r.width && ss.r.height) {
+ struct virgl_renderer_resource_info info;
+ void *d3d_tex2d = NULL;
+
+#ifdef HAVE_VIRGL_D3D_INFO_EXT
+ struct virgl_renderer_resource_info_ext ext;
+ memset(&ext, 0, sizeof(ext));
+ ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
+ info = ext.base;
+ d3d_tex2d = ext.d3d_tex2d;
+#else
+ memset(&info, 0, sizeof(info));
ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
+#endif
if (ret == -1) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: illegal resource specified %d\n",
g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
info.width, info.height,
- ss.r.x, ss.r.y, ss.r.width, ss.r.height);
+ ss.r.x, ss.r.y, ss.r.width, ss.r.height,
+ d3d_tex2d);
} else {
dpy_gfx_replace_surface(
g->parent_obj.scanout[ss.scanout_id].con, NULL);
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
int ret;
+ uint32_t flags = 0;
+
+#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
+ if (qemu_egl_display) {
+ virtio_gpu_3d_cbs.version = 4;
+ virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
+ }
+#endif
+#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
+ if (qemu_egl_angle_d3d) {
+ flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
+ }
+#endif
- ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
+ ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
if (ret != 0) {
error_report("virgl could not be initialized: %d", ret);
return ret;
return height * stride;
}
+#ifdef WIN32
+static void
+win32_pixman_image_destroy(pixman_image_t *image, void *data)
+{
+ HANDLE handle = data;
+
+ qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
+}
+#endif
+
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
+ void *bits = NULL;
+#ifdef WIN32
+ bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
+ if (!bits) {
+ goto end;
+ }
+#endif
res->image = pixman_image_create_bits(pformat,
c2d.width,
c2d.height,
- NULL, 0);
+ bits, res->hostmem / c2d.height);
+#ifdef WIN32
+ if (res->image) {
+ pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
+ }
+#endif
}
+#ifdef WIN32
+end:
+#endif
if (!res->image) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: resource creation failed %d %d %d\n",
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_simple_resource *res;
- int h;
+ int h, bpp;
uint32_t src_offset, dst_offset, stride;
- int bpp;
pixman_format_code_t format;
struct virtio_gpu_transfer_to_host_2d t2d;
+ void *img_data;
VIRTIO_GPU_FILL_CMD(t2d);
virtio_gpu_t2d_bswap(&t2d);
format = pixman_image_get_format(res->image);
bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
stride = pixman_image_get_stride(res->image);
+ img_data = pixman_image_get_data(res->image);
- if (t2d.offset || t2d.r.x || t2d.r.y ||
- t2d.r.width != pixman_image_get_width(res->image)) {
- void *img_data = pixman_image_get_data(res->image);
+ if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
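+        /* partial-width rows: copy each affected scanline separately */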
for (h = 0; h < t2d.r.height; h++) {
src_offset = t2d.offset + stride * h;
dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
iov_to_buf(res->iov, res->iov_cnt, src_offset,
- (uint8_t *)img_data
- + dst_offset, t2d.r.width * bpp);
+ (uint8_t *)img_data + dst_offset,
+ t2d.r.width * bpp);
}
} else {
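+        /* full-width rows are contiguous: copy the whole rectangle at once */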
- iov_to_buf(res->iov, res->iov_cnt, 0,
- pixman_image_get_data(res->image),
- pixman_image_get_stride(res->image)
- * pixman_image_get_height(res->image));
+ src_offset = t2d.offset;
+ dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
+ iov_to_buf(res->iov, res->iov_cnt, src_offset,
+ (uint8_t *)img_data + dst_offset,
+ stride * t2d.r.height);
}
}
struct virtio_gpu_resource_flush rf;
struct virtio_gpu_scanout *scanout;
pixman_region16_t flush_region;
+ bool within_bounds = false;
+ bool update_submitted = false;
int i;
VIRTIO_GPU_FILL_CMD(rf);
rf.r.x < scanout->x + scanout->width &&
rf.r.x + rf.r.width >= scanout->x &&
rf.r.y < scanout->y + scanout->height &&
- rf.r.y + rf.r.height >= scanout->y &&
- console_has_gl(scanout->con)) {
- dpy_gl_update(scanout->con, 0, 0, scanout->width,
- scanout->height);
+ rf.r.y + rf.r.height >= scanout->y) {
+ within_bounds = true;
+
+ if (console_has_gl(scanout->con)) {
+ dpy_gl_update(scanout->con, 0, 0, scanout->width,
+ scanout->height);
+ update_submitted = true;
+ }
}
}
- return;
+
+ if (update_submitted) {
+ return;
+ }
+ if (!within_bounds) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanout"
+ " bounds for flush %d: %d %d %d %d\n",
+ __func__, rf.resource_id, rf.r.x, rf.r.y,
+ rf.r.width, rf.r.height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
}
if (!res->blob &&
if (console_has_gl(scanout->con)) {
if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
virtio_gpu_update_scanout(g, scanout_id, res, r);
- return;
+ } else {
+ *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
}
+ return;
}
data = res->blob;
*error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
+#ifdef WIN32
+ qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
+#endif
pixman_image_unref(rect);
dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_scanout *scanout;
uint32_t resource_id, pformat;
+ void *bits = NULL;
int i;
g->hostmem = 0;
g_free(res);
return -EINVAL;
}
+
+ res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
+#ifdef WIN32
+ bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
+ if (!bits) {
+ g_free(res);
+ return -EINVAL;
+ }
+#endif
res->image = pixman_image_create_bits(pformat,
res->width, res->height,
- NULL, 0);
+ bits, res->hostmem / res->height);
if (!res->image) {
g_free(res);
return -EINVAL;
}
- res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
res->addrs = g_new(uint64_t, res->iov_cnt);
res->iov = g_new(struct iovec, res->iov_cnt);
if (!scanout->ds) {
return -EINVAL;
}
+#ifdef WIN32
+ qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
+#endif
dpy_gfx_replace_surface(scanout->con, scanout->ds);
dpy_gfx_update_full(scanout->con);
n->start, size);
map.iova = n->start;
- map.size = size;
+ map.size = size - 1; /* Inclusive */
iova_tree_remove(as->iova_tree, map);
}
IntelIOMMUState *s = vtd_as->iommu_state;
uint8_t bus_n = pci_bus_num(vtd_as->bus);
VTDContextEntry ce;
+ DMAMap map = { .iova = 0, .size = HWADDR_MAX };
- /*
- * The replay can be triggered by either a invalidation or a newly
- * created entry. No matter what, we release existing mappings
- * (it means flushing caches for UNMAP-only registers).
- */
- vtd_address_space_unmap(vtd_as, n);
+    /* replay is protected by the BQL; the page walk will safely rebuild it */
+ iova_tree_remove(vtd_as->iova_tree, map);
if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" :
PCI_FUNC(vtd_as->devfn),
vtd_get_domain_id(s, &ce, vtd_as->pasid),
ce.hi, ce.lo);
- if (vtd_as_has_map_notifier(vtd_as)) {
+ if (n->notifier_flags & IOMMU_NOTIFIER_MAP) {
/* This is required only for MAP typed notifiers */
vtd_page_walk_info info = {
.hook_fn = vtd_replay_hook,
static void pc_machine_initfn(Object *obj)
{
PCMachineState *pcms = PC_MACHINE(obj);
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
#ifdef CONFIG_VMPORT
pcms->vmport = ON_OFF_AUTO_AUTO;
pcms->vmport = ON_OFF_AUTO_OFF;
#endif /* CONFIG_VMPORT */
pcms->max_ram_below_4g = 0; /* use default */
- pcms->smbios_entry_point_type = SMBIOS_ENTRY_POINT_TYPE_32;
+ pcms->smbios_entry_point_type = pcmc->default_smbios_ep_type;
/* acpi build is enabled by default if machine supports it */
- pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build;
+ pcms->acpi_build_enabled = pcmc->has_acpi_build;
pcms->smbus_enabled = true;
pcms->sata_enabled = true;
pcms->i8042_enabled = true;
mc->nvdimm_supported = true;
mc->smp_props.dies_supported = true;
mc->default_ram_id = "pc.ram";
+ pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_64;
object_class_property_add(oc, PC_MACHINE_MAX_RAM_BELOW_4G, "size",
pc_machine_get_max_ram_below_4g, pc_machine_set_max_ram_below_4g,
static void pc_i440fx_8_0_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
+
pc_i440fx_8_1_machine_options(m);
m->alias = NULL;
m->is_default = false;
compat_props_add(m->compat_props, hw_compat_8_0, hw_compat_8_0_len);
compat_props_add(m->compat_props, pc_compat_8_0, pc_compat_8_0_len);
+
+    /* For pc-i440fx-8.0 and older, use SMBIOS 2.8 by default */
+ pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_32;
}
DEFINE_I440FX_MACHINE(v8_0, "pc-i440fx-8.0", NULL,
m->default_nic = "e1000e";
m->default_kernel_irqchip_split = false;
m->no_floppy = 1;
+ m->max_cpus = 1024;
m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
- m->max_cpus = 288;
}
static void pc_q35_8_1_machine_options(MachineClass *m)
static void pc_q35_8_0_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
+
pc_q35_8_1_machine_options(m);
m->alias = NULL;
compat_props_add(m->compat_props, hw_compat_8_0, hw_compat_8_0_len);
compat_props_add(m->compat_props, pc_compat_8_0, pc_compat_8_0_len);
+
+ /* For pc-q35-8.0 and older, use SMBIOS 2.8 by default */
+ pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_32;
+ m->max_cpus = 288;
}
DEFINE_Q35_MACHINE(v8_0, "pc-q35-8.0", NULL,
ret = vhost_dev_set_config(&vhi->vhost->dev, config_data,
0, sizeof(virtio_input_config),
- VHOST_SET_CONFIG_TYPE_MASTER);
+ VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("vhost-user-input: set device config space failed");
return;
ComponentRegisters *regs = &cxl_cstate->crb;
MemoryRegion *mr = ®s->component_registers;
uint8_t *pci_conf = pci_dev->config;
- unsigned short msix_num = 1;
+ unsigned short msix_num = 6;
int i, rc;
QTAILQ_INIT(&ct3d->error_list);
if (rc) {
goto err_release_cdat;
}
+ cxl_event_init(&ct3d->cxl_dstate, 2);
return;
*/
}
+static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
+{
+ MemoryRegion *vmr = NULL, *pmr = NULL;
+ AddressSpace *as;
+
+ if (ct3d->hostvmem) {
+ vmr = host_memory_backend_get_memory(ct3d->hostvmem);
+ }
+ if (ct3d->hostpmem) {
+ pmr = host_memory_backend_get_memory(ct3d->hostpmem);
+ }
+
+ if (!vmr && !pmr) {
+ return false;
+ }
+
+ if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
+ return false;
+ }
+
+ if (vmr) {
+ if (dpa_offset < memory_region_size(vmr)) {
+ as = &ct3d->hostvmem_as;
+ } else {
+ as = &ct3d->hostpmem_as;
+ dpa_offset -= memory_region_size(vmr);
+ }
+ } else {
+ as = &ct3d->hostpmem_as;
+ }
+
+    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
+ CXL_CACHE_LINE_SIZE);
+ return true;
+}
+
+void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
+{
+ ct3d->poison_list_overflowed = true;
+ ct3d->poison_list_overflow_ts =
+ cxl_device_get_timestamp(&ct3d->cxl_dstate);
+}
+
+void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
+ Error **errp)
+{
+ Object *obj = object_resolve_path(path, NULL);
+ CXLType3Dev *ct3d;
+ CXLPoison *p;
+
+ if (length % 64) {
+ error_setg(errp, "Poison injection must be in multiples of 64 bytes");
+ return;
+ }
+ if (start % 64) {
+ error_setg(errp, "Poison start address must be 64 byte aligned");
+ return;
+ }
+ if (!obj) {
+ error_setg(errp, "Unable to resolve path");
+ return;
+ }
+ if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
+ error_setg(errp, "Path does not point to a CXL type 3 device");
+ return;
+ }
+
+ ct3d = CXL_TYPE3(obj);
+
+ QLIST_FOREACH(p, &ct3d->poison_list, node) {
+ if (((start >= p->start) && (start < p->start + p->length)) ||
+ ((start + length > p->start) &&
+ (start + length <= p->start + p->length))) {
+ error_setg(errp, "Overlap with existing poisoned region not supported");
+ return;
+ }
+ }
+
+ if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
+ cxl_set_poison_list_overflowed(ct3d);
+ return;
+ }
+
+ p = g_new0(CXLPoison, 1);
+ p->length = length;
+ p->start = start;
+ p->type = CXL_POISON_TYPE_INTERNAL; /* Different from injected via the mbox */
+
+ QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
+ ct3d->poison_list_cnt++;
+}
+
/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
CXLUncorErrorRecordList *errors,
pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
+static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
+ const QemuUUID *uuid, uint32_t flags,
+ uint8_t length, uint64_t timestamp)
+{
+ st24_le_p(&hdr->flags, flags);
+ hdr->length = length;
+ memcpy(&hdr->id, uuid, sizeof(hdr->id));
+ stq_le_p(&hdr->timestamp, timestamp);
+}
+
+static const QemuUUID gen_media_uuid = {
+ .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
+};
+
+static const QemuUUID dram_uuid = {
+ .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
+ 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
+};
+
+static const QemuUUID memory_module_uuid = {
+ .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
+ 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
+};
+
+#define CXL_GMER_VALID_CHANNEL BIT(0)
+#define CXL_GMER_VALID_RANK BIT(1)
+#define CXL_GMER_VALID_DEVICE BIT(2)
+#define CXL_GMER_VALID_COMPONENT BIT(3)
+
+static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
+{
+ switch (log) {
+ case CXL_EVENT_LOG_INFORMATIONAL:
+ return CXL_EVENT_TYPE_INFO;
+ case CXL_EVENT_LOG_WARNING:
+ return CXL_EVENT_TYPE_WARN;
+ case CXL_EVENT_LOG_FAILURE:
+ return CXL_EVENT_TYPE_FAIL;
+ case CXL_EVENT_LOG_FATAL:
+ return CXL_EVENT_TYPE_FATAL;
+    /* DCD not yet supported */
+ default:
+ return -EINVAL;
+ }
+}
+/* Component ID is device specific. Define this as a string. */
+void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
+ uint8_t flags, uint64_t dpa,
+ uint8_t descriptor, uint8_t type,
+ uint8_t transaction_type,
+ bool has_channel, uint8_t channel,
+ bool has_rank, uint8_t rank,
+ bool has_device, uint32_t device,
+ const char *component_id,
+ Error **errp)
+{
+ Object *obj = object_resolve_path(path, NULL);
+ CXLEventGenMedia gem;
+ CXLEventRecordHdr *hdr = &gem.hdr;
+ CXLDeviceState *cxlds;
+ CXLType3Dev *ct3d;
+ uint16_t valid_flags = 0;
+ uint8_t enc_log;
+ int rc;
+
+ if (!obj) {
+ error_setg(errp, "Unable to resolve path");
+ return;
+ }
+ if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
+ error_setg(errp, "Path does not point to a CXL type 3 device");
+ return;
+ }
+ ct3d = CXL_TYPE3(obj);
+ cxlds = &ct3d->cxl_dstate;
+
+ rc = ct3d_qmp_cxl_event_log_enc(log);
+ if (rc < 0) {
+ error_setg(errp, "Unhandled error log type");
+ return;
+ }
+ enc_log = rc;
+
+ memset(&gem, 0, sizeof(gem));
+ cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
+ cxl_device_get_timestamp(&ct3d->cxl_dstate));
+
+ stq_le_p(&gem.phys_addr, dpa);
+ gem.descriptor = descriptor;
+ gem.type = type;
+ gem.transaction_type = transaction_type;
+
+ if (has_channel) {
+ gem.channel = channel;
+ valid_flags |= CXL_GMER_VALID_CHANNEL;
+ }
+
+ if (has_rank) {
+ gem.rank = rank;
+ valid_flags |= CXL_GMER_VALID_RANK;
+ }
+
+ if (has_device) {
+ st24_le_p(gem.device, device);
+ valid_flags |= CXL_GMER_VALID_DEVICE;
+ }
+
+ if (component_id) {
+ strncpy((char *)gem.component_id, component_id,
+ sizeof(gem.component_id) - 1);
+ valid_flags |= CXL_GMER_VALID_COMPONENT;
+ }
+
+ stw_le_p(&gem.validity_flags, valid_flags);
+
+ if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
+ cxl_event_irq_assert(ct3d);
+ }
+}
+
+#define CXL_DRAM_VALID_CHANNEL BIT(0)
+#define CXL_DRAM_VALID_RANK BIT(1)
+#define CXL_DRAM_VALID_NIBBLE_MASK BIT(2)
+#define CXL_DRAM_VALID_BANK_GROUP BIT(3)
+#define CXL_DRAM_VALID_BANK BIT(4)
+#define CXL_DRAM_VALID_ROW BIT(5)
+#define CXL_DRAM_VALID_COLUMN BIT(6)
+#define CXL_DRAM_VALID_CORRECTION_MASK BIT(7)
+
+void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
+ uint64_t dpa, uint8_t descriptor,
+ uint8_t type, uint8_t transaction_type,
+ bool has_channel, uint8_t channel,
+ bool has_rank, uint8_t rank,
+ bool has_nibble_mask, uint32_t nibble_mask,
+ bool has_bank_group, uint8_t bank_group,
+ bool has_bank, uint8_t bank,
+ bool has_row, uint32_t row,
+ bool has_column, uint16_t column,
+ bool has_correction_mask, uint64List *correction_mask,
+ Error **errp)
+{
+ Object *obj = object_resolve_path(path, NULL);
+ CXLEventDram dram;
+ CXLEventRecordHdr *hdr = &dram.hdr;
+ CXLDeviceState *cxlds;
+ CXLType3Dev *ct3d;
+ uint16_t valid_flags = 0;
+ uint8_t enc_log;
+ int rc;
+
+ if (!obj) {
+ error_setg(errp, "Unable to resolve path");
+ return;
+ }
+ if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
+ error_setg(errp, "Path does not point to a CXL type 3 device");
+ return;
+ }
+ ct3d = CXL_TYPE3(obj);
+ cxlds = &ct3d->cxl_dstate;
+
+ rc = ct3d_qmp_cxl_event_log_enc(log);
+ if (rc < 0) {
+ error_setg(errp, "Unhandled error log type");
+ return;
+ }
+ enc_log = rc;
+
+ memset(&dram, 0, sizeof(dram));
+ cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
+ cxl_device_get_timestamp(&ct3d->cxl_dstate));
+ stq_le_p(&dram.phys_addr, dpa);
+ dram.descriptor = descriptor;
+ dram.type = type;
+ dram.transaction_type = transaction_type;
+
+ if (has_channel) {
+ dram.channel = channel;
+ valid_flags |= CXL_DRAM_VALID_CHANNEL;
+ }
+
+ if (has_rank) {
+ dram.rank = rank;
+ valid_flags |= CXL_DRAM_VALID_RANK;
+ }
+
+ if (has_nibble_mask) {
+ st24_le_p(dram.nibble_mask, nibble_mask);
+ valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
+ }
+
+ if (has_bank_group) {
+ dram.bank_group = bank_group;
+ valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
+ }
+
+ if (has_bank) {
+ dram.bank = bank;
+ valid_flags |= CXL_DRAM_VALID_BANK;
+ }
+
+ if (has_row) {
+ st24_le_p(dram.row, row);
+ valid_flags |= CXL_DRAM_VALID_ROW;
+ }
+
+ if (has_column) {
+ stw_le_p(&dram.column, column);
+ valid_flags |= CXL_DRAM_VALID_COLUMN;
+ }
+
+ if (has_correction_mask) {
+ int count = 0;
+ while (correction_mask && count < 4) {
+ stq_le_p(&dram.correction_mask[count],
+ correction_mask->value);
+ count++;
+ correction_mask = correction_mask->next;
+ }
+ valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
+ }
+
+ stw_le_p(&dram.validity_flags, valid_flags);
+
+ if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
+ cxl_event_irq_assert(ct3d);
+ }
+}
+
+void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
+ uint8_t flags, uint8_t type,
+ uint8_t health_status,
+ uint8_t media_status,
+ uint8_t additional_status,
+ uint8_t life_used,
+ int16_t temperature,
+ uint32_t dirty_shutdown_count,
+ uint32_t corrected_volatile_error_count,
+ uint32_t corrected_persistent_error_count,
+ Error **errp)
+{
+ Object *obj = object_resolve_path(path, NULL);
+ CXLEventMemoryModule module;
+ CXLEventRecordHdr *hdr = &module.hdr;
+ CXLDeviceState *cxlds;
+ CXLType3Dev *ct3d;
+ uint8_t enc_log;
+ int rc;
+
+ if (!obj) {
+ error_setg(errp, "Unable to resolve path");
+ return;
+ }
+ if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
+ error_setg(errp, "Path does not point to a CXL type 3 device");
+ return;
+ }
+ ct3d = CXL_TYPE3(obj);
+ cxlds = &ct3d->cxl_dstate;
+
+ rc = ct3d_qmp_cxl_event_log_enc(log);
+ if (rc < 0) {
+ error_setg(errp, "Unhandled error log type");
+ return;
+ }
+ enc_log = rc;
+
+ memset(&module, 0, sizeof(module));
+ cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
+ cxl_device_get_timestamp(&ct3d->cxl_dstate));
+
+ module.type = type;
+ module.health_status = health_status;
+ module.media_status = media_status;
+ module.additional_status = additional_status;
+ module.life_used = life_used;
+ stw_le_p(&module.temperature, temperature);
+ stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
+ stl_le_p(&module.corrected_volatile_error_count, corrected_volatile_error_count);
+ stl_le_p(&module.corrected_persistent_error_count, corrected_persistent_error_count);
+
+ if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
+ cxl_event_irq_assert(ct3d);
+ }
+}
+
static void ct3_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
cvc->get_lsa_size = get_lsa_size;
cvc->get_lsa = get_lsa;
cvc->set_lsa = set_lsa;
+ cvc->set_cacheline = set_cacheline;
}
static const TypeInfo ct3d_info = {
#include "qapi/error.h"
#include "qapi/qapi-commands-cxl.h"
+void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
+ uint8_t flags, uint64_t dpa,
+ uint8_t descriptor, uint8_t type,
+ uint8_t transaction_type,
+ bool has_channel, uint8_t channel,
+ bool has_rank, uint8_t rank,
+ bool has_device, uint32_t device,
+ const char *component_id,
+ Error **errp) {}
+
+void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
+ uint64_t dpa, uint8_t descriptor,
+ uint8_t type, uint8_t transaction_type,
+ bool has_channel, uint8_t channel,
+ bool has_rank, uint8_t rank,
+ bool has_nibble_mask, uint32_t nibble_mask,
+ bool has_bank_group, uint8_t bank_group,
+ bool has_bank, uint8_t bank,
+ bool has_row, uint32_t row,
+ bool has_column, uint16_t column,
+ bool has_correction_mask, uint64List *correction_mask,
+ Error **errp) {}
+
+void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
+ uint8_t flags, uint8_t type,
+ uint8_t health_status,
+ uint8_t media_status,
+ uint8_t additional_status,
+ uint8_t life_used,
+ int16_t temperature,
+ uint32_t dirty_shutdown_count,
+ uint32_t corrected_volatile_error_count,
+ uint32_t corrected_persistent_error_count,
+ Error **errp) {}
+
+void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
+ Error **errp)
+{
+ error_setg(errp, "CXL Type 3 support is not compiled in");
+}
+
void qmp_cxl_inject_uncorrectable_errors(const char *path,
CXLUncorErrorRecordList *errors,
Error **errp)
switch (nc->info->type) {
case NET_CLIENT_DRIVER_TAP:
vhost_net = tap_get_vhost_net(nc);
+ /*
+ * tap_get_vhost_net() can return NULL if a tap net-device backend is
+         * created with the 'vhost=off' option, with 'vhostforce=off', or with
+         * no vhost, vhostforce or vhostfd options at all. See
+         * net_init_tap_one(). Hence, we omit the assertion here.
+ */
break;
#ifdef CONFIG_VHOST_NET_USER
case NET_CLIENT_DRIVER_VHOST_USER:
if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
vhost_net_set_config(get_vhost_net(nc->peer),
(uint8_t *)&netcfg, 0, n->config_size,
- VHOST_SET_CONFIG_TYPE_MASTER);
+ VHOST_SET_CONFIG_TYPE_FRONTEND);
}
}
return guest_offloads_mask & features;
}
-static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
+uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
return virtio_net_guest_offloads_by_features(vdev->guest_features);
struct virtio_net_config netcfg = {};
memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
vhost_net_set_config(get_vhost_net(nc->peer),
- (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_MASTER);
+ (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_FRONTEND);
}
QTAILQ_INIT(&n->rsc_chains);
n->qdev = dev;
* subsys=<subsys_id>
* -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
* zoned=<true|false[optional]>, \
- * subsys=<subsys_id>,detached=<true|false[optional]>
+ * subsys=<subsys_id>,shared=<true|false[optional]>, \
+ * detached=<true|false[optional]>, \
+ * zoned.zone_size=<N[optional]>, \
+ * zoned.zone_capacity=<N[optional]>, \
+ * zoned.descr_ext_size=<N[optional]>, \
+ * zoned.max_active=<N[optional]>, \
+ * zoned.max_open=<N[optional]>, \
+ * zoned.cross_read=<true|false[optional]>
*
* Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
* offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
case NVME_CMD_WRITE:
case NVME_CMD_WRITE_ZEROES:
case NVME_CMD_ZONE_APPEND:
+ case NVME_CMD_COPY:
status = NVME_WRITE_FAULT;
break;
default:
}
}
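+/*
+ * Check that the cumulative number of logical blocks across the source
+ * ranges does not exceed the namespace's Maximum Copy Length (MCL).
+ */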
+static inline uint16_t nvme_check_copy_mcl(NvmeNamespace *ns,
+ NvmeCopyAIOCB *iocb, uint16_t nr)
+{
+ uint32_t copy_len = 0;
+
+ for (int idx = 0; idx < nr; idx++) {
+ uint32_t nlb;
+ nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL,
+ &nlb, NULL, NULL, NULL);
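+ /* nlb is a 0's-based value; each range covers nlb + 1 blocks */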
+ copy_len += nlb + 1;
+ }
+
+ if (copy_len > ns->id_ns.mcl) {
+ return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ }
+
+ return NVME_SUCCESS;
+}
+
static void nvme_copy_out_completed_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
}
}
+ status = nvme_check_copy_mcl(ns, iocb, nr);
+ if (status) {
+ goto invalid;
+ }
+
iocb->req = req;
iocb->ret = 0;
iocb->nr = nr;
NvmeRuHandle *ruh;
uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
g_autofree unsigned int *ruhids = NULL;
- unsigned int *ruhid;
- char *r, *p, *token;
+ unsigned int n, m, *ruhid;
+ const char *endptr, *token;
+ char *r, *p;
uint16_t *ph;
if (!ns->params.fdp.ruhs) {
/* parse the placement handle identifiers */
while ((token = qemu_strsep(&p, ";")) != NULL) {
- ns->fdp.nphs += 1;
- if (ns->fdp.nphs > NVME_FDP_MAXPIDS ||
- ns->fdp.nphs == endgrp->fdp.nruh) {
- error_setg(errp, "too many placement handles");
+ if (qemu_strtoui(token, &endptr, 0, &n) < 0) {
+ error_setg(errp, "cannot parse reclaim unit handle identifier");
free(r);
return false;
}
- if (qemu_strtoui(token, NULL, 0, ruhid++) < 0) {
- error_setg(errp, "cannot parse reclaim unit handle identifier");
- free(r);
- return false;
+ m = n;
+
+ /* parse range */
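+ /* e.g. "2-4" expands to handles 2, 3 and 4 */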
+ if (*endptr == '-') {
+ token = endptr + 1;
+
+ if (qemu_strtoui(token, NULL, 0, &m) < 0) {
+ error_setg(errp, "cannot parse reclaim unit handle identifier");
+ free(r);
+ return false;
+ }
+
+ if (m < n) {
+ error_setg(errp, "invalid reclaim unit handle identifier range");
+ free(r);
+ return false;
+ }
+ }
+
+ for (; n <= m; n++) {
+ if (ns->fdp.nphs++ == endgrp->fdp.nruh) {
+ error_setg(errp, "too many placement handles");
+ free(r);
+ return false;
+ }
+
+ *ruhid++ = n;
}
}
free(r);
+ /* verify that the ruhids are unique */
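+ /* the O(n^2) scan is fine; nphs is bounded by fdp.nruh */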
+ for (unsigned int i = 0; i < ns->fdp.nphs; i++) {
+ for (unsigned int j = i + 1; j < ns->fdp.nphs; j++) {
+ if (ruhids[i] == ruhids[j]) {
+ error_setg(errp, "duplicate reclaim unit handle identifier: %u",
+ ruhids[i]);
+ return false;
+ }
+ }
+ }
+
ph = ns->fdp.phs = g_new(uint16_t, ns->fdp.nphs);
ruhid = ruhids;
endgrp->fdp.nrg = subsys->params.fdp.nrg;
- if (!subsys->params.fdp.nruh) {
- error_setg(errp, "fdp.nruh must be non-zero");
+ if (!subsys->params.fdp.nruh ||
+ subsys->params.fdp.nruh > NVME_FDP_MAXPIDS) {
+ error_setg(errp, "fdp.nruh must be non-zero and less than %u",
+ NVME_FDP_MAXPIDS);
return false;
}
#include "migration/vmstate.h"
#include "net/net.h"
#include "sysemu/numa.h"
+#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
Error **errp)
{
- int64_t size;
+ int64_t size = 0;
g_autofree char *path = NULL;
- void *ptr;
char name[32];
const VMStateDescription *vmsd;
+ /*
+ * In case of incoming migration the ROM contents will arrive with the
+ * migration stream, so there is no reason to load the file. Nor do we
+ * want to fail if the local ROM file mismatches the specified romsize.
+ */
+ bool load_file = !runstate_check(RUN_STATE_INMIGRATE);
+
if (!pdev->romfile || !strlen(pdev->romfile)) {
return;
}
return;
}
- path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
- if (path == NULL) {
- path = g_strdup(pdev->romfile);
- }
+ if (load_file || pdev->romsize == -1) {
+ path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
+ if (path == NULL) {
+ path = g_strdup(pdev->romfile);
+ }
- size = get_image_size(path);
- if (size < 0) {
- error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
- return;
- } else if (size == 0) {
- error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
- return;
- } else if (size > 2 * GiB) {
- error_setg(errp, "romfile \"%s\" too large (size cannot exceed 2 GiB)",
- pdev->romfile);
- return;
- }
- if (pdev->romsize != -1) {
- if (size > pdev->romsize) {
- error_setg(errp, "romfile \"%s\" (%u bytes) "
- "is too large for ROM size %u",
- pdev->romfile, (uint32_t)size, pdev->romsize);
+ size = get_image_size(path);
+ if (size < 0) {
+ error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
+ return;
+ } else if (size == 0) {
+ error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
+ return;
+ } else if (size > 2 * GiB) {
+ error_setg(errp,
+ "romfile \"%s\" too large (size cannot exceed 2 GiB)",
+ pdev->romfile);
return;
}
- } else {
- pdev->romsize = pow2ceil(size);
+ if (pdev->romsize != -1) {
+ if (size > pdev->romsize) {
+ error_setg(errp, "romfile \"%s\" (%u bytes) "
+ "is too large for ROM size %u",
+ pdev->romfile, (uint32_t)size, pdev->romsize);
+ return;
+ }
+ } else {
+ pdev->romsize = pow2ceil(size);
+ }
}
vmsd = qdev_get_vmsd(DEVICE(pdev));
memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
&error_fatal);
- ptr = memory_region_get_ram_ptr(&pdev->rom);
- if (load_image_size(path, ptr, size) < 0) {
- error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
- return;
- }
+ if (load_file) {
+ void *ptr = memory_region_get_ram_ptr(&pdev->rom);
- if (is_default_rom) {
- /* Only the default rom images will be patched (if needed). */
- pci_patch_ids(pdev, ptr, size);
+ if (load_image_size(path, ptr, size) < 0) {
+ error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
+ return;
+ }
+
+ if (is_default_rom) {
+ /* Only the default rom images will be patched (if needed). */
+ pci_patch_ids(pdev, ptr, size);
+ }
}
pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
#include "qom/object.h"
#include "qemu/event_notifier.h"
#include "sysemu/kvm.h"
-#include "util/event_notifier-posix.c"
static void probe_pci_info(PCIDevice *dev, Error **errp);
static void proxy_device_reset(DeviceState *dev);
mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props;
mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id;
mc->numa_mem_supported = true;
+ /* platform instead of architectural choice */
+ mc->cpu_cluster_has_numa_boundary = true;
mc->default_ram_id = "riscv.spike.ram";
object_class_property_add_str(oc, "signature", NULL, spike_set_signature);
object_class_property_set_description(oc, "signature",
mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props;
mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id;
mc->numa_mem_supported = true;
+ /* platform instead of architectural choice */
+ mc->cpu_cluster_has_numa_boundary = true;
mc->default_ram_id = "riscv_virt_board.ram";
assert(!mc->get_hotplug_handler);
mc->get_hotplug_handler = virt_machine_get_hotplug_handler;
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"
depends on VIRTIO
select SCSI
+config VHOST_SCSI_COMMON
+ bool
+ depends on VIRTIO
+
config VHOST_SCSI
bool
default y
+ select VHOST_SCSI_COMMON
depends on VIRTIO && VHOST_KERNEL
config VHOST_USER_SCSI
bool
# Only PCI devices are provided for now
default y if VIRTIO_PCI
+ select VHOST_SCSI_COMMON
depends on VIRTIO && VHOST_USER && LINUX
scsi_ss = ss.source_set()
+specific_scsi_ss = ss.source_set()
+virtio_scsi_ss = ss.source_set()
+specific_virtio_scsi_ss = ss.source_set()
+
scsi_ss.add(files(
'emulation.c',
'scsi-bus.c',
scsi_ss.add(when: 'CONFIG_MEGASAS_SCSI_PCI', if_true: files('megasas.c'))
scsi_ss.add(when: 'CONFIG_MPTSAS_SCSI_PCI', if_true: files('mptsas.c', 'mptconfig.c', 'mptendian.c'))
scsi_ss.add(when: 'CONFIG_VMW_PVSCSI_SCSI_PCI', if_true: files('vmw_pvscsi.c'))
-system_ss.add_all(when: 'CONFIG_SCSI', if_true: scsi_ss)
-specific_scsi_ss = ss.source_set()
+virtio_scsi_ss.add(files('virtio-scsi-dataplane.c'))
+virtio_scsi_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi.c'))
+virtio_scsi_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-user-scsi.c'))
-virtio_scsi_ss = ss.source_set()
-virtio_scsi_ss.add(files('virtio-scsi.c', 'virtio-scsi-dataplane.c'))
-virtio_scsi_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-common.c', 'vhost-scsi.c'))
-virtio_scsi_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-scsi-common.c', 'vhost-user-scsi.c'))
-specific_scsi_ss.add_all(when: 'CONFIG_VIRTIO_SCSI', if_true: virtio_scsi_ss)
+specific_virtio_scsi_ss.add(files('virtio-scsi.c'))
+specific_virtio_scsi_ss.add(when: 'CONFIG_VHOST_SCSI_COMMON', if_true: files('vhost-scsi-common.c'))
+
+specific_scsi_ss.add_all(when: 'CONFIG_VIRTIO_SCSI', if_true: specific_virtio_scsi_ss)
+scsi_ss.add_all(when: 'CONFIG_VIRTIO_SCSI', if_true: virtio_scsi_ss)
specific_scsi_ss.add(when: 'CONFIG_SPAPR_VSCSI', if_true: files('spapr_vscsi.c'))
+system_ss.add_all(when: 'CONFIG_SCSI', if_true: scsi_ss)
specific_ss.add_all(when: 'CONFIG_SCSI', if_true: specific_scsi_ss)
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-access.h"
#include "hw/fw-path-provider.h"
#include "hw/qdev-properties.h"
#include "qemu/cutils.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/vhost-user-scsi.h"
#include "hw/virtio/virtio.h"
-#include "hw/virtio/virtio-access.h"
#include "chardev/char-fe.h"
#include "sysemu/sysemu.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-access.h"
/* Context: QEMU global mutex held */
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
s->parent_obj.conf.num_queues;
- if (!s->dataplane_started) {
+ /*
+ * Drain is called when stopping dataplane but the host notifier has
+ * already been detached. Detaching multiple times is a no-op if nothing
+ * else is monitoring the same file descriptor, but avoid it just in
+ * case.
+ *
+ * Also, don't detach if dataplane has not even been started yet because
+ * the host notifier isn't attached.
+ */
+ if (s->dataplane_stopping || !s->dataplane_started) {
return;
}
uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
s->parent_obj.conf.num_queues;
- if (!s->dataplane_started) {
+ /*
+ * Drain is called when stopping dataplane. Keep the host notifier detached
+ * so it's not left dangling after dataplane is stopped.
+ *
+ * Also, don't attach if dataplane has not even been started yet; we're
+ * not ready.
+ */
+ if (s->dataplane_stopping || !s->dataplane_started) {
return;
}
*/
#include "qemu/osdep.h"
+#include "block/block_int-common.h"
#include "qemu/units.h"
#include "cpu.h"
#include "hw/boards.h"
memory_region_add_subregion(get_system_memory(),
NIAGARA_VDISK_BASE, &s->vdisk_ram);
dinfo->is_default = 1;
- rom_add_file_fixed(blk_name(blk), NIAGARA_VDISK_BASE, -1);
+ rom_add_file_fixed(blk_bs(blk)->filename, NIAGARA_VDISK_BASE, -1);
} else {
- error_report("could not load ram disk '%s'", blk_name(blk));
+ error_report("could not load ram disk '%s'",
+ blk_bs(blk)->filename);
exit(1);
}
}
depends on VIRTIO_MEM_SUPPORTED
select MEM_DEVICE
+config VHOST_VSOCK_COMMON
+ bool
+ depends on VIRTIO
+
config VHOST_VSOCK
bool
default y
+ select VHOST_VSOCK_COMMON
depends on VIRTIO && VHOST_KERNEL
config VHOST_USER_VSOCK
bool
default y
+ select VHOST_VSOCK_COMMON
depends on VIRTIO && VHOST_USER
config VHOST_USER_I2C
softmmu_virtio_ss.add(files('virtio-bus.c'))
softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c'))
softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c'))
+softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c'))
+softmmu_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK_COMMON', if_true: files('vhost-vsock-common.c'))
+softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c'))
+softmmu_virtio_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev.c'))
specific_virtio_ss = ss.source_set()
specific_virtio_ss.add(files('virtio.c'))
specific_virtio_ss.add(files('virtio-config-io.c', 'virtio-qmp.c'))
if have_vhost
- specific_virtio_ss.add(files('vhost.c', 'vhost-backend.c', 'vhost-iova-tree.c'))
+ softmmu_virtio_ss.add(files('vhost.c'))
+ specific_virtio_ss.add(files('vhost-backend.c', 'vhost-iova-tree.c'))
if have_vhost_user
specific_virtio_ss.add(files('vhost-user.c'))
endif
endif
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c'))
-specific_virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c'))
-specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c', 'vhost-vsock-common.c'))
-specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c', 'vhost-vsock-common.c'))
+specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c'))
+specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c'))
-specific_virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c'))
-specific_virtio_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev.c'))
virtio_pci_ss = ss.source_set()
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-access.h"
#include "hw/virtio/vdpa-dev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
int ret;
ret = vhost_dev_set_config(&s->dev, s->config, 0, s->config_size,
- VHOST_SET_CONFIG_TYPE_MASTER);
+ VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("set device config space failed");
return;
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq, VhostIOVATree *iova_tree)
{
- size_t desc_size, driver_size, device_size;
+ size_t desc_size;
event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
svq->next_guest_avail_elem = NULL;
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
svq->num_free = svq->vring.num;
- driver_size = vhost_svq_driver_area_size(svq);
- device_size = vhost_svq_device_area_size(svq);
- svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);
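+ /* anonymous mmap yields page-aligned, zero-filled memory, so no memset */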
+ svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
desc_size = sizeof(vring_desc_t) * svq->vring.num;
svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
- memset(svq->vring.desc, 0, driver_size);
- svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
- memset(svq->vring.used, 0, device_size);
+ svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
svq->desc_state = g_new0(SVQDescState, svq->vring.num);
svq->desc_next = g_new0(uint16_t, svq->vring.num);
for (unsigned i = 0; i < svq->vring.num - 1; i++) {
svq->vq = NULL;
g_free(svq->desc_next);
g_free(svq->desc_state);
- qemu_vfree(svq->vring.desc);
- qemu_vfree(svq->vring.used);
+ munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
+ munmap(svq->vring.used, vhost_svq_device_area_size(svq));
event_notifier_set_handler(&svq->hdev_call, NULL);
}
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
+#include "hw/virtio/virtio-crypto.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
VHOST_USER_MAX
} VhostUserRequest;
-typedef enum VhostUserSlaveRequest {
+typedef enum VhostUserBackendRequest {
VHOST_USER_BACKEND_NONE = 0,
VHOST_USER_BACKEND_IOTLB_MSG = 1,
VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_BACKEND_MAX
-} VhostUserSlaveRequest;
+} VhostUserBackendRequest;
typedef struct VhostUserMemoryRegion {
uint64_t guest_phys_addr;
#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64
+#define VHOST_CRYPTO_ASYM_MAX_KEY_LEN 1024
typedef struct VhostUserCryptoSession {
+ uint64_t op_code;
+ union {
+ struct {
+ CryptoDevBackendSymSessionInfo session_setup_data;
+ uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
+ uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
+ } sym;
+ struct {
+ CryptoDevBackendAsymSessionInfo session_setup_data;
+ uint8_t key[VHOST_CRYPTO_ASYM_MAX_KEY_LEN];
+ } asym;
+ } u;
+
/* session id for success, -1 on errors */
int64_t session_id;
- CryptoDevBackendSymSessionInfo session_setup_data;
- uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
- uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;
static VhostUserConfig c __attribute__ ((unused));
struct vhost_dev *dev;
/* Shared between vhost devs of the same virtio device */
VhostUserState *user;
- QIOChannel *slave_ioc;
- GSource *slave_src;
+ QIOChannel *backend_ioc;
+ GSource *backend_src;
NotifierWithReturn postcopy_notifier;
struct PostCopyFD postcopy_fd;
uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
return vhost_user_write(dev, &msg, NULL, 0);
}
-static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
+static int vhost_user_backend_handle_config_change(struct vhost_dev *dev)
{
if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
return -ENOSYS;
return n;
}
-static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
+static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev,
VhostUserVringArea *area,
int fd)
{
return 0;
}
-static void close_slave_channel(struct vhost_user *u)
+static void close_backend_channel(struct vhost_user *u)
{
- g_source_destroy(u->slave_src);
- g_source_unref(u->slave_src);
- u->slave_src = NULL;
- object_unref(OBJECT(u->slave_ioc));
- u->slave_ioc = NULL;
+ g_source_destroy(u->backend_src);
+ g_source_unref(u->backend_src);
+ u->backend_src = NULL;
+ object_unref(OBJECT(u->backend_ioc));
+ u->backend_ioc = NULL;
}
-static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
+static gboolean backend_read(QIOChannel *ioc, GIOCondition condition,
gpointer opaque)
{
struct vhost_dev *dev = opaque;
ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
break;
case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG:
- ret = vhost_user_slave_handle_config_change(dev);
+ ret = vhost_user_backend_handle_config_change(dev);
break;
case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG:
- ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
+ ret = vhost_user_backend_handle_vring_host_notifier(dev, &payload.area,
fd ? fd[0] : -1);
break;
default:
goto fdcleanup;
err:
- close_slave_channel(u);
+ close_backend_channel(u);
rc = G_SOURCE_REMOVE;
fdcleanup:
return rc;
}
-static int vhost_setup_slave_channel(struct vhost_dev *dev)
+static int vhost_setup_backend_channel(struct vhost_dev *dev)
{
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_BACKEND_REQ_FD,
error_report_err(local_err);
return -ECONNREFUSED;
}
- u->slave_ioc = ioc;
- u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
+ u->backend_ioc = ioc;
+ u->backend_src = qio_channel_add_watch_source(u->backend_ioc,
G_IO_IN | G_IO_HUP,
- slave_read, dev, NULL, NULL);
+ backend_read, dev, NULL, NULL);
if (reply_supported) {
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
out:
close(sv[1]);
if (ret) {
- close_slave_channel(u);
+ close_backend_channel(u);
}
return ret;
virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
error_setg(errp, "IOMMU support requires reply-ack and "
- "slave-req protocol features.");
+ "backend-req protocol features.");
return -EINVAL;
}
}
if (dev->vq_index == 0) {
- err = vhost_setup_slave_channel(dev);
+ err = vhost_setup_backend_channel(dev);
if (err < 0) {
error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
return -EPROTO;
close(u->postcopy_fd.fd);
u->postcopy_fd.handler = NULL;
}
- if (u->slave_ioc) {
- close_slave_channel(u);
+ if (u->backend_ioc) {
+ close_backend_channel(u);
}
g_free(u->region_rb);
u->region_rb = NULL;
return ret;
}
- /* If reply_ack supported, slave has to ack specified MTU is valid */
+ /* If reply_ack is supported, the backend must ack that the MTU is valid */
if (reply_supported) {
return process_message_reply(dev, &msg);
}
int ret;
bool crypto_session = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
- CryptoDevBackendSymSessionInfo *sess_info = session_info;
+ CryptoDevBackendSessionInfo *backend_info = session_info;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
.hdr.flags = VHOST_USER_VERSION,
return -ENOTSUP;
}
- memcpy(&msg.payload.session.session_setup_data, sess_info,
- sizeof(CryptoDevBackendSymSessionInfo));
- if (sess_info->key_len) {
- memcpy(&msg.payload.session.key, sess_info->cipher_key,
- sess_info->key_len);
- }
- if (sess_info->auth_key_len > 0) {
- memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
- sess_info->auth_key_len);
+ if (backend_info->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION) {
+ CryptoDevBackendAsymSessionInfo *sess = &backend_info->u.asym_sess_info;
+ size_t keylen;
+
+ memcpy(&msg.payload.session.u.asym.session_setup_data, sess,
+ sizeof(CryptoDevBackendAsymSessionInfo));
+ if (sess->keylen) {
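+ /* reject keys that would overflow the fixed-size payload buffer */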
+ keylen = sizeof(msg.payload.session.u.asym.key);
+ if (sess->keylen > keylen) {
+ error_report("Unsupported asymmetric key size");
+ return -ENOTSUP;
+ }
+
+ memcpy(&msg.payload.session.u.asym.key, sess->key,
+ sess->keylen);
+ }
+ } else {
+ CryptoDevBackendSymSessionInfo *sess = &backend_info->u.sym_sess_info;
+ size_t keylen;
+
+ memcpy(&msg.payload.session.u.sym.session_setup_data, sess,
+ sizeof(CryptoDevBackendSymSessionInfo));
+ if (sess->key_len) {
+ keylen = sizeof(msg.payload.session.u.sym.key);
+ if (sess->key_len > keylen) {
+ error_report("Unsupported cipher key size");
+ return -ENOTSUP;
+ }
+
+ memcpy(&msg.payload.session.u.sym.key, sess->cipher_key,
+ sess->key_len);
+ }
+
+ if (sess->auth_key_len > 0) {
+ keylen = sizeof(msg.payload.session.u.sym.auth_key);
+ if (sess->auth_key_len > keylen) {
+ error_report("Unsupported auth key size");
+ return -ENOTSUP;
+ }
+
+ memcpy(&msg.payload.session.u.sym.auth_key, sess->auth_key,
+ sess->auth_key_len);
+ }
}
+
+ msg.payload.session.op_code = backend_info->op_code;
+ msg.payload.session.session_id = backend_info->session_id;
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
error_report("vhost_user_write() return %d, create session failed",
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"
-#include "hw/virtio/virtio-access.h"
/*
* Return one past the end of the end of section. Be careful with uint64_t
#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_vsock.h"
#include "qapi/error.h"
-#include "hw/virtio/virtio-access.h"
+#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/vhost.h"
#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "migration/qemu-file-types.h"
#include "sysemu/dma.h"
memset(hdev, 0, sizeof(struct vhost_dev));
}
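+
+/*
+ * Disable host notifiers for the first @nvqs virtqueues. This lets
+ * vhost_dev_enable_notifiers() unwind exactly the notifiers it managed
+ * to attach before failing partway through.
+ */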
+static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
+ VirtIODevice *vdev,
+ unsigned int nvqs)
+{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ int i, r;
+
+ /*
+ * Batch all the host notifiers in a single transaction to avoid
+ * quadratic time complexity in address_space_update_ioeventfds().
+ */
+ memory_region_transaction_begin();
+
+ for (i = 0; i < nvqs; ++i) {
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+ false);
+ if (r < 0) {
+ error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
+ }
+ assert(r >= 0);
+ }
+
+ /*
+ * The transaction expects the ioeventfds to be open when it
+ * commits. Do it now, before the cleanup loop.
+ */
+ memory_region_transaction_commit();
+
+ for (i = 0; i < nvqs; ++i) {
+ virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
+ }
+ virtio_device_release_ioeventfd(vdev);
+}
+
/* Stop processing guest IO notifications in qemu.
* Start processing them in vhost in kernel.
*/
if (r < 0) {
error_report("vhost VQ %d notifier binding failed: %d", i, -r);
memory_region_transaction_commit();
- vhost_dev_disable_notifiers(hdev, vdev);
+ vhost_dev_disable_notifiers_nvqs(hdev, vdev, i);
return r;
}
}
*/
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- int i, r;
-
- /*
- * Batch all the host notifiers in a single transaction to avoid
- * quadratic time complexity in address_space_update_ioeventfds().
- */
- memory_region_transaction_begin();
-
- for (i = 0; i < hdev->nvqs; ++i) {
- r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
- false);
- if (r < 0) {
- error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
- }
- assert (r >= 0);
- }
-
- /*
- * The transaction expects the ioeventfds to be open when it
- * commits. Do it now, before the cleanup loop.
- */
- memory_region_transaction_commit();
-
- for (i = 0; i < hdev->nvqs; ++i) {
- virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
- }
- virtio_device_release_ioeventfd(vdev);
+ vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs);
}
/* Test and clear event pending status.
r = event_notifier_init(
&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0);
if (r < 0) {
- return r;
+ VHOST_OPS_DEBUG(r, "event_notifier_init failed");
+ goto fail_vq;
}
event_notifier_test_and_clear(
&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
}
fail_mem:
+ if (vhost_dev_has_iommu(hdev)) {
+ memory_listener_unregister(&hdev->iommu_listener);
+ }
fail_features:
vdev->vhost_started = false;
hdev->started = false;
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-crypto.h"
#include "hw/qdev-properties.h"
-#include "hw/virtio/virtio-access.h"
#include "standard-headers/linux/virtio_ids.h"
#include "sysemu/cryptodev-vhost.h"
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/iov.h"
+#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci.h"
* in vfio realize
*/
s->config.bypass = s->boot_bypass;
- s->config.page_size_mask = TARGET_PAGE_MASK;
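+ /* qemu_target_page_mask() avoids relying on target-specific macros here */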
+ s->config.page_size_mask = qemu_target_page_mask();
s->config.input_range.end = UINT64_MAX;
s->config.domain_range.end = UINT32_MAX;
s->config.probe_size = VIOMMU_PROBE_SIZE;
#include "sysemu/reset.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-mem.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
* anonymous RAM. In any other case, reading unplugged *can* populate a
* fresh page, consuming actual memory.
*/
- return !qemu_ram_is_shared(rb) && rb->fd < 0 &&
+ return !qemu_ram_is_shared(rb) && qemu_ram_get_fd(rb) < 0 &&
qemu_ram_pagesize(rb) == qemu_real_host_page_size();
}
#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
}
}
-static bool virtio_mem_test_bitmap(const VirtIOMEM *vmem, uint64_t start_gpa,
- uint64_t size, bool plugged)
+static bool virtio_mem_is_range_plugged(const VirtIOMEM *vmem,
+ uint64_t start_gpa, uint64_t size)
{
const unsigned long first_bit = (start_gpa - vmem->addr) / vmem->block_size;
const unsigned long last_bit = first_bit + (size / vmem->block_size) - 1;
unsigned long found_bit;
/* We fake a shorter bitmap to avoid searching too far. */
- if (plugged) {
- found_bit = find_next_zero_bit(vmem->bitmap, last_bit + 1, first_bit);
- } else {
- found_bit = find_next_bit(vmem->bitmap, last_bit + 1, first_bit);
- }
+ found_bit = find_next_zero_bit(vmem->bitmap, last_bit + 1, first_bit);
return found_bit > last_bit;
}
-static void virtio_mem_set_bitmap(VirtIOMEM *vmem, uint64_t start_gpa,
- uint64_t size, bool plugged)
+static bool virtio_mem_is_range_unplugged(const VirtIOMEM *vmem,
+ uint64_t start_gpa, uint64_t size)
+{
+ const unsigned long first_bit = (start_gpa - vmem->addr) / vmem->block_size;
+ const unsigned long last_bit = first_bit + (size / vmem->block_size) - 1;
+ unsigned long found_bit;
+
+ /* We fake a shorter bitmap to avoid searching too far. */
+ found_bit = find_next_bit(vmem->bitmap, last_bit + 1, first_bit);
+ return found_bit > last_bit;
+}
+
+static void virtio_mem_set_range_plugged(VirtIOMEM *vmem, uint64_t start_gpa,
+ uint64_t size)
{
const unsigned long bit = (start_gpa - vmem->addr) / vmem->block_size;
const unsigned long nbits = size / vmem->block_size;
- if (plugged) {
- bitmap_set(vmem->bitmap, bit, nbits);
- } else {
- bitmap_clear(vmem->bitmap, bit, nbits);
- }
+ bitmap_set(vmem->bitmap, bit, nbits);
+}
+
+static void virtio_mem_set_range_unplugged(VirtIOMEM *vmem, uint64_t start_gpa,
+ uint64_t size)
+{
+ const unsigned long bit = (start_gpa - vmem->addr) / vmem->block_size;
+ const unsigned long nbits = size / vmem->block_size;
+
+ bitmap_clear(vmem->bitmap, bit, nbits);
}
static void virtio_mem_send_response(VirtIOMEM *vmem, VirtQueueElement *elem,
{
const uint64_t offset = start_gpa - vmem->addr;
RAMBlock *rb = vmem->memdev->mr.ram_block;
+ int ret = 0;
if (virtio_mem_is_busy()) {
return -EBUSY;
return -EBUSY;
}
virtio_mem_notify_unplug(vmem, offset, size);
- } else {
- int ret = 0;
-
- if (vmem->prealloc) {
- void *area = memory_region_get_ram_ptr(&vmem->memdev->mr) + offset;
- int fd = memory_region_get_fd(&vmem->memdev->mr);
- Error *local_err = NULL;
-
- qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err);
- if (local_err) {
- static bool warned;
-
- /*
- * Warn only once, we don't want to fill the log with these
- * warnings.
- */
- if (!warned) {
- warn_report_err(local_err);
- warned = true;
- } else {
- error_free(local_err);
- }
- ret = -EBUSY;
+ virtio_mem_set_range_unplugged(vmem, start_gpa, size);
+ return 0;
+ }
+
+ if (vmem->prealloc) {
+ void *area = memory_region_get_ram_ptr(&vmem->memdev->mr) + offset;
+ int fd = memory_region_get_fd(&vmem->memdev->mr);
+ Error *local_err = NULL;
+
+ qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err);
+ if (local_err) {
+ static bool warned;
+
+ /*
+ * Warn only once; we don't want to fill the log with these
+ * warnings.
+ */
+ if (!warned) {
+ warn_report_err(local_err);
+ warned = true;
+ } else {
+ error_free(local_err);
}
+ ret = -EBUSY;
}
- if (!ret) {
- ret = virtio_mem_notify_plug(vmem, offset, size);
- }
+ }
- if (ret) {
- /* Could be preallocation or a notifier populated memory. */
- ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
- return -EBUSY;
- }
+ if (!ret) {
+ ret = virtio_mem_notify_plug(vmem, offset, size);
}
- virtio_mem_set_bitmap(vmem, start_gpa, size, plug);
+ if (ret) {
+ /* The range may have been populated by preallocation or by a notifier. */
+ ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
+ return -EBUSY;
+ }
+
+ virtio_mem_set_range_plugged(vmem, start_gpa, size);
return 0;
}
}
/* test if really all blocks are in the opposite state */
- if (!virtio_mem_test_bitmap(vmem, gpa, size, !plug)) {
+ if ((plug && !virtio_mem_is_range_unplugged(vmem, gpa, size)) ||
+ (!plug && !virtio_mem_is_range_plugged(vmem, gpa, size))) {
return VIRTIO_MEM_RESP_ERROR;
}
return;
}
- if (virtio_mem_test_bitmap(vmem, gpa, size, true)) {
+ if (virtio_mem_is_range_plugged(vmem, gpa, size)) {
resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_PLUGGED);
- } else if (virtio_mem_test_bitmap(vmem, gpa, size, false)) {
+ } else if (virtio_mem_is_range_unplugged(vmem, gpa, size)) {
resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_UNPLUGGED);
} else {
resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_MIXED);
return false;
}
- return virtio_mem_test_bitmap(vmem, start_gpa, end_gpa - start_gpa, true);
+ return virtio_mem_is_range_plugged(vmem, start_gpa, end_gpa - start_gpa);
}
struct VirtIOMEMReplayData {
"VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio "
"device configuration space supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD, \
- "VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD: Slave fd communication "
+ "VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD: Backend fd communication "
"channel supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \
"VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified "
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
-/* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap. */
-#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes ignored. */
-#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+ | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << 1)
+
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
/* @lg_page_size contains the log2 of the page size. */
uint8_t lg_page_size;
+ /*
+ * Additional tlb flags for use by the slow path. If non-zero,
+ * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+ */
+ uint8_t slow_flags[MMU_ACCESS_COUNT];
+
/*
* Allow target-specific additions to this structure.
* This may be used to cache items from the guest cpu
* we must flush the entire tlb. The region is matched if
* (addr & large_page_mask) == large_page_addr.
*/
- target_ulong large_page_addr;
- target_ulong large_page_mask;
+ vaddr large_page_addr;
+ vaddr large_page_mask;
/* host time (in ns) at the beginning of the time window */
int64_t window_begin_ns;
/* maximum number of entries observed in the window */
#include "tcg/oversized-guest.h"
-static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
- MMUAccessType access_type)
+static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
+ MMUAccessType access_type)
{
/* Do not rearrange the CPUTLBEntry structure members. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
#endif
}
-static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
+static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
{
return tlb_read_idx(entry, MMU_DATA_STORE);
}
/* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
+ vaddr addr)
{
uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
+ vaddr addr)
{
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
* tlb_flush_page_all_cpus:
* @cpu: src CPU of the flush
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
* tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush
* the source vCPUs safe work is complete. This will depend on when
* the guests translation ends the TB.
*/
-void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus:
* Flush one page from the TLB of all CPUs, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
* complete once the source vCPUs safe work is complete. This will
* depend on when the guests translation ends the TB.
*/
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_by_mmuidx:
*
* Similar to tlb_flush_page_mask, but with a bitmap of indexes.
*/
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits);
/* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
- (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
+ (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
/**
* tlb_flush_range_by_mmuidx
* For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
* comparing only the low @bits worth of each virtual page.
*/
-void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
- target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
unsigned bits);
/* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
- target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
- target_ulong len,
+ vaddr addr,
+ vaddr len,
uint16_t idxmap,
unsigned bits);
* tlb_set_page_full:
* @cpu: CPU context
* @mmu_idx: mmu index of the tlb to modify
- * @vaddr: virtual address of the entry to add
+ * @addr: virtual address of the entry to add
* @full: the details of the tlb entry
*
* Add an entry to @cpu tlb index @mmu_idx. All of the fields of
* single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
* used by tlb_flush_page.
*/
-void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
CPUTLBEntryFull *full);
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
- * @vaddr: virtual address of page to add entry for
+ * @addr: virtual address of page to add entry for
* @paddr: physical address of the page
* @attrs: memory transaction attributes
* @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
* @size: size of the page in bytes
*
* Add an entry to this CPU's TLB (a mapping from virtual address
- * @vaddr to physical address @paddr) with the specified memory
+ * @addr to physical address @paddr) with the specified memory
* transaction attributes. This is generally called by the target CPU
* specific code after it has been called through the tlb_fill()
* entry point and performed a successful page table walk to find
* single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
* used by tlb_flush_page.
*/
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs,
- int prot, int mmu_idx, target_ulong size);
+ int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
*
* This function is equivalent to calling tlb_set_page_with_attrs()
* with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
* as a convenience for CPUs which don't use memory transaction attributes.
*/
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, target_ulong size);
+ int mmu_idx, vaddr size);
#else
static inline void tlb_init(CPUState *cpu)
{
static inline void tlb_destroy(CPUState *cpu)
{
}
-static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
- target_ulong addr)
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
- target_ulong addr, uint16_t idxmap)
+ vaddr addr, uint16_t idxmap)
{
}
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap,
unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap,
unsigned bits)
{
}
static inline void
-tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits)
{
}
-static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
- target_ulong len, uint16_t idxmap,
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
- target_ulong len,
+ vaddr addr,
+ vaddr len,
uint16_t idxmap,
unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
- target_long len,
+ vaddr addr,
+ vaddr len,
uint16_t idxmap,
unsigned bits)
{
* Finally, return the host address for a page that is backed by RAM,
* or NULL if the page requires I/O.
*/
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
-static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
-static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
* Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags.
* For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
*/
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr);
* and must be consumed or copied immediately, before any further
* access or changes to TLB @mmu_idx.
*/
-int probe_access_full(CPUArchState *env, target_ulong addr, int size,
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost,
CPUTLBEntryFull **pfull, uintptr_t retaddr);
/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(target_ulong addr);
+void tb_invalidate_phys_addr(hwaddr addr);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
*
* Note: this function can trigger an exception.
*/
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp);
/**
* Note: this function can trigger an exception.
*/
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
- target_ulong addr)
+ vaddr addr)
{
return get_page_addr_code_hostp(env, addr, NULL);
}
static inline void mmap_unlock(void) {}
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
+void tlb_set_dirty(CPUState *cpu, vaddr addr);
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
#define EXEC_TARGET_PAGE_H
size_t qemu_target_page_size(void);
+int qemu_target_page_mask(void);
int qemu_target_page_bits(void);
int qemu_target_page_bits_min(void);
* - When too many instructions have been translated.
*/
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- target_ulong pc, void *host_pc,
- const TranslatorOps *ops, DisasContextBase *db);
+ vaddr pc, void *host_pc, const TranslatorOps *ops,
+ DisasContextBase *db);
/**
* translator_use_goto_tb
* Return true if goto_tb is allowed between the current TB
* and the destination PC.
*/
-bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest);
/**
* translator_io_start
bool nvdimm_supported;
bool numa_mem_supported;
bool auto_enable_numa;
+ bool cpu_cluster_has_numa_boundary;
SMPCompatProps smp_props;
const char *default_ram_id;
ESCCChnType type;
uint8_t rx, tx;
QemuInputHandlerState *hs;
+ char *sunkbd_layout;
} ESCCChannelState;
struct ESCCState {
MMU_DATA_LOAD = 0,
MMU_DATA_STORE = 1,
MMU_INST_FETCH = 2
+#define MMU_ACCESS_COUNT 3
} MMUAccessType;
typedef struct CPUWatchpoint CPUWatchpoint;
struct kvm_dirty_gfn *kvm_dirty_gfns;
uint32_t kvm_fetch_index;
uint64_t dirty_pages;
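+    /* fd for this vCPU's KVM binary stats, obtained via KVM_GET_STATS_FD */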
+ int kvm_vcpu_stats_fd;
/* Use by accel-block: CPU is executing an ioctl() */
QemuLockCnt in_ioctl_lock;
#include "cxl_component.h"
#include "cxl_device.h"
+#define CXL_CACHE_LINE_SIZE 64
#define CXL_COMPONENT_REG_BAR_IDX 0
#define CXL_DEVICE_REG_BAR_IDX 2
#include "hw/cxl/cxl_component.h"
#include "hw/pci/pci_device.h"
#include "hw/register.h"
+#include "hw/cxl/cxl_events.h"
/*
* The following is how a CXL device's Memory Device registers are laid out.
(CXL_DEVICE_CAP_REG_SIZE + CXL_DEVICE_STATUS_REGISTERS_LENGTH + \
CXL_MAILBOX_REGISTERS_LENGTH + CXL_MEMORY_DEVICE_REGISTERS_LENGTH)
+/* 8.2.8.4.5.1 Command Return Codes */
+typedef enum {
+ CXL_MBOX_SUCCESS = 0x0,
+ CXL_MBOX_BG_STARTED = 0x1,
+ CXL_MBOX_INVALID_INPUT = 0x2,
+ CXL_MBOX_UNSUPPORTED = 0x3,
+ CXL_MBOX_INTERNAL_ERROR = 0x4,
+ CXL_MBOX_RETRY_REQUIRED = 0x5,
+ CXL_MBOX_BUSY = 0x6,
+ CXL_MBOX_MEDIA_DISABLED = 0x7,
+ CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8,
+ CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9,
+ CXL_MBOX_FW_AUTH_FAILED = 0xa,
+ CXL_MBOX_FW_INVALID_SLOT = 0xb,
+ CXL_MBOX_FW_ROLLEDBACK = 0xc,
+ CXL_MBOX_FW_REST_REQD = 0xd,
+ CXL_MBOX_INVALID_HANDLE = 0xe,
+ CXL_MBOX_INVALID_PA = 0xf,
+ CXL_MBOX_INJECT_POISON_LIMIT = 0x10,
+ CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11,
+ CXL_MBOX_ABORTED = 0x12,
+ CXL_MBOX_INVALID_SECURITY_STATE = 0x13,
+ CXL_MBOX_INCORRECT_PASSPHRASE = 0x14,
+ CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
+ CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
+ CXL_MBOX_MAX = 0x17
+} CXLRetCode;
+
+typedef struct CXLEvent {
+ CXLEventRecordRaw data;
+ QSIMPLEQ_ENTRY(CXLEvent) node;
+} CXLEvent;
+
+typedef struct CXLEventLog {
+ uint16_t next_handle;
+ uint16_t overflow_err_count;
+ uint64_t first_overflow_timestamp;
+ uint64_t last_overflow_timestamp;
+ bool irq_enabled;
+ int irq_vec;
+ QemuMutex lock;
+ QSIMPLEQ_HEAD(, CXLEvent) events;
+} CXLEventLog;
+
typedef struct cxl_device_state {
MemoryRegion device_registers;
/* mmio for device capabilities array - 8.2.8.2 */
- MemoryRegion device;
+ struct {
+ MemoryRegion device;
+ union {
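+ /* one register backing store, viewed at 8/16/32/64-bit widths */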
+ uint8_t dev_reg_state[CXL_DEVICE_STATUS_REGISTERS_LENGTH];
+ uint16_t dev_reg_state16[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 2];
+ uint32_t dev_reg_state32[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 4];
+ uint64_t dev_reg_state64[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 8];
+ };
+ uint64_t event_status;
+ };
MemoryRegion memory_device;
struct {
MemoryRegion caps;
uint64_t mem_size;
uint64_t pmem_size;
uint64_t vmem_size;
+
+ CXLEventLog event_logs[CXL_EVENT_TYPE_MAX];
} CXLDeviceState;
/* Initialize the register block for a device */
FIELD(CXL_DEV_CAP_ARRAY, CAP_VERSION, 16, 8)
FIELD(CXL_DEV_CAP_ARRAY, CAP_COUNT, 32, 16)
+void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
+ bool available);
+
/*
* Helper macro to initialize capability headers for CXL devices.
*
void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate);
void cxl_process_mailbox(CXLDeviceState *cxl_dstate);
-#define cxl_device_cap_init(dstate, reg, cap_id) \
+#define cxl_device_cap_init(dstate, reg, cap_id, ver) \
do { \
uint32_t *cap_hdrs = dstate->caps_reg_state32; \
int which = R_CXL_DEV_##reg##_CAP_HDR0; \
FIELD_DP32(cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, \
CAP_ID, cap_id); \
cap_hdrs[which] = FIELD_DP32( \
- cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, CAP_VERSION, 1); \
+ cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, CAP_VERSION, ver); \
cap_hdrs[which + 1] = \
FIELD_DP32(cap_hdrs[which + 1], CXL_DEV_##reg##_CAP_HDR1, \
CAP_OFFSET, CXL_##reg##_REGISTERS_OFFSET); \
CAP_LENGTH, CXL_##reg##_REGISTERS_LENGTH); \
} while (0)
+/* CXL 3.0 8.2.8.3.1 Event Status Register */
+REG64(CXL_DEV_EVENT_STATUS, 0)
+ FIELD(CXL_DEV_EVENT_STATUS, EVENT_STATUS, 0, 32)
+
/* CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register */
REG32(CXL_DEV_MAILBOX_CAP, 0)
FIELD(CXL_DEV_MAILBOX_CAP, PAYLOAD_SIZE, 0, 5)
typedef QTAILQ_HEAD(, CXLError) CXLErrorList;
+typedef struct CXLPoison {
+ uint64_t start, length;
+ uint8_t type;
+#define CXL_POISON_TYPE_EXTERNAL 0x1
+#define CXL_POISON_TYPE_INTERNAL 0x2
+#define CXL_POISON_TYPE_INJECTED 0x3
+ QLIST_ENTRY(CXLPoison) node;
+} CXLPoison;
+
+typedef QLIST_HEAD(, CXLPoison) CXLPoisonList;
+#define CXL_POISON_LIST_LIMIT 256
+
struct CXLType3Dev {
/* Private */
PCIDevice parent_obj;
/* Error injection */
CXLErrorList error_list;
+
+ /* Poison Injection - cache */
+ CXLPoisonList poison_list;
+ unsigned int poison_list_cnt;
+ bool poison_list_overflowed;
+ uint64_t poison_list_overflow_ts;
};
#define TYPE_CXL_TYPE3 "cxl-type3"
uint64_t offset);
void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size,
uint64_t offset);
+ bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data);
};
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds);
+void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num);
+bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
+ CXLEventRecordRaw *event);
+CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
+ uint8_t log_type, int max_recs,
+ uint16_t *len);
+CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
+ CXLClearEventPayload *pl);
+
+void cxl_event_irq_assert(CXLType3Dev *ct3d);
+
+void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d);
+
#endif
--- /dev/null
+/*
+ * QEMU CXL Events
+ *
+ * Copyright (c) 2022 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_EVENTS_H
+#define CXL_EVENTS_H
+
+#include "qemu/uuid.h"
+
+/*
+ * CXL rev 3.0 section 8.2.9.2.2; Table 8-49
+ *
+ * Define these as the bit position for the event status register for ease of
+ * setting the status.
+ */
+typedef enum CXLEventLogType {
+ CXL_EVENT_TYPE_INFO = 0,
+ CXL_EVENT_TYPE_WARN = 1,
+ CXL_EVENT_TYPE_FAIL = 2,
+ CXL_EVENT_TYPE_FATAL = 3,
+ CXL_EVENT_TYPE_DYNAMIC_CAP = 4,
+ CXL_EVENT_TYPE_MAX
+} CXLEventLogType;
+
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+#define CXL_EVENT_REC_HDR_RES_LEN 0xf
+typedef struct CXLEventRecordHdr {
+ QemuUUID id;
+ uint8_t length;
+ uint8_t flags[3];
+ uint16_t handle;
+ uint16_t related_handle;
+ uint64_t timestamp;
+ uint8_t maint_op_class;
+ uint8_t reserved[CXL_EVENT_REC_HDR_RES_LEN];
+} QEMU_PACKED CXLEventRecordHdr;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+typedef struct CXLEventRecordRaw {
+ CXLEventRecordHdr hdr;
+ uint8_t data[CXL_EVENT_RECORD_DATA_LENGTH];
+} QEMU_PACKED CXLEventRecordRaw;
+#define CXL_EVENT_RECORD_SIZE (sizeof(CXLEventRecordRaw))
+
+/*
+ * Get Event Records output payload
+ * CXL rev 3.0 section 8.2.9.2.2; Table 8-50
+ */
+#define CXL_GET_EVENT_FLAG_OVERFLOW BIT(0)
+#define CXL_GET_EVENT_FLAG_MORE_RECORDS BIT(1)
+typedef struct CXLGetEventPayload {
+ uint8_t flags;
+ uint8_t reserved1;
+ uint16_t overflow_err_count;
+ uint64_t first_overflow_timestamp;
+ uint64_t last_overflow_timestamp;
+ uint16_t record_count;
+ uint8_t reserved2[0xa];
+ CXLEventRecordRaw records[];
+} QEMU_PACKED CXLGetEventPayload;
+#define CXL_EVENT_PAYLOAD_HDR_SIZE (sizeof(CXLGetEventPayload))
+
+/*
+ * Clear Event Records input payload
+ * CXL rev 3.0 section 8.2.9.2.3; Table 8-51
+ */
+typedef struct CXLClearEventPayload {
+ uint8_t event_log; /* CXLEventLogType */
+ uint8_t clear_flags;
+ uint8_t nr_recs;
+ uint8_t reserved[3];
+ uint16_t handle[];
+} CXLClearEventPayload;
+
+/*
+ * Event Interrupt Policy
+ *
+ * CXL rev 3.0 section 8.2.9.2.4; Table 8-52
+ */
+typedef enum CXLEventIntMode {
+ CXL_INT_NONE = 0x00,
+ CXL_INT_MSI_MSIX = 0x01,
+ CXL_INT_FW = 0x02,
+ CXL_INT_RES = 0x03,
+} CXLEventIntMode;
+#define CXL_EVENT_INT_MODE_MASK 0x3
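+/* put the MSI/MSI-X vector in bits 7:4; low bits select MSI/MSI-X delivery */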
+#define CXL_EVENT_INT_SETTING(vector) \
+ ((((uint8_t)vector & 0xf) << 4) | CXL_INT_MSI_MSIX)
+typedef struct CXLEventInterruptPolicy {
+ uint8_t info_settings;
+ uint8_t warn_settings;
+ uint8_t failure_settings;
+ uint8_t fatal_settings;
+ uint8_t dyn_cap_settings;
+} QEMU_PACKED CXLEventInterruptPolicy;
+/* DCD is optional but other fields are not */
+#define CXL_EVENT_INT_SETTING_MIN_LEN 4
+
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+#define CXL_EVENT_GEN_MED_RES_SIZE 0x2e
+typedef struct CXLEventGenMedia {
+ CXLEventRecordHdr hdr;
+ uint64_t phys_addr;
+ uint8_t descriptor;
+ uint8_t type;
+ uint8_t transaction_type;
+ uint16_t validity_flags;
+ uint8_t channel;
+ uint8_t rank;
+ uint8_t device[3];
+ uint8_t component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ uint8_t reserved[CXL_EVENT_GEN_MED_RES_SIZE];
+} QEMU_PACKED CXLEventGenMedia;
+
+/*
+ * DRAM Event Record
+ * CXL Rev 3.0 Section 8.2.9.2.1.2: Table 8-44
+ * All fields little endian.
+ */
+typedef struct CXLEventDram {
+ CXLEventRecordHdr hdr;
+ uint64_t phys_addr;
+ uint8_t descriptor;
+ uint8_t type;
+ uint8_t transaction_type;
+ uint16_t validity_flags;
+ uint8_t channel;
+ uint8_t rank;
+ uint8_t nibble_mask[3];
+ uint8_t bank_group;
+ uint8_t bank;
+ uint8_t row[3];
+ uint16_t column;
+ uint64_t correction_mask[4];
+ uint8_t reserved[0x17];
+} QEMU_PACKED CXLEventDram;
+
+/*
+ * Memory Module Event Record
+ * CXL Rev 3.0 Section 8.2.9.2.1.3: Table 8-45
+ * All fields little endian.
+ */
+typedef struct CXLEventMemoryModule {
+ CXLEventRecordHdr hdr;
+ uint8_t type;
+ uint8_t health_status;
+ uint8_t media_status;
+ uint8_t additional_status;
+ uint8_t life_used;
+ int16_t temperature;
+ uint32_t dirty_shutdown_count;
+ uint32_t corrected_volatile_error_count;
+ uint32_t corrected_persistent_error_count;
+ uint8_t reserved[0x3d];
+} QEMU_PACKED CXLEventMemoryModule;
+
+#endif /* CXL_EVENTS_H */
bool smbios_defaults;
bool smbios_legacy_mode;
bool smbios_uuid_encoded;
+ SmbiosEntryPointType default_smbios_ep_type;
/* RAM / address space compat: */
bool gigabyte_align;
} VhostBackendType;
typedef enum VhostSetConfigType {
- VHOST_SET_CONFIG_TYPE_MASTER = 0,
+ VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;
unsigned int iov_cnt;
uint32_t scanout_bitmask;
pixman_image_t *image;
+#ifdef WIN32
+ HANDLE handle;
+#endif
uint64_t hostmem;
uint64_t blob_size;
unsigned out_num);
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
const char *type);
+uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n);
#endif
return !!(features & (1ULL << fbit));
}
-static inline bool virtio_vdev_has_feature(VirtIODevice *vdev,
+static inline bool virtio_vdev_has_feature(const VirtIODevice *vdev,
unsigned int fbit)
{
return virtio_has_feature(vdev->guest_features, fbit);
#undef bswap64
#define bswap64(_x) __builtin_bswap64(_x)
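+/* Byte-swap the low 24 bits of x; bits 31:24 of the result are zero. */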
+static inline uint32_t bswap24(uint32_t x)
+{
+ return (((x & 0x000000ffU) << 16) |
+ ((x & 0x0000ff00U) << 0) |
+ ((x & 0x00ff0000U) >> 16));
+}
+
static inline void bswap16s(uint16_t *s)
{
*s = __builtin_bswap16(*s);
}
+static inline void bswap24s(uint32_t *s)
+{
+ *s = bswap24(*s & 0x00ffffffU);
+}
+
static inline void bswap32s(uint32_t *s)
{
*s = __builtin_bswap32(*s);
#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
#define le_bswap(v, size) glue(__builtin_bswap, size)(v)
+#define le_bswap24(v) bswap24(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) \
do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#else
#define le_bswap(v, size) (v)
+#define le_bswap24(v) (v)
#define be_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) \
* size is:
* b: 8 bits
* w: 16 bits
+ * 24: 24 bits
* l: 32 bits
* q: 64 bits
*
__builtin_memcpy(ptr, &v, sizeof(v));
}
+static inline void st24_he_p(void *ptr, uint32_t v)
+{
+ __builtin_memcpy(ptr, &v, 3);
+}
+
static inline int ldl_he_p(const void *ptr)
{
int32_t r;
stw_he_p(ptr, le_bswap(v, 16));
}
+static inline void st24_le_p(void *ptr, uint32_t v)
+{
+ st24_he_p(ptr, le_bswap24(v));
+}
+
static inline void stl_le_p(void *ptr, uint32_t v)
{
stl_he_p(ptr, le_bswap(v, 32));
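/*
 * Illustrative sketch (standalone C, not part of the patch) of how the
 * new 24-bit helpers behave: sketch_bswap24() mirrors bswap24() above,
 * and the byte layout shown is what st24_le_p() writes for the low
 * 24 bits of a value.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_bswap24(uint32_t x)
{
    return ((x & 0x000000ffU) << 16) |
           ((x & 0x0000ff00U) << 0) |
           ((x & 0x00ff0000U) >> 16);
}

int main(void)
{
    uint32_t v = 0x123456;
    uint8_t buf[3] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff };

    /* Little-endian layout of the low 24 bits: 56 34 12 */
    printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);

    /* bswap24 reverses those three bytes: 0x123456 -> 0x563412 */
    printf("%06x\n", sketch_bswap24(v));
    return 0;
}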
* It would only fail if not called from an instrumented memory access
* which would be an abuse of the API.
*/
-bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data);
#endif /* PLUGIN_MEMORY_H */
}
#endif
-#ifdef CONFIG_PROFILER
-static inline int64_t profile_getclock(void)
-{
- return get_clock();
-}
-
-extern int64_t dev_time;
-#endif
-
#endif
win32_close_exception_handler(struct _EXCEPTION_RECORD*, void*,
struct _CONTEXT*, void*);
+void *qemu_win32_map_alloc(size_t size, HANDLE *h, Error **errp);
+void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp);
+
#ifdef __cplusplus
}
#endif
return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0;
}
-typedef struct TCGProfile {
- int64_t cpu_exec_time;
- int64_t tb_count1;
- int64_t tb_count;
- int64_t op_count; /* total insn count */
- int op_count_max; /* max insn per TB */
- int temp_count_max;
- int64_t temp_count;
- int64_t del_op_count;
- int64_t code_in_len;
- int64_t code_out_len;
- int64_t search_out_len;
- int64_t interm_time;
- int64_t code_time;
- int64_t la_time;
- int64_t opt_time;
- int64_t restore_count;
- int64_t restore_time;
- int64_t table_op_count[NB_OPS];
-} TCGProfile;
-
struct TCGContext {
uint8_t *pool_cur, *pool_end;
TCGPool *pool_first, *pool_current, *pool_first_large;
tcg_insn_unit *code_buf; /* pointer for start of tb */
tcg_insn_unit *code_ptr; /* pointer for running end of tb */
-#ifdef CONFIG_PROFILER
- TCGProfile prof;
-#endif
-
#ifdef CONFIG_DEBUG_TCG
int goto_tb_issue_mask;
const TCGOpcode *vecop_list;
return temp_tcgv_ptr(t);
}
-int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(GString *buf);
void tcg_dump_op_count(GString *buf);
#include "qom/object.h"
#include "qemu/notify.h"
#include "qapi/qapi-types-ui.h"
+#include "ui/input.h"
#ifdef CONFIG_OPENGL
# include <epoxy/gl.h>
void kbd_put_string_console(QemuConsole *s, const char *str, int len);
void kbd_put_keysym(int keysym);
+/* Touch devices */
+typedef struct touch_slot {
+ int x;
+ int y;
+ int tracking_id;
+} touch_slot;
+
+void console_handle_touch_event(QemuConsole *con,
+ struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX],
+ uint64_t num_slot,
+ int width, int height,
+ double x, double y,
+ InputMultiTouchType type,
+ Error **errp);
/* consoles */
#define TYPE_QEMU_CONSOLE "qemu-console"
uint32_t y;
uint32_t width;
uint32_t height;
+ void *d3d_tex2d;
} ScanoutTexture;
typedef struct DisplaySurface {
GLenum gltype;
GLuint texture;
#endif
+#ifdef WIN32
+ HANDLE handle;
+ uint32_t handle_offset;
+#endif
} DisplaySurface;
typedef struct QemuUIInfo {
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
/* optional (default to true if has dpy_gl_scanout_dmabuf) */
bool (*dpy_has_dmabuf)(DisplayChangeListener *dcl);
/* optional */
DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image);
DisplaySurface *qemu_create_placeholder_surface(int w, int h,
const char *msg);
+#ifdef WIN32
+void qemu_displaysurface_win32_set_handle(DisplaySurface *surface,
+ HANDLE h, uint32_t offset);
+#endif
PixelFormat qemu_default_pixelformat(int bpp);
DisplaySurface *qemu_create_displaysurface(int width, int height);
void dpy_gl_scanout_texture(QemuConsole *con,
uint32_t backing_id, bool backing_y_0_top,
uint32_t backing_width, uint32_t backing_height,
- uint32_t x, uint32_t y, uint32_t w, uint32_t h);
+ uint32_t x, uint32_t y, uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void dpy_gl_scanout_dmabuf(QemuConsole *con,
QemuDmaBuf *dmabuf);
void dpy_gl_cursor_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf,
extern EGLDisplay *qemu_egl_display;
extern EGLConfig qemu_egl_config;
extern DisplayGLMode qemu_egl_mode;
+extern bool qemu_egl_angle_d3d;
typedef struct egl_fb {
int width;
void egl_fb_setup_new_tex(egl_fb *fb, int width, int height);
void egl_fb_blit(egl_fb *dst, egl_fb *src, bool flip);
void egl_fb_read(DisplaySurface *dst, egl_fb *src);
+void egl_fb_read_rect(DisplaySurface *dst, egl_fb *src, int x, int y, int w, int h);
void egl_texture_blit(QemuGLShader *gls, egl_fb *dst, egl_fb *src, bool flip);
void egl_texture_blend(QemuGLShader *gls, egl_fb *dst, egl_fb *src, bool flip,
int x, int y, double scale_x, double scale_y);
+extern EGLContext qemu_egl_rn_ctx;
+
#ifdef CONFIG_GBM
extern int qemu_egl_rn_fd;
extern struct gbm_device *qemu_egl_rn_gbm_dev;
-extern EGLContext qemu_egl_rn_ctx;
int egl_rendernode_init(const char *rendernode, DisplayGLMode mode);
int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc,
#endif
+#ifdef WIN32
+int qemu_egl_init_dpy_win32(EGLNativeDisplayType dpy, DisplayGLMode mode);
+#endif
+
EGLContext qemu_egl_init_ctx(void);
bool qemu_egl_has_dmabuf(void);
bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp);
+const char *qemu_egl_get_error_string(void);
+
#endif /* EGL_HELPERS_H */
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf);
void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void gd_gl_area_scanout_disable(DisplayChangeListener *dcl);
void gd_gl_area_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void sdl2_gl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
switch(trapnr) {
case 0x80:
+#ifndef TARGET_X86_64
+ case EXCP_SYSCALL:
+#endif
/* linux syscall from int $0x80 */
ret = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EAX] = ret;
}
break;
-#ifndef TARGET_ABI32
+#ifdef TARGET_X86_64
case EXCP_SYSCALL:
- /* linux syscall from syscall instruction */
+ /* linux syscall from syscall instruction. */
ret = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EDI],
env->regs[R_EAX] = ret;
}
break;
-#endif
-#ifdef TARGET_X86_64
case EXCP_VSYSCALL:
emulate_vsyscall(env);
break;
gdbus_codegen_error = '@0@ uses gdbus-codegen, which does not support control flow integrity'
endif
+xml_pp = find_program('scripts/xml-preprocess.py')
+
lttng = not_found
if 'ust' in get_option('trace_backends')
lttng = dependency('lttng-ust', required: true, version: '>= 2.1',
virgl = dependency('virglrenderer',
method: 'pkg-config',
required: get_option('virglrenderer'))
+ if virgl.found()
+ config_host_data.set('HAVE_VIRGL_D3D_INFO_EXT',
+ cc.has_member('struct virgl_renderer_resource_info_ext', 'd3d_tex2d',
+ prefix: '#include <virglrenderer.h>',
+ dependencies: virgl))
+ endif
endif
blkio = not_found
if not get_option('blkio').auto() or have_block
error_message: '-display dbus requires glib>=2.64') \
.require(gdbus_codegen.found(),
error_message: gdbus_codegen_error.format('-display dbus')) \
- .require(targetos != 'windows',
- error_message: '-display dbus is not available on Windows') \
.allowed()
have_virtfs = get_option('virtfs') \
dependencies: numa))
endif
config_host_data.set('CONFIG_OPENGL', opengl.found())
-config_host_data.set('CONFIG_PROFILER', get_option('profiler'))
config_host_data.set('CONFIG_RBD', rbd.found())
config_host_data.set('CONFIG_RDMA', rdma.found())
config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack'))
config_host_data.set('CONFIG_DUP3', cc.has_function('dup3'))
config_host_data.set('CONFIG_FALLOCATE', cc.has_function('fallocate'))
config_host_data.set('CONFIG_POSIX_FALLOCATE', cc.has_function('posix_fallocate'))
+config_host_data.set('CONFIG_GETCPU', cc.has_function('getcpu', prefix: gnu_source_prefix))
+config_host_data.set('CONFIG_SCHED_GETCPU', cc.has_function('sched_getcpu', prefix: '#include <sched.h>'))
# Note that we need to specify prefix: here to avoid incorrectly
# thinking that Windows has posix_memalign()
config_host_data.set('CONFIG_POSIX_MEMALIGN', cc.has_function('posix_memalign', prefix: '#include <stdlib.h>'))
summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_common_flags)}
endif
summary_info += {'QEMU_LDFLAGS': ' '.join(qemu_ldflags)}
-summary_info += {'profiler': get_option('profiler')}
summary_info += {'link-time optimization (LTO)': get_option('b_lto')}
summary_info += {'PIE': get_option('b_pie')}
summary_info += {'static build': get_option('prefer_static')}
option('gprof', type: 'boolean', value: false,
description: 'QEMU profiling with gprof',
deprecated: true)
-option('profiler', type: 'boolean', value: false,
- description: 'profiler support')
option('slirp_smbd', type : 'feature', value : 'auto',
description: 'use smbd (at path --smbd=*) in slirp networking')
/* The device always have SVQ enabled */
bool always_svq;
+
+ /* The device can isolate CVQ in its own ASID */
+ bool cvq_isolated;
+
bool started;
} VhostVDPAState;
VIRTIO_F_VERSION_1,
VIRTIO_NET_F_CSUM,
VIRTIO_NET_F_GUEST_CSUM,
+ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_F_GSO,
VIRTIO_NET_F_GUEST_TSO4,
VIRTIO_NET_F_GUEST_TSO6,
static const uint64_t vdpa_svq_device_features =
BIT_ULL(VIRTIO_NET_F_CSUM) |
BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
BIT_ULL(VIRTIO_NET_F_MTU) |
BIT_ULL(VIRTIO_NET_F_MAC) |
BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
return s->vhost_net;
}
+static size_t vhost_vdpa_net_cvq_cmd_len(void)
+{
+ /*
+ * MAC_TABLE_SET is the ctrl command that produces the longer out buffer.
+     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
+     * The in buffer is always 1 byte, so it fits here.
+ return sizeof(struct virtio_net_ctrl_hdr) +
+ 2 * sizeof(struct virtio_net_ctrl_mac) +
+ MAC_TABLE_ENTRIES * ETH_ALEN;
+}
+
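+/*
+ * The CVQ buffers are mapped with page granularity, so round the
+ * command length up to a full host page.
+ */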
+static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
+{
+ return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
+}
+
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
uint64_t invalid_dev_features =
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
- qemu_vfree(s->cvq_cmd_out_buffer);
- qemu_vfree(s->status);
+ /*
+     * If a peer NIC is attached, do not clean up anything.
+ * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
+ * when the guest is shutting down.
+ */
+ if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
+ return;
+ }
+ munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
+ munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
if (s->vhost_net) {
vhost_net_cleanup(s->vhost_net);
g_free(s->vhost_net);
.check_peer_type = vhost_vdpa_check_peer_type,
};
-static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
+static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
+ Error **errp)
{
struct vhost_vring_state state = {
.index = vq_index,
int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
if (unlikely(r < 0)) {
- error_report("Cannot get VQ %u group: %s", vq_index,
- g_strerror(errno));
+ r = -errno;
+ error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
return r;
}
vhost_iova_tree_remove(tree, *map);
}
-static size_t vhost_vdpa_net_cvq_cmd_len(void)
-{
- /*
- * MAC_TABLE_SET is the ctrl command that produces the longer out buffer.
- * In buffer is always 1 byte, so it should fit here
- */
- return sizeof(struct virtio_net_ctrl_hdr) +
- 2 * sizeof(struct virtio_net_ctrl_mac) +
- MAC_TABLE_ENTRIES * ETH_ALEN;
-}
-
-static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
-{
- return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
-}
-
/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
bool write)
{
VhostVDPAState *s, *s0;
struct vhost_vdpa *v;
- uint64_t backend_features;
int64_t cvq_group;
- int cvq_index, r;
+ int r;
+ Error *err = NULL;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
/*
     * If we return early in these cases, SVQ will not be enabled. The migration
     * will be blocked as long as vhost-vdpa backends do not offer _F_LOG.
- *
- * Calling VHOST_GET_BACKEND_FEATURES as they are not available in v->dev
- * yet.
*/
- r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
- if (unlikely(r < 0)) {
- error_report("Cannot get vdpa backend_features: %s(%d)",
- g_strerror(errno), errno);
- return -1;
+ if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
+ return 0;
}
- if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) ||
- !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
+
+ if (!s->cvq_isolated) {
return 0;
}
- /*
- * Check if all the virtqueues of the virtio device are in a different vq
- * than the last vq. VQ group of last group passed in cvq_group.
- */
- cvq_index = v->dev->vq_index_end - 1;
- cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
+ cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
+ v->dev->vq_index_end - 1,
+ &err);
if (unlikely(cvq_group < 0)) {
+ error_report_err(err);
return cvq_group;
}
- for (int i = 0; i < cvq_index; ++i) {
- int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);
-
- if (unlikely(group < 0)) {
- return group;
- }
-
- if (group == cvq_group) {
- return 0;
- }
- }
r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
if (unlikely(r < 0)) {
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
- uint64_t features = n->parent_obj.guest_features;
- if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
n->mac, sizeof(n->mac));
const VirtIONet *n)
{
struct virtio_net_ctrl_mq mq;
- uint64_t features = n->parent_obj.guest_features;
ssize_t dev_written;
- if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
+ if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
return 0;
}
return *s->status != VIRTIO_NET_OK;
}
+static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
+ const VirtIONet *n)
+{
+ uint64_t offloads;
+ ssize_t dev_written;
+
+ if (!virtio_vdev_has_feature(&n->parent_obj,
+ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ return 0;
+ }
+
+ if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
+ /*
+         * According to the VirtIO standard, "Upon feature negotiation
+         * corresponding offload gets enabled to preserve
+         * backward compatibility".
+ *
+ * Therefore, there is no need to send this CVQ command if the
+ * driver also enables all supported offloads, which aligns with
+ * the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ return 0;
+ }
+
+ offloads = cpu_to_le64(n->curr_guest_offloads);
+ dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+ &offloads, sizeof(offloads));
+ if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+
+ return *s->status != VIRTIO_NET_OK;
+}
+
static int vhost_vdpa_net_load(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
if (unlikely(r)) {
return r;
}
+ r = vhost_vdpa_net_load_offloads(s, n);
+ if (unlikely(r)) {
+ return r;
+ }
return 0;
}
}
if (*s->status != VIRTIO_NET_OK) {
- return VIRTIO_NET_ERR;
+ goto out;
}
status = VIRTIO_NET_ERR;
.avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};
+/**
+ * Probe if CVQ is isolated
+ *
+ * @device_fd: The vdpa device fd
+ * @features: Features offered by the device.
+ * @cvq_index: The control vq pair index
+ *
+ * Returns <0 on failure, 0 if the CVQ is not isolated, and 1 if it is.
+ */
+static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
+ int cvq_index, Error **errp)
+{
+ uint64_t backend_features;
+ int64_t cvq_group;
+ uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
+ VIRTIO_CONFIG_S_DRIVER |
+ VIRTIO_CONFIG_S_FEATURES_OK;
+ int r;
+
+ ERRP_GUARD();
+
+ r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
+ if (unlikely(r < 0)) {
+ error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
+ return r;
+ }
+
+ if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
+ return 0;
+ }
+
+ r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
+    if (unlikely(r)) {
+        error_setg_errno(errp, errno, "Cannot set features");
+        goto out;
+    }
+
+ r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
+ if (unlikely(r)) {
+        error_setg_errno(errp, errno, "Cannot set device status");
+ goto out;
+ }
+
+ cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
+ if (unlikely(cvq_group < 0)) {
+ if (cvq_group != -ENOTSUP) {
+ r = cvq_group;
+ goto out;
+ }
+
+ /*
+         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
+         * supports ASID even if the parent driver does not. The CVQ cannot be
+ * isolated in this case.
+ */
+ error_free(*errp);
+ *errp = NULL;
+ r = 0;
+ goto out;
+ }
+
+ for (int i = 0; i < cvq_index; ++i) {
+ int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
+ if (unlikely(group < 0)) {
+ r = group;
+ goto out;
+ }
+
+        if (group == cvq_group) {
+ r = 0;
+ goto out;
+ }
+ }
+
+ r = 1;
+
+out:
+ status = 0;
+ ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
+ return r;
+}
+
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
const char *device,
const char *name,
bool is_datapath,
bool svq,
struct vhost_vdpa_iova_range iova_range,
- uint64_t features)
+ uint64_t features,
+ Error **errp)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
int ret = 0;
assert(name);
+ int cvq_isolated;
+
if (is_datapath) {
nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
name);
} else {
+ cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
+ queue_pair_index * 2,
+ errp);
+ if (unlikely(cvq_isolated < 0)) {
+ return NULL;
+ }
+
nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
device, name);
}
vhost_vdpa_net_valid_svq_features(features,
&s->vhost_vdpa.migration_blocker);
} else if (!is_datapath) {
- s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
- vhost_vdpa_net_cvq_cmd_page_len());
- memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
- s->status = qemu_memalign(qemu_real_host_page_size(),
- vhost_vdpa_net_cvq_cmd_page_len());
- memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());
+ s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
s->vhost_vdpa.shadow_vq_ops_opaque = s;
+ s->cvq_isolated = cvq_isolated;
/*
- * TODO: We cannot migrate devices with CVQ as there is no way to set
- * the device state (MAC, MQ, etc) before starting the datapath.
+ * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
+ * there is no way to set the device state (MAC, MQ, etc) before
+ * starting the datapath.
*
* Migration blocker ownership now belongs to s->vhost_vdpa.
*/
- error_setg(&s->vhost_vdpa.migration_blocker,
- "net vdpa cannot migrate with CVQ feature");
+ if (!svq) {
+ error_setg(&s->vhost_vdpa.migration_blocker,
+ "net vdpa cannot migrate with CVQ feature");
+ }
}
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
for (i = 0; i < queue_pairs; i++) {
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_range, features);
+ iova_range, features, errp);
if (!ncs[i])
goto err;
}
if (has_cvq) {
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
- opts->x_svq, iova_range, features);
+ opts->x_svq, iova_range, features, errp);
if (!nc)
goto err;
}
# = CXL devices
##
+##
+# @CxlEventLog:
+#
+# CXL has a number of separate event logs for different types of
+# events. Each such event log is handled and signaled independently.
+#
+# @informational: Information Event Log
+#
+# @warning: Warning Event Log
+#
+# @failure: Failure Event Log
+#
+# @fatal: Fatal Event Log
+#
+# Since: 8.1
+##
+{ 'enum': 'CxlEventLog',
+ 'data': ['informational',
+ 'warning',
+ 'failure',
+ 'fatal']
+ }
+
+##
+# @cxl-inject-general-media-event:
+#
+# Inject an event record for a General Media Event (CXL r3.0
+# 8.2.9.2.1.1). This event type is reported via one of the event logs,
+# selected with the @log parameter.
+#
+# @path: CXL type 3 device canonical QOM path
+#
+# @log: event log to add the event to
+#
+# @flags: Event Record Flags. See CXL r3.0 Table 8-42 Common Event
+# Record Format, Event Record Flags for subfield definitions.
+#
+# @dpa: Device Physical Address (relative to @path device). Note
+# lower bits include some flags. See CXL r3.0 Table 8-43 General
+# Media Event Record, Physical Address.
+#
+# @descriptor: Memory Event Descriptor with additional memory event
+# information. See CXL r3.0 Table 8-43 General Media Event
+# Record, Memory Event Descriptor for bit definitions.
+#
+# @type: Type of memory event that occurred. See CXL r3.0 Table 8-43
+# General Media Event Record, Memory Event Type for possible
+# values.
+#
+# @transaction-type: Type of first transaction that caused the event
+# to occur. See CXL r3.0 Table 8-43 General Media Event Record,
+# Transaction Type for possible values.
+#
+# @channel: The channel of the memory event location. A channel is an
+# interface that can be independently accessed for a transaction.
+#
+# @rank: The rank of the memory event location. A rank is a set of
+# memory devices on a channel that together execute a transaction.
+#
+# @device: Bitmask that represents all devices in the rank associated
+# with the memory event location.
+#
+# @component-id: Device specific component identifier for the event.
+# May describe a field replaceable sub-component of the device.
+#
+# Since: 8.1
+##
+{ 'command': 'cxl-inject-general-media-event',
+ 'data': { 'path': 'str', 'log': 'CxlEventLog', 'flags': 'uint8',
+ 'dpa': 'uint64', 'descriptor': 'uint8',
+ 'type': 'uint8', 'transaction-type': 'uint8',
+ '*channel': 'uint8', '*rank': 'uint8',
+ '*device': 'uint32', '*component-id': 'str' } }
+
+##
+# @cxl-inject-dram-event:
+#
+# Inject an event record for a DRAM Event (CXL r3.0 8.2.9.2.1.2).
+# This event type is reported via one of the event logs, selected with
+# the @log parameter.
+#
+# @path: CXL type 3 device canonical QOM path
+#
+# @log: Event log to add the event to
+#
+# @flags: Event Record Flags. See CXL r3.0 Table 8-42 Common Event
+# Record Format, Event Record Flags for subfield definitions.
+#
+# @dpa: Device Physical Address (relative to @path device). Note
+# lower bits include some flags. See CXL r3.0 Table 8-44 DRAM
+# Event Record, Physical Address.
+#
+# @descriptor: Memory Event Descriptor with additional memory event
+# information. See CXL r3.0 Table 8-44 DRAM Event Record, Memory
+# Event Descriptor for bit definitions.
+#
+# @type: Type of memory event that occurred. See CXL r3.0 Table 8-44
+# DRAM Event Record, Memory Event Type for possible values.
+#
+# @transaction-type: Type of first transaction that caused the event
+# to occur. See CXL r3.0 Table 8-44 DRAM Event Record,
+# Transaction Type for possible values.
+#
+# @channel: The channel of the memory event location. A channel is an
+# interface that can be independently accessed for a transaction.
+#
+# @rank: The rank of the memory event location. A rank is a set of
+# memory devices on a channel that together execute a transaction.
+#
+# @nibble-mask: Identifies one or more nibbles that the error affects
+#
+# @bank-group: Bank group of the memory event location, incorporating
+# a number of Banks.
+#
+# @bank: Bank of the memory event location. A single bank is accessed
+# per read or write of the memory.
+#
+# @row: Row address within the DRAM.
+#
+# @column: Column address within the DRAM.
+#
+# @correction-mask: Bits within each nibble. Used in order of bits
+# set in the nibble-mask. Up to 4 nibbles may be covered.
+#
+# Since: 8.1
+##
+{ 'command': 'cxl-inject-dram-event',
+ 'data': { 'path': 'str', 'log': 'CxlEventLog', 'flags': 'uint8',
+ 'dpa': 'uint64', 'descriptor': 'uint8',
+ 'type': 'uint8', 'transaction-type': 'uint8',
+ '*channel': 'uint8', '*rank': 'uint8', '*nibble-mask': 'uint32',
+ '*bank-group': 'uint8', '*bank': 'uint8', '*row': 'uint32',
+ '*column': 'uint16', '*correction-mask': [ 'uint64' ]
+ }}
+
+##
+# @cxl-inject-memory-module-event:
+#
+# Inject an event record for a Memory Module Event (CXL r3.0
+# 8.2.9.2.1.3). This event includes a copy of the Device Health
+# info at the time of the event.
+#
+# @path: CXL type 3 device canonical QOM path
+#
+# @log: Event Log to add the event to
+#
+# @flags: Event Record Flags. See CXL r3.0 Table 8-42 Common Event
+# Record Format, Event Record Flags for subfield definitions.
+#
+# @type: Device Event Type. See CXL r3.0 Table 8-45 Memory Module
+# Event Record for bit definitions.
+#
+# @health-status: Overall health summary bitmap. See CXL r3.0 Table
+# 8-100 Get Health Info Output Payload, Health Status for bit
+# definitions.
+#
+# @media-status: Overall media health summary. See CXL r3.0 Table
+# 8-100 Get Health Info Output Payload, Media Status for bit
+# definitions.
+#
+# @additional-status: See CXL r3.0 Table 8-100 Get Health Info Output
+# Payload, Additional Status for subfield definitions.
+#
+# @life-used: Percentage (0-100) of factory expected life span.
+#
+# @temperature: Device temperature in degrees Celsius.
+#
+# @dirty-shutdown-count: Number of times the device has been unable
+# to determine whether data loss may have occurred.
+#
+# @corrected-volatile-error-count: Total number of correctable errors
+# in volatile memory.
+#
+# @corrected-persistent-error-count: Total number of correctable
+# errors in persistent memory.
+#
+# Since: 8.1
+##
+{ 'command': 'cxl-inject-memory-module-event',
+ 'data': { 'path': 'str', 'log': 'CxlEventLog', 'flags' : 'uint8',
+ 'type': 'uint8', 'health-status': 'uint8',
+ 'media-status': 'uint8', 'additional-status': 'uint8',
+ 'life-used': 'uint8', 'temperature' : 'int16',
+ 'dirty-shutdown-count': 'uint32',
+ 'corrected-volatile-error-count': 'uint32',
+ 'corrected-persistent-error-count': 'uint32'
+ }}
+
+##
+# @cxl-inject-poison:
+#
+# Poison records indicate that a CXL memory device knows that a
+# particular memory region may be corrupted. This may be because of
+# locally detected errors (e.g. ECC failure) or poisoned writes
+# received from other components in the system. This injection
+# mechanism enables testing of the OS handling of poison records,
+# which may be queried via the CXL mailbox.
+#
+# @path: CXL type 3 device canonical QOM path
+#
+# @start: Start address; must be 64 byte aligned.
+#
+# @length: Length of poison to inject; must be a multiple of 64 bytes.
+#
+# Since: 8.1
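+#
+# Example (the QOM path is hypothetical; use the canonical path of the
+# type 3 device actually present in the machine):
+#
+# -> { "execute": "cxl-inject-poison",
+#      "arguments": { "path": "/machine/peripheral/cxl-mem0",
+#                     "start": 4096, "length": 128 } }
+# <- { "return": {} }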
+##
+{ 'command': 'cxl-inject-poison',
+ 'data': { 'path': 'str', 'start': 'uint64', 'length': 'size' }}
+
##
# @CxlUncorErrorType:
#
'if': 'CONFIG_TCG',
'features': [ 'unstable' ] }
-##
-# @x-query-profile:
-#
-# Query TCG profiling information
-#
-# Features:
-#
-# @unstable: This command is meant for debugging.
-#
-# Returns: profile information
-#
-# Since: 6.2
-##
-{ 'command': 'x-query-profile',
- 'returns': 'HumanReadableText',
- 'if': 'CONFIG_TCG',
- 'features': [ 'unstable' ] }
-
##
# @x-query-ramblock:
#
{ 'name': 'none' },
{ 'name': 'gtk', 'if': 'CONFIG_GTK' },
{ 'name': 'sdl', 'if': 'CONFIG_SDL' },
- { 'name': 'egl-headless',
- 'if': { 'all': ['CONFIG_OPENGL', 'CONFIG_GBM'] } },
+ { 'name': 'egl-headless', 'if': 'CONFIG_OPENGL' },
{ 'name': 'curses', 'if': 'CONFIG_CURSES' },
{ 'name': 'cocoa', 'if': 'CONFIG_COCOA' },
{ 'name': 'spice-app', 'if': 'CONFIG_SPICE' },
'cocoa': { 'type': 'DisplayCocoa', 'if': 'CONFIG_COCOA' },
'curses': { 'type': 'DisplayCurses', 'if': 'CONFIG_CURSES' },
'egl-headless': { 'type': 'DisplayEGLHeadless',
- 'if': { 'all': ['CONFIG_OPENGL', 'CONFIG_GBM'] } },
+ 'if': 'CONFIG_OPENGL' },
'dbus': { 'type': 'DisplayDBus', 'if': 'CONFIG_DBUS_DISPLAY' },
'sdl': { 'type': 'DisplaySDL', 'if': 'CONFIG_SDL' }
}
shift
maybe_modules="$@"
-# if not running in a git checkout, do nothing
-test "$command" = "ignore" && exit 0
-
+test -z "$maybe_modules" && exit 0
test -z "$GIT" && GIT=$(command -v git)
cd "$(dirname "$0")/.."
+no_git_error=
+if ! test -e ".git"; then
+ no_git_error='no git checkout exists'
+elif test -z "$GIT"; then
+ no_git_error='git binary not found'
+fi
+
+is_git() {
+ test -z "$no_git_error"
+}
+
update_error() {
echo "$0: $*"
echo
}
validate_error() {
- if test "$1" = "validate"; then
+ if is_git && test "$1" = "validate"; then
echo "GIT submodules checkout is out of date, and submodules"
echo "configured for validate only. Please run"
echo " scripts/git-submodule.sh update $maybe_modules"
test "$CURSTATUS" = "$OLDSTATUS"
}
-if test -n "$maybe_modules" && ! test -e ".git"
-then
- echo "$0: unexpectedly called with submodules but no git checkout exists"
- exit 1
-fi
-
-if test -n "$maybe_modules" && test -z "$GIT"
-then
- echo "$0: unexpectedly called with submodules but git binary not found"
- exit 1
+if is_git; then
+ test -e $substat || touch $substat
+ modules=""
+ for m in $maybe_modules
+ do
+ $GIT submodule status $m 1> /dev/null 2>&1
+ if test $? = 0
+ then
+ modules="$modules $m"
+            grep $m $substat > /dev/null 2>&1 || $GIT submodule status $m >> $substat
+ else
+ echo "warn: ignoring non-existent submodule $m"
+ fi
+ done
+else
+ modules=$maybe_modules
fi
-modules=""
-for m in $maybe_modules
-do
- $GIT submodule status $m 1> /dev/null 2>&1
- if test $? = 0
- then
- modules="$modules $m"
- else
- echo "warn: ignoring non-existent submodule $m"
- fi
-done
-
case "$command" in
status|validate)
- test -f "$substat" || validate_error "$command"
- test -z "$maybe_modules" && exit 0
for module in $modules; do
- check_updated $module || validate_error "$command"
+ if is_git; then
+ check_updated $module || validate_error "$command"
+ elif ! (set xyz "$module"/* && test -e "$2"); then
+ # The directory does not exist or it contains no files
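+            # ("set xyz <glob>" puts any matches in $2 and up; with no
+            # match the glob stays literal, so "test -e" fails)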
+ echo "$0: sources not available for $module and $no_git_error"
+ validate_error "$command"
+ fi
done
- exit 0
;;
+
update)
- test -e $substat || touch $substat
- test -z "$maybe_modules" && exit 0
+ is_git || {
+ echo "$0: unexpectedly called with submodules but $no_git_error"
+ exit 1
+ }
$GIT submodule update --init $modules 1>/dev/null
test $? -ne 0 && update_error "failed to update modules"
printf "%s\n" ' jemalloc/system/tcmalloc)'
printf "%s\n" ' --enable-module-upgrades try to load modules from alternate paths for'
printf "%s\n" ' upgrades'
- printf "%s\n" ' --enable-profiler profiler support'
printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and'
printf "%s\n" ' getrandom()'
printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires'
--with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;;
--enable-png) printf "%s" -Dpng=enabled ;;
--disable-png) printf "%s" -Dpng=disabled ;;
- --enable-profiler) printf "%s" -Dprofiler=true ;;
- --disable-profiler) printf "%s" -Dprofiler=false ;;
--enable-pvrdma) printf "%s" -Dpvrdma=enabled ;;
--disable-pvrdma) printf "%s" -Dpvrdma=disabled ;;
--enable-qcow1) printf "%s" -Dqcow1=enabled ;;
if stap.found()
install_data('qemu-trace-stap', install_dir: get_option('bindir'))
endif
+
+test('xml-preprocess', files('xml-preprocess-test.py'), suite: ['unit'])
--- /dev/null
+#!/usr/bin/env python3
+#
+# Copyright (c) 2023 Red Hat, Inc.
+#
+# SPDX-License-Identifier: MIT
+"""Unit tests for xml-preprocess"""
+
+import contextlib
+import importlib
+import os
+import platform
+import subprocess
+import tempfile
+import unittest
+from io import StringIO
+
+xmlpp = importlib.import_module("xml-preprocess")
+
+
+class TestXmlPreprocess(unittest.TestCase):
+ """Tests for xml-preprocess.Preprocessor"""
+
+ def test_preprocess_xml(self):
+ with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file:
+ temp_file.write("<root></root>")
+ temp_file_name = temp_file.name
+ result = xmlpp.preprocess_xml(temp_file_name)
+ self.assertEqual(result, "<root></root>")
+ os.remove(temp_file_name)
+
+ def test_save_xml(self):
+ with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file:
+ temp_file_name = temp_file.name
+ xmlpp.save_xml("<root></root>", temp_file_name)
+ self.assertTrue(os.path.isfile(temp_file_name))
+ os.remove(temp_file_name)
+
+ def test_include(self):
+ with tempfile.NamedTemporaryFile(mode="w", delete=False) as inc_file:
+ inc_file.write("<included>Content from included file</included>")
+ inc_file_name = inc_file.name
+ xml_str = f"<?include {inc_file_name} ?>"
+ expected = "<included>Content from included file</included>"
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess(xml_str)
+ self.assertEqual(result, expected)
+ os.remove(inc_file_name)
+ self.assertRaises(FileNotFoundError, xpp.preprocess, xml_str)
+
+ def test_envvar(self):
+ os.environ["TEST_ENV_VAR"] = "TestValue"
+ xml_str = "<root>$(env.TEST_ENV_VAR)</root>"
+ expected = "<root>TestValue</root>"
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess(xml_str)
+ self.assertEqual(result, expected)
+ self.assertRaises(KeyError, xpp.preprocess, "$(env.UNKNOWN)")
+
+ def test_sys_var(self):
+ xml_str = "<root>$(sys.ARCH)</root>"
+ expected = f"<root>{platform.architecture()[0]}</root>"
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess(xml_str)
+ self.assertEqual(result, expected)
+ self.assertRaises(KeyError, xpp.preprocess, "$(sys.UNKNOWN)")
+
+ def test_cus_var(self):
+ xml_str = "<root>$(var.USER)</root>"
+ expected = "<root></root>"
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess(xml_str)
+ self.assertEqual(result, expected)
+ xml_str = "<?define USER=FOO?><root>$(var.USER)</root>"
+ expected = "<root>FOO</root>"
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess(xml_str)
+ self.assertEqual(result, expected)
+
+ def test_error_warning(self):
+ xml_str = "<root><?warning \"test warn\"?></root>"
+ expected = "<root></root>"
+ xpp = xmlpp.Preprocessor()
+ out = StringIO()
+ with contextlib.redirect_stdout(out):
+ result = xpp.preprocess(xml_str)
+ self.assertEqual(result, expected)
+ self.assertEqual(out.getvalue(), "[Warning]: test warn\n")
+ self.assertRaises(RuntimeError, xpp.preprocess, "<?error \"test\"?>")
+
+ def test_cmd(self):
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess('<root><?cmd "echo hello world"?></root>')
+ self.assertEqual(result, "<root>hello world</root>")
+ self.assertRaises(
+ subprocess.CalledProcessError,
+ xpp.preprocess, '<?cmd "test-unknown-cmd"?>'
+ )
+
+ def test_foreach(self):
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess(
+ '<root><?foreach x in a;b;c?>$(var.x)<?endforeach?></root>'
+ )
+ self.assertEqual(result, "<root>abc</root>")
+
+ def test_if_elseif(self):
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess('<root><?if True?>ok<?endif?></root>')
+ self.assertEqual(result, "<root>ok</root>")
+ result = xpp.preprocess('<root><?if False?>ok<?endif?></root>')
+ self.assertEqual(result, "<root></root>")
+ result = xpp.preprocess('<root><?if True?>ok<?else?>ko<?endif?></root>')
+ self.assertEqual(result, "<root>ok</root>")
+ result = xpp.preprocess('<root><?if False?>ok<?else?>ko<?endif?></root>')
+ self.assertEqual(result, "<root>ko</root>")
+ result = xpp.preprocess(
+ '<root><?if False?>ok<?elseif True?>ok2<?else?>ko<?endif?></root>'
+ )
+ self.assertEqual(result, "<root>ok2</root>")
+ result = xpp.preprocess(
+ '<root><?if False?>ok<?elseif False?>ok<?else?>ko<?endif?></root>'
+ )
+ self.assertEqual(result, "<root>ko</root>")
+
+ def test_ifdef(self):
+ xpp = xmlpp.Preprocessor()
+ result = xpp.preprocess('<root><?ifdef USER?>ok<?else?>ko<?endif?></root>')
+ self.assertEqual(result, "<root>ko</root>")
+ result = xpp.preprocess(
+ '<?define USER=FOO?><root><?ifdef USER?>ok<?else?>ko<?endif?></root>'
+ )
+ self.assertEqual(result, "<root>ok</root>")
+
+
+if __name__ == "__main__":
+ unittest.main()
--- /dev/null
+#!/usr/bin/env python3
+#
+# Copyright (c) 2017-2019 Tony Su
+# Copyright (c) 2023 Red Hat, Inc.
+#
+# SPDX-License-Identifier: MIT
+#
+# Adapted from https://github.com/peitaosu/XML-Preprocessor
+#
+"""This is a XML Preprocessor which can be used to process your XML file before
+you use it, to process conditional statements, variables, iteration
+statements, error/warning, execute command, etc.
+
+## XML Schema
+
+### Include Files
+```
+<?include path/to/file ?>
+```
+
+### Variables
+```
+$(env.EnvironmentVariable)
+
+$(sys.SystemVariable)
+
+$(var.CustomVariable)
+```
+
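+### Definitions
+```
+<?define CustomVariable = "some value" ?>
+```
+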
+### Conditional Statements
+```
+<?if ?>
+
+<?ifdef ?>
+
+<?ifndef ?>
+
+<?else?>
+
+<?elseif ?>
+
+<?endif?>
+```
+
+### Iteration Statements
+```
+<?foreach VARNAME in 1;2;3?>
+ $(var.VARNAME)
+<?endforeach?>
+```
+
+### Errors and Warnings
+```
+<?error "This is error message!" ?>
+
+<?warning "This is warning message!" ?>
+```
+
+### Commands
+```
+<?cmd "echo hello world" ?>
+```
+"""
+
+import os
+import platform
+import re
+import subprocess
+import sys
+from typing import Optional
+from xml.dom import minidom
+
+
+class Preprocessor():
+ """This class holds the XML preprocessing state"""
+
+ def __init__(self):
+ self.sys_vars = {
+ "ARCH": platform.architecture()[0],
+ "SOURCE": os.path.abspath(__file__),
+ "CURRENT": os.getcwd(),
+ }
+ self.cus_vars = {}
+
+ def _pp_include(self, xml_str: str) -> str:
+ include_regex = r"(<\?include([\w\s\\/.:_-]+)\s*\?>)"
+ matches = re.findall(include_regex, xml_str)
+ for group_inc, group_xml in matches:
+ inc_file_path = group_xml.strip()
+ with open(inc_file_path, "r", encoding="utf-8") as inc_file:
+ inc_file_content = inc_file.read()
+ xml_str = xml_str.replace(group_inc, inc_file_content)
+ return xml_str
+
+ def _pp_env_var(self, xml_str: str) -> str:
+ envvar_regex = r"(\$\(env\.(\w+)\))"
+ matches = re.findall(envvar_regex, xml_str)
+ for group_env, group_var in matches:
+ xml_str = xml_str.replace(group_env, os.environ[group_var])
+ return xml_str
+
+ def _pp_sys_var(self, xml_str: str) -> str:
+ sysvar_regex = r"(\$\(sys\.(\w+)\))"
+ matches = re.findall(sysvar_regex, xml_str)
+ for group_sys, group_var in matches:
+ xml_str = xml_str.replace(group_sys, self.sys_vars[group_var])
+ return xml_str
+
+ def _pp_cus_var(self, xml_str: str) -> str:
+ define_regex = r"(<\?define\s*(\w+)\s*=\s*([\w\s\"]+)\s*\?>)"
+ matches = re.findall(define_regex, xml_str)
+ for group_def, group_name, group_var in matches:
+ group_name = group_name.strip()
+ group_var = group_var.strip().strip("\"")
+ self.cus_vars[group_name] = group_var
+ xml_str = xml_str.replace(group_def, "")
+ cusvar_regex = r"(\$\(var\.(\w+)\))"
+ matches = re.findall(cusvar_regex, xml_str)
+ for group_cus, group_var in matches:
+ xml_str = xml_str.replace(
+ group_cus,
+ self.cus_vars.get(group_var, "")
+ )
+ return xml_str
+
+ def _pp_foreach(self, xml_str: str) -> str:
+ foreach_regex = r"(<\?foreach\s+(\w+)\s+in\s+([\w;]+)\s*\?>(.*)<\?endforeach\?>)"
+ matches = re.findall(foreach_regex, xml_str)
+ for group_for, group_name, group_vars, group_text in matches:
+ group_texts = ""
+ for var in group_vars.split(";"):
+ self.cus_vars[group_name] = var
+ group_texts += self._pp_cus_var(group_text)
+ xml_str = xml_str.replace(group_for, group_texts)
+ return xml_str
+
+ def _pp_error_warning(self, xml_str: str) -> str:
+ error_regex = r"<\?error\s*\"([^\"]+)\"\s*\?>"
+ matches = re.findall(error_regex, xml_str)
+ for group_var in matches:
+ raise RuntimeError("[Error]: " + group_var)
+ warning_regex = r"(<\?warning\s*\"([^\"]+)\"\s*\?>)"
+ matches = re.findall(warning_regex, xml_str)
+ for group_wrn, group_var in matches:
+ print("[Warning]: " + group_var)
+ xml_str = xml_str.replace(group_wrn, "")
+ return xml_str
+
+ def _pp_if_eval(self, xml_str: str) -> str:
+ ifelif_regex = (
+ r"(<\?(if|elseif)\s*([^\"\s=<>!]+)\s*([!=<>]+)\s*\"*([^\"=<>!]+)\"*\s*\?>)"
+ )
+ matches = re.findall(ifelif_regex, xml_str)
+ for ifelif, tag, left, operator, right in matches:
+ if "<" in operator or ">" in operator:
+ result = eval(f"{left} {operator} {right}")
+ else:
+ result = eval(f'"{left}" {operator} "{right}"')
+ xml_str = xml_str.replace(ifelif, f"<?{tag} {result}?>")
+ return xml_str
+
+ def _pp_ifdef_ifndef(self, xml_str: str) -> str:
+ ifndef_regex = r"(<\?(ifdef|ifndef)\s*([\w]+)\s*\?>)"
+ matches = re.findall(ifndef_regex, xml_str)
+ for group_ifndef, group_tag, group_var in matches:
+ if group_tag == "ifdef":
+ result = group_var in self.cus_vars
+ else:
+ result = group_var not in self.cus_vars
+ xml_str = xml_str.replace(group_ifndef, f"<?if {result}?>")
+ return xml_str
+
+ def _pp_if_elseif(self, xml_str: str) -> str:
+ if_elif_else_regex = (
+ r"(<\?if\s(True|False)\?>"
+ r"(.*?)"
+ r"<\?elseif\s(True|False)\?>"
+ r"(.*?)"
+ r"<\?else\?>"
+ r"(.*?)"
+ r"<\?endif\?>)"
+ )
+ if_else_regex = (
+ r"(<\?if\s(True|False)\?>"
+ r"(.*?)"
+ r"<\?else\?>"
+ r"(.*?)"
+ r"<\?endif\?>)"
+ )
+ if_regex = r"(<\?if\s(True|False)\?>(.*?)<\?endif\?>)"
+ matches = re.findall(if_elif_else_regex, xml_str, re.DOTALL)
+ for (group_full, group_if, group_if_elif, group_elif,
+ group_elif_else, group_else) in matches:
+ result = ""
+ if group_if == "True":
+ result = group_if_elif
+ elif group_elif == "True":
+ result = group_elif_else
+ else:
+ result = group_else
+ xml_str = xml_str.replace(group_full, result)
+ matches = re.findall(if_else_regex, xml_str, re.DOTALL)
+ for group_full, group_if, group_if_else, group_else in matches:
+ result = ""
+ if group_if == "True":
+ result = group_if_else
+ else:
+ result = group_else
+ xml_str = xml_str.replace(group_full, result)
+ matches = re.findall(if_regex, xml_str, re.DOTALL)
+ for group_full, group_if, group_text in matches:
+ result = ""
+ if group_if == "True":
+ result = group_text
+ xml_str = xml_str.replace(group_full, result)
+ return xml_str
+
+ def _pp_command(self, xml_str: str) -> str:
+ cmd_regex = r"(<\?cmd\s*\"([^\"]+)\"\s*\?>)"
+ matches = re.findall(cmd_regex, xml_str)
+ for group_cmd, group_exec in matches:
+ output = subprocess.check_output(
+ group_exec, shell=True,
+ text=True, stderr=subprocess.STDOUT
+ )
+ xml_str = xml_str.replace(group_cmd, output)
+ return xml_str
+
+ def _pp_blanks(self, xml_str: str) -> str:
+ right_blank_regex = r">[\n\s\t\r]*"
+ left_blank_regex = r"[\n\s\t\r]*<"
+ xml_str = re.sub(right_blank_regex, ">", xml_str)
+ xml_str = re.sub(left_blank_regex, "<", xml_str)
+ return xml_str
+
+ def preprocess(self, xml_str: str) -> str:
+ fns = [
+ self._pp_blanks,
+ self._pp_include,
+ self._pp_foreach,
+ self._pp_env_var,
+ self._pp_sys_var,
+ self._pp_cus_var,
+ self._pp_if_eval,
+ self._pp_ifdef_ifndef,
+ self._pp_if_elseif,
+ self._pp_command,
+ self._pp_error_warning,
+ ]
+
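+        # Iterate to a fixed point: one pass can produce input for
+        # another (e.g. ifdef/ifndef rewrites to "<?if True?>", which a
+        # later _pp_if_elseif pass consumes).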
+ while True:
+ changed = False
+ for func in fns:
+ out_xml = func(xml_str)
+ if not changed and out_xml != xml_str:
+ changed = True
+ xml_str = out_xml
+ if not changed:
+ break
+
+ return xml_str
+
+
+def preprocess_xml(path: str) -> str:
+ with open(path, "r", encoding="utf-8") as original_file:
+ input_xml = original_file.read()
+
+ proc = Preprocessor()
+ return proc.preprocess(input_xml)
+
+
+def save_xml(xml_str: str, path: Optional[str]):
+ xml = minidom.parseString(xml_str)
+ with open(path, "w", encoding="utf-8") if path else sys.stdout as output_file:
+ output_file.write(xml.toprettyxml())
+
+
+def main():
+ if len(sys.argv) < 2:
+        print("Usage: xml-preprocess.py input.xml [output.xml]")
+ sys.exit(1)
+
+ output_file = None
+ if len(sys.argv) == 3:
+ output_file = sys.argv[2]
+
+ input_file = sys.argv[1]
+ output_xml = preprocess_xml(input_file)
+ save_xml(output_xml, output_file)
+
+
+if __name__ == "__main__":
+ main()
return TARGET_PAGE_SIZE;
}
+int qemu_target_page_mask(void)
+{
+ return TARGET_PAGE_MASK;
+}
+
int qemu_target_page_bits(void)
{
return TARGET_PAGE_BITS;
int qemu_main_loop(void)
{
int status = EXIT_SUCCESS;
-#ifdef CONFIG_PROFILER
- int64_t ti;
-#endif
while (!main_loop_should_exit(&status)) {
-#ifdef CONFIG_PROFILER
- ti = profile_getclock();
-#endif
main_loop_wait(false);
-#ifdef CONFIG_PROFILER
- dev_time += profile_getclock() - ti;
-#endif
}
return status;
}
/*
- * Processes a reply on the slave channel.
- * Entered with slave_mutex held and releases it before exit.
+ * Processes a reply on the backend channel.
+ * Entered with backend_mutex held and releases it before exit.
* Returns true on success.
*/
static bool
goto out;
}
- if (!vu_message_read_default(dev, dev->slave_fd, &msg_reply)) {
+ if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
goto out;
}
result = msg_reply.payload.u64 == 0;
out:
- pthread_mutex_unlock(&dev->slave_mutex);
+ pthread_mutex_unlock(&dev->backend_mutex);
return result;
}
return false;
}
- pthread_mutex_lock(&dev->slave_mutex);
- if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
- pthread_mutex_unlock(&dev->slave_mutex);
+ pthread_mutex_lock(&dev->backend_mutex);
+ if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
+ pthread_mutex_unlock(&dev->backend_mutex);
return false;
}
- /* Also unlocks the slave_mutex */
+ /* Also unlocks the backend_mutex */
return vu_process_message_reply(dev, &vmsg);
}
* a device implementation can return it in its callback
* (get_protocol_features) if it wants to use this for
* simulation, but it is otherwise not desirable (if even
- * implemented by the master.)
+ * implemented by the frontend.)
*/
uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
* of the other features are required.
* Theoretically, one could use only kick messages, or do them without
* having F_REPLY_ACK, but too many (possibly pending) messages on the
- * socket will eventually cause the master to hang, to avoid this in
+ * socket will eventually cause the frontend to hang. To avoid this in
 * scenarios where that is not desired, enforce that the settings
 * actually enable the simulation case.
*/
}
static bool
-vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
+vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
if (vmsg->fd_num != 1) {
- vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
+ vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num);
return false;
}
- if (dev->slave_fd != -1) {
- close(dev->slave_fd);
+ if (dev->backend_fd != -1) {
+ close(dev->backend_fd);
}
- dev->slave_fd = vmsg->fds[0];
- DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
+ dev->backend_fd = vmsg->fds[0];
+ DPRINT("Got backend_fd: %d\n", vmsg->fds[0]);
return false;
}
}
if (ret) {
- /* resize to zero to indicate an error to master */
+ /* resize to zero to indicate an error to frontend */
vmsg->size = 0;
}
case VHOST_USER_SET_VRING_ENABLE:
return vu_set_vring_enable_exec(dev, vmsg);
case VHOST_USER_SET_BACKEND_REQ_FD:
- return vu_set_slave_req_fd(dev, vmsg);
+ return vu_set_backend_req_fd(dev, vmsg);
case VHOST_USER_GET_CONFIG:
return vu_get_config(dev, vmsg);
case VHOST_USER_SET_CONFIG:
}
vu_close_log(dev);
- if (dev->slave_fd != -1) {
- close(dev->slave_fd);
- dev->slave_fd = -1;
+ if (dev->backend_fd != -1) {
+ close(dev->backend_fd);
+ dev->backend_fd = -1;
}
- pthread_mutex_destroy(&dev->slave_mutex);
+ pthread_mutex_destroy(&dev->backend_mutex);
if (dev->sock != -1) {
close(dev->sock);
dev->remove_watch = remove_watch;
dev->iface = iface;
dev->log_call_fd = -1;
- pthread_mutex_init(&dev->slave_mutex, NULL);
- dev->slave_fd = -1;
+ pthread_mutex_init(&dev->backend_mutex, NULL);
+ dev->backend_fd = -1;
dev->max_queues = max_queues;
dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
}
- vu_message_write(dev, dev->slave_fd, &vmsg);
+ vu_message_write(dev, dev->backend_fd, &vmsg);
if (ack) {
- vu_message_read_default(dev, dev->slave_fd, &vmsg);
+ vu_message_read_default(dev, dev->backend_fd, &vmsg);
}
return;
}
.flags = VHOST_USER_VERSION,
};
- vu_message_write(dev, dev->slave_fd, &vmsg);
+ vu_message_write(dev, dev->backend_fd, &vmsg);
}
static inline void
#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
typedef enum VhostSetConfigType {
- VHOST_SET_CONFIG_TYPE_MASTER = 0,
+ VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;
VHOST_USER_MAX
} VhostUserRequest;
-typedef enum VhostUserSlaveRequest {
+typedef enum VhostUserBackendRequest {
VHOST_USER_BACKEND_NONE = 0,
VHOST_USER_BACKEND_IOTLB_MSG = 1,
VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
VHOST_USER_BACKEND_VRING_CALL = 4,
VHOST_USER_BACKEND_VRING_ERR = 5,
VHOST_USER_BACKEND_MAX
-} VhostUserSlaveRequest;
+} VhostUserBackendRequest;
typedef struct VhostUserMemoryRegion {
uint64_t guest_phys_addr;
* Zero value indicates a vm reset happened. */
uint16_t version;
- /* The size of VuDescStateSplit array. It's equal to the virtqueue
- * size. Slave could get it from queue size field of VhostUserInflight. */
+ /*
+ * The size of VuDescStateSplit array. It's equal to the virtqueue size.
+ * Backend could get it from queue size field of VhostUserInflight.
+ */
uint16_t desc_num;
/* The head of list that track the last batch of used descriptors. */
VuVirtq *vq;
VuDevInflightInfo inflight_info;
int log_call_fd;
- /* Must be held while using slave_fd */
- pthread_mutex_t slave_mutex;
- int slave_fd;
+ /* Must be held while using backend_fd */
+ pthread_mutex_t backend_mutex;
+ int backend_fd;
uint64_t log_size;
uint8_t *log_table;
uint64_t features;
* vu_init:
* @dev: a VuDev context
* @max_queues: maximum number of virtqueues
- * @socket: the socket connected to vhost-user master
+ * @socket: the socket connected to vhost-user frontend
* @panic: a panic callback
* @set_watch: a set_watch callback
* @remove_watch: a remove_watch callback
MemTxResult response, uintptr_t retaddr);
#endif
-static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags)
{
*pc = env->pc;
*cs_base = 0;
}
#endif
-void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags);
+void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags);
enum {
QEMU_PSCI_CONDUIT_DISABLED = 0,
return true;
}
-void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *pflags)
+void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags)
{
CPUARMTBFlags flags;
TB_FLAGS_SKIP = 2,
};
-static inline void cpu_get_tb_cpu_state(CPUAVRState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUAVRState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags)
{
uint32_t flags = 0;
#include "exec/cpu-all.h"
-static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUCRISState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
FIELD(TB_FLAGS, IS_TIGHT_LOOP, 0, 1)
-static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
uint32_t hex_flags = 0;
*pc = env->gpr[HEX_REG_PC];
#define TB_FLAG_PRIV_SHIFT 8
#define TB_FLAG_UNALIGN 0x400
-static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
- target_ulong *cs_base,
- uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags)
{
uint32_t flags = env->psw_n * PSW_N;
CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER */
#ifdef TARGET_X86_64
-#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
+#define TCG_EXT2_X86_64_FEATURES CPUID_EXT2_LM
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
- TCG_EXT2_X86_64_FEATURES)
+ CPUID_EXT2_SYSCALL | TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
- CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | \
+ CPUID_EXT3_3DNOWPREFETCH)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES (CPUID_SVM_NPT | CPUID_SVM_VGIF | \
CPUID_SVM_SVME_ADDR_CHK)
CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
- CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_AVX2)
+ CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_RDSEED)
/* missing:
CPUID_7_0_EBX_HLE
- CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
- CPUID_7_0_EBX_RDSEED */
+ CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM */
+
+#if defined CONFIG_SOFTMMU || defined CONFIG_LINUX
+#define TCG_7_0_ECX_RDPID CPUID_7_0_ECX_RDPID
+#else
+#define TCG_7_0_ECX_RDPID 0
+#endif
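/*
 * RDPID needs a per-vCPU value to return: with softmmu we can hold a
 * virtual TSC_AUX, while user-only emulation falls back to the host
 * CPU number (see helper_rdpid below), which needs getcpu() or
 * sched_getcpu() and is therefore only advertised on Linux.
 */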
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | \
/* CPUID_7_0_ECX_OSPKE is dynamic */ \
- CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS | CPUID_7_0_ECX_VAES)
+ CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS | CPUID_7_0_ECX_VAES | \
+ TCG_7_0_ECX_RDPID)
+
#define TCG_7_0_EDX_FEATURES CPUID_7_0_EDX_FSRM
#define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \
CPUID_7_1_EAX_FSRC)
#define TCG_SGX_12_0_EBX_FEATURES 0
#define TCG_SGX_12_1_EAX_FEATURES 0
+#define TCG_8000_0008_EBX (CPUID_8000_0008_EBX_XSAVEERPTR | \
+ CPUID_8000_0008_EBX_WBNOINVD)
+
FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
.type = CPUID_FEATURE_WORD,
"amd-psfd", NULL, NULL, NULL,
},
.cpuid = { .eax = 0x80000008, .reg = R_EBX, },
- .tcg_features = 0,
+ .tcg_features = TCG_8000_0008_EBX,
.unmigratable_flags = 0,
},
[FEAT_8000_0021_EAX] = {
*ecx |= 1 << 1; /* CmpLegacy bit */
}
}
+ if (tcg_enabled() && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 &&
+ !(env->hflags & HF_LMA_MASK)) {
+ *edx &= ~CPUID_EXT2_SYSCALL;
+ }
break;
case 0x80000002:
case 0x80000003:
#include "hw/i386/apic.h"
#endif
-static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip;
DEF_HELPER_1(sysenter, void, env)
DEF_HELPER_2(sysexit, void, env, int)
-#ifdef TARGET_X86_64
DEF_HELPER_2(syscall, void, env, int)
DEF_HELPER_2(sysret, void, env, int)
-#endif
DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
DEF_HELPER_FLAGS_1(single_step, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_1(rechecking_single_step, void, env)
DEF_HELPER_1(cpuid, void, env)
+DEF_HELPER_FLAGS_1(rdpid, TCG_CALL_NO_WG, tl, env)
DEF_HELPER_1(rdtsc, void, env)
-DEF_HELPER_1(rdtscp, void, env)
DEF_HELPER_FLAGS_1(rdpmc, TCG_CALL_NO_WG, noreturn, env)
#ifndef CONFIG_USER_ONLY
env->regs[R_EDX] = (uint32_t)(val >> 32);
}
-void helper_rdtscp(CPUX86State *env)
-{
- helper_rdtsc(env);
- env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
-}
-
G_NORETURN void helper_rdpmc(CPUX86State *env)
{
if (((env->cr[4] & CR4_PCE_MASK) == 0 ) &&
env->pkru = val;
tlb_flush(cs);
}
+
+target_ulong HELPER(rdpid)(CPUX86State *env)
+{
+#if defined CONFIG_SOFTMMU
+ return env->tsc_aux;
+#elif defined CONFIG_LINUX && defined CONFIG_GETCPU
+ unsigned cpu, node;
+ getcpu(&cpu, &node);
+ return (node << 12) | (cpu & 0xfff);
+#elif defined CONFIG_SCHED_GETCPU
+ return sched_getcpu();
+#else
+ return 0;
+#endif
+}
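/*
 * Illustrative only, not part of the patch: with the Linux fallback
 * above, the value RDPID returns packs the NUMA node into bits 12 and
 * up and the CPU number into the low 12 bits, so a reader of that
 * value could unpack it with hypothetical helpers such as:
 */
static inline unsigned rdpid_cpu(uint64_t v)  { return v & 0xfff; }
static inline unsigned rdpid_node(uint64_t v) { return v >> 12; }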
e2);
env->eip = offset;
}
+#endif /* TARGET_X86_64 */
void helper_sysret(CPUX86State *env, int dflag)
{
raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
selector = (env->star >> 48) & 0xffff;
+#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
| ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
- } else {
+ } else
+#endif
+ {
env->eflags |= IF_MASK;
cpu_x86_load_seg_cache(env, R_CS, selector | 3,
0, 0xffffffff,
DESC_W_MASK | DESC_A_MASK);
}
}
-#endif /* TARGET_X86_64 */
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
#include "tcg/helper-tcg.h"
#include "../seg_helper.h"
-#ifdef TARGET_X86_64
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
int selector;
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
}
selector = (env->star >> 32) & 0xffff;
+#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
int code64;
} else {
env->eip = env->cstar;
}
- } else {
+ } else
+#endif
+ {
env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
env->eip = (uint32_t)env->star;
}
}
-#endif /* TARGET_X86_64 */
void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm)
gen_cmpxchg8b(s, env, modrm);
break;
- case 7: /* RDSEED */
+ case 7: /* RDSEED, RDPID with f3 prefix */
+ if (mod != 3 ||
+ (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ if (s->prefix & PREFIX_REPZ) {
+ if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
+ goto illegal_op;
+ }
+ gen_helper_rdpid(s->T0, cpu_env);
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_reg_v(s, dflag, rm, s->T0);
+ break;
+ } else {
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
+ goto illegal_op;
+ }
+ goto do_rdrand;
+ }
+
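            /*
             * For reference (Intel SDM encodings): RDRAND is 0F C7 /6,
             * RDSEED is 0F C7 /7, and RDPID reuses the RDSEED encoding
             * with a mandatory F3 prefix: F3 0F C7 /7.
             */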
case 6: /* RDRAND */
if (mod != 3 ||
(s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
!(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
goto illegal_op;
}
+ do_rdrand:
translator_io_start(&s->base);
gen_helper_rdrand(s->T0, cpu_env);
rm = (modrm & 7) | REX_B(s);
s->base.is_jmp = DISAS_NORETURN;
break;
case 0x134: /* sysenter */
- /* For Intel SYSENTER is valid on 64-bit */
- if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ /* For AMD SYSENTER is not valid in long mode */
+ if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
goto illegal_op;
+ }
if (!PE(s)) {
gen_exception_gpf(s);
} else {
}
break;
case 0x135: /* sysexit */
- /* For Intel SYSEXIT is valid on 64-bit */
- if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ /* For AMD SYSEXIT is not valid in long mode */
+ if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
goto illegal_op;
- if (!PE(s)) {
+ }
+ if (!PE(s) || CPL(s) != 0) {
gen_exception_gpf(s);
} else {
gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
s->base.is_jmp = DISAS_EOB_ONLY;
}
break;
-#ifdef TARGET_X86_64
case 0x105: /* syscall */
- /* XXX: is it usable in real mode ? */
+ /* For Intel SYSCALL is only valid in long mode */
+ if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
+ goto illegal_op;
+ }
gen_update_cc_op(s);
gen_update_eip_cur(s);
gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
gen_eob_worker(s, false, true);
break;
case 0x107: /* sysret */
- if (!PE(s)) {
+ /* For Intel SYSRET is only valid in long mode */
+ if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
+ goto illegal_op;
+ }
+ if (!PE(s) || CPL(s) != 0) {
gen_exception_gpf(s);
} else {
gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
gen_eob_worker(s, false, true);
}
break;
-#endif
case 0x1a2: /* cpuid */
gen_update_cc_op(s);
gen_update_eip_cur(s);
gen_update_cc_op(s);
gen_update_eip_cur(s);
translator_io_start(&s->base);
- gen_helper_rdtscp(cpu_env);
+ gen_helper_rdtsc(cpu_env);
+ gen_helper_rdpid(s->T0, cpu_env);
+ gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
break;
default:
break;
case 0x108: /* invd */
- case 0x109: /* wbinvd */
+ case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
if (check_cpl0(s)) {
- gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
+ gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
/* nothing to do */
}
break;
#include "tcg/helper-tcg.h"
#include "tcg/seg_helper.h"
-#ifdef TARGET_X86_64
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
CPUState *cs = env_cpu(env);
env->exception_next_eip = env->eip + next_eip_addend;
cpu_loop_exit(cs);
}
-#endif /* TARGET_X86_64 */
/*
* fake user mode interrupt. is_int is TRUE if coming from the int
#define HW_FLAGS_EUEN_FPE 0x04
#define HW_FLAGS_EUEN_SXE 0x08
-static inline void cpu_get_tb_cpu_state(CPULoongArchState *env,
- target_ulong *pc,
- target_ulong *cs_base,
- uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
#define TB_FLAGS_TRACE 16
#define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE)
-static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUM68KState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
+/* MicroBlaze is always in-order. */
+#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
+
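/*
 * Declaring the guest memory model as TCG_MO_ALL tells the common code
 * that every guest access is ordered; the memory-op expanders then emit
 * host barriers wherever the host's model is weaker than the guest's.
 */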
typedef struct CPUArchState CPUMBState;
#if !defined(CONFIG_USER_ONLY)
#include "mmu.h"
/* Ensure there is no overlap between the two masks. */
QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK);
-static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUMBState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
*flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK);
/* helper.c */
target_ulong exception_resume_pc(CPUMIPSState *env);
-static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->active_tc.PC;
*cs_base = 0;
FIELD(TBFLAGS, U, 1, 1) /* Overlaps CR_STATUS_U */
FIELD(TBFLAGS, R0_0, 2, 1) /* Set if R0 == 0. */
-static inline void cpu_get_tb_cpu_state(CPUNios2State *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUNios2State *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
unsigned crs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, CRS);
env->shadow_gpr[0][i] = val;
}
-static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
- target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))
#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags);
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags);
#else
-static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->nip;
*cs_base = 0;
}
#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
uint32_t hflags_current = env->hflags;
uint32_t hflags_rebuilt;
return cpu->cfg.vlen >> (sew + 3 - lmul);
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *pflags);
+void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags);
void riscv_cpu_update_mask(CPURISCVState *env);
#endif
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *pflags)
+void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags)
{
CPUState *cs = env_cpu(env);
RISCVCPU *cpu = RISCV_CPU(cs);
#define RX_CPU_IRQ 0
#define RX_CPU_FIR 1
-static inline void cpu_get_tb_cpu_state(CPURXState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
#endif
}
-static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
if (env->psw.addr & 1) {
/*
env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
}
-static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUSH4State *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
/* For a gUSA region, notice the end of the region. */
#define TB_FLAG_HYPER (1 << 7)
#define TB_FLAG_ASI_SHIFT 24
-static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags)
{
uint32_t flags;
*pc = env->pc;
#include "exec/helper-info.c.inc"
#undef HELPER_H
-#define DYNAMIC_PC 1 /* dynamic pc value */
-#define JUMP_PC 2 /* dynamic pc value which takes only two values
- according to jump_pc[T2] */
+/* Dynamic PC, must exit to main loop. */
+#define DYNAMIC_PC 1
+/* Dynamic PC, one of two values according to jump_pc[T2]. */
+#define JUMP_PC 2
+/* Dynamic PC, may lookup next TB. */
+#define DYNAMIC_PC_LOOKUP 3
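/*
 * All three sentinels have nonzero low bits, while real PC/nPC values
 * are 4-byte aligned, so "npc & 3" cheaply separates a concrete address
 * from a sentinel before switching on the sentinel kind.
 */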
#define DISAS_EXIT DISAS_TARGET_0
#define IS_IMM (insn & (1<<13))
-static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
+static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
int bit = (rd < 32) ? 1 : 2;
#endif
#endif
-static inline void gen_address_mask(DisasContext *dc, TCGv addr)
+static void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
if (AM_CHECK(dc))
#endif
}
-static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
+static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
if (reg > 0) {
assert(reg < 32);
}
}
-static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
+static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
if (reg > 0) {
assert(reg < 32);
}
}
-static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
+static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
if (reg > 0) {
assert(reg < 32);
tcg_gen_movi_tl(cpu_npc, npc);
tcg_gen_exit_tb(s->base.tb, tb_num);
} else {
- /* jump to another page: currently not optimized */
+ /* jump to another page: we can use an indirect jump */
tcg_gen_movi_tl(cpu_pc, pc);
tcg_gen_movi_tl(cpu_npc, npc);
- tcg_gen_exit_tb(NULL, 0);
+ tcg_gen_lookup_and_goto_ptr();
}
}
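/*
 * tcg_gen_lookup_and_goto_ptr() looks the destination up in the TB hash
 * table at run time and jumps straight to it on a hit, falling back to
 * the main loop on a miss; unlike goto_tb it needs no direct-jump
 * patching, so it is usable for cross-page targets.
 */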
// XXX suboptimal
-static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
+static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
tcg_gen_extu_i32_tl(reg, src);
tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}
-static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
+static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
tcg_gen_extu_i32_tl(reg, src);
tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}
-static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
+static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
tcg_gen_extu_i32_tl(reg, src);
tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}
-static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
+static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
tcg_gen_extu_i32_tl(reg, src);
tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
-static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
+static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
}
}
-static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
+static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
}
}
-static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
+static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
TCGv r_temp, zero, t0;
tcg_gen_mov_tl(dst, cpu_cc_dst);
}
-static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
+static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
if (sign_ext) {
#endif
}
-static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
+static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
/* zero-extend truncated operands before multiplication */
gen_op_multiply(dst, src1, src2, 0);
}
-static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
+static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
/* sign-extend truncated operands before multiplication */
gen_op_multiply(dst, src1, src2, 1);
}
// 1
-static inline void gen_op_eval_ba(TCGv dst)
+static void gen_op_eval_ba(TCGv dst)
{
tcg_gen_movi_tl(dst, 1);
}
// Z
-static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_Z(dst, src);
}
// Z | (N ^ V)
-static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_N(t0, src);
}
// N ^ V
-static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_V(t0, src);
}
// C | Z
-static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_Z(t0, src);
}
// C
-static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_C(dst, src);
}
// V
-static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_V(dst, src);
}
// 0
-static inline void gen_op_eval_bn(TCGv dst)
+static void gen_op_eval_bn(TCGv dst)
{
tcg_gen_movi_tl(dst, 0);
}
// N
-static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_N(dst, src);
}
// !Z
-static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_Z(dst, src);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !(Z | (N ^ V))
-static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
gen_op_eval_ble(dst, src);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !(N ^ V)
-static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
gen_op_eval_bl(dst, src);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !(C | Z)
-static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
gen_op_eval_bleu(dst, src);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !C
-static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_C(dst, src);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !N
-static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_N(dst, src);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !V
-static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
+static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
gen_mov_reg_V(dst, src);
tcg_gen_xori_tl(dst, dst, 0x1);
2 >
3 unordered
*/
-static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
+static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
unsigned int fcc_offset)
{
tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
tcg_gen_andi_tl(reg, reg, 0x1);
}
-static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
- unsigned int fcc_offset)
+static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
tcg_gen_andi_tl(reg, reg, 0x1);
}
// !0: FCC0 | FCC1
-static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 1 or 2: FCC0 ^ FCC1
-static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 1 or 3: FCC0
-static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 1: FCC0 & !FCC1
-static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 2 or 3: FCC1
-static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
gen_mov_reg_FCC1(dst, src, fcc_offset);
}
// 2: !FCC0 & FCC1
-static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 3: FCC0 & FCC1
-static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 0: !(FCC0 | FCC1)
-static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 0 or 3: !(FCC0 ^ FCC1)
-static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 0 or 2: !FCC0
-static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
gen_mov_reg_FCC0(dst, src, fcc_offset);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !1: !(FCC0 & !FCC1)
-static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 0 or 1: !FCC1
-static inline void gen_op_eval_fble(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
gen_mov_reg_FCC1(dst, src, fcc_offset);
tcg_gen_xori_tl(dst, dst, 0x1);
}
// !2: !(!FCC0 & FCC1)
-static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// !3: !(FCC0 & FCC1)
-static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
- unsigned int fcc_offset)
+static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
TCGv t0 = tcg_temp_new();
gen_mov_reg_FCC0(dst, src, fcc_offset);
tcg_gen_xori_tl(dst, dst, 0x1);
}
-static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
- target_ulong pc2, TCGv r_cond)
+static void gen_branch2(DisasContext *dc, target_ulong pc1,
+ target_ulong pc2, TCGv r_cond)
{
TCGLabel *l1 = gen_new_label();
{
target_ulong npc = dc->npc;
- if (likely(npc != DYNAMIC_PC)) {
+ if (npc & 3) {
+ switch (npc) {
+ case DYNAMIC_PC:
+ case DYNAMIC_PC_LOOKUP:
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc,
+ cpu_cond, tcg_constant_tl(0),
+ tcg_constant_tl(pc1), cpu_npc);
+ dc->pc = npc;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
dc->pc = npc;
dc->jump_pc[0] = pc1;
dc->jump_pc[1] = npc + 4;
dc->npc = JUMP_PC;
- } else {
- TCGv t, z;
-
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
-
- tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
- t = tcg_constant_tl(pc1);
- z = tcg_constant_tl(0);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
-
- dc->pc = DYNAMIC_PC;
}
}
-static inline void gen_generic_branch(DisasContext *dc)
+static void gen_generic_branch(DisasContext *dc)
{
TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
/* call this function before using the condition register as it may
have been set for a jump */
-static inline void flush_cond(DisasContext *dc)
+static void flush_cond(DisasContext *dc)
{
if (dc->npc == JUMP_PC) {
gen_generic_branch(dc);
- dc->npc = DYNAMIC_PC;
+ dc->npc = DYNAMIC_PC_LOOKUP;
}
}
-static inline void save_npc(DisasContext *dc)
+static void save_npc(DisasContext *dc)
{
- if (dc->npc == JUMP_PC) {
- gen_generic_branch(dc);
- dc->npc = DYNAMIC_PC;
- } else if (dc->npc != DYNAMIC_PC) {
+ if (dc->npc & 3) {
+ switch (dc->npc) {
+ case JUMP_PC:
+ gen_generic_branch(dc);
+ dc->npc = DYNAMIC_PC_LOOKUP;
+ break;
+ case DYNAMIC_PC:
+ case DYNAMIC_PC_LOOKUP:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
tcg_gen_movi_tl(cpu_npc, dc->npc);
}
}
-static inline void update_psr(DisasContext *dc)
+static void update_psr(DisasContext *dc)
{
if (dc->cc_op != CC_OP_FLAGS) {
dc->cc_op = CC_OP_FLAGS;
}
}
-static inline void save_state(DisasContext *dc)
+static void save_state(DisasContext *dc)
{
tcg_gen_movi_tl(cpu_pc, dc->pc);
save_npc(dc);
gen_helper_check_align(cpu_env, addr, tcg_constant_i32(mask));
}
-static inline void gen_mov_pc_npc(DisasContext *dc)
+static void gen_mov_pc_npc(DisasContext *dc)
{
- if (dc->npc == JUMP_PC) {
- gen_generic_branch(dc);
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
- dc->pc = DYNAMIC_PC;
- } else if (dc->npc == DYNAMIC_PC) {
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
- dc->pc = DYNAMIC_PC;
+ if (dc->npc & 3) {
+ switch (dc->npc) {
+ case JUMP_PC:
+ gen_generic_branch(dc);
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = DYNAMIC_PC_LOOKUP;
+ break;
+ case DYNAMIC_PC:
+ case DYNAMIC_PC_LOOKUP:
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = dc->npc;
+ break;
+ default:
+ g_assert_not_reached();
+ }
} else {
dc->pc = dc->npc;
}
}
-static inline void gen_op_next_insn(void)
+static void gen_op_next_insn(void)
{
tcg_gen_mov_tl(cpu_pc, cpu_npc);
tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
cmp->c2 = tcg_constant_tl(0);
}
-static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
+static void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
DisasCompare cmp;
gen_compare_reg(&cmp, cond, r_src);
}
}
-static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
switch (fccno) {
case 0:
}
}
-static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
switch (fccno) {
case 0:
}
}
-static inline void gen_op_fcmpq(int fccno)
+static void gen_op_fcmpq(int fccno)
{
switch (fccno) {
case 0:
}
}
-static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
switch (fccno) {
case 0:
}
}
-static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
switch (fccno) {
case 0:
}
}
-static inline void gen_op_fcmpeq(int fccno)
+static void gen_op_fcmpeq(int fccno)
{
switch (fccno) {
case 0:
#else
-static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
+static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}
-static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}
-static inline void gen_op_fcmpq(int fccno)
+static void gen_op_fcmpq(int fccno)
{
gen_helper_fcmpq(cpu_fsr, cpu_env);
}
-static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
+static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}
-static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}
-static inline void gen_op_fcmpeq(int fccno)
+static void gen_op_fcmpeq(int fccno)
{
gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
return 0;
}
-static inline void gen_op_clear_ieee_excp_and_FTT(void)
+static void gen_op_clear_ieee_excp_and_FTT(void)
{
tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
-static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
+static void gen_fop_FF(DisasContext *dc, int rd, int rs,
void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
TCGv_i32 dst, src;
gen_store_fpr_F(dc, rd, dst);
}
-static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_i32))
+static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_i32))
{
TCGv_i32 dst, src;
gen_store_fpr_F(dc, rd, dst);
}
-static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
TCGv_i32 dst, src1, src2;
}
#ifdef TARGET_SPARC64
-static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
+static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
TCGv_i32 dst, src1, src2;
}
#endif
-static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
+static void gen_fop_DD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
TCGv_i64 dst, src;
}
#ifdef TARGET_SPARC64
-static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_i64))
+static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_i64))
{
TCGv_i64 dst, src;
}
#endif
-static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
TCGv_i64 dst, src1, src2;
}
#ifdef TARGET_SPARC64
-static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
+static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
TCGv_i64 dst, src1, src2;
gen_store_fpr_D(dc, rd, dst);
}
-static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
TCGv_i64 dst, src1, src2;
gen_store_fpr_D(dc, rd, dst);
}
-static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
TCGv_i64 dst, src0, src1, src2;
}
#endif
-static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr))
+static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr))
{
gen_op_load_fpr_QT1(QFPREG(rs));
}
#ifdef TARGET_SPARC64
-static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr))
+static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr))
{
gen_op_load_fpr_QT1(QFPREG(rs));
}
#endif
-static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_ptr))
+static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_ptr))
{
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
gen_update_fprs_dirty(dc, QFPREG(rd));
}
-static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
+static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
TCGv_i64 dst;
gen_store_fpr_D(dc, rd, dst);
}
-static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
+static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
TCGv_i64 src1, src2;
}
#ifdef TARGET_SPARC64
-static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+static void gen_fop_DF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
TCGv_i64 dst;
TCGv_i32 src;
}
#endif
-static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
TCGv_i64 dst;
TCGv_i32 src;
gen_store_fpr_D(dc, rd, dst);
}
-static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
+static void gen_fop_FD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
TCGv_i32 dst;
TCGv_i64 src;
gen_store_fpr_F(dc, rd, dst);
}
-static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr))
+static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr))
{
TCGv_i32 dst;
gen_store_fpr_F(dc, rd, dst);
}
-static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr))
+static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr))
{
TCGv_i64 dst;
gen_store_fpr_D(dc, rd, dst);
}
-static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr, TCGv_i32))
+static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr, TCGv_i32))
{
TCGv_i32 src;
gen_update_fprs_dirty(dc, QFPREG(rd));
}
-static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr, TCGv_i64))
+static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr, TCGv_i64))
{
TCGv_i64 src;
}
#ifndef CONFIG_USER_ONLY
-static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
+static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
TCGv_i32 r_tl = tcg_temp_new_i32();
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, asi));
- /* End TB to notice changed ASI. */
+ /*
+ * End TB to notice changed ASI.
+ * TODO: Could notice src1 = %g0 and IS_IMM,
+ * update DisasContext and not exit the TB.
+ */
save_state(dc);
gen_op_next_insn();
- tcg_gen_exit_tb(NULL, 0);
+ tcg_gen_lookup_and_goto_ptr();
dc->base.is_jmp = DISAS_NORETURN;
break;
case 0x6: /* V9 wrfprs */
gen_mov_pc_npc(dc);
gen_check_align(cpu_tmp0, 3);
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC;
+ dc->npc = DYNAMIC_PC_LOOKUP;
goto jmp_insn;
#endif
} else {
gen_check_align(cpu_tmp0, 3);
gen_address_mask(dc, cpu_tmp0);
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC;
+ dc->npc = DYNAMIC_PC_LOOKUP;
}
goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
break;
}
/* default case for non jump instructions */
- if (dc->npc == DYNAMIC_PC) {
- dc->pc = DYNAMIC_PC;
- gen_op_next_insn();
- } else if (dc->npc == JUMP_PC) {
- /* we can do a static jump */
- gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
- dc->base.is_jmp = DISAS_NORETURN;
+ if (dc->npc & 3) {
+ switch (dc->npc) {
+ case DYNAMIC_PC:
+ case DYNAMIC_PC_LOOKUP:
+ dc->pc = dc->npc;
+ gen_op_next_insn();
+ break;
+ case JUMP_PC:
+ /* we can do a static jump */
+ gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
+ dc->base.is_jmp = DISAS_NORETURN;
+ break;
+ default:
+ g_assert_not_reached();
+ }
} else {
dc->pc = dc->npc;
dc->npc = dc->npc + 4;
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
+ target_ulong npc = dc->npc;
- if (dc->npc & JUMP_PC) {
- assert(dc->jump_pc[1] == dc->pc + 4);
- tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
- } else {
- tcg_gen_insn_start(dc->pc, dc->npc);
+ if (npc & 3) {
+ switch (npc) {
+ case JUMP_PC:
+ assert(dc->jump_pc[1] == dc->pc + 4);
+ npc = dc->jump_pc[0] | JUMP_PC;
+ break;
+ case DYNAMIC_PC:
+ case DYNAMIC_PC_LOOKUP:
+ npc = DYNAMIC_PC;
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
+ tcg_gen_insn_start(dc->pc, npc);
}
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
+ bool may_lookup;
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- if (dc->pc != DYNAMIC_PC &&
- (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
+ if (((dc->pc | dc->npc) & 3) == 0) {
/* static PC and NPC: we can use direct chaining */
gen_goto_tb(dc, 0, dc->pc, dc->npc);
- } else {
- if (dc->pc != DYNAMIC_PC) {
- tcg_gen_movi_tl(cpu_pc, dc->pc);
+ break;
+ }
+
+ if (dc->pc & 3) {
+ switch (dc->pc) {
+ case DYNAMIC_PC_LOOKUP:
+ may_lookup = true;
+ break;
+ case DYNAMIC_PC:
+ may_lookup = false;
+ break;
+ default:
+ g_assert_not_reached();
}
- save_npc(dc);
+ } else {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ may_lookup = true;
+ }
+
+ save_npc(dc);
+ if (may_lookup) {
+ tcg_gen_lookup_and_goto_ptr();
+ } else {
tcg_gen_exit_tb(NULL, 0);
}
break;
void cpu_state_reset(CPUTriCoreState *s);
void tricore_tcg_init(void);
-static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
uint32_t new_flags = 0;
*pc = env->PC;
#include "exec/cpu-all.h"
-static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
TCGv_i64 tmp = tcg_temp_ebb_new_i64();
tcg_gen_dup_i64(vece, tmp, c);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g);
+ tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g);
tcg_temp_free_i64(tmp);
}
* The requested alignment cannot overlap the TLB flags.
* FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
*/
- tcg_debug_assert(a_bits + 6 <= tcg_ctx->page_bits);
+ tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
#endif
}
void tcg_gen_mb(TCGBar mb_type)
{
- if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {
+#ifdef CONFIG_USER_ONLY
+ bool parallel = tcg_ctx->gen_tb->cflags & CF_PARALLEL;
+#else
+ /*
+ * It is tempting to elide the barrier in a uniprocessor context.
+ * However, even with a single cpu we have i/o threads running in
+ * parallel, and lack of memory order can result in e.g. virtio
+ * queue entries being read incorrectly.
+ */
+ bool parallel = true;
+#endif
+
+ if (parallel) {
tcg_gen_op1(INDEX_op_mb, mb_type);
}
}
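For illustration, a front end emits such a barrier when translating a guest full fence; a typical call (this exact idiom appears in several front ends) is:

    /* Sequentially consistent fence ordering all loads and stores. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);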
QTAILQ_REMOVE(&s->ops, op, link);
QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
s->nb_ops--;
-
-#ifdef CONFIG_PROFILER
- qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
-#endif
}
void tcg_remove_ops_after(TCGOp *op)
tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
-#ifdef CONFIG_PROFILER
-
-/* avoid copy/paste errors */
-#define PROF_ADD(to, from, field) \
- do { \
- (to)->field += qatomic_read(&((from)->field)); \
- } while (0)
-
-#define PROF_MAX(to, from, field) \
- do { \
- typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
- if (val__ > (to)->field) { \
- (to)->field = val__; \
- } \
- } while (0)
-
-/* Pass in a zero'ed @prof */
-static inline
-void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
-{
- unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
- unsigned int i;
-
- for (i = 0; i < n_ctxs; i++) {
- TCGContext *s = qatomic_read(&tcg_ctxs[i]);
- const TCGProfile *orig = &s->prof;
-
- if (counters) {
- PROF_ADD(prof, orig, cpu_exec_time);
- PROF_ADD(prof, orig, tb_count1);
- PROF_ADD(prof, orig, tb_count);
- PROF_ADD(prof, orig, op_count);
- PROF_MAX(prof, orig, op_count_max);
- PROF_ADD(prof, orig, temp_count);
- PROF_MAX(prof, orig, temp_count_max);
- PROF_ADD(prof, orig, del_op_count);
- PROF_ADD(prof, orig, code_in_len);
- PROF_ADD(prof, orig, code_out_len);
- PROF_ADD(prof, orig, search_out_len);
- PROF_ADD(prof, orig, interm_time);
- PROF_ADD(prof, orig, code_time);
- PROF_ADD(prof, orig, la_time);
- PROF_ADD(prof, orig, opt_time);
- PROF_ADD(prof, orig, restore_count);
- PROF_ADD(prof, orig, restore_time);
- }
- if (table) {
- int i;
-
- for (i = 0; i < NB_OPS; i++) {
- PROF_ADD(prof, orig, table_op_count[i]);
- }
- }
- }
-}
-
-#undef PROF_ADD
-#undef PROF_MAX
-
-static void tcg_profile_snapshot_counters(TCGProfile *prof)
-{
- tcg_profile_snapshot(prof, true, false);
-}
-
-static void tcg_profile_snapshot_table(TCGProfile *prof)
-{
- tcg_profile_snapshot(prof, false, true);
-}
-
-void tcg_dump_op_count(GString *buf)
-{
- TCGProfile prof = {};
- int i;
-
- tcg_profile_snapshot_table(&prof);
- for (i = 0; i < NB_OPS; i++) {
- g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
- prof.table_op_count[i]);
- }
-}
-
-int64_t tcg_cpu_exec_time(void)
-{
- unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
- unsigned int i;
- int64_t ret = 0;
-
- for (i = 0; i < n_ctxs; i++) {
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
- const TCGProfile *prof = &s->prof;
-
- ret += qatomic_read(&prof->cpu_exec_time);
- }
- return ret;
-}
-#else
void tcg_dump_op_count(GString *buf)
{
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
-int64_t tcg_cpu_exec_time(void)
-{
- error_report("%s: TCG profiler not compiled", __func__);
- exit(EXIT_FAILURE);
-}
-#endif
-
-
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
{
-#ifdef CONFIG_PROFILER
- TCGProfile *prof = &s->prof;
-#endif
int i, start_words, num_insns;
TCGOp *op;
-#ifdef CONFIG_PROFILER
- {
- int n = 0;
-
- QTAILQ_FOREACH(op, &s->ops, link) {
- n++;
- }
- qatomic_set(&prof->op_count, prof->op_count + n);
- if (n > prof->op_count_max) {
- qatomic_set(&prof->op_count_max, n);
- }
-
- n = s->nb_temps;
- qatomic_set(&prof->temp_count, prof->temp_count + n);
- if (n > prof->temp_count_max) {
- qatomic_set(&prof->temp_count_max, n);
- }
- }
-#endif
-
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
&& qemu_log_in_addr_range(pc_start))) {
FILE *logfile = qemu_log_trylock();
}
#endif
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
-#endif
-
tcg_optimize(s);
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
- qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
-#endif
-
reachable_code_pass(s);
liveness_pass_0(s);
liveness_pass_1(s);
}
}
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
-#endif
-
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
&& qemu_log_in_addr_range(pc_start))) {
FILE *logfile = qemu_log_trylock();
QTAILQ_FOREACH(op, &s->ops, link) {
TCGOpcode opc = op->opc;
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
-#endif
-
switch (opc) {
case INDEX_op_mov_i32:
case INDEX_op_mov_i64:
return tcg_current_code_size(s);
}
-#ifdef CONFIG_PROFILER
-void tcg_dump_info(GString *buf)
-{
- TCGProfile prof = {};
- const TCGProfile *s;
- int64_t tb_count;
- int64_t tb_div_count;
- int64_t tot;
-
- tcg_profile_snapshot_counters(&prof);
- s = &prof;
- tb_count = s->tb_count;
- tb_div_count = tb_count ? tb_count : 1;
- tot = s->interm_time + s->code_time;
-
- g_string_append_printf(buf, "JIT cycles %" PRId64
- " (%0.3f s at 2.4 GHz)\n",
- tot, tot / 2.4e9);
- g_string_append_printf(buf, "translated TBs %" PRId64
- " (aborted=%" PRId64 " %0.1f%%)\n",
- tb_count, s->tb_count1 - tb_count,
- (double)(s->tb_count1 - s->tb_count)
- / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
- g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n",
- (double)s->op_count / tb_div_count, s->op_count_max);
- g_string_append_printf(buf, "deleted ops/TB %0.2f\n",
- (double)s->del_op_count / tb_div_count);
- g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n",
- (double)s->temp_count / tb_div_count,
- s->temp_count_max);
- g_string_append_printf(buf, "avg host code/TB %0.1f\n",
- (double)s->code_out_len / tb_div_count);
- g_string_append_printf(buf, "avg search data/TB %0.1f\n",
- (double)s->search_out_len / tb_div_count);
-
- g_string_append_printf(buf, "cycles/op %0.1f\n",
- s->op_count ? (double)tot / s->op_count : 0);
- g_string_append_printf(buf, "cycles/in byte %0.1f\n",
- s->code_in_len ? (double)tot / s->code_in_len : 0);
- g_string_append_printf(buf, "cycles/out byte %0.1f\n",
- s->code_out_len ? (double)tot / s->code_out_len : 0);
- g_string_append_printf(buf, "cycles/search byte %0.1f\n",
- s->search_out_len ?
- (double)tot / s->search_out_len : 0);
- if (tot == 0) {
- tot = 1;
- }
- g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
- (double)s->interm_time / tot * 100.0);
- g_string_append_printf(buf, " gen_code time %0.1f%%\n",
- (double)s->code_time / tot * 100.0);
- g_string_append_printf(buf, "optim./code time %0.1f%%\n",
- (double)s->opt_time / (s->code_time ?
- s->code_time : 1)
- * 100.0);
- g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
- (double)s->la_time / (s->code_time ?
- s->code_time : 1) * 100.0);
- g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
- s->restore_count);
- g_string_append_printf(buf, " avg cycles %0.1f\n",
- s->restore_count ?
- (double)s->restore_time / s->restore_count : 0);
-}
-#else
void tcg_dump_info(GString *buf)
{
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
-#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:
#define MAX_CPUS 8 /* lets not go nuts */
typedef struct {
- uint64_t last_pc;
uint64_t insn_count;
} InstructionCount;
{
unsigned int i = cpu_index % MAX_CPUS;
InstructionCount *c = &counts[i];
- uint64_t this_pc = GPOINTER_TO_UINT(udata);
- if (this_pc == c->last_pc) {
- g_autofree gchar *out = g_strdup_printf("detected repeat execution @ 0x%"
- PRIx64 "\n", this_pc);
- qemu_plugin_outs(out);
- }
- c->last_pc = this_pc;
+
c->insn_count++;
}
#include "qemu/osdep.h"
+#include "qemu/sockets.h"
#include "qemu/dbus.h"
#include "qemu/sockets.h"
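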
#include <gio/gio.h>
g_autoptr(GSocketConnection) socketc = NULL;
GDBusConnection *conn;
+#ifdef WIN32
+ socket = g_socket_new_from_fd(_get_osfhandle(fd), &err);
+#else
socket = g_socket_new_from_fd(fd, &err);
+#endif
g_assert_no_error(err);
socketc = g_socket_connection_factory_create_connection(socket);
qemu_dbus_display1_console_call_register_listener_finish(
QEMU_DBUS_DISPLAY1_CONSOLE(source_object),
- NULL, res, &err);
+#ifndef WIN32
+ NULL,
+#endif
+ res, &err);
g_assert_no_error(err);
test->listener_conn = g_thread_join(test->thread);
g_autoptr(GError) err = NULL;
g_autoptr(GDBusConnection) conn = NULL;
g_autoptr(QemuDBusDisplay1ConsoleProxy) console = NULL;
- g_autoptr(GUnixFDList) fd_list = NULL;
g_autoptr(GMainLoop) loop = NULL;
QTestState *qts = NULL;
- int pair[2], idx;
+ int pair[2];
TestDBusConsoleRegister test;
+#ifdef WIN32
+ WSAPROTOCOL_INFOW info;
+ g_autoptr(GVariant) listener = NULL;
+#else
+ g_autoptr(GUnixFDList) fd_list = NULL;
+ int idx;
+#endif
test_setup(&qts, &conn);
g_assert_cmpint(qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, pair), ==, 0);
+#ifndef WIN32
fd_list = g_unix_fd_list_new();
idx = g_unix_fd_list_append(fd_list, pair[1], NULL);
+#endif
console = QEMU_DBUS_DISPLAY1_CONSOLE_PROXY(
qemu_dbus_display1_console_proxy_new_sync(
test.thread = g_thread_new(NULL, test_dbus_p2p_server_setup_thread,
GINT_TO_POINTER(pair[0]));
+#ifdef WIN32
+ if (WSADuplicateSocketW(_get_osfhandle(pair[1]),
+ GetProcessId((HANDLE) qtest_pid(qts)),
+ &info) == SOCKET_ERROR)
+ {
+ g_autofree char *emsg = g_win32_error_message(WSAGetLastError());
+ g_error("WSADuplicateSocket failed: %s", emsg);
+ }
+ close(pair[1]);
+ listener = g_variant_new_fixed_array(G_VARIANT_TYPE_BYTE,
+ &info,
+ sizeof(info),
+ 1);
+#endif
+
qemu_dbus_display1_console_call_register_listener(
QEMU_DBUS_DISPLAY1_CONSOLE(console),
+#ifdef WIN32
+ listener,
+#else
g_variant_new_handle(idx),
+#endif
G_DBUS_CALL_FLAGS_NONE,
-1,
+#ifndef WIN32
fd_list,
+#endif
NULL,
test_dbus_console_registered,
&test);
return ret;
}
+pid_t qtest_pid(QTestState *s)
+{
+ return s->qemu_pid;
+}
+
bool qtest_probe_child(QTestState *s)
{
pid_t pid = s->qemu_pid;
* Returns: Value retrieved from property.
*/
bool qtest_qom_get_bool(QTestState *s, const char *path, const char *property);
+
+/**
+ * qtest_pid:
+ * @s: QTestState instance to operate on.
+ *
+ * Returns: the PID of the QEMU process, or <= 0
+ */
+pid_t qtest_pid(QTestState *s);
+
#endif
'numa-test'
]
-if dbus_display and targetos != 'windows'
+if dbus_display
qtests_i386 += ['dbus-display-test']
endif
{ "query-balloon", ERROR_CLASS_DEVICE_NOT_ACTIVE },
{ "query-hotpluggable-cpus", ERROR_CLASS_GENERIC_ERROR },
{ "query-vm-generation-id", ERROR_CLASS_GENERIC_ERROR },
-#ifndef CONFIG_PROFILER
- { "x-query-profile", ERROR_CLASS_GENERIC_ERROR },
-#endif
/* Only valid with a USB bus added */
{ "x-query-usb", ERROR_CLASS_GENERIC_ERROR },
/* Only valid with accel=tcg */
memory: CFLAGS+=-DCHECK_UNALIGNED=1
-# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
-run-plugin-%-with-libinsn.so:
- $(call run-test, $@, \
- $(QEMU) -monitor none -display none \
- -chardev file$(COMMA)path=$@.out$(COMMA)id=output \
- -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
- -d plugin -D $*-with-libinsn.so.pout \
- $(QEMU_OPTS) $*)
-
# Running
QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel
SKIP_I386_TESTS+=test-i386-fprem
endif
-# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
-run-plugin-%-with-libinsn.so:
- $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \
- -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
- -d plugin -D $*-with-libinsn.so.pout $*)
-
# Update TESTS
I386_TESTS:=$(filter-out $(SKIP_I386_TESTS), $(ALL_X86_TESTS))
TESTS=$(MULTIARCH_TESTS) $(I386_TESTS)
memory: CFLAGS+=-DCHECK_UNALIGNED=1
-# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
-run-plugin-%-with-libinsn.so:
- $(call run-test, $@, \
- $(QEMU) -monitor none -display none \
- -chardev file$(COMMA)path=$@.out$(COMMA)id=output \
- -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
- -d plugin -D $*-with-libinsn.so.pout \
- $(QEMU_OPTS) $*)
-
# Running
QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel
con->scanout.texture.x,
con->scanout.texture.y,
con->scanout.texture.width,
- con->scanout.texture.height);
+ con->scanout.texture.height,
+ con->scanout.texture.d3d_tex2d);
}
}
return s;
}
+#ifdef WIN32
+void qemu_displaysurface_win32_set_handle(DisplaySurface *surface,
+ HANDLE h, uint32_t offset)
+{
+ assert(!surface->handle);
+
+ surface->handle = h;
+ surface->handle_offset = offset;
+}
+
+static void
+win32_pixman_image_destroy(pixman_image_t *image, void *data)
+{
+ DisplaySurface *surface = data;
+
+ if (!surface->handle) {
+ return;
+ }
+
+ assert(surface->handle_offset == 0);
+
+ qemu_win32_map_free(
+ pixman_image_get_data(surface->image),
+ surface->handle,
+ &error_warn
+ );
+}
+#endif
+
DisplaySurface *qemu_create_displaysurface(int width, int height)
{
- DisplaySurface *surface = g_new0(DisplaySurface, 1);
+ DisplaySurface *surface;
+ void *bits = NULL;
+#ifdef WIN32
+ HANDLE handle = NULL;
+#endif
- trace_displaysurface_create(surface, width, height);
- surface->format = PIXMAN_x8r8g8b8;
- surface->image = pixman_image_create_bits(surface->format,
- width, height,
- NULL, width * 4);
- assert(surface->image != NULL);
+ trace_displaysurface_create(width, height);
+
+#ifdef WIN32
+ bits = qemu_win32_map_alloc(width * height * 4, &handle, &error_abort);
+#endif
+
+ surface = qemu_create_displaysurface_from(
+ width, height,
+ PIXMAN_x8r8g8b8,
+ width * 4, bits
+ );
surface->flags = QEMU_ALLOCATED_FLAG;
+#ifdef WIN32
+ qemu_displaysurface_win32_set_handle(surface, handle, 0);
+#endif
return surface;
}
width, height,
(void *)data, linesize);
assert(surface->image != NULL);
+#ifdef WIN32
+ pixman_image_set_destroy_function(surface->image,
+ win32_pixman_image_destroy, surface);
+#endif
return surface;
}
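/*
 * Design note: every allocated surface, including the Win32 shared
 * mapping above, funnels through qemu_create_displaysurface_from(), so
 * registering win32_pixman_image_destroy here frees the shared mapping
 * exactly once, when pixman drops the last reference to the image.
 */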
return true;
}
+void console_handle_touch_event(QemuConsole *con,
+ struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX],
+ uint64_t num_slot,
+ int width, int height,
+ double x, double y,
+ InputMultiTouchType type,
+ Error **errp)
+{
+ struct touch_slot *slot;
+ bool needs_sync = false;
+ int update;
+ int i;
+
+ if (num_slot >= INPUT_EVENT_SLOTS_MAX) {
+ error_setg(errp,
+ "Unexpected touch slot number: % " PRId64" >= %d",
+ num_slot, INPUT_EVENT_SLOTS_MAX);
+ return;
+ }
+
+ slot = &touch_slots[num_slot];
+ slot->x = x;
+ slot->y = y;
+
+ if (type == INPUT_MULTI_TOUCH_TYPE_BEGIN) {
+ slot->tracking_id = num_slot;
+ }
+
+ for (i = 0; i < INPUT_EVENT_SLOTS_MAX; ++i) {
+ if (i == num_slot) {
+ update = type;
+ } else {
+ update = INPUT_MULTI_TOUCH_TYPE_UPDATE;
+ }
+
+ slot = &touch_slots[i];
+
+ if (slot->tracking_id == -1) {
+ continue;
+ }
+
+ if (update == INPUT_MULTI_TOUCH_TYPE_END) {
+ slot->tracking_id = -1;
+ qemu_input_queue_mtt(con, update, i, slot->tracking_id);
+ needs_sync = true;
+ } else {
+ qemu_input_queue_mtt(con, update, i, slot->tracking_id);
+ qemu_input_queue_btn(con, INPUT_BUTTON_TOUCH, true);
+ qemu_input_queue_mtt_abs(con,
+ INPUT_AXIS_X, (int) slot->x,
+ 0, width,
+ i, slot->tracking_id);
+ qemu_input_queue_mtt_abs(con,
+ INPUT_AXIS_Y, (int) slot->y,
+ 0, height,
+ i, slot->tracking_id);
+ needs_sync = true;
+ }
+ }
+
+ if (needs_sync) {
+ qemu_input_event_sync();
+ }
+}
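/*
 * Hypothetical caller sketch, mirroring the D-Bus backend later in this
 * series (con, x and y are assumed to be in scope): feed one
 * touch-begin for slot 0 at (x, y).
 */
    Error *err = NULL;
    console_handle_touch_event(con, touch_slots, 0,
                               qemu_console_get_width(con, 0),
                               qemu_console_get_height(con, 0),
                               x, y, INPUT_MULTI_TOUCH_TYPE_BEGIN, &err);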
+
void qemu_console_set_display_gl_ctx(QemuConsole *con, DisplayGLCtx *gl)
{
/* display has opengl support */
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t width, uint32_t height)
+ uint32_t width, uint32_t height,
+ void *d3d_tex2d)
{
DisplayState *s = con->ds;
DisplayChangeListener *dcl;
con->scanout.kind = SCANOUT_TEXTURE;
con->scanout.texture = (ScanoutTexture) {
backing_id, backing_y_0_top, backing_width, backing_height,
- x, y, width, height
+ x, y, width, height, d3d_tex2d,
};
QLIST_FOREACH(dcl, &s->listeners, next) {
if (con != (dcl->con ? dcl->con : active_console)) {
dcl->ops->dpy_gl_scanout_texture(dcl, backing_id,
backing_y_0_top,
backing_width, backing_height,
- x, y, width, height);
+ x, y, width, height,
+ d3d_tex2d);
}
}
}
if (con == NULL) {
con = active_console;
}
- return con->cursor;
+ return con ? con->cursor : NULL;
}
bool qemu_console_is_visible(QemuConsole *con)
#include "qemu/config-file.h"
#include "qemu/option.h"
+#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h>
+#endif
#include "dbus.h"
dbus_chr_register(
DBusChardev *dc,
GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
GUnixFDList *fd_list,
+#endif
GVariant *arg_stream,
QemuDBusDisplay1Chardev *object)
{
g_autoptr(GError) err = NULL;
int fd;
+#ifdef G_OS_WIN32
+ if (!dbus_win32_import_socket(invocation, arg_stream, &fd)) {
+ return DBUS_METHOD_INVOCATION_HANDLED;
+ }
+#else
fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_stream), &err);
if (err) {
g_dbus_method_invocation_return_error(
"Couldn't get peer FD: %s", err->message);
return DBUS_METHOD_INVOCATION_HANDLED;
}
+#endif
if (qemu_chr_add_client(CHARDEV(dc), fd) < 0) {
g_dbus_method_invocation_return_error(invocation,
DBUS_DISPLAY_ERROR,
DBUS_DISPLAY_ERROR_FAILED,
"Couldn't register FD!");
+#ifdef G_OS_WIN32
+ closesocket(fd);
+#else
close(fd);
+#endif
return DBUS_METHOD_INVOCATION_HANDLED;
}
"owner", g_dbus_method_invocation_get_sender(invocation),
NULL);
- qemu_dbus_display1_chardev_complete_register(object, invocation, NULL);
+ qemu_dbus_display1_chardev_complete_register(object, invocation
+#ifndef G_OS_WIN32
+ , NULL
+#endif
+ );
return DBUS_METHOD_INVOCATION_HANDLED;
}
#include "ui/kbd-state.h"
#include "trace.h"
+#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h>
+#endif
#include "dbus.h"
+static struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX];
+
struct _DBusDisplayConsole {
GDBusObjectSkeleton parent_instance;
DisplayChangeListener dcl;
QKbdState *kbd;
QemuDBusDisplay1Mouse *iface_mouse;
+ QemuDBusDisplay1MultiTouch *iface_touch;
gboolean last_set;
guint last_x;
guint last_y;
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h)
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d)
{
DBusDisplayConsole *ddc = container_of(dcl, DBusDisplayConsole, dcl);
return DBUS_METHOD_INVOCATION_HANDLED;
}
+#ifdef G_OS_WIN32
+bool
+dbus_win32_import_socket(GDBusMethodInvocation *invocation,
+ GVariant *arg_listener, int *socket)
+{
+ gsize n;
+ WSAPROTOCOL_INFOW *info = (void *)g_variant_get_fixed_array(arg_listener, &n, 1);
+
+ if (!info || n != sizeof(*info)) {
+ g_dbus_method_invocation_return_error(
+ invocation,
+ DBUS_DISPLAY_ERROR,
+ DBUS_DISPLAY_ERROR_FAILED,
+ "Failed to get socket infos");
+ return false;
+ }
+
+ *socket = WSASocketW(FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ info, 0, 0);
+ if (*socket == INVALID_SOCKET) {
+ g_autofree gchar *emsg = g_win32_error_message(WSAGetLastError());
+ g_dbus_method_invocation_return_error(
+ invocation,
+ DBUS_DISPLAY_ERROR,
+ DBUS_DISPLAY_ERROR_FAILED,
+ "Couldn't create socket: %s", emsg);
+ return false;
+ }
+
+ return true;
+}
+#endif
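For reference, the sending side of this handshake (client code, not part of
this patch) is expected to duplicate its socket for the QEMU process with
WSADuplicateSocketW() and ship the WSAPROTOCOL_INFOW bytes as the "ay"
argument. A sketch, assuming the peer PID was obtained out of band:

static GVariant *example_export_socket(SOCKET s, DWORD qemu_pid)
{
    WSAPROTOCOL_INFOW info;

    /* fills info so that qemu_pid can reconstruct the socket */
    if (WSADuplicateSocketW(s, qemu_pid, &info) != 0) {
        return NULL;
    }
    /* element size 1 matches the g_variant_get_fixed_array() call above */
    return g_variant_new_fixed_array(G_VARIANT_TYPE_BYTE,
                                     &info, sizeof(info), 1);
}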
+
static gboolean
dbus_console_register_listener(DBusDisplayConsole *ddc,
GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
GUnixFDList *fd_list,
+#endif
GVariant *arg_listener)
{
const char *sender = g_dbus_method_invocation_get_sender(invocation);
return DBUS_METHOD_INVOCATION_HANDLED;
}
+#ifdef G_OS_WIN32
+ if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) {
+ return DBUS_METHOD_INVOCATION_HANDLED;
+ }
+#else
fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err);
if (err) {
g_dbus_method_invocation_return_error(
"Couldn't get peer fd: %s", err->message);
return DBUS_METHOD_INVOCATION_HANDLED;
}
+#endif
socket = g_socket_new_from_fd(fd, &err);
if (err) {
DBUS_DISPLAY_ERROR,
DBUS_DISPLAY_ERROR_FAILED,
"Couldn't make a socket: %s", err->message);
+#ifdef G_OS_WIN32
+ closesocket(fd);
+#else
close(fd);
+#endif
return DBUS_METHOD_INVOCATION_HANDLED;
}
socket_conn = g_socket_connection_factory_create_connection(socket);
qemu_dbus_display1_console_complete_register_listener(
- ddc->iface, invocation, NULL);
+ ddc->iface, invocation
+#ifdef G_OS_UNIX
+ , NULL
+#endif
+ );
listener_conn = g_dbus_connection_new_sync(
G_IO_STREAM(socket_conn),
return DBUS_METHOD_INVOCATION_HANDLED;
}
+static gboolean
+dbus_touch_send_event(DBusDisplayConsole *ddc,
+ GDBusMethodInvocation *invocation,
+ guint kind, uint64_t num_slot,
+ double x, double y)
+{
+ Error *error = NULL;
+ int width, height;
+
+ trace_dbus_touch_send_event(kind, num_slot, x, y);
+
+ if (kind != INPUT_MULTI_TOUCH_TYPE_BEGIN &&
+ kind != INPUT_MULTI_TOUCH_TYPE_UPDATE &&
+ kind != INPUT_MULTI_TOUCH_TYPE_CANCEL &&
+ kind != INPUT_MULTI_TOUCH_TYPE_END)
+ {
+ g_dbus_method_invocation_return_error(
+ invocation, DBUS_DISPLAY_ERROR,
+ DBUS_DISPLAY_ERROR_INVALID,
+ "Invalid touch event kind");
+ return DBUS_METHOD_INVOCATION_HANDLED;
+ }
+ width = qemu_console_get_width(ddc->dcl.con, 0);
+ height = qemu_console_get_height(ddc->dcl.con, 0);
+
+ console_handle_touch_event(ddc->dcl.con, touch_slots,
+ num_slot, width, height,
+ x, y, kind, &error);
+ if (error != NULL) {
+ g_dbus_method_invocation_return_error(
+ invocation, DBUS_DISPLAY_ERROR,
+ DBUS_DISPLAY_ERROR_INVALID,
+ "%s", error_get_pretty(error));
+ error_free(error);
+ } else {
+ qemu_dbus_display1_multi_touch_complete_send_event(ddc->iface_touch,
+ invocation);
+ }
+ return DBUS_METHOD_INVOCATION_HANDLED;
+}
+
static gboolean
dbus_mouse_set_pos(DBusDisplayConsole *ddc,
GDBusMethodInvocation *invocation,
g_autofree char *label = NULL;
char device_addr[256] = "";
DBusDisplayConsole *ddc;
- int idx;
+ int idx, i;
+ const char *interfaces[] = {
+ "org.qemu.Display1.Keyboard",
+ "org.qemu.Display1.Mouse",
+ "org.qemu.Display1.MultiTouch",
+ NULL
+ };
assert(display);
assert(con);
"width", qemu_console_get_width(con, 0),
"height", qemu_console_get_height(con, 0),
"device-address", device_addr,
+ "interfaces", interfaces,
NULL);
g_object_connect(ddc->iface,
"swapped-signal::handle-register-listener",
g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(ddc),
G_DBUS_INTERFACE_SKELETON(ddc->iface_mouse));
+ ddc->iface_touch = qemu_dbus_display1_multi_touch_skeleton_new();
+ g_object_connect(ddc->iface_touch,
+ "swapped-signal::handle-send-event", dbus_touch_send_event, ddc,
+ NULL);
+ qemu_dbus_display1_multi_touch_set_max_slots(ddc->iface_touch,
+ INPUT_EVENT_SLOTS_MAX);
+ g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(ddc),
+ G_DBUS_INTERFACE_SKELETON(ddc->iface_touch));
+
+ for (i = 0; i < INPUT_EVENT_SLOTS_MAX; i++) {
+ struct touch_slot *slot = &touch_slots[i];
+ slot->tracking_id = -1;
+ }
+
register_displaychangelistener(&ddc->dcl);
ddc->mouse_mode_notifier.notify = dbus_mouse_mode_change;
qemu_add_mouse_mode_change_notifier(&ddc->mouse_mode_notifier);
The list of consoles available on ``/org/qemu/Display1/Console_$id``.
-->
<property name="ConsoleIDs" type="au" access="read"/>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ /org/qemu/Display1/VM object, and can be used by clients to
+ detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
</interface>
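A client can therefore probe for optional features with a single property
read. A minimal sketch using plain GIO (hypothetical helper, no generated
proxies):

static gboolean example_has_interface(GDBusProxy *vm_proxy, const char *iface)
{
    g_autoptr(GVariant) v =
        g_dbus_proxy_get_cached_property(vm_proxy, "Interfaces");
    g_autofree const char **ifaces = NULL;

    if (!v) {
        return FALSE; /* pre-Interfaces server */
    }
    ifaces = g_variant_get_strv(v, NULL);
    return g_strv_contains((const char * const *)ifaces, iface);
}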
<!--
"Text" (see :dbus:prop:`Type` and other properties).
Interactions with a console may be done with
- :dbus:iface:`org.qemu.Display1.Keyboard` and
- :dbus:iface:`org.qemu.Display1.Mouse` interfaces when available.
+ :dbus:iface:`org.qemu.Display1.Keyboard`,
+ :dbus:iface:`org.qemu.Display1.Mouse` and
+ :dbus:iface:`org.qemu.Display1.MultiTouch` interfaces when available.
-->
<interface name="org.qemu.Display1.Console">
<!--
:dbus:iface:`org.qemu.Display1.Listener` interface.
-->
<method name="RegisterListener">
+ <?if $(env.TARGETOS) == windows?>
+ <arg type="ay" name="listener" direction="in">
+ <annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
+ </arg>
+ <?else?>
<arg type="h" name="listener" direction="in"/>
+ <?endif?>
</method>
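On Unix the argument is a D-Bus handle ("h"), i.e. an index into the
out-of-band fd list of the message. A client-side sketch, assuming one end
of a socketpair() in fd (QEMU then speaks org.qemu.Display1.Listener over
the other end):

#ifdef G_OS_UNIX
static gboolean example_register_listener(GDBusProxy *console_proxy, int fd,
                                          GError **err)
{
    g_autoptr(GUnixFDList) fd_list = g_unix_fd_list_new();
    int handle = g_unix_fd_list_append(fd_list, fd, err);

    if (handle < 0) {
        return FALSE;
    }
    return g_dbus_proxy_call_with_unix_fd_list_sync(
        console_proxy, "RegisterListener",
        g_variant_new("(h)", handle),
        G_DBUS_CALL_FLAGS_NONE, -1, fd_list, NULL, NULL, err) != NULL;
}
#endif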
<!--
The device address (ex: "pci/0000/02.0").
-->
<property name="DeviceAddress" type="s" access="read"/>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ ``/org/qemu/Display1/Console_$id`` object, and can be used by clients
+ to detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
</interface>
<!--
org.qemu.Display1.Keyboard:
- This interface in implemented on ``/org/qemu/Display1/Console_$id`` (see
+ This interface is optionally implemented on
+ ``/org/qemu/Display1/Console_$id`` (see
:dbus:iface:`~org.qemu.Display1.Console`).
-->
<interface name="org.qemu.Display1.Keyboard">
<!--
org.qemu.Display1.Mouse:
- This interface in implemented on ``/org/qemu/Display1/Console_$id`` (see
+ This interface is optionally implemented on
+ ``/org/qemu/Display1/Console_$id`` (see
:dbus:iface:`~org.qemu.Display1.Console` documentation).
.. _dbus-button-values:
<property name="IsAbsolute" type="b" access="read"/>
</interface>
+ <!--
+ org.qemu.Display1.MultiTouch:
+
+ This interface is optionally implemented on
+ ``/org/qemu/Display1/Console_$id`` (see
+ :dbus:iface:`~org.qemu.Display1.Console` documentation).
+
+ .. _dbus-kind-values:
+
+ **Kind values**::
+
+ Begin = 0
+ Update = 1
+ End = 2
+ Cancel = 3
+ -->
+ <interface name="org.qemu.Display1.MultiTouch">
+ <!--
+ SendEvent:
+ @kind: The touch event kind (see the kind values above).
+ @num_slot: The slot number.
+ @x: The x coordinate.
+ @y: The y coordinate.
+
+ Send a touch gesture event.
+ -->
+ <method name="SendEvent">
+ <arg type="u" name="kind" direction="in"/>
+ <arg type="t" name="num_slot" direction="in"/>
+ <arg type="d" name="x" direction="in"/>
+ <arg type="d" name="y" direction="in"/>
+ </method>
+
+ <!--
+ MaxSlots:
+
+ The maximum number of slots.
+ -->
+ <property name="MaxSlots" type="i" access="read"/>
+ </interface>
+
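For illustration, injecting a single touch-begin from a client could look
like this (sketch; mt_proxy is assumed to be a GDBusProxy bound to
org.qemu.Display1.MultiTouch on the console object path):

static gboolean example_send_touch(GDBusProxy *mt_proxy, GError **err)
{
    g_autoptr(GVariant) ret =
        g_dbus_proxy_call_sync(mt_proxy, "SendEvent",
                               g_variant_new("(utdd)",
                                             0,          /* kind: Begin */
                                             (guint64)0, /* num_slot */
                                             320.0, 240.0),
                               G_DBUS_CALL_FLAGS_NONE, -1, NULL, err);
    return ret != NULL;
}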
<!--
org.qemu.Display1.Listener:
</arg>
</method>
+ <?if $(env.TARGETOS) != windows?>
<!--
ScanoutDMABUF:
@dmabuf: the DMABUF file descriptor.
<arg type="i" name="width" direction="in"/>
<arg type="i" name="height" direction="in"/>
</method>
+ <?endif?>
<!--
Disable:
<annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
</arg>
</method>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ /org/qemu/Display1/Listener object, and can be used by clients to
+ detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
+ </interface>
+
+ <!--
+ org.qemu.Display1.Listener.Win32.Map:
+
+ This optional client-side interface can complement
+ org.qemu.Display1.Listener on ``/org/qemu/Display1/Listener`` for Windows
+ specific shared memory scanouts.
+ -->
+ <interface name="org.qemu.Display1.Listener.Win32.Map">
+ <!--
+ ScanoutMap:
+ @handle: the shared map handle value.
+ @offset: mapping offset.
+ @width: display width, in pixels.
+ @height: display height, in pixels.
+ @stride: stride, in bytes.
+ @pixman_format: image format (ex: ``PIXMAN_X8R8G8B8``).
+
+ Resize and update the display content with a shared map.
+ -->
+ <method name="ScanoutMap">
+ <arg type="t" name="handle" direction="in"/>
+ <arg type="u" name="offset" direction="in"/>
+ <arg type="u" name="width" direction="in"/>
+ <arg type="u" name="height" direction="in"/>
+ <arg type="u" name="stride" direction="in"/>
+ <arg type="u" name="pixman_format" direction="in"/>
+ </method>
+
+ <!--
+ UpdateMap:
+ @x: the X update position, in pixels.
+ @y: the Y update position, in pixels.
+ @width: the update width, in pixels.
+ @height: the update height, in pixels.
+
+ Update the display content with the current shared map and the given region.
+ -->
+ <method name="UpdateMap">
+ <arg type="i" name="x" direction="in"/>
+ <arg type="i" name="y" direction="in"/>
+ <arg type="i" name="width" direction="in"/>
+ <arg type="i" name="height" direction="in"/>
+ </method>
+ </interface>
+
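Since @handle is duplicated into the client process before the call, the
client can map it directly. A sketch of the receiving side (hypothetical
helper; error handling elided):

#ifdef WIN32
static void *example_scanout_map(guint64 handle, guint32 offset,
                                 guint32 height, guint32 stride)
{
    void *bits = MapViewOfFile((HANDLE)(uintptr_t)handle, FILE_MAP_READ,
                               0, 0, offset + (size_t)height * stride);

    /* frame data starts at the given offset into the mapping */
    return bits ? (guint8 *)bits + offset : NULL;
}
#endif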
+ <!--
+ org.qemu.Display1.Listener.Win32.D3d11:
+
+ This optional client-side interface can complement
+ org.qemu.Display1.Listener on ``/org/qemu/Display1/Listener`` for Windows
+ specific Direct3D texture sharing of the scanouts.
+ -->
+ <interface name="org.qemu.Display1.Listener.Win32.D3d11">
+ <!--
+ ScanoutTexture2d:
+ @handle: the NT handle for the shared texture (to be opened back with ID3D11Device1::OpenSharedResource1).
+ @texture_width: texture width, in pixels.
+ @texture_height: texture height, in pixels.
+ @y0_top: whether Y position 0 is the top or not.
+ @x: the X scanout position, in pixels.
+ @y: the Y scanout position, in pixels.
+ @width: the scanout width, in pixels.
+ @height: the scanout height, in pixels.
+
+ Resize and update the display content with a Direct3D 11 2D texture.
+ You must acquire and release the associated KeyedMutex 0 during rendering.
+ -->
+ <method name="ScanoutTexture2d">
+ <arg type="t" name="handle" direction="in"/>
+ <arg type="u" name="texture_width" direction="in"/>
+ <arg type="u" name="texture_height" direction="in"/>
+ <arg type="b" name="y0_top" direction="in"/>
+ <arg type="u" name="x" direction="in"/>
+ <arg type="u" name="y" direction="in"/>
+ <arg type="u" name="width" direction="in"/>
+ <arg type="u" name="height" direction="in"/>
+ </method>
+
+ <!--
+ UpdateTexture2d:
+ @x: the X update position, in pixels.
+ @y: the Y update position, in pixels.
+ @width: the update width, in pixels.
+ @height: the update height, in pixels.
+
+ Update the display content with the current Direct3D 2D texture and the given region.
+ You must acquire and release the associated KeyedMutex 0 during rendering.
+ -->
+ <method name="UpdateTexture2d">
+ <arg type="i" name="x" direction="in"/>
+ <arg type="i" name="y" direction="in"/>
+ <arg type="i" name="width" direction="in"/>
+ <arg type="i" name="height" direction="in"/>
+ </method>
</interface>
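The receiving side, in COM-style C (sketch; assumes an existing
ID3D11Device1 and that the IIDs are resolved by the toolchain, e.g. via
dxguid). KeyedMutex 0 must be acquired before reading the texture and
released afterwards, mirroring the server side:

#ifdef WIN32
static ID3D11Texture2D *
example_open_shared_texture(ID3D11Device1 *dev, guint64 handle)
{
    ID3D11Texture2D *tex = NULL;
    HRESULT hr;

    hr = dev->lpVtbl->OpenSharedResource1(dev, (HANDLE)(uintptr_t)handle,
                                          &IID_ID3D11Texture2D,
                                          (void **)&tex);
    return SUCCEEDED(hr) ? tex : NULL;
}
#endif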
<!--
<annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
</arg>
</method>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ /org/qemu/Display1/Clipboard object, and can be used by clients to
+ detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
</interface>
<!--
:dbus:iface:`org.qemu.Display1.AudioOutListener` interface.
-->
<method name="RegisterOutListener">
+ <?if $(env.TARGETOS) == windows?>
+ <arg type="ay" name="listener" direction="in">
+ <annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
+ </arg>
+ <?else?>
<arg type="h" name="listener" direction="in"/>
+ <?endif?>
</method>
<!--
:dbus:iface:`org.qemu.Display1.AudioInListener` interface.
-->
<method name="RegisterInListener">
+ <?if $(env.TARGETOS) == windows?>
+ <arg type="ay" name="listener" direction="in">
+ <annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
+ </arg>
+ <?else?>
<arg type="h" name="listener" direction="in"/>
+ <?endif?>
</method>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ /org/qemu/Display1/Audio object, and can be used by clients to
+ detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
</interface>
<!--
<annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
</arg>
</method>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ /org/qemu/Display1/AudioOutListener object, and can be used by clients
+ to detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
</interface>
<!--
<annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
</arg>
</method>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ /org/qemu/Display1/AudioInListener object, and can be used by clients
+ to detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
</interface>
<!--
The current handler, if any, will be replaced.
-->
<method name="Register">
+ <?if $(env.TARGETOS) == windows?>
+ <arg type="ay" name="listener" direction="in">
+ <annotation name="org.gtk.GDBus.C.ForceGVariant" value="true"/>
+ </arg>
+ <?else?>
<arg type="h" name="stream" direction="in"/>
+ <?endif?>
</method>
<!--
The D-Bus unique name of the registered handler.
-->
<property name="Owner" type="s" access="read"/>
+
+ <!--
+ Interfaces:
+
+ This property lists extra interfaces provided by the
+ ``/org/qemu/Display1/Chardev_$i`` object, and can be used by clients
+ to detect the capabilities of the endpoint they are communicating with.
+
+ Unlike the standard D-Bus Introspectable interface, querying this
+ property does not require parsing XML.
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="Interfaces" type="as" access="read"/>
</interface>
</node>
*/
#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "dbus.h"
+#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h>
+#endif
+#ifdef WIN32
+#include <d3d11.h>
+#include <dxgi1_2.h>
+#endif
#ifdef CONFIG_OPENGL
#include "ui/shader.h"
#endif
#include "trace.h"
+static void dbus_gfx_switch(DisplayChangeListener *dcl,
+ struct DisplaySurface *new_surface);
+
+enum share_kind {
+ SHARE_KIND_NONE,
+ SHARE_KIND_MAPPED,
+ SHARE_KIND_D3DTEX,
+};
+
struct _DBusDisplayListener {
GObject parent;
DisplayChangeListener dcl;
DisplaySurface *ds;
+ enum share_kind ds_share;
+
int gl_updates;
+
+ bool ds_mapped;
+ bool can_share_map;
+
+#ifdef WIN32
+ QemuDBusDisplay1ListenerWin32Map *map_proxy;
+ QemuDBusDisplay1ListenerWin32D3d11 *d3d11_proxy;
+ HANDLE peer_process;
+ ID3D11Texture2D *d3d_texture;
+#ifdef CONFIG_OPENGL
+ egl_fb fb;
+#endif
+#endif
};
G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT)
-#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM)
+static void dbus_gfx_update(DisplayChangeListener *dcl,
+ int x, int y, int w, int h);
+
+#ifdef CONFIG_OPENGL
+static void dbus_scanout_disable(DisplayChangeListener *dcl)
+{
+ DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
+
+ qemu_dbus_display1_listener_call_disable(
+ ddl->proxy, G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL);
+}
+
+#ifdef WIN32
+static bool d3d_texture2d_share(ID3D11Texture2D *d3d_texture,
+ HANDLE *handle, Error **errp)
+{
+ IDXGIResource1 *dxgiResource = NULL;
+ HRESULT hr;
+
+ hr = d3d_texture->lpVtbl->QueryInterface(d3d_texture,
+ &IID_IDXGIResource1,
+ (void **)&dxgiResource);
+ if (FAILED(hr)) {
+ goto fail;
+ }
+
+ hr = dxgiResource->lpVtbl->CreateSharedHandle(
+ dxgiResource,
+ NULL,
+ DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE,
+ NULL,
+ handle
+ );
+
+ dxgiResource->lpVtbl->Release(dxgiResource);
+
+ if (SUCCEEDED(hr)) {
+ return true;
+ }
+
+fail:
+ error_setg_win32(errp, GetLastError(), "failed to create shared handle");
+ return false;
+}
+
+static bool d3d_texture2d_acquire0(ID3D11Texture2D *d3d_texture, Error **errp)
+{
+ IDXGIKeyedMutex *dxgiMutex = NULL;
+ HRESULT hr;
+
+ hr = d3d_texture->lpVtbl->QueryInterface(d3d_texture,
+ &IID_IDXGIKeyedMutex,
+ (void **)&dxgiMutex);
+ if (FAILED(hr)) {
+ goto fail;
+ }
+
+ hr = dxgiMutex->lpVtbl->AcquireSync(dxgiMutex, 0, INFINITE);
+
+ dxgiMutex->lpVtbl->Release(dxgiMutex);
+
+ if (SUCCEEDED(hr)) {
+ return true;
+ }
+
+fail:
+ error_setg_win32(errp, GetLastError(), "failed to acquire texture mutex");
+ return false;
+}
+
+static bool d3d_texture2d_release0(ID3D11Texture2D *d3d_texture, Error **errp)
+{
+ IDXGIKeyedMutex *dxgiMutex = NULL;
+ HRESULT hr;
+
+ hr = d3d_texture->lpVtbl->QueryInterface(d3d_texture,
+ &IID_IDXGIKeyedMutex,
+ (void **)&dxgiMutex);
+ if (FAILED(hr)) {
+ goto fail;
+ }
+
+ hr = dxgiMutex->lpVtbl->ReleaseSync(dxgiMutex, 0);
+
+ dxgiMutex->lpVtbl->Release(dxgiMutex);
+
+ if (SUCCEEDED(hr)) {
+ return true;
+ }
+
+fail:
+ error_setg_win32(errp, GetLastError(), "failed to release texture mutex");
+ return false;
+}
+#endif /* WIN32 */
+
static void dbus_update_gl_cb(GObject *source_object,
- GAsyncResult *res,
- gpointer user_data)
+ GAsyncResult *res,
+ gpointer user_data)
{
g_autoptr(GError) err = NULL;
DBusDisplayListener *ddl = user_data;
+ bool success;
+
+#ifdef CONFIG_GBM
+ success = qemu_dbus_display1_listener_call_update_dmabuf_finish(
+ ddl->proxy, res, &err);
+#endif
+
+#ifdef WIN32
+ success = qemu_dbus_display1_listener_win32_d3d11_call_update_texture2d_finish(
+ ddl->d3d11_proxy, res, &err);
+ d3d_texture2d_acquire0(ddl->d3d_texture, &error_warn);
+#endif
- if (!qemu_dbus_display1_listener_call_update_dmabuf_finish(ddl->proxy,
- res, &err)) {
+ if (!success) {
error_report("Failed to call update: %s", err->message);
}
g_object_unref(ddl);
}
-static void dbus_call_update_gl(DBusDisplayListener *ddl,
+static void dbus_call_update_gl(DisplayChangeListener *dcl,
int x, int y, int w, int h)
{
- graphic_hw_gl_block(ddl->dcl.con, true);
+ DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
+
+ trace_dbus_update_gl(x, y, w, h);
+
glFlush();
+#ifdef CONFIG_GBM
+ graphic_hw_gl_block(ddl->dcl.con, true);
qemu_dbus_display1_listener_call_update_dmabuf(ddl->proxy,
x, y, w, h,
G_DBUS_CALL_FLAGS_NONE,
DBUS_DEFAULT_TIMEOUT, NULL,
dbus_update_gl_cb,
g_object_ref(ddl));
-}
-
-static void dbus_scanout_disable(DisplayChangeListener *dcl)
-{
- DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
+#endif
- ddl->ds = NULL;
- qemu_dbus_display1_listener_call_disable(
- ddl->proxy, G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL);
+#ifdef WIN32
+ switch (ddl->ds_share) {
+ case SHARE_KIND_MAPPED:
+ egl_fb_read_rect(ddl->ds, &ddl->fb, x, y, w, h);
+ dbus_gfx_update(dcl, x, y, w, h);
+ break;
+ case SHARE_KIND_D3DTEX: {
+ /* block scope, so that a declaration may follow the case label */
+ Error *err = NULL;
+ assert(ddl->d3d_texture);
+
+ graphic_hw_gl_block(ddl->dcl.con, true);
+ if (!d3d_texture2d_release0(ddl->d3d_texture, &err)) {
+ error_report_err(err);
+ return;
+ }
+ qemu_dbus_display1_listener_win32_d3d11_call_update_texture2d(
+ ddl->d3d11_proxy,
+ x, y, w, h,
+ G_DBUS_CALL_FLAGS_NONE,
+ DBUS_DEFAULT_TIMEOUT, NULL,
+ dbus_update_gl_cb,
+ g_object_ref(ddl));
+ break;
+ }
+ default:
+ g_warn_if_reached();
+ }
+#endif
}
+#ifdef CONFIG_GBM
static void dbus_scanout_dmabuf(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf)
{
fd_list,
NULL, NULL, NULL);
}
+#endif /* GBM */
+#endif /* OPENGL */
+
+#ifdef WIN32
+static bool dbus_scanout_map(DBusDisplayListener *ddl)
+{
+ g_autoptr(GError) err = NULL;
+ BOOL success;
+ HANDLE target_handle;
+
+ if (ddl->ds_share == SHARE_KIND_MAPPED) {
+ return true;
+ }
+
+ if (!ddl->can_share_map || !ddl->ds->handle) {
+ return false;
+ }
+
+ success = DuplicateHandle(
+ GetCurrentProcess(),
+ ddl->ds->handle,
+ ddl->peer_process,
+ &target_handle,
+ FILE_MAP_READ | SECTION_QUERY,
+ FALSE, 0);
+ if (!success) {
+ g_autofree char *msg = g_win32_error_message(GetLastError());
+ g_debug("Failed to DuplicateHandle: %s", msg);
+ ddl->can_share_map = false;
+ return false;
+ }
+
+ if (!qemu_dbus_display1_listener_win32_map_call_scanout_map_sync(
+ ddl->map_proxy,
+ GPOINTER_TO_UINT(target_handle),
+ ddl->ds->handle_offset,
+ surface_width(ddl->ds),
+ surface_height(ddl->ds),
+ surface_stride(ddl->ds),
+ surface_format(ddl->ds),
+ G_DBUS_CALL_FLAGS_NONE,
+ DBUS_DEFAULT_TIMEOUT,
+ NULL,
+ &err)) {
+ g_debug("Failed to call ScanoutMap: %s", err->message);
+ ddl->can_share_map = false;
+ return false;
+ }
+
+ ddl->ds_share = SHARE_KIND_MAPPED;
+
+ return true;
+}
+
+static bool
+dbus_scanout_share_d3d_texture(
+ DBusDisplayListener *ddl,
+ ID3D11Texture2D *tex,
+ bool backing_y_0_top,
+ uint32_t backing_width,
+ uint32_t backing_height,
+ uint32_t x, uint32_t y,
+ uint32_t w, uint32_t h)
+{
+ Error *err = NULL;
+ BOOL success;
+ HANDLE share_handle, target_handle;
+
+ if (!d3d_texture2d_release0(tex, &err)) {
+ error_report_err(err);
+ return false;
+ }
+
+ if (!d3d_texture2d_share(tex, &share_handle, &err)) {
+ error_report_err(err);
+ return false;
+ }
+
+ success = DuplicateHandle(
+ GetCurrentProcess(),
+ share_handle,
+ ddl->peer_process,
+ &target_handle,
+ 0,
+ FALSE, DUPLICATE_SAME_ACCESS);
+ if (!success) {
+ g_autofree char *msg = g_win32_error_message(GetLastError());
+ g_debug("Failed to DuplicateHandle: %s", msg);
+ CloseHandle(share_handle);
+ return false;
+ }
+
+ qemu_dbus_display1_listener_win32_d3d11_call_scanout_texture2d(
+ ddl->d3d11_proxy,
+ GPOINTER_TO_INT(target_handle),
+ backing_width,
+ backing_height,
+ backing_y_0_top,
+ x, y, w, h,
+ G_DBUS_CALL_FLAGS_NONE,
+ -1,
+ NULL, NULL, NULL);
+
+ CloseHandle(share_handle);
+
+ if (!d3d_texture2d_acquire0(tex, &err)) {
+ error_report_err(err);
+ return false;
+ }
+
+ ddl->d3d_texture = tex;
+ ddl->ds_share = SHARE_KIND_D3DTEX;
+
+ return true;
+}
+#endif
+#ifdef CONFIG_OPENGL
static void dbus_scanout_texture(DisplayChangeListener *dcl,
uint32_t tex_id,
bool backing_y_0_top,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h)
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d)
{
+ trace_dbus_scanout_texture(tex_id, backing_y_0_top,
+ backing_width, backing_height, x, y, w, h);
+#ifdef CONFIG_GBM
QemuDmaBuf dmabuf = {
.width = backing_width,
.height = backing_height,
dbus_scanout_dmabuf(dcl, &dmabuf);
close(dmabuf.fd);
+#endif
+
+#ifdef WIN32
+ DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
+
+ /* there must be a matching gfx_switch before */
+ assert(surface_width(ddl->ds) == w);
+ assert(surface_height(ddl->ds) == h);
+
+ if (d3d_tex2d) {
+ dbus_scanout_share_d3d_texture(ddl, d3d_tex2d, backing_y_0_top,
+ backing_width, backing_height, x, y, w, h);
+ } else {
+ dbus_scanout_map(ddl);
+ egl_fb_setup_for_tex(&ddl->fb, backing_width, backing_height, tex_id, false);
+ }
+#endif
}
+#ifdef CONFIG_GBM
static void dbus_cursor_dmabuf(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf, bool have_hot,
uint32_t hot_x, uint32_t hot_y)
NULL);
}
-static void dbus_cursor_position(DisplayChangeListener *dcl,
+static void dbus_release_dmabuf(DisplayChangeListener *dcl,
+ QemuDmaBuf *dmabuf)
+{
+ dbus_scanout_disable(dcl);
+}
+#endif /* GBM */
+
+static void dbus_gl_cursor_position(DisplayChangeListener *dcl,
uint32_t pos_x, uint32_t pos_y)
{
DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL);
}
-static void dbus_release_dmabuf(DisplayChangeListener *dcl,
- QemuDmaBuf *dmabuf)
-{
- dbus_scanout_disable(dcl);
-}
-
static void dbus_scanout_update(DisplayChangeListener *dcl,
uint32_t x, uint32_t y,
uint32_t w, uint32_t h)
{
- DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
-
- dbus_call_update_gl(ddl, x, y, w, h);
+ dbus_call_update_gl(dcl, x, y, w, h);
}
static void dbus_gl_refresh(DisplayChangeListener *dcl)
}
if (ddl->gl_updates) {
- dbus_call_update_gl(ddl, 0, 0,
+ dbus_call_update_gl(dcl, 0, 0,
surface_width(ddl->ds), surface_height(ddl->ds));
ddl->gl_updates = 0;
}
}
-#endif
+#endif /* OPENGL */
static void dbus_refresh(DisplayChangeListener *dcl)
{
graphic_hw_update(dcl->con);
}
-#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM)
+#ifdef CONFIG_OPENGL
static void dbus_gl_gfx_update(DisplayChangeListener *dcl,
int x, int y, int w, int h)
{
size_t stride;
assert(ddl->ds);
- stride = w * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(surface_format(ddl->ds)), 8);
trace_dbus_update(x, y, w, h);
+#ifdef WIN32
+ if (dbus_scanout_map(ddl)) {
+ qemu_dbus_display1_listener_win32_map_call_update_map(
+ ddl->map_proxy,
+ x, y, w, h,
+ G_DBUS_CALL_FLAGS_NONE,
+ DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL);
+ return;
+ }
+#endif
+
if (x == 0 && y == 0 && w == surface_width(ddl->ds) && h == surface_height(ddl->ds)) {
v_data = g_variant_new_from_data(
G_VARIANT_TYPE("ay"),
}
/* make a copy, since gvariant only handles linear data */
+ stride = w * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(surface_format(ddl->ds)), 8);
img = pixman_image_create_bits(surface_format(ddl->ds),
w, h, NULL, stride);
pixman_image_composite(PIXMAN_OP_SRC, ddl->ds->image, NULL, img,
DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL);
}
-#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM)
+#ifdef CONFIG_OPENGL
static void dbus_gl_gfx_switch(DisplayChangeListener *dcl,
struct DisplaySurface *new_surface)
{
DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
+ trace_dbus_gl_gfx_switch(new_surface);
+
ddl->ds = new_surface;
+ ddl->ds_share = SHARE_KIND_NONE;
if (ddl->ds) {
int width = surface_width(ddl->ds);
int height = surface_height(ddl->ds);
    /* TODO: send the dmabuf lazily (unnecessary sends happen otherwise) */
dbus_scanout_texture(&ddl->dcl, ddl->ds->texture, false,
- width, height, 0, 0, width, height);
+ width, height, 0, 0, width, height, NULL);
}
}
#endif
DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
ddl->ds = new_surface;
- if (!ddl->ds) {
- /* why not call disable instead? */
- return;
- }
+ ddl->ds_share = SHARE_KIND_NONE;
}
static void dbus_mouse_set(DisplayChangeListener *dcl,
NULL);
}
-#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM)
+#ifdef CONFIG_OPENGL
const DisplayChangeListenerOps dbus_gl_dcl_ops = {
.dpy_name = "dbus-gl",
.dpy_gfx_update = dbus_gl_gfx_update,
.dpy_gl_scanout_disable = dbus_scanout_disable,
.dpy_gl_scanout_texture = dbus_scanout_texture,
+#ifdef CONFIG_GBM
.dpy_gl_scanout_dmabuf = dbus_scanout_dmabuf,
.dpy_gl_cursor_dmabuf = dbus_cursor_dmabuf,
- .dpy_gl_cursor_position = dbus_cursor_position,
.dpy_gl_release_dmabuf = dbus_release_dmabuf,
+#endif
+ .dpy_gl_cursor_position = dbus_gl_cursor_position,
.dpy_gl_update = dbus_scanout_update,
};
#endif
g_clear_object(&ddl->conn);
g_clear_pointer(&ddl->bus_name, g_free);
g_clear_object(&ddl->proxy);
+#ifdef WIN32
+ g_clear_object(&ddl->map_proxy);
+ g_clear_object(&ddl->d3d11_proxy);
+ g_clear_pointer(&ddl->peer_process, CloseHandle);
+#ifdef CONFIG_OPENGL
+ egl_fb_destroy(&ddl->fb);
+#endif
+#endif
G_OBJECT_CLASS(dbus_display_listener_parent_class)->dispose(object);
}
DBusDisplayListener *ddl = DBUS_DISPLAY_LISTENER(object);
ddl->dcl.ops = &dbus_dcl_ops;
-#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM)
+#ifdef CONFIG_OPENGL
if (display_opengl) {
ddl->dcl.ops = &dbus_gl_dcl_ops;
}
return ddl->console;
}
+#ifdef WIN32
+static bool
+dbus_display_listener_implements(DBusDisplayListener *ddl, const char *iface)
+{
+ QemuDBusDisplay1Listener *l = QEMU_DBUS_DISPLAY1_LISTENER(ddl->proxy);
+ bool implements;
+
+ implements = g_strv_contains(qemu_dbus_display1_listener_get_interfaces(l), iface);
+ if (!implements) {
+ g_debug("Display listener does not implement: `%s`", iface);
+ }
+
+ return implements;
+}
+
+static bool
+dbus_display_listener_setup_peer_process(DBusDisplayListener *ddl)
+{
+ g_autoptr(GError) err = NULL;
+ GDBusConnection *conn;
+ GIOStream *stream;
+ GSocket *sock;
+ g_autoptr(GCredentials) creds = NULL;
+ DWORD *pid;
+
+ if (ddl->peer_process) {
+ return true;
+ }
+
+ conn = g_dbus_proxy_get_connection(G_DBUS_PROXY(ddl->proxy));
+ stream = g_dbus_connection_get_stream(conn);
+
+ if (!G_IS_UNIX_CONNECTION(stream)) {
+ return false;
+ }
+
+ sock = g_socket_connection_get_socket(G_SOCKET_CONNECTION(stream));
+ creds = g_socket_get_credentials(sock, &err);
+
+ if (!creds) {
+ g_debug("Failed to get peer credentials: %s", err->message);
+ return false;
+ }
+
+ pid = g_credentials_get_native(creds, G_CREDENTIALS_TYPE_WIN32_PID);
+
+ if (pid == NULL) {
+ g_debug("Failed to get peer PID");
+ return false;
+ }
+
+ ddl->peer_process = OpenProcess(
+ PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION,
+ false, *pid);
+
+ if (!ddl->peer_process) {
+ g_autofree char *msg = g_win32_error_message(GetLastError());
+ g_debug("Failed to OpenProcess: %s", msg);
+ return false;
+ }
+
+ return true;
+}
+#endif
+
+static void
+dbus_display_listener_setup_d3d11(DBusDisplayListener *ddl)
+{
+#ifdef WIN32
+ g_autoptr(GError) err = NULL;
+
+ if (!dbus_display_listener_implements(ddl,
+ "org.qemu.Display1.Listener.Win32.D3d11")) {
+ return;
+ }
+
+ if (!dbus_display_listener_setup_peer_process(ddl)) {
+ return;
+ }
+
+ ddl->d3d11_proxy =
+ qemu_dbus_display1_listener_win32_d3d11_proxy_new_sync(ddl->conn,
+ G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START,
+ NULL,
+ "/org/qemu/Display1/Listener",
+ NULL,
+ &err);
+ if (!ddl->d3d11_proxy) {
+ g_debug("Failed to setup win32 d3d11 proxy: %s", err->message);
+ return;
+ }
+#endif
+}
+
+static void
+dbus_display_listener_setup_shared_map(DBusDisplayListener *ddl)
+{
+#ifdef WIN32
+ g_autoptr(GError) err = NULL;
+
+ if (!dbus_display_listener_implements(ddl, "org.qemu.Display1.Listener.Win32.Map")) {
+ return;
+ }
+
+ if (!dbus_display_listener_setup_peer_process(ddl)) {
+ return;
+ }
+
+ ddl->map_proxy =
+ qemu_dbus_display1_listener_win32_map_proxy_new_sync(ddl->conn,
+ G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START,
+ NULL,
+ "/org/qemu/Display1/Listener",
+ NULL,
+ &err);
+ if (!ddl->map_proxy) {
+ g_debug("Failed to setup win32 map proxy: %s", err->message);
+ return;
+ }
+
+ ddl->can_share_map = true;
+#endif
+}
+
DBusDisplayListener *
dbus_display_listener_new(const char *bus_name,
GDBusConnection *conn,
ddl->conn = conn;
ddl->console = console;
+ dbus_display_listener_setup_shared_map(ddl);
+ dbus_display_listener_setup_d3d11(ddl);
+
con = qemu_console_lookup_by_index(dbus_display_console_get_index(console));
assert(con);
ddl->dcl.con = con;
static QEMUGLContext dbus_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params)
{
-#ifdef CONFIG_GBM
eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
qemu_egl_rn_ctx);
-#endif
return qemu_egl_create_context(dgc, params);
}
DisplayChangeListener *dcl)
{
return
-#ifdef CONFIG_GBM
dcl->ops == &dbus_gl_dcl_ops ||
-#endif
dcl->ops == &dbus_console_dcl_ops;
}
Notifier notifier;
};
+#ifdef WIN32
+bool
+dbus_win32_import_socket(GDBusMethodInvocation *invocation,
+ GVariant *arg_listener, int *socket);
+#endif
+
#define TYPE_DBUS_DISPLAY "dbus-display"
OBJECT_DECLARE_SIMPLE_TYPE(DBusDisplay, DBUS_DISPLAY)
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "ui/egl-context.h"
QEMUGLContext qemu_egl_create_context(DisplayGLCtx *dgc,
int qemu_egl_make_context_current(DisplayGLCtx *dgc,
QEMUGLContext ctx)
{
- return eglMakeCurrent(qemu_egl_display,
- EGL_NO_SURFACE, EGL_NO_SURFACE, ctx);
+ if (!eglMakeCurrent(qemu_egl_display,
+ EGL_NO_SURFACE, EGL_NO_SURFACE, ctx)) {
+ error_report("egl: eglMakeCurrent failed: %s", qemu_egl_get_error_string());
+ return -1;
+ }
+
+ return 0;
}
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h)
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d)
{
egl_dpy *edpy = container_of(dcl, egl_dpy, dcl);
}
}
+#ifdef CONFIG_GBM
+
static void egl_scanout_dmabuf(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf)
{
egl_scanout_texture(dcl, dmabuf->texture,
false, dmabuf->width, dmabuf->height,
- 0, 0, dmabuf->width, dmabuf->height);
+ 0, 0, dmabuf->width, dmabuf->height, NULL);
}
static void egl_cursor_dmabuf(DisplayChangeListener *dcl,
}
}
+static void egl_release_dmabuf(DisplayChangeListener *dcl,
+ QemuDmaBuf *dmabuf)
+{
+ egl_dmabuf_release_texture(dmabuf);
+}
+
+#endif
+
static void egl_cursor_position(DisplayChangeListener *dcl,
uint32_t pos_x, uint32_t pos_y)
{
edpy->pos_y = pos_y;
}
-static void egl_release_dmabuf(DisplayChangeListener *dcl,
- QemuDmaBuf *dmabuf)
-{
- egl_dmabuf_release_texture(dmabuf);
-}
-
static void egl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y,
uint32_t w, uint32_t h)
.dpy_gl_scanout_disable = egl_scanout_disable,
.dpy_gl_scanout_texture = egl_scanout_texture,
+#ifdef CONFIG_GBM
.dpy_gl_scanout_dmabuf = egl_scanout_dmabuf,
.dpy_gl_cursor_dmabuf = egl_cursor_dmabuf,
- .dpy_gl_cursor_position = egl_cursor_position,
.dpy_gl_release_dmabuf = egl_release_dmabuf,
+#endif
+ .dpy_gl_cursor_position = egl_cursor_position,
.dpy_gl_update = egl_scanout_flush,
};
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+
#include "qemu/drm.h"
#include "qemu/error-report.h"
#include "ui/console.h"
#include "ui/egl-helpers.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
+#include "trace.h"
EGLDisplay *qemu_egl_display;
EGLConfig qemu_egl_config;
DisplayGLMode qemu_egl_mode;
+bool qemu_egl_angle_d3d;
/* ------------------------------------------------------------------ */
-#if defined(CONFIG_X11) || defined(CONFIG_GBM)
-static const char *egl_get_error_string(void)
+const char *qemu_egl_get_error_string(void)
{
EGLint error = eglGetError();
return "Unknown EGL error";
}
}
-#endif
static void egl_fb_delete_texture(egl_fb *fb)
{
GL_BGRA, GL_UNSIGNED_BYTE, surface_data(dst));
}
+void egl_fb_read_rect(DisplaySurface *dst, egl_fb *src, int x, int y, int w, int h)
+{
+ assert(surface_width(dst) == src->width);
+ assert(surface_height(dst) == src->height);
+ assert(surface_format(dst) == PIXMAN_x8r8g8b8);
+
+ glBindFramebuffer(GL_READ_FRAMEBUFFER, src->framebuffer);
+ glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
+ glPixelStorei(GL_PACK_ROW_LENGTH, surface_stride(dst) / 4);
+ glReadPixels(x, y, w, h,
+ GL_BGRA, GL_UNSIGNED_BYTE, surface_data(dst) + x * 4);
+ glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+}
+
void egl_texture_blit(QemuGLShader *gls, egl_fb *dst, egl_fb *src, bool flip)
{
glBindFramebuffer(GL_FRAMEBUFFER_EXT, dst->framebuffer);
/* ---------------------------------------------------------------------- */
+EGLContext qemu_egl_rn_ctx;
+
#ifdef CONFIG_GBM
int qemu_egl_rn_fd;
struct gbm_device *qemu_egl_rn_gbm_dev;
-EGLContext qemu_egl_rn_ctx;
int egl_rendernode_init(const char *rendernode, DisplayGLMode mode)
{
/* ---------------------------------------------------------------------- */
-#if defined(CONFIG_X11) || defined(CONFIG_GBM)
+#if defined(CONFIG_X11) || defined(CONFIG_GBM) || defined(WIN32)
/*
* Taken from glamor_egl.h from the Xorg xserver, which is MIT licensed
/* In practise any EGL 1.5 implementation would support the EXT extension */
if (epoxy_has_egl_extension(NULL, "EGL_EXT_platform_base")) {
- PFNEGLGETPLATFORMDISPLAYEXTPROC getPlatformDisplayEXT =
- (void *) eglGetProcAddress("eglGetPlatformDisplayEXT");
- if (getPlatformDisplayEXT && platform != 0) {
- dpy = getPlatformDisplayEXT(platform, native, NULL);
+ if (platform != 0) {
+ dpy = eglGetPlatformDisplayEXT(platform, native, NULL);
}
}
qemu_egl_display = qemu_egl_get_display(dpy, platform);
if (qemu_egl_display == EGL_NO_DISPLAY) {
- error_report("egl: eglGetDisplay failed: %s", egl_get_error_string());
+ error_report("egl: eglGetDisplay failed: %s", qemu_egl_get_error_string());
return -1;
}
b = eglInitialize(qemu_egl_display, &major, &minor);
if (b == EGL_FALSE) {
- error_report("egl: eglInitialize failed: %s", egl_get_error_string());
+ error_report("egl: eglInitialize failed: %s", qemu_egl_get_error_string());
return -1;
}
b = eglBindAPI(gles ? EGL_OPENGL_ES_API : EGL_OPENGL_API);
if (b == EGL_FALSE) {
error_report("egl: eglBindAPI failed (%s mode): %s",
- gles ? "gles" : "core", egl_get_error_string());
+ gles ? "gles" : "core", qemu_egl_get_error_string());
return -1;
}
&qemu_egl_config, 1, &n);
if (b == EGL_FALSE || n != 1) {
error_report("egl: eglChooseConfig failed (%s mode): %s",
- gles ? "gles" : "core", egl_get_error_string());
+ gles ? "gles" : "core", qemu_egl_get_error_string());
return -1;
}
return 0;
}
+#endif
+
+#if defined(CONFIG_X11) || defined(CONFIG_GBM)
int qemu_egl_init_dpy_x11(EGLNativeDisplayType dpy, DisplayGLMode mode)
{
#ifdef EGL_KHR_platform_x11
return qemu_egl_init_dpy(dpy, 0, mode);
#endif
}
+#endif
+
+
+#ifdef WIN32
+int qemu_egl_init_dpy_win32(EGLNativeDisplayType dpy, DisplayGLMode mode)
+{
+ /* prefer GL ES, as that's what ANGLE supports */
+ if (mode == DISPLAYGL_MODE_ON) {
+ mode = DISPLAYGL_MODE_ES;
+ }
+
+ if (qemu_egl_init_dpy(dpy, 0, mode) < 0) {
+ return -1;
+ }
+
+#ifdef EGL_D3D11_DEVICE_ANGLE
+ if (epoxy_has_egl_extension(qemu_egl_display, "EGL_EXT_device_query")) {
+ EGLDeviceEXT device;
+ void *d3d11_device;
+ if (!eglQueryDisplayAttribEXT(qemu_egl_display,
+ EGL_DEVICE_EXT,
+ (EGLAttrib *)&device)) {
+ return 0;
+ }
+
+ if (!eglQueryDeviceAttribEXT(device,
+ EGL_D3D11_DEVICE_ANGLE,
+ (EGLAttrib *)&d3d11_device)) {
+ return 0;
+ }
+
+ trace_egl_init_d3d11_device(device);
+ qemu_egl_angle_d3d = device != NULL;
+ }
+#endif
+
+ return 0;
+}
#endif
bool qemu_egl_has_dmabuf(void)
return false;
}
-#ifdef CONFIG_GBM
+#ifdef WIN32
+ if (qemu_egl_init_dpy_win32(EGL_DEFAULT_DISPLAY, mode) < 0) {
+ error_setg(errp, "egl: init failed");
+ return false;
+ }
+ qemu_egl_rn_ctx = qemu_egl_init_ctx();
+ if (!qemu_egl_rn_ctx) {
+ error_setg(errp, "egl: egl_init_ctx failed");
+ return false;
+ }
+#elif defined(CONFIG_GBM)
if (egl_rendernode_init(rendernode, mode) < 0) {
error_setg(errp, "egl: render node init failed");
return false;
}
+#endif
+
+ if (!qemu_egl_rn_ctx) {
+ error_setg(errp, "egl: not available on this platform");
+ return false;
+ }
+
display_opengl = 1;
return true;
-#else
- error_setg(errp, "egl: not available on this platform");
- return false;
-#endif
}
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
#include "trace.h"
uint32_t backing_id, bool backing_y_0_top,
uint32_t backing_width, uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h)
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d)
{
VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
gd_egl_scanout_texture(dcl, dmabuf->texture,
dmabuf->y0_top, dmabuf->width, dmabuf->height,
- 0, 0, dmabuf->width, dmabuf->height);
+ dmabuf->x, dmabuf->y, dmabuf->scanout_width,
+ dmabuf->scanout_height, NULL);
if (dmabuf->allow_fences) {
vc->gfx.guest_fb.dmabuf = dmabuf;
{
VirtualConsole *vc = container_of(dgc, VirtualConsole, gfx.dgc);
- return eglMakeCurrent(qemu_egl_display, vc->gfx.esurface,
- vc->gfx.esurface, ctx);
+ if (!eglMakeCurrent(qemu_egl_display, vc->gfx.esurface,
+ vc->gfx.esurface, ctx)) {
+ error_report("egl: eglMakeCurrent failed: %s", qemu_egl_get_error_string());
+ return -1;
+ }
+
+ return 0;
}
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h)
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d)
{
VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
gd_gl_area_scanout_texture(dcl, dmabuf->texture,
dmabuf->y0_top, dmabuf->width, dmabuf->height,
- 0, 0, dmabuf->width, dmabuf->height);
+ dmabuf->x, dmabuf->y, dmabuf->scanout_width,
+ dmabuf->scanout_height, NULL);
if (dmabuf->allow_fences) {
vc->gfx.guest_fb.dmabuf = dmabuf;
DECLARE_INSTANCE_CHECKER(VCChardev, VC_CHARDEV,
TYPE_CHARDEV_VC)
-struct touch_slot {
- int x;
- int y;
- int tracking_id;
-};
static struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX];
bool gtk_use_gl_area;
QemuDmaBuf *dmabuf)
{
#ifdef CONFIG_GBM
+ VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
+
egl_dmabuf_release_texture(dmabuf);
+ if (vc->gfx.guest_fb.dmabuf == dmabuf) {
+ vc->gfx.guest_fb.dmabuf = NULL;
+ }
#endif
}
void *opaque)
{
VirtualConsole *vc = opaque;
- struct touch_slot *slot;
uint64_t num_slot = GPOINTER_TO_UINT(touch->sequence);
- bool needs_sync = false;
- int update;
int type = -1;
- int i;
-
- if (num_slot >= INPUT_EVENT_SLOTS_MAX) {
- warn_report("gtk: unexpected touch slot number: % " PRId64" >= %d\n",
- num_slot, INPUT_EVENT_SLOTS_MAX);
- return FALSE;
- }
-
- slot = &touch_slots[num_slot];
- slot->x = touch->x;
- slot->y = touch->y;
switch (touch->type) {
case GDK_TOUCH_BEGIN:
type = INPUT_MULTI_TOUCH_TYPE_BEGIN;
- slot->tracking_id = num_slot;
break;
case GDK_TOUCH_UPDATE:
type = INPUT_MULTI_TOUCH_TYPE_UPDATE;
break;
default:
warn_report("gtk: unexpected touch event type\n");
+ return FALSE;
}
- for (i = 0; i < INPUT_EVENT_SLOTS_MAX; ++i) {
- if (i == num_slot) {
- update = type;
- } else {
- update = INPUT_MULTI_TOUCH_TYPE_UPDATE;
- }
-
- slot = &touch_slots[i];
-
- if (slot->tracking_id == -1) {
- continue;
- }
-
- if (update == INPUT_MULTI_TOUCH_TYPE_END) {
- slot->tracking_id = -1;
- qemu_input_queue_mtt(vc->gfx.dcl.con, update, i, slot->tracking_id);
- needs_sync = true;
- } else {
- qemu_input_queue_mtt(vc->gfx.dcl.con, update, i, slot->tracking_id);
- qemu_input_queue_btn(vc->gfx.dcl.con, INPUT_BUTTON_TOUCH, true);
- qemu_input_queue_mtt_abs(vc->gfx.dcl.con,
- INPUT_AXIS_X, (int) slot->x,
- 0, surface_width(vc->gfx.ds),
- i, slot->tracking_id);
- qemu_input_queue_mtt_abs(vc->gfx.dcl.con,
- INPUT_AXIS_Y, (int) slot->y,
- 0, surface_height(vc->gfx.ds),
- i, slot->tracking_id);
- needs_sync = true;
- }
- }
-
- if (needs_sync) {
- qemu_input_event_sync();
- }
-
+ console_handle_touch_event(vc->gfx.dcl.con, touch_slots,
+ num_slot, surface_width(vc->gfx.ds),
+ surface_height(vc->gfx.ds), touch->x,
+ touch->y, type, &error_warn);
return TRUE;
}
ui_modules += {'opengl' : opengl_ss}
endif
-if opengl.found() and gbm.found()
+if opengl.found()
egl_headless_ss = ss.source_set()
- egl_headless_ss.add(when: [opengl, gbm, pixman],
- if_true: files('egl-headless.c'))
+ egl_headless_ss.add(when: [opengl, pixman],
+ if_true: [files('egl-headless.c'), gbm])
ui_modules += {'egl-headless' : egl_headless_ss}
endif
if dbus_display
dbus_ss = ss.source_set()
+ env = environment()
+ env.set('TARGETOS', targetos)
+ xml = custom_target('dbus-display preprocess',
+ input: 'dbus-display1.xml',
+ output: 'dbus-display1.xml',
+ env: env,
+ command: [xml_pp, '@INPUT@', '@OUTPUT@'])
dbus_display1 = custom_target('dbus-display gdbus-codegen',
output: ['dbus-display1.h', 'dbus-display1.c'],
- input: files('dbus-display1.xml'),
+ input: xml,
command: [gdbus_codegen, '@INPUT@',
'--glib-min-required', '2.64',
'--output-directory', meson.current_build_dir(),
#include "qemu/osdep.h"
#include "ui/console.h"
#include "standard-headers/drm/drm_fourcc.h"
+#include "trace.h"
PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format)
{
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h)
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d)
{
struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl);
SDL_SetHint(SDL_HINT_RENDER_DRIVER, driver);
SDL_SetHint(SDL_HINT_RENDER_BATCHING, "1");
- }
- scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0);
- if (scon->opengl) {
scon->winctx = SDL_GL_CreateContext(scon->real_window);
+ } else {
+ /* The SDL renderer is only used by sdl2-2D, when OpenGL is disabled */
+ scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0);
}
sdl_update_caption(scon);
}
return;
}
- SDL_GL_DeleteContext(scon->winctx);
- scon->winctx = NULL;
- SDL_DestroyRenderer(scon->real_renderer);
- scon->real_renderer = NULL;
+ if (scon->winctx) {
+ SDL_GL_DeleteContext(scon->winctx);
+ scon->winctx = NULL;
+ }
+ if (scon->real_renderer) {
+ SDL_DestroyRenderer(scon->real_renderer);
+ scon->real_renderer = NULL;
+ }
SDL_DestroyWindow(scon->real_window);
scon->real_window = NULL;
}
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h)
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d)
{
SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
EGLint stride = 0, fourcc = 0;
console_txt_new(int w, int h) "%dx%d"
console_select(int nr) "%d"
console_refresh(int interval) "interval %d ms"
-displaysurface_create(void *display_surface, int w, int h) "surface=%p, %dx%d"
+displaysurface_create(int w, int h) "%dx%d"
displaysurface_create_from(void *display_surface, int w, int h, uint32_t format) "surface=%p, %dx%d, format 0x%x"
displaysurface_create_pixman(void *display_surface) "surface=%p"
displaysurface_free(void *display_surface) "surface=%p"
dbus_mouse_release(unsigned int button) "button %u"
dbus_mouse_set_pos(unsigned int x, unsigned int y) "x=%u, y=%u"
dbus_mouse_rel_motion(int dx, int dy) "dx=%d, dy=%d"
+dbus_touch_send_event(unsigned int kind, uint32_t num_slot, uint32_t x, uint32_t y) "kind=%u, num_slot=%u, x=%u, y=%u"
dbus_update(int x, int y, int w, int h) "x=%d, y=%d, w=%d, h=%d"
+dbus_update_gl(int x, int y, int w, int h) "x=%d, y=%d, w=%d, h=%d"
dbus_clipboard_grab_failed(void) ""
dbus_clipboard_register(const char *bus_name) "peer %s"
dbus_clipboard_unregister(const char *bus_name) "peer %s"
+dbus_scanout_texture(uint32_t tex_id, bool backing_y_0_top, uint32_t backing_width, uint32_t backing_height, uint32_t x, uint32_t y, uint32_t w, uint32_t h) "tex_id:%u y0top:%d back:%ux%u %u+%u-%ux%u"
+dbus_gl_gfx_switch(void *p) "surf: %p"
+
+# egl-helpers.c
+egl_init_d3d11_device(void *p) "d3d device: %p"
*/
return qemu_fdatasync(fd);
}
+
+void *qemu_win32_map_alloc(size_t size, HANDLE *h, Error **errp)
+{
+ void *bits;
+
+ trace_win32_map_alloc(size);
+
+ *h = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0,
+ size, NULL);
+ if (*h == NULL) {
+ error_setg_win32(errp, GetLastError(), "Failed to CreateFileMapping");
+ return NULL;
+ }
+
+ bits = MapViewOfFile(*h, FILE_MAP_ALL_ACCESS, 0, 0, size);
+ if (bits == NULL) {
+ error_setg_win32(errp, GetLastError(), "Failed to MapViewOfFile");
+ CloseHandle(*h);
+ return NULL;
+ }
+
+ return bits;
+}
+
+void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp)
+{
+ trace_win32_map_free(ptr, h);
+
+ if (UnmapViewOfFile(ptr) == 0) {
+ error_setg_win32(errp, GetLastError(), "Failed to UnmapViewOfFile");
+ }
+ CloseHandle(h);
+}
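A minimal usage sketch pairing the two helpers (the handle returned here is
what the D-Bus display later duplicates into the client for ScanoutMap):

static void example_win32_map(Error **errp)
{
    HANDLE h;
    void *bits = qemu_win32_map_alloc(4096, &h, errp);

    if (!bits) {
        return;
    }
    memset(bits, 0, 4096); /* use the shared memory */
    qemu_win32_map_free(bits, h, errp);
}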
qemu_vfree(void *ptr) "ptr %p"
qemu_anon_ram_free(void *ptr, size_t size) "ptr %p size %zu"
+# oslib-win32.c
+win32_map_alloc(size_t size) "size:%zu"
+win32_map_free(void *ptr, void *h) "ptr:%p handle:%p"
+
# hbitmap.c
hbitmap_iter_skip_words(const void *hb, void *hbi, uint64_t pos, unsigned long cur) "hb %p hbi %p pos %"PRId64" cur 0x%lx"
hbitmap_reset(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64