F: target/s390x/kvm/
F: target/s390x/machine.c
F: target/s390x/sigp.c
-F: hw/s390x/pv.c
-F: include/hw/s390x/pv.h
F: gdb-xml/s390*.xml
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
F: include/hw/virtio/vhost-user-gpio.h
F: tests/qtest/libqos/virtio-gpio.*
+vhost-user-scmi
+R: mzamazal@redhat.com
+S: Supported
+F: hw/virtio/vhost-user-scmi*
+F: include/hw/virtio/vhost-user-scmi.h
+F: tests/qtest/libqos/virtio-scmi.*
+
virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported
F: hw/virtio/virtio-crypto-pci.c
F: include/hw/virtio/virtio-crypto.h
+virtio based memory device
+M: David Hildenbrand <david@redhat.com>
+S: Supported
+F: hw/virtio/virtio-md-pci.c
+F: include/hw/virtio/virtio-md-pci.h
+F: stubs/virtio-md-pci.c
+
virtio-mem
M: David Hildenbrand <david@redhat.com>
S: Supported
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif
-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
CMPXCHG_HELPER(cmpxchgo_be, Int128)
CMPXCHG_HELPER(cmpxchgo_le, Int128)
#endif
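
/*
 * A short note on the guard change above: HAVE_CMPXCHG128 is assumed to
 * be always defined, to either 0 or 1, by the per-host atomic headers,
 * so only a value test is meaningful.  CONFIG_CMPXCHG128 was either
 * defined or absent, which is what #ifdef tested.
 */
#if HAVE_CMPXCHG128        /* correct: true only when the value is 1 */
/* ... 16-byte cmpxchg helpers ... */
#endif
#ifdef HAVE_CMPXCHG128     /* wrong: also true when the value is 0 */
/* ... would compile the helpers on hosts without 16-byte cmpxchg ... */
#endif
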
if (qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [%08" PRIx64
- "/%" VADDR_PRIx "/%08x/%08x] %s\n",
+ "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = log_pc(cpu, last_tb);
if (qemu_log_in_addr_range(pc)) {
- qemu_log("Stopped execution of TB chain before %p [%"
+ qemu_log("Stopped execution of TB chain before %p [%016"
VADDR_PRIx "] %s\n",
last_tb->tc.ptr, pc, lookup_symbol(pc));
}
}
}
+static void cpu_exec_longjmp_cleanup(CPUState *cpu)
+{
+ /* Non-buggy compilers preserve this; assert the correct value. */
+ g_assert(cpu == current_cpu);
+
+#ifdef CONFIG_USER_ONLY
+ clear_helper_retaddr();
+ if (have_mmap_lock()) {
+ mmap_unlock();
+ }
+#else
+ /*
+ * For softmmu, a tlb_fill fault during translation will land here,
+ * and we need to release any page locks held. In system mode we
+ * have one tcg_ctx per thread, so we know it was this cpu doing
+ * the translation.
+ *
+ * Alternative 1: Install a cleanup to be called via an exception
+ * handling safe longjmp. It seems plausible that all our hosts
+ * support such a thing. We'd have to properly register unwind info
+     * for the JIT for EH, rather than just for GDB.
+ *
+ * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
+ * capture the cpu_loop_exit longjmp, perform the cleanup, and
+ * jump again to arrive here.
+ */
+ if (tcg_ctx->gen_tb) {
+ tb_unlock_pages(tcg_ctx->gen_tb);
+ tcg_ctx->gen_tb = NULL;
+ }
+#endif
+ if (qemu_mutex_iothread_locked()) {
+ qemu_mutex_unlock_iothread();
+ }
+ assert_no_pages_locked();
+}
+
void cpu_exec_step_atomic(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
-#ifdef CONFIG_USER_ONLY
- clear_helper_retaddr();
- if (have_mmap_lock()) {
- mmap_unlock();
- }
-#endif
- if (qemu_mutex_iothread_locked()) {
- qemu_mutex_unlock_iothread();
- }
- assert_no_pages_locked();
+ cpu_exec_longjmp_cleanup(cpu);
}
/*
{
/* Prepare setjmp context for exception handling. */
if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
- /* Non-buggy compilers preserve this; assert the correct value. */
- g_assert(cpu == current_cpu);
-
-#ifdef CONFIG_USER_ONLY
- clear_helper_retaddr();
- if (have_mmap_lock()) {
- mmap_unlock();
- }
-#endif
- if (qemu_mutex_iothread_locked()) {
- qemu_mutex_unlock_iothread();
- }
-
- assert_no_pages_locked();
+ cpu_exec_longjmp_cleanup(cpu);
}
return cpu_exec_loop(cpu, sc);
/* Check if we need to flush due to large pages. */
if ((page & lp_mask) == lp_addr) {
- tlb_debug("forcing full flush midx %d (%"
- VADDR_PRIx "/%" VADDR_PRIx ")\n",
+ tlb_debug("forcing full flush midx %d (%016"
+ VADDR_PRIx "/%016" VADDR_PRIx ")\n",
midx, lp_addr, lp_mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
} else {
assert_cpu_is_self(cpu);
- tlb_debug("page addr: %" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
+ tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
- tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
+ tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
uint16_t idxmap)
{
- tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
+ tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
vaddr addr,
uint16_t idxmap)
{
- tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
+ tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
*/
if (mask < f->mask || len > f->mask) {
tlb_debug("forcing full flush midx %d ("
- "%" VADDR_PRIx "/%" VADDR_PRIx "+%" VADDR_PRIx ")\n",
+ "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
midx, addr, mask, len);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return;
*/
if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
tlb_debug("forcing full flush midx %d ("
- "%" VADDR_PRIx "/%" VADDR_PRIx ")\n",
+ "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
midx, d->large_page_addr, d->large_page_mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return;
assert_cpu_is_self(cpu);
- tlb_debug("range: %" VADDR_PRIx "/%u+%" VADDR_PRIx " mmu_map:0x%x\n",
+ tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
d.addr, d.bits, d.len, d.idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock);
&xlat, &sz, full->attrs, &prot);
assert(sz >= TARGET_PAGE_SIZE);
- tlb_debug("vaddr=%" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
+ tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
" prot=%x idx=%d\n",
addr, full->phys_addr, prot, mmu_idx);
#include "atomic_template.h"
#endif
-#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
+#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
#define ACCEL_TCG_INTERNAL_H
#include "exec/exec-all.h"
+#include "exec/translate-all.h"
/*
* Access to the various translations structures need to be serialised
void page_table_config_init(void);
#endif
+#ifdef CONFIG_USER_ONLY
+/*
+ * For user-only, page_protect sets the page read-only.
+ * Since most execution is already on read-only pages, and we'd need to
+ * account for other TBs on the same page, defer undoing any page protection
+ * until we receive the write fault.
+ */
+static inline void tb_lock_page0(tb_page_addr_t p0)
+{
+ page_protect(p0);
+}
+
+static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
+{
+ page_protect(p1);
+}
+
+static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
+static inline void tb_unlock_pages(TranslationBlock *tb) { }
+#else
+void tb_lock_page0(tb_page_addr_t);
+void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_pages(TranslationBlock *);
+#endif
+
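
/*
 * A minimal sketch of the intended pairing, using only the helpers
 * declared above (example_translate and its arguments are hypothetical):
 */
static void example_translate(TranslationBlock *tb, tb_page_addr_t p0,
                              tb_page_addr_t p1)
{
    tb_lock_page0(p0);           /* lock the first page before decoding */
    if (p1 != -1) {
        tb_lock_page1(p0, p1);   /* may siglongjmp to restart translation */
    }
    /* ... generate code, tb_link_page(tb), ... */
    tb_unlock_pages(tb);         /* release whatever is still held */
}
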
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
-TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
- tb_page_addr_t phys_page2);
+TranslationBlock *tb_link_page(TranslationBlock *tb);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc);
* another process, because the fallback start_exclusive solution
* provides no protection across processes.
*/
- if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
- uint64_t *p = __builtin_assume_aligned(pv, 8);
- return *p;
+ WITH_MMAP_LOCK_GUARD() {
+ if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
+ uint64_t *p = __builtin_assume_aligned(pv, 8);
+ return *p;
+ }
}
#endif
return atomic16_read_ro(p);
}
-#ifdef CONFIG_USER_ONLY
/*
* We can only use cmpxchg to emulate a load if the page is writable.
* If the page is not writable, then assume the value is immutable
* and requires no locking. This ignores the case of MAP_SHARED with
* another process, because the fallback start_exclusive solution
* provides no protection across processes.
+ *
+ * In system mode all guest pages are writable. For user mode,
+ * we must take mmap_lock so that the query remains valid until
+ * the write is complete -- tests/tcg/multiarch/munmap-pthread.c
+ * is an example that can race.
*/
- if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
- return *p;
- }
+ WITH_MMAP_LOCK_GUARD() {
+#ifdef CONFIG_USER_ONLY
+ if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
+ return *p;
+ }
#endif
-
- /*
- * In system mode all guest pages are writable, and for user-only
- * we have just checked writability. Try cmpxchg.
- */
- if (HAVE_ATOMIC128_RW) {
- return atomic16_read_rw(p);
+ if (HAVE_ATOMIC128_RW) {
+ return atomic16_read_rw(p);
+ }
}
/* Ultimate fallback: re-execute in serial context. */
*/
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
-static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
- PageDesc **ret_p2, tb_page_addr_t phys2,
- bool alloc)
-{
- *ret_p1 = NULL;
- *ret_p2 = NULL;
-}
-
-static inline void page_unlock(PageDesc *pd) { }
-static inline void page_lock_tb(const TranslationBlock *tb) { }
-static inline void page_unlock_tb(const TranslationBlock *tb) { }
+static inline void tb_lock_pages(const TranslationBlock *tb) { }
/*
* For user-only, since we are protecting all of memory with a single lock,
}
/* Call with mmap_lock held. */
-static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
+static void tb_record(TranslationBlock *tb)
{
vaddr addr;
int flags;
qemu_spin_lock(&pd->lock);
}
+/* Like qemu_spin_trylock, returns false on success */
+static bool page_trylock(PageDesc *pd)
+{
+ bool busy = qemu_spin_trylock(&pd->lock);
+ if (!busy) {
+ page_lock__debug(pd);
+ }
+ return busy;
+}
+
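/*
 * A minimal usage sketch (reorder_lock_example, pd_held, pd_low are
 * hypothetical): while already holding a higher-numbered page lock, a
 * lower-numbered lock must be try-locked rather than blocked on,
 * because blocking against the canonical ascending order can deadlock.
 */
static void reorder_lock_example(PageDesc *pd_held, PageDesc *pd_low)
{
    if (!page_trylock(pd_low)) {
        return;  /* acquired without blocking, despite reversed order */
    }
    /* Contended: drop pd_held, then take both in ascending order. */
    page_unlock(pd_held);
    page_lock(pd_low);
    page_lock(pd_held);
}
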
static void page_unlock(PageDesc *pd)
{
qemu_spin_unlock(&pd->lock);
page_unlock__debug(pd);
}
+void tb_lock_page0(tb_page_addr_t paddr)
+{
+ page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
+}
+
+void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
+{
+ tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+ tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+ PageDesc *pd0, *pd1;
+
+ if (pindex0 == pindex1) {
+ /* Identical pages, and the first page is already locked. */
+ return;
+ }
+
+ pd1 = page_find_alloc(pindex1, true);
+ if (pindex0 < pindex1) {
+ /* Correct locking order, we may block. */
+ page_lock(pd1);
+ return;
+ }
+
+ /* Incorrect locking order, we cannot block lest we deadlock. */
+ if (!page_trylock(pd1)) {
+ return;
+ }
+
+ /*
+ * Drop the lock on page0 and get both page locks in the right order.
+ * Restart translation via longjmp.
+ */
+ pd0 = page_find_alloc(pindex0, false);
+ page_unlock(pd0);
+ page_lock(pd1);
+ page_lock(pd0);
+ siglongjmp(tcg_ctx->jmp_trans, -3);
+}
+
+void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
+{
+ tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+ tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+ if (pindex0 != pindex1) {
+ page_unlock(page_find_alloc(pindex1, false));
+ }
+}
+
+static void tb_lock_pages(TranslationBlock *tb)
+{
+ tb_page_addr_t paddr0 = tb_page_addr0(tb);
+ tb_page_addr_t paddr1 = tb_page_addr1(tb);
+ tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+ tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+ if (unlikely(paddr0 == -1)) {
+ return;
+ }
+ if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+ if (pindex0 < pindex1) {
+ page_lock(page_find_alloc(pindex0, true));
+ page_lock(page_find_alloc(pindex1, true));
+ return;
+ }
+ page_lock(page_find_alloc(pindex1, true));
+ }
+ page_lock(page_find_alloc(pindex0, true));
+}
+
+void tb_unlock_pages(TranslationBlock *tb)
+{
+ tb_page_addr_t paddr0 = tb_page_addr0(tb);
+ tb_page_addr_t paddr1 = tb_page_addr1(tb);
+ tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+ tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+ if (unlikely(paddr0 == -1)) {
+ return;
+ }
+ if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+ page_unlock(page_find_alloc(pindex1, false));
+ }
+ page_unlock(page_find_alloc(pindex0, false));
+}
+
static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
- bool busy;
-
- busy = qemu_spin_trylock(&pe->pd->lock);
+ bool busy = page_trylock(pe->pd);
if (!busy) {
g_assert(!pe->locked);
pe->locked = true;
- page_lock__debug(pe->pd);
}
return busy;
}
* Add the tb in the target page and protect it if necessary.
* Called with @p->lock held.
*/
-static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
- unsigned int n)
+static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
{
bool page_already_protected;
}
}
-static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
+static void tb_record(TranslationBlock *tb)
{
- tb_page_add(p1, tb, 0);
- if (unlikely(p2)) {
- tb_page_add(p2, tb, 1);
+ tb_page_addr_t paddr0 = tb_page_addr0(tb);
+ tb_page_addr_t paddr1 = tb_page_addr1(tb);
+ tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+ assert(paddr0 != -1);
+ if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+ tb_page_add(page_find_alloc(pindex1, false), tb, 1);
}
+ tb_page_add(page_find_alloc(pindex0, false), tb, 0);
}
-static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
+static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
TranslationBlock *tb1;
uintptr_t *pprev;
static void tb_remove(TranslationBlock *tb)
{
- PageDesc *pd;
-
- pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
- tb_page_remove(pd, tb);
- if (unlikely(tb->page_addr[1] != -1)) {
- pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
- tb_page_remove(pd, tb);
- }
-}
-
-static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
- PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
-{
- PageDesc *p1, *p2;
- tb_page_addr_t page1;
- tb_page_addr_t page2;
-
- assert_memory_lock();
- g_assert(phys1 != -1);
-
- page1 = phys1 >> TARGET_PAGE_BITS;
- page2 = phys2 >> TARGET_PAGE_BITS;
-
- p1 = page_find_alloc(page1, alloc);
- if (ret_p1) {
- *ret_p1 = p1;
- }
- if (likely(phys2 == -1)) {
- page_lock(p1);
- return;
- } else if (page1 == page2) {
- page_lock(p1);
- if (ret_p2) {
- *ret_p2 = p1;
- }
- return;
- }
- p2 = page_find_alloc(page2, alloc);
- if (ret_p2) {
- *ret_p2 = p2;
- }
- if (page1 < page2) {
- page_lock(p1);
- page_lock(p2);
- } else {
- page_lock(p2);
- page_lock(p1);
- }
-}
-
-/* lock the page(s) of a TB in the correct acquisition order */
-static void page_lock_tb(const TranslationBlock *tb)
-{
- page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
-}
-
-static void page_unlock_tb(const TranslationBlock *tb)
-{
- PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
-
- page_unlock(p1);
- if (unlikely(tb_page_addr1(tb) != -1)) {
- PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
-
- if (p2 != p1) {
- page_unlock(p2);
- }
+ tb_page_addr_t paddr0 = tb_page_addr0(tb);
+ tb_page_addr_t paddr1 = tb_page_addr1(tb);
+ tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+ assert(paddr0 != -1);
+ if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+ tb_page_remove(page_find_alloc(pindex1, false), tb);
}
+ tb_page_remove(page_find_alloc(pindex0, false), tb);
}
#endif /* CONFIG_USER_ONLY */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
if (page_addr == -1 && tb_page_addr0(tb) != -1) {
- page_lock_tb(tb);
+ tb_lock_pages(tb);
do_tb_phys_invalidate(tb, true);
- page_unlock_tb(tb);
+ tb_unlock_pages(tb);
} else {
do_tb_phys_invalidate(tb, false);
}
}
/*
- * Add a new TB and link it to the physical page tables. phys_page2 is
- * (-1) to indicate that only one page contains the TB.
- *
+ * Add a new TB and link it to the physical page tables.
* Called with mmap_lock held for user-mode emulation.
*
* Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
* for the same block of guest code that @tb corresponds to. In that case,
* the caller should discard the original @tb, and use instead the returned TB.
*/
-TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
- tb_page_addr_t phys_page2)
+TranslationBlock *tb_link_page(TranslationBlock *tb)
{
- PageDesc *p;
- PageDesc *p2 = NULL;
void *existing_tb = NULL;
uint32_t h;
assert_memory_lock();
tcg_debug_assert(!(tb->cflags & CF_INVALID));
- /*
- * Add the TB to the page list, acquiring first the pages's locks.
- * We keep the locks held until after inserting the TB in the hash table,
- * so that if the insertion fails we know for sure that the TBs are still
- * in the page descriptors.
- * Note that inserting into the hash table first isn't an option, since
- * we can only insert TBs that are fully initialized.
- */
- page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
- tb_record(tb, p, p2);
+ tb_record(tb);
/* add in the hash table */
- h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc),
+ h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cs_base, tb->cflags);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
/* remove TB from the page(s) if we couldn't insert it */
if (unlikely(existing_tb)) {
tb_remove(tb);
- tb = existing_tb;
+ tb_unlock_pages(tb);
+ return existing_tb;
}
- if (p2 && p2 != p) {
- page_unlock(p2);
- }
- page_unlock(p);
+ tb_unlock_pages(tb);
return tb;
}
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
i64, env, i64, i64, i64, i32)
#endif
-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb;
- tb_page_addr_t phys_pc;
+ tb_page_addr_t phys_pc, phys_p2;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
int64_t ti;
QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
buffer_overflow:
+ assert_no_pages_locked();
tb = tcg_tb_alloc(tcg_ctx);
if (unlikely(!tb)) {
/* flush must be done */
tb->cflags = cflags;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
+ if (phys_pc != -1) {
+ tb_lock_page0(phys_pc);
+ }
+
tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
tcg_ctx->guest_mo = TCG_MO_ALL;
#endif
- tb_overflow:
-
+ restart_translate:
trace_translate_block(tb, pc, tb->tc.ptr);
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
"Restarting code generation for "
"code_gen_buffer overflow\n");
+ tb_unlock_pages(tb);
goto buffer_overflow;
case -2:
"Restarting code generation with "
"smaller translation block (max %d insns)\n",
max_insns);
- goto tb_overflow;
+
+ /*
+ * The half-sized TB may not cross pages.
+ * TODO: Fix all targets that cross pages except with
+ * the first insn, at which point this can't be reached.
+ */
+ phys_p2 = tb_page_addr1(tb);
+ if (unlikely(phys_p2 != -1)) {
+ tb_unlock_page1(phys_pc, phys_p2);
+ tb_set_page_addr1(tb, -1);
+ }
+ goto restart_translate;
+
+ case -3:
+ /*
+ * We had a page lock ordering problem. In order to avoid
+ * deadlock we had to drop the lock on page0, which means
+ * that everything we translated so far is compromised.
+ * Restart with locks held on both pages.
+ */
+ qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
+                          "Restarting code generation with re-locked pages\n");
+ goto restart_translate;
default:
g_assert_not_reached();
}
}
+ tcg_ctx->gen_tb = NULL;
+
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
+ tb_unlock_pages(tb);
goto buffer_overflow;
}
tb->tc.size = gen_code_size;
* before attempting to link to other TBs or add to the lookup table.
*/
if (tb_page_addr0(tb) == -1) {
+ assert_no_pages_locked();
return tb;
}
* No explicit memory barrier is required -- tb_link_page() makes the
* TB visible in a consistent state.
*/
- existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
+ existing_tb = tb_link_page(tb);
+ assert_no_pages_locked();
+
/* if the TB already exists, discard what we just translated */
if (unlikely(existing_tb != tb)) {
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = log_pc(cpu, tb);
if (qemu_log_in_addr_range(pc)) {
- qemu_log("cpu_io_recompile: rewound execution of TB to %"
+ qemu_log("cpu_io_recompile: rewound execution of TB to %016"
VADDR_PRIx "\n", pc);
}
}
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
-#include "exec/translate-all.h"
#include "exec/plugin-gen.h"
#include "tcg/tcg-op-common.h"
+#include "internal.h"
static void gen_io_start(void)
{
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
-#ifdef CONFIG_USER_ONLY
- page_protect(pc);
-#endif
-
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
host = db->host_addr[1];
base = TARGET_PAGE_ALIGN(db->pc_first);
if (host == NULL) {
- tb_page_addr_t phys_page =
- get_page_addr_code_hostp(env, base, &db->host_addr[1]);
+ tb_page_addr_t page0, old_page1, new_page1;
+
+ new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
/*
* If the second page is MMIO, treat as if the first page
* was MMIO as well, so that we do not cache the TB.
*/
- if (unlikely(phys_page == -1)) {
+ if (unlikely(new_page1 == -1)) {
+ tb_unlock_pages(tb);
tb_set_page_addr0(tb, -1);
return NULL;
}
- tb_set_page_addr1(tb, phys_page);
-#ifdef CONFIG_USER_ONLY
- page_protect(end);
-#endif
+ /*
+ * If this is not the first time around, and page1 matches,
+ * then we already have the page locked. Alternately, we're
+ * not doing anything to prevent the PTE from changing, so
+ * we might wind up with a different page, requiring us to
+ * re-do the locking.
+ */
+ old_page1 = tb_page_addr1(tb);
+ if (likely(new_page1 != old_page1)) {
+ page0 = tb_page_addr0(tb);
+ if (unlikely(old_page1 != -1)) {
+ tb_unlock_page1(page0, old_page1);
+ }
+ tb_set_page_addr1(tb, new_page1);
+ tb_lock_page1(page0, new_page1);
+ }
host = db->host_addr[1];
}
static IntervalTreeRoot pageflags_root;
-static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
+static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
IntervalTreeNode *n;
}
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
- target_long last)
+ target_ulong last)
{
IntervalTreeNode *n;
}
}
-int page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(target_ulong start, target_ulong len, int flags)
{
target_ulong last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
- int ret;
+ bool ret;
if (len == 0) {
- return 0; /* trivial length */
+ return true; /* trivial length */
}
last = start + len - 1;
if (last < start) {
- return -1; /* wrap around */
+ return false; /* wrap around */
}
locked = have_mmap_lock();
p = pageflags_find(start, last);
}
if (!p) {
- ret = -1; /* entire region invalid */
+ ret = false; /* entire region invalid */
break;
}
}
if (start < p->itree.start) {
- ret = -1; /* initial bytes invalid */
+ ret = false; /* initial bytes invalid */
break;
}
missing = flags & ~p->flags;
- if (missing & PAGE_READ) {
- ret = -1; /* page not readable */
+ if (missing & ~PAGE_WRITE) {
+ ret = false; /* page doesn't match */
break;
}
if (missing & PAGE_WRITE) {
if (!(p->flags & PAGE_WRITE_ORG)) {
- ret = -1; /* page not writable */
+ ret = false; /* page not writable */
break;
}
/* Asking about writable, but has been protected: undo. */
if (!page_unprotect(start, 0)) {
- ret = -1;
+ ret = false;
break;
}
/* TODO: page_unprotect should take a range, not a single page. */
if (last - start < TARGET_PAGE_SIZE) {
- ret = 0; /* ok */
+ ret = true; /* ok */
break;
}
start += TARGET_PAGE_SIZE;
}
if (last <= p->itree.last) {
- ret = 0; /* ok */
+ ret = true; /* ok */
break;
}
start = p->itree.last + 1;
return ret;
}
+bool page_check_range_empty(target_ulong start, target_ulong last)
+{
+ assert(last >= start);
+ assert_memory_lock();
+ return pageflags_find(start, last) == NULL;
+}
+
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+ target_ulong len, target_ulong align)
+{
+ target_ulong len_m1, align_m1;
+
+ assert(min <= max);
+ assert(max <= GUEST_ADDR_MAX);
+ assert(len != 0);
+ assert(is_power_of_2(align));
+ assert_memory_lock();
+
+ len_m1 = len - 1;
+ align_m1 = align - 1;
+
+ /* Iteratively narrow the search region. */
+ while (1) {
+ PageFlagsNode *p;
+
+ /* Align min and double-check there's enough space remaining. */
+ min = (min + align_m1) & ~align_m1;
+ if (min > max) {
+ return -1;
+ }
+ if (len_m1 > max - min) {
+ return -1;
+ }
+
+ p = pageflags_find(min, min + len_m1);
+ if (p == NULL) {
+ /* Found! */
+ return min;
+ }
+ if (max <= p->itree.last) {
+ /* Existing allocation fills the remainder of the search region. */
+ return -1;
+ }
+ /* Skip across existing allocation. */
+ min = p->itree.last + 1;
+ }
+}
+
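/*
 * A minimal usage sketch (find_hole_example is hypothetical), called
 * with the mmap lock held: look for a 64 KiB hole, 16 KiB aligned,
 * anywhere above page zero.
 */
static target_ulong find_hole_example(void)
{
    target_ulong hole = page_find_range_empty(TARGET_PAGE_SIZE,
                                              GUEST_ADDR_MAX,
                                              64 * 1024, 16 * 1024);
    /* Returns -1 if no unallocated, suitably aligned range exists. */
    return hole;
}
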
void page_protect(tb_page_addr_t address)
{
PageFlagsNode *p;
#include "atomic_template.h"
#endif
-#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
+#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
/*
- * QEMU Pipewire audio driver
+ * QEMU PipeWire audio driver
*
* Copyright (c) 2023 Red Hat Inc.
*
PWVoice v;
} PWVoiceIn;
+#define PW_VOICE_IN(v) ((PWVoiceIn *)v)
+#define PW_VOICE_OUT(v) ((PWVoiceOut *)v)
+
static void
stream_destroy(void *data)
{
trace_pw_state_changed(pw_stream_get_node_id(v->stream),
pw_stream_state_as_string(state));
-
- switch (state) {
- case PW_STREAM_STATE_ERROR:
- case PW_STREAM_STATE_UNCONNECTED:
- break;
- case PW_STREAM_STATE_PAUSED:
- case PW_STREAM_STATE_CONNECTING:
- case PW_STREAM_STATE_STREAMING:
- break;
- }
}
static const struct pw_stream_events capture_stream_events = {
}
static int
-create_stream(pwaudio *c, PWVoice *v, const char *stream_name,
- const char *name, enum spa_direction dir)
+qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name,
+ const char *name, enum spa_direction dir)
{
int res;
uint32_t n_params;
struct pw_properties *props;
props = pw_properties_new(NULL, NULL);
+ if (!props) {
+ error_report("Failed to create PW properties: %s", g_strerror(errno));
+ return -1;
+ }
/* 75% of the timer period for faster updates */
buf_samples = (uint64_t)v->g->dev->timer_period * v->info.rate
pw_properties_set(props, PW_KEY_TARGET_OBJECT, name);
}
v->stream = pw_stream_new(c->core, stream_name, props);
-
if (v->stream == NULL) {
+ error_report("Failed to create PW stream: %s", g_strerror(errno));
return -1;
}
PW_STREAM_FLAG_MAP_BUFFERS |
PW_STREAM_FLAG_RT_PROCESS, params, n_params);
if (res < 0) {
+ error_report("Failed to connect PW stream: %s", g_strerror(errno));
pw_stream_destroy(v->stream);
return -1;
}
return 0;
}
-static int
-qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name,
- const char *name, enum spa_direction dir)
+static void
+qpw_set_position(uint32_t channels, uint32_t position[SPA_AUDIO_MAX_CHANNELS])
{
- int r;
-
- switch (v->info.channels) {
+ memcpy(position, (uint32_t[SPA_AUDIO_MAX_CHANNELS]) { SPA_AUDIO_CHANNEL_UNKNOWN, },
+ sizeof(uint32_t) * SPA_AUDIO_MAX_CHANNELS);
+ /*
+ * TODO: This currently expects the only frontend supporting more than 2
+ * channels is the usb-audio. We will need some means to set channel
+ * order when a new frontend gains multi-channel support.
+ */
+ switch (channels) {
case 8:
- v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
- v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
- v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
- v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
- v->info.position[4] = SPA_AUDIO_CHANNEL_RL;
- v->info.position[5] = SPA_AUDIO_CHANNEL_RR;
- v->info.position[6] = SPA_AUDIO_CHANNEL_SL;
- v->info.position[7] = SPA_AUDIO_CHANNEL_SR;
- break;
+ position[6] = SPA_AUDIO_CHANNEL_SL;
+ position[7] = SPA_AUDIO_CHANNEL_SR;
+ /* fallthrough */
case 6:
- v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
- v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
- v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
- v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
- v->info.position[4] = SPA_AUDIO_CHANNEL_RL;
- v->info.position[5] = SPA_AUDIO_CHANNEL_RR;
- break;
- case 5:
- v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
- v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
- v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
- v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
- v->info.position[4] = SPA_AUDIO_CHANNEL_RC;
- break;
- case 4:
- v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
- v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
- v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
- v->info.position[3] = SPA_AUDIO_CHANNEL_RC;
- break;
- case 3:
- v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
- v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
- v->info.position[2] = SPA_AUDIO_CHANNEL_LFE;
- break;
+ position[2] = SPA_AUDIO_CHANNEL_FC;
+ position[3] = SPA_AUDIO_CHANNEL_LFE;
+ position[4] = SPA_AUDIO_CHANNEL_RL;
+ position[5] = SPA_AUDIO_CHANNEL_RR;
+ /* fallthrough */
case 2:
- v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
- v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
+ position[0] = SPA_AUDIO_CHANNEL_FL;
+ position[1] = SPA_AUDIO_CHANNEL_FR;
break;
case 1:
- v->info.position[0] = SPA_AUDIO_CHANNEL_MONO;
+ position[0] = SPA_AUDIO_CHANNEL_MONO;
break;
default:
- for (size_t i = 0; i < v->info.channels; i++) {
- v->info.position[i] = SPA_AUDIO_CHANNEL_UNKNOWN;
- }
- break;
+        dolog("Internal error: unsupported channel count %u\n", channels);
}
-
- /* create a new unconnected pwstream */
- r = create_stream(c, v, stream_name, name, dir);
- if (r < 0) {
- AUD_log(AUDIO_CAP, "Failed to create stream.");
- return -1;
- }
-
- return r;
}
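
/*
 * Worked example of the fallthrough chain above: channels == 6 yields
 *   position[0..5] = { FL, FR, FC, LFE, RL, RR }
 * and channels == 8 additionally sets
 *   position[6..7] = { SL, SR },
 * with all remaining slots left at SPA_AUDIO_CHANNEL_UNKNOWN from the
 * initial memcpy.
 */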
static int
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
+ qpw_set_position(as->nchannels, v->info.position);
v->info.rate = as->freq;
obt_as.fmt =
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_OUTPUT);
if (r < 0) {
- error_report("qpw_stream_new for playback failed");
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
+ qpw_set_position(as->nchannels, v->info.position);
v->info.rate = as->freq;
obt_as.fmt =
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_INPUT);
if (r < 0) {
- error_report("qpw_stream_new for recording failed");
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
}
static void
-qpw_fini_out(HWVoiceOut *hw)
+qpw_voice_fini(PWVoice *v)
{
- PWVoiceOut *pw = (PWVoiceOut *) hw;
- PWVoice *v = &pw->v;
+ pwaudio *c = v->g;
- if (v->stream) {
- pwaudio *c = v->g;
- pw_thread_loop_lock(c->thread_loop);
- pw_stream_destroy(v->stream);
- v->stream = NULL;
- pw_thread_loop_unlock(c->thread_loop);
+ if (!v->stream) {
+ return;
}
+ pw_thread_loop_lock(c->thread_loop);
+ pw_stream_destroy(v->stream);
+ v->stream = NULL;
+ pw_thread_loop_unlock(c->thread_loop);
}
static void
-qpw_fini_in(HWVoiceIn *hw)
+qpw_fini_out(HWVoiceOut *hw)
{
- PWVoiceIn *pw = (PWVoiceIn *) hw;
- PWVoice *v = &pw->v;
+ qpw_voice_fini(&PW_VOICE_OUT(hw)->v);
+}
- if (v->stream) {
- pwaudio *c = v->g;
- pw_thread_loop_lock(c->thread_loop);
- pw_stream_destroy(v->stream);
- v->stream = NULL;
- pw_thread_loop_unlock(c->thread_loop);
- }
+static void
+qpw_fini_in(HWVoiceIn *hw)
+{
+ qpw_voice_fini(&PW_VOICE_IN(hw)->v);
}
static void
-qpw_enable_out(HWVoiceOut *hw, bool enable)
+qpw_voice_set_enabled(PWVoice *v, bool enable)
{
- PWVoiceOut *po = (PWVoiceOut *) hw;
- PWVoice *v = &po->v;
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_set_active(v->stream, enable);
pw_thread_loop_unlock(c->thread_loop);
}
+static void
+qpw_enable_out(HWVoiceOut *hw, bool enable)
+{
+ qpw_voice_set_enabled(&PW_VOICE_OUT(hw)->v, enable);
+}
+
static void
qpw_enable_in(HWVoiceIn *hw, bool enable)
{
- PWVoiceIn *pi = (PWVoiceIn *) hw;
- PWVoice *v = &pi->v;
- pwaudio *c = v->g;
- pw_thread_loop_lock(c->thread_loop);
- pw_stream_set_active(v->stream, enable);
- pw_thread_loop_unlock(c->thread_loop);
+ qpw_voice_set_enabled(&PW_VOICE_IN(hw)->v, enable);
}
static void
-qpw_volume_out(HWVoiceOut *hw, Volume *vol)
+qpw_voice_set_volume(PWVoice *v, Volume *vol)
{
- PWVoiceOut *pw = (PWVoiceOut *) hw;
- PWVoice *v = &pw->v;
pwaudio *c = v->g;
int i, ret;
}
static void
-qpw_volume_in(HWVoiceIn *hw, Volume *vol)
+qpw_volume_out(HWVoiceOut *hw, Volume *vol)
{
- PWVoiceIn *pw = (PWVoiceIn *) hw;
- PWVoice *v = &pw->v;
- pwaudio *c = v->g;
- int i, ret;
-
- pw_thread_loop_lock(c->thread_loop);
- v->volume.channels = vol->channels;
-
- for (i = 0; i < vol->channels; ++i) {
- v->volume.values[i] = (float)vol->vol[i] / 255;
- }
-
- ret = pw_stream_set_control(v->stream,
- SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
- trace_pw_vol(ret == 0 ? "success" : "failed");
+ qpw_voice_set_volume(&PW_VOICE_OUT(hw)->v, vol);
+}
- v->muted = vol->mute;
- float val = v->muted ? 1.f : 0.f;
- ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
- pw_thread_loop_unlock(c->thread_loop);
+static void
+qpw_volume_in(HWVoiceIn *hw, Volume *vol)
+{
+ qpw_voice_set_volume(&PW_VOICE_IN(hw)->v, vol);
}
static int wait_resync(pwaudio *pw)
}
return 0;
}
+
static void
on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
qpw_audio_init(Audiodev *dev)
{
g_autofree pwaudio *pw = g_new0(pwaudio, 1);
- pw_init(NULL, NULL);
- trace_pw_audio_init();
assert(dev->driver == AUDIODEV_DRIVER_PIPEWIRE);
+ trace_pw_audio_init();
+
+ pw_init(NULL, NULL);
pw->dev = dev;
- pw->thread_loop = pw_thread_loop_new("Pipewire thread loop", NULL);
+ pw->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
if (pw->thread_loop == NULL) {
- error_report("Could not create Pipewire loop");
+ error_report("Could not create PipeWire loop: %s", g_strerror(errno));
goto fail;
}
pw->context =
pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0);
if (pw->context == NULL) {
- error_report("Could not create Pipewire context");
+ error_report("Could not create PipeWire context: %s", g_strerror(errno));
goto fail;
}
if (pw_thread_loop_start(pw->thread_loop) < 0) {
- error_report("Could not start Pipewire loop");
+ error_report("Could not start PipeWire loop: %s", g_strerror(errno));
goto fail;
}
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
- if (pw->context) {
- g_clear_pointer(&pw->context, pw_context_destroy);
- }
- if (pw->thread_loop) {
- g_clear_pointer(&pw->thread_loop, pw_thread_loop_destroy);
- }
+ g_clear_pointer(&pw->context, pw_context_destroy);
+ g_clear_pointer(&pw->thread_loop, pw_thread_loop_destroy);
return NULL;
}
pw_write(int32_t filled, int32_t avail, uint32_t index, size_t len) "filled=%d avail=%d index=%u len=%zu"
pw_vol(const char *ret) "set volume: %s"
pw_period(uint64_t quantum, uint32_t rate) "period =%" PRIu64 "/%u"
-pw_audio_init(void) "Initialize Pipewire context"
+pw_audio_init(void) "Initialize PipeWire context"
# audio.c
audio_timer_start(int interval) "interval %d ms"
/*
- * QEMU Block driver for NBD
+ * QEMU Block driver for NBD
*
* Copyright (c) 2019 Virtuozzo International GmbH.
- * Copyright (C) 2016 Red Hat, Inc.
+ * Copyright Red Hat
* Copyright (C) 2008 Bull S.A.S.
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
*
#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS 16
-#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
-#define INDEX_TO_HANDLE(bs, index) ((index) ^ (uint64_t)(intptr_t)(bs))
+#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
+#define INDEX_TO_COOKIE(index) ((index) + 1)
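
/*
 * A minimal sketch of the mapping (check_cookie_mapping is hypothetical):
 * the +1 bias keeps cookie 0 reserved to mean "no reply pending" (see
 * the s->reply.cookie == 0 checks below), unlike the old handle scheme
 * that XOR'ed in the BDS pointer.
 */
static inline void check_cookie_mapping(void)
{
    uint64_t cookie = INDEX_TO_COOKIE(0);   /* first slot -> cookie 1 */
    assert(cookie != 0);                    /* 0 never names a request */
    assert(COOKIE_TO_INDEX(cookie) == 0);   /* round-trips to the index */
}
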
typedef struct {
Coroutine *coroutine;
reconnect_delay_timer_del(s);
}
-static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
+static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie)
{
int ret;
- uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
+ uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;
QEMU_LOCK_GUARD(&s->receive_mutex);
while (true) {
- if (s->reply.handle == handle) {
+ if (s->reply.cookie == cookie) {
/* We are done */
return 0;
}
- if (s->reply.handle != 0) {
+ if (s->reply.cookie != 0) {
/*
* Some other request is being handled now. It should already be
- * woken by whoever set s->reply.handle (or never wait in this
+ * woken by whoever set s->reply.cookie (or never wait in this
* yield). So, we should not wake it here.
*/
- ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
+ ind2 = COOKIE_TO_INDEX(s->reply.cookie);
assert(!s->requests[ind2].receiving);
s->requests[ind].receiving = true;
/*
* We may be woken for 2 reasons:
* 1. From this function, executing in parallel coroutine, when our
- * handle is received.
+ * cookie is received.
* 2. From nbd_co_receive_one_chunk(), when previous request is
- * finished and s->reply.handle set to 0.
+ * finished and s->reply.cookie set to 0.
* Anyway, it's OK to lock the mutex and go to the next iteration.
*/
continue;
}
- /* We are under mutex and handle is 0. We have to do the dirty work. */
- assert(s->reply.handle == 0);
+ /* We are under mutex and cookie is 0. We have to do the dirty work. */
+ assert(s->reply.cookie == 0);
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
if (ret <= 0) {
ret = ret ? ret : -EIO;
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
- ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
+ ind2 = COOKIE_TO_INDEX(s->reply.cookie);
if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
- if (s->reply.handle == handle) {
+ if (s->reply.cookie == cookie) {
/* We are done */
return 0;
}
qemu_mutex_unlock(&s->requests_lock);
qemu_co_mutex_lock(&s->send_mutex);
- request->handle = INDEX_TO_HANDLE(s, i);
+ request->cookie = INDEX_TO_COOKIE(i);
assert(s->ioc);
* corresponding to the server's error reply), and errp is unchanged.
*/
static coroutine_fn int nbd_co_do_receive_one_chunk(
- BDRVNBDState *s, uint64_t handle, bool only_structured,
+ BDRVNBDState *s, uint64_t cookie, bool only_structured,
int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
int ret;
- int i = HANDLE_TO_INDEX(s, handle);
+ int i = COOKIE_TO_INDEX(cookie);
void *local_payload = NULL;
NBDStructuredReplyChunk *chunk;
}
*request_ret = 0;
- ret = nbd_receive_replies(s, handle);
+ ret = nbd_receive_replies(s, cookie);
if (ret < 0) {
error_setg(errp, "Connection closed");
return -EIO;
}
assert(s->ioc);
- assert(s->reply.handle == handle);
+ assert(s->reply.cookie == cookie);
if (nbd_reply_is_simple(&s->reply)) {
if (only_structured) {
* Return value is a fatal error code or normal nbd reply error code
*/
static coroutine_fn int nbd_co_receive_one_chunk(
- BDRVNBDState *s, uint64_t handle, bool only_structured,
+ BDRVNBDState *s, uint64_t cookie, bool only_structured,
int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
Error **errp)
{
- int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
+ int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
request_ret, qiov, payload, errp);
if (ret < 0) {
/* For assert at loop start in nbd_connection_entry */
*reply = s->reply;
}
- s->reply.handle = 0;
+ s->reply.cookie = 0;
nbd_recv_coroutines_wake(s);
* NBD_FOREACH_REPLY_CHUNK
* The pointer stored in @payload requires g_free() to free it.
*/
-#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
+#define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
qiov, reply, payload) \
for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
- nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
+ nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)
/*
* nbd_reply_chunk_iter_receive
*/
static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
NBDReplyChunkIter *iter,
- uint64_t handle,
+ uint64_t cookie,
QEMUIOVector *qiov,
NBDReply *reply,
void **payload)
reply = &local_reply;
}
- ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
+ ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
&request_ret, qiov, reply, payload,
&local_err);
if (ret < 0) {
break_loop:
qemu_mutex_lock(&s->requests_lock);
- s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
+ s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
s->in_flight--;
qemu_co_queue_next(&s->free_sema);
qemu_mutex_unlock(&s->requests_lock);
return false;
}
-static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
- int *request_ret, Error **errp)
+static int coroutine_fn
+nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
+ int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
- NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
+ NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
/* nbd_reply_chunk_iter_receive does all the work */
}
return iter.ret;
}
-static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
- uint64_t offset, QEMUIOVector *qiov,
- int *request_ret, Error **errp)
+static int coroutine_fn
+nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
+ uint64_t offset, QEMUIOVector *qiov,
+ int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
NBDReply reply;
void *payload = NULL;
Error *local_err = NULL;
- NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
+ NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, s->info.structured_reply,
qiov, &reply, &payload)
{
int ret;
return iter.ret;
}
-static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
- uint64_t handle, uint64_t length,
- NBDExtent *extent,
- int *request_ret, Error **errp)
+static int coroutine_fn
+nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
+ uint64_t length, NBDExtent *extent,
+ int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
NBDReply reply;
bool received = false;
assert(!extent->length);
- NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
+ NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
int ret;
NBDStructuredReplyChunk *chunk = &reply.structured;
continue;
}
- ret = nbd_co_receive_return_code(s, request->handle,
+ ret = nbd_co_receive_return_code(s, request->cookie,
&request_ret, &local_err);
if (local_err) {
trace_nbd_co_request_fail(request->from, request->len,
- request->handle, request->flags,
+ request->cookie, request->flags,
request->type,
nbd_cmd_lookup(request->type),
ret, error_get_pretty(local_err));
continue;
}
- ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
+ ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
&request_ret, &local_err);
if (local_err) {
- trace_nbd_co_request_fail(request.from, request.len, request.handle,
+ trace_nbd_co_request_fail(request.from, request.len, request.cookie,
request.flags, request.type,
nbd_cmd_lookup(request.type),
ret, error_get_pretty(local_err));
continue;
}
- ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
+ ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
&extent, &request_ret,
&local_err);
if (local_err) {
- trace_nbd_co_request_fail(request.from, request.len, request.handle,
+ trace_nbd_co_request_fail(request.from, request.len, request.cookie,
request.flags, request.type,
nbd_cmd_lookup(request.type),
ret, error_get_pretty(local_err));
q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
q->need_kick++;
- blk_io_plug_call(nvme_unplug_fn, q);
qemu_mutex_unlock(&q->lock);
+
+ blk_io_plug_call(nvme_unplug_fn, q);
}
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
void mmap_unlock(void)
{
+ assert(mmap_lock_count > 0);
if (--mmap_lock_count == 0) {
pthread_mutex_unlock(&mmap_mutex);
}
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong alignment)
{
- abi_ulong addr;
- abi_ulong end_addr;
- int prot;
- int looped = 0;
-
- if (size > reserved_va) {
- return (abi_ulong)-1;
- }
+ abi_ulong ret;
- size = HOST_PAGE_ALIGN(size) + alignment;
- end_addr = start + size;
- if (end_addr > reserved_va) {
- end_addr = reserved_va + 1;
+ ret = page_find_range_empty(start, reserved_va, size, alignment);
+ if (ret == -1 && start > TARGET_PAGE_SIZE) {
+ /* Restart at the beginning of the address space. */
+ ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
+ size, alignment);
}
- addr = end_addr - qemu_host_page_size;
- while (1) {
- if (addr > end_addr) {
- if (looped) {
- return (abi_ulong)-1;
- }
- end_addr = reserved_va + 1;
- addr = end_addr - qemu_host_page_size;
- looped = 1;
- continue;
- }
- prot = page_get_flags(addr);
- if (prot) {
- end_addr = addr;
- }
- if (end_addr - addr >= size) {
- break;
- }
- addr -= qemu_host_page_size;
- }
-
- if (start == mmap_next_start) {
- mmap_next_start = addr;
- }
- /* addr is sufficiently low to align it up */
- if (alignment != 0) {
- addr = (addr + alignment) & ~(alignment - 1);
- }
- return addr;
+ return ret;
}
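
/*
 * Worked example of the two-pass search above (hypothetical values):
 * with start = 0x40000000, the first call scans [start, reserved_va];
 * if that range is fully occupied, the second call wraps and scans
 * [TARGET_PAGE_SIZE, start - 1].  Every page except page zero is thus
 * considered once, though a free range straddling start is not found.
 */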
/*
}
/* Reject the mapping if any page within the range is mapped */
- if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
+ if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
errno = EINVAL;
goto fail;
}
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
{
- return page_check_range((target_ulong)addr, size, type) == 0;
+ return page_check_range((target_ulong)addr, size, type);
}
/*
#CONFIG_VFIO_CCW=n
#CONFIG_VIRTIO_PCI=n
#CONFIG_WDT_DIAG288=n
+#CONFIG_PCIE_DEVICES=n
# Boards:
#
cpu="s390"
fi
elif check_define __riscv ; then
- cpu="riscv"
+ if check_define _LP64 ; then
+ cpu="riscv64"
+ else
+ cpu="riscv32"
+ fi
elif check_define __arm__ ; then
cpu="arm"
elif check_define __aarch64__ ; then
mips64)
linux_arch=mips
;;
+ riscv32|riscv64)
+ linux_arch=riscv
+ ;;
*)
# For most CPUs the kernel architecture name and QEMU CPU name match.
linux_arch="$cpu"
cmd->state = VG_CMD_STATE_PENDING;
}
+static gboolean
+get_edid_cb(gint fd, GIOCondition condition, gpointer user_data)
+{
+ struct virtio_gpu_resp_edid resp_edid;
+ VuGpu *vg = user_data;
+ struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);
+
+ g_debug("get edid cb");
+ assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_EDID);
+ if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_EDID,
+ sizeof(resp_edid), &resp_edid)) {
+ return G_SOURCE_CONTINUE;
+ }
+
+ QTAILQ_REMOVE(&vg->fenceq, cmd, next);
+ vg_ctrl_response(vg, cmd, &resp_edid.hdr, sizeof(resp_edid));
+
+ vg->wait_in = 0;
+ vg_handle_ctrl(&vg->dev.parent, 0);
+
+ return G_SOURCE_REMOVE;
+}
+
+void
+vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_cmd_get_edid get_edid;
+
+ VUGPU_FILL_CMD(get_edid);
+ virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
+
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_GET_EDID,
+ .size = sizeof(VhostUserGpuEdidRequest),
+ .payload.edid_req = {
+ .scanout_id = get_edid.scanout,
+ },
+ };
+
+ assert(vg->wait_in == 0);
+
+ vg_send_msg(vg, &msg, -1);
+ vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
+ get_edid_cb, vg);
+ cmd->state = VG_CMD_STATE_PENDING;
+}
+
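/*
 * A short note on the flow above: vg_get_edid() forwards the guest's
 * GET_EDID as a VHOST_USER_GPU_GET_EDID message, parks the command as
 * VG_CMD_STATE_PENDING, and arms get_edid_cb() on the socket.  The
 * callback then completes the command with the frontend's
 * virtio_gpu_resp_edid and resumes ctrl-queue processing.
 */
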
static void
vg_resource_create_2d(VuGpu *g,
struct virtio_gpu_ctrl_command *cmd)
case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
vg_resource_detach_backing(vg, cmd);
break;
- /* case VIRTIO_GPU_CMD_GET_EDID: */
- /* break */
+ case VIRTIO_GPU_CMD_GET_EDID:
+ vg_get_edid(vg, cmd);
+ break;
default:
g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
+ const uint64_t protocol_edid = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID);
VuGpu *g = user_data;
- uint64_t u64;
+ uint64_t protocol_features;
VhostUserGpuMsg msg = {
.request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
};
- if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
+ if (!vg_recv_msg(g, msg.request,
+ sizeof(protocol_features), &protocol_features)) {
return G_SOURCE_CONTINUE;
}
+ protocol_features &= protocol_edid;
+
msg = (VhostUserGpuMsg) {
.request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
.size = sizeof(uint64_t),
- .payload.u64 = 0
+ .payload.u64 = protocol_features,
};
vg_send_msg(g, &msg, -1);
g->wait_in = 0;
vg_handle_ctrl(&g->dev.parent, 0);
+ if (g->edid_inited && !(protocol_features & protocol_edid)) {
+ g_printerr("EDID feature set by the frontend but it does not support "
+ "the EDID vhost-user-gpu protocol.\n");
+ exit(EXIT_FAILURE);
+ }
+
return G_SOURCE_REMOVE;
}
set_gpu_protocol_features(VuGpu *g)
{
VhostUserGpuMsg msg = {
- .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
+ .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
};
vg_send_msg(g, &msg, -1);
if (opt_virgl) {
features |= 1 << VIRTIO_GPU_F_VIRGL;
}
+ features |= 1 << VIRTIO_GPU_F_EDID;
return features;
}
g->virgl_inited = true;
}
+ g->edid_inited = !!(features & (1 << VIRTIO_GPU_F_EDID));
+
g->virgl = virgl;
}
case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
vg_get_display_info(g, cmd);
break;
+ case VIRTIO_GPU_CMD_GET_EDID:
+ vg_get_edid(g, cmd);
+ break;
default:
g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
VHOST_USER_GPU_UPDATE,
VHOST_USER_GPU_DMABUF_SCANOUT,
VHOST_USER_GPU_DMABUF_UPDATE,
+ VHOST_USER_GPU_GET_EDID,
} VhostUserGpuRequest;
typedef struct VhostUserGpuDisplayInfoReply {
int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;
+typedef struct VhostUserGpuEdidRequest {
+ uint32_t scanout_id;
+} QEMU_PACKED VhostUserGpuEdidRequest;
+
typedef struct VhostUserGpuMsg {
uint32_t request; /* VhostUserGpuRequest */
uint32_t flags;
VhostUserGpuScanout scanout;
VhostUserGpuUpdate update;
VhostUserGpuDMABUFScanout dmabuf_scanout;
+ VhostUserGpuEdidRequest edid_req;
+ struct virtio_gpu_resp_edid resp_edid;
struct virtio_gpu_resp_display_info display_info;
uint64_t u64;
} payload;
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
+#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
+
struct virtio_gpu_scanout {
uint32_t width, height;
int x, y;
bool virgl;
bool virgl_inited;
+ bool edid_inited;
uint32_t inflight;
struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
struct iovec **iov);
void vg_cleanup_mapping_iov(VuGpu *g, struct iovec *iov, uint32_t count);
void vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
+void vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
void vg_wait_ok(VuGpu *g);
common_ss.add(when: 'CONFIG_MICROBLAZE_DIS', if_true: files('microblaze.c'))
common_ss.add(when: 'CONFIG_MIPS_DIS', if_true: files('mips.c', 'nanomips.c'))
common_ss.add(when: 'CONFIG_NIOS2_DIS', if_true: files('nios2.c'))
-common_ss.add(when: 'CONFIG_RISCV_DIS', if_true: files('riscv.c'))
+common_ss.add(when: 'CONFIG_RISCV_DIS', if_true: files(
+ 'riscv.c',
+ 'riscv-xthead.c',
+ 'riscv-xventana.c'
+))
common_ss.add(when: 'CONFIG_SH4_DIS', if_true: files('sh4.c'))
common_ss.add(when: 'CONFIG_SPARC_DIS', if_true: files('sparc.c'))
common_ss.add(when: 'CONFIG_XTENSA_DIS', if_true: files('xtensa.c'))
--- /dev/null
+/*
+ * QEMU RISC-V Disassembler for xthead.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "disas/riscv.h"
+#include "disas/riscv-xthead.h"
+
+typedef enum {
+ /* 0 is reserved for rv_op_illegal. */
+ /* XTheadBa */
+ rv_op_th_addsl = 1,
+ /* XTheadBb */
+ rv_op_th_srri,
+ rv_op_th_srriw,
+ rv_op_th_ext,
+ rv_op_th_extu,
+ rv_op_th_ff0,
+ rv_op_th_ff1,
+ rv_op_th_rev,
+ rv_op_th_revw,
+ rv_op_th_tstnbz,
+ /* XTheadBs */
+ rv_op_th_tst,
+ /* XTheadCmo */
+ rv_op_th_dcache_call,
+ rv_op_th_dcache_ciall,
+ rv_op_th_dcache_iall,
+ rv_op_th_dcache_cpa,
+ rv_op_th_dcache_cipa,
+ rv_op_th_dcache_ipa,
+ rv_op_th_dcache_cva,
+ rv_op_th_dcache_civa,
+ rv_op_th_dcache_iva,
+ rv_op_th_dcache_csw,
+ rv_op_th_dcache_cisw,
+ rv_op_th_dcache_isw,
+ rv_op_th_dcache_cpal1,
+ rv_op_th_dcache_cval1,
+ rv_op_th_icache_iall,
+ rv_op_th_icache_ialls,
+ rv_op_th_icache_ipa,
+ rv_op_th_icache_iva,
+ rv_op_th_l2cache_call,
+ rv_op_th_l2cache_ciall,
+ rv_op_th_l2cache_iall,
+ /* XTheadCondMov */
+ rv_op_th_mveqz,
+ rv_op_th_mvnez,
+ /* XTheadFMemIdx */
+ rv_op_th_flrd,
+ rv_op_th_flrw,
+ rv_op_th_flurd,
+ rv_op_th_flurw,
+ rv_op_th_fsrd,
+ rv_op_th_fsrw,
+ rv_op_th_fsurd,
+ rv_op_th_fsurw,
+ /* XTheadFmv */
+ rv_op_th_fmv_hw_x,
+ rv_op_th_fmv_x_hw,
+ /* XTheadMac */
+ rv_op_th_mula,
+ rv_op_th_mulah,
+ rv_op_th_mulaw,
+ rv_op_th_muls,
+ rv_op_th_mulsw,
+ rv_op_th_mulsh,
+ /* XTheadMemIdx */
+ rv_op_th_lbia,
+ rv_op_th_lbib,
+ rv_op_th_lbuia,
+ rv_op_th_lbuib,
+ rv_op_th_lhia,
+ rv_op_th_lhib,
+ rv_op_th_lhuia,
+ rv_op_th_lhuib,
+ rv_op_th_lwia,
+ rv_op_th_lwib,
+ rv_op_th_lwuia,
+ rv_op_th_lwuib,
+ rv_op_th_ldia,
+ rv_op_th_ldib,
+ rv_op_th_sbia,
+ rv_op_th_sbib,
+ rv_op_th_shia,
+ rv_op_th_shib,
+ rv_op_th_swia,
+ rv_op_th_swib,
+ rv_op_th_sdia,
+ rv_op_th_sdib,
+ rv_op_th_lrb,
+ rv_op_th_lrbu,
+ rv_op_th_lrh,
+ rv_op_th_lrhu,
+ rv_op_th_lrw,
+ rv_op_th_lrwu,
+ rv_op_th_lrd,
+ rv_op_th_srb,
+ rv_op_th_srh,
+ rv_op_th_srw,
+ rv_op_th_srd,
+ rv_op_th_lurb,
+ rv_op_th_lurbu,
+ rv_op_th_lurh,
+ rv_op_th_lurhu,
+ rv_op_th_lurw,
+ rv_op_th_lurwu,
+ rv_op_th_lurd,
+ rv_op_th_surb,
+ rv_op_th_surh,
+ rv_op_th_surw,
+ rv_op_th_surd,
+ /* XTheadMemPair */
+ rv_op_th_ldd,
+ rv_op_th_lwd,
+ rv_op_th_lwud,
+ rv_op_th_sdd,
+ rv_op_th_swd,
+ /* XTheadSync */
+ rv_op_th_sfence_vmas,
+ rv_op_th_sync,
+ rv_op_th_sync_i,
+ rv_op_th_sync_is,
+ rv_op_th_sync_s,
+} rv_xthead_op;
+
+const rv_opcode_data xthead_opcode_data[] = {
+ { "th.illegal", rv_codec_illegal, rv_fmt_none, NULL, 0, 0, 0 },
+ /* XTheadBa */
+ { "th.addsl", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ /* XTheadBb */
+ { "th.srri", rv_codec_r2_imm6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 },
+ { "th.srriw", rv_codec_r2_imm5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 },
+ { "th.ext", rv_codec_r2_immhl, rv_fmt_rd_rs1_immh_imml, NULL, 0, 0, 0 },
+ { "th.extu", rv_codec_r2_immhl, rv_fmt_rd_rs1_immh_imml, NULL, 0, 0, 0 },
+ { "th.ff0", rv_codec_r2, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
+ { "th.ff1", rv_codec_r2, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
+ { "th.rev", rv_codec_r2, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
+ { "th.revw", rv_codec_r2, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
+ { "th.tstnbz", rv_codec_r2, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
+ /* XTheadBs */
+ { "th.tst", rv_codec_r2_imm6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 },
+ /* XTheadCmo */
+ { "th.dcache.call", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.dcache.ciall", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.dcache.iall", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.dcache.cpa", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.cipa", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.ipa", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.cva", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.civa", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.iva", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.csw", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.cisw", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.isw", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.cpal1", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.dcache.cval1", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.icache.iall", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.icache.ialls", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.icache.ipa", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.icache.iva", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "th.l2cache.call", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.l2cache.ciall", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.l2cache.iall", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ /* XTheadCondMov */
+ { "th.mveqz", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "th.mvnez", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ /* XTheadFMemIdx */
+ { "th.flrd", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.flrw", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.flurd", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.flurw", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.fsrd", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.fsrw", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.fsurd", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.fsurw", rv_codec_r_imm2, rv_fmt_frd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ /* XTheadFmv */
+ { "th.fmv.hw.x", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 },
+ { "th.fmv.x.hw", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 },
+ /* XTheadMac */
+ { "th.mula", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "th.mulaw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "th.mulah", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "th.muls", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "th.mulsw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "th.mulsh", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ /* XTheadMemIdx */
+ { "th.lbia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+    { "th.lbib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lbuia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lbuib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lhia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lhib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lhuia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lhuib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lwia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lwib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lwuia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lwuib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.ldia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.ldib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.sbia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.sbib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.shia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.shib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.swia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.swib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.sdia", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.sdib", rv_codec_r2_imm2_imm5, rv_fmt_rd_rs1_immh_imml_addr, NULL, 0, 0, 0 },
+ { "th.lrb", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lrbu", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lrh", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lrhu", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lrw", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lrwu", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lrd", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.srb", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.srh", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.srw", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.srd", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lurb", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lurbu", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lurh", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lurhu", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lurw", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lurwu", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.lurd", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.surb", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.surh", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.surw", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ { "th.surd", rv_codec_r_imm2, rv_fmt_rd_rs1_rs2_imm, NULL, 0, 0, 0 },
+ /* XTheadMemPair */
+ { "th.ldd", rv_codec_r_imm2, rv_fmt_rd2_imm, NULL, 0, 0, 0 },
+ { "th.lwd", rv_codec_r_imm2, rv_fmt_rd2_imm, NULL, 0, 0, 0 },
+ { "th.lwud", rv_codec_r_imm2, rv_fmt_rd2_imm, NULL, 0, 0, 0 },
+ { "th.sdd", rv_codec_r_imm2, rv_fmt_rd2_imm, NULL, 0, 0, 0 },
+ { "th.swd", rv_codec_r_imm2, rv_fmt_rd2_imm, NULL, 0, 0, 0 },
+ /* XTheadSync */
+ { "th.sfence.vmas", rv_codec_r, rv_fmt_rs1_rs2, NULL, 0, 0, 0 },
+ { "th.sync", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.sync.i", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.sync.is", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "th.sync.s", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+};
+
+void decode_xtheadba(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 1:
+ switch ((inst >> 25) & 0b1111111) {
+ case 0b0000000:
+ case 0b0000001:
+ case 0b0000010:
+ case 0b0000011: op = rv_op_th_addsl; break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
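+
+/*
+ * Worked example (illustrative encoding, not part of the patch):
+ * 0x02c5950b splits into
+ *   0000001 01100 01011 001 01010 0001011
+ *   funct7  rs2   rs1   f3  rd    custom-0
+ * funct3 is 1 and funct7 is 0b0000001, so the word decodes as
+ * th.addsl a0,a1,a2,1 (imm2 comes from funct7[1:0]).
+ */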
+
+void decode_xtheadbb(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 1:
+ switch ((inst >> 25) & 0b1111111) {
+ case 0b0001010: op = rv_op_th_srriw; break;
+ case 0b1000000:
+ if (((inst >> 20) & 0b11111) == 0) {
+ op = rv_op_th_tstnbz;
+ }
+ break;
+ case 0b1000001:
+ if (((inst >> 20) & 0b11111) == 0) {
+ op = rv_op_th_rev;
+ }
+ break;
+ case 0b1000010:
+ if (((inst >> 20) & 0b11111) == 0) {
+ op = rv_op_th_ff0;
+ }
+ break;
+ case 0b1000011:
+ if (((inst >> 20) & 0b11111) == 0) {
+ op = rv_op_th_ff1;
+ }
+ break;
+ case 0b1000100:
+ case 0b1001000:
+ if (((inst >> 20) & 0b11111) == 0) {
+ op = rv_op_th_revw;
+ }
+ break;
+ case 0b0000100:
+ case 0b0000101: op = rv_op_th_srri; break;
+ }
+ break;
+ case 2: op = rv_op_th_ext; break;
+ case 3: op = rv_op_th_extu; break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadbs(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 1:
+ switch ((inst >> 26) & 0b111111) {
+ case 0b100010: op = rv_op_th_tst; break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadcmo(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 0:
+                switch ((inst >> 20) & 0b111111111111) {
+ case 0b000000000001:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_dcache_call;
+ }
+ break;
+ case 0b000000000011:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_dcache_ciall;
+ }
+ break;
+ case 0b000000000010:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_dcache_iall;
+ }
+ break;
+ case 0b000000101001: op = rv_op_th_dcache_cpa; break;
+ case 0b000000101011: op = rv_op_th_dcache_cipa; break;
+ case 0b000000101010: op = rv_op_th_dcache_ipa; break;
+ case 0b000000100101: op = rv_op_th_dcache_cva; break;
+ case 0b000000100111: op = rv_op_th_dcache_civa; break;
+ case 0b000000100110: op = rv_op_th_dcache_iva; break;
+ case 0b000000100001: op = rv_op_th_dcache_csw; break;
+ case 0b000000100011: op = rv_op_th_dcache_cisw; break;
+ case 0b000000100010: op = rv_op_th_dcache_isw; break;
+ case 0b000000101000: op = rv_op_th_dcache_cpal1; break;
+ case 0b000000100100: op = rv_op_th_dcache_cval1; break;
+ case 0b000000010000:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_icache_iall;
+ }
+ break;
+ case 0b000000010001:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_icache_ialls;
+ }
+ break;
+ case 0b000000111000: op = rv_op_th_icache_ipa; break;
+ case 0b000000110000: op = rv_op_th_icache_iva; break;
+ case 0b000000010101:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_l2cache_call;
+ }
+ break;
+ case 0b000000010111:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_l2cache_ciall;
+ }
+ break;
+ case 0b000000010110:
+                    if (((inst >> 15) & 0b11111) == 0) {
+ op = rv_op_th_l2cache_iall;
+ }
+ break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadcondmov(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 1:
+ switch ((inst >> 25) & 0b1111111) {
+ case 0b0100000: op = rv_op_th_mveqz; break;
+ case 0b0100001: op = rv_op_th_mvnez; break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadfmemidx(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 6:
+ switch ((inst >> 27) & 0b11111) {
+ case 8: op = rv_op_th_flrw; break;
+ case 10: op = rv_op_th_flurw; break;
+ case 12: op = rv_op_th_flrd; break;
+ case 14: op = rv_op_th_flurd; break;
+ }
+ break;
+ case 7:
+ switch ((inst >> 27) & 0b11111) {
+ case 8: op = rv_op_th_fsrw; break;
+ case 10: op = rv_op_th_fsurw; break;
+ case 12: op = rv_op_th_fsrd; break;
+ case 14: op = rv_op_th_fsurd; break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadfmv(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 1:
+ switch ((inst >> 25) & 0b1111111) {
+ case 0b1010000:
+ if (((inst >> 20) & 0b11111) == 0) {
+ op = rv_op_th_fmv_hw_x;
+ }
+ break;
+ case 0b1100000:
+ if (((inst >> 20) & 0b11111) == 0) {
+ op = rv_op_th_fmv_x_hw;
+ }
+ break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadmac(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 1:
+ switch ((inst >> 25) & 0b1111111) {
+ case 0b0010000: op = rv_op_th_mula; break;
+ case 0b0010001: op = rv_op_th_muls; break;
+ case 0b0010010: op = rv_op_th_mulaw; break;
+ case 0b0010011: op = rv_op_th_mulsw; break;
+ case 0b0010100: op = rv_op_th_mulah; break;
+ case 0b0010101: op = rv_op_th_mulsh; break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadmemidx(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 4:
+ switch ((inst >> 27) & 0b11111) {
+ case 0: op = rv_op_th_lrb; break;
+ case 1: op = rv_op_th_lbib; break;
+ case 2: op = rv_op_th_lurb; break;
+ case 3: op = rv_op_th_lbia; break;
+ case 4: op = rv_op_th_lrh; break;
+ case 5: op = rv_op_th_lhib; break;
+ case 6: op = rv_op_th_lurh; break;
+ case 7: op = rv_op_th_lhia; break;
+ case 8: op = rv_op_th_lrw; break;
+ case 9: op = rv_op_th_lwib; break;
+ case 10: op = rv_op_th_lurw; break;
+ case 11: op = rv_op_th_lwia; break;
+ case 12: op = rv_op_th_lrd; break;
+ case 13: op = rv_op_th_ldib; break;
+ case 14: op = rv_op_th_lurd; break;
+ case 15: op = rv_op_th_ldia; break;
+ case 16: op = rv_op_th_lrbu; break;
+ case 17: op = rv_op_th_lbuib; break;
+ case 18: op = rv_op_th_lurbu; break;
+ case 19: op = rv_op_th_lbuia; break;
+ case 20: op = rv_op_th_lrhu; break;
+ case 21: op = rv_op_th_lhuib; break;
+ case 22: op = rv_op_th_lurhu; break;
+ case 23: op = rv_op_th_lhuia; break;
+ case 24: op = rv_op_th_lrwu; break;
+ case 25: op = rv_op_th_lwuib; break;
+ case 26: op = rv_op_th_lurwu; break;
+ case 27: op = rv_op_th_lwuia; break;
+ }
+ break;
+ case 5:
+ switch ((inst >> 27) & 0b11111) {
+ case 0: op = rv_op_th_srb; break;
+ case 1: op = rv_op_th_sbib; break;
+ case 2: op = rv_op_th_surb; break;
+ case 3: op = rv_op_th_sbia; break;
+ case 4: op = rv_op_th_srh; break;
+ case 5: op = rv_op_th_shib; break;
+ case 6: op = rv_op_th_surh; break;
+ case 7: op = rv_op_th_shia; break;
+ case 8: op = rv_op_th_srw; break;
+ case 9: op = rv_op_th_swib; break;
+ case 10: op = rv_op_th_surw; break;
+ case 11: op = rv_op_th_swia; break;
+ case 12: op = rv_op_th_srd; break;
+ case 13: op = rv_op_th_sdib; break;
+ case 14: op = rv_op_th_surd; break;
+ case 15: op = rv_op_th_sdia; break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadmempair(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 4:
+ switch ((inst >> 27) & 0b11111) {
+ case 28: op = rv_op_th_lwd; break;
+ case 30: op = rv_op_th_lwud; break;
+ case 31: op = rv_op_th_ldd; break;
+ }
+ break;
+ case 5:
+ switch ((inst >> 27) & 0b11111) {
+ case 28: op = rv_op_th_swd; break;
+ case 31: op = rv_op_th_sdd; break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
+
+void decode_xtheadsync(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 2:
+ /* custom-0 */
+ switch ((inst >> 12) & 0b111) {
+ case 0:
+ switch ((inst >> 25) & 0b1111111) {
+ case 0b0000010: op = rv_op_th_sfence_vmas; break;
+ case 0b0000000:
+ switch ((inst >> 20) & 0b11111) {
+ case 0b11000: op = rv_op_th_sync; break;
+ case 0b11010: op = rv_op_th_sync_i; break;
+ case 0b11011: op = rv_op_th_sync_is; break;
+ case 0b11001: op = rv_op_th_sync_s; break;
+ }
+ break;
+ }
+ break;
+ }
+ break;
+ /* custom-0 */
+ }
+ break;
+ }
+
+ dec->op = op;
+}
--- /dev/null
+/*
+ * QEMU disassembler -- RISC-V specific header (xthead*).
+ *
+ * Copyright (c) 2023 VRULL GmbH
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef DISAS_RISCV_XTHEAD_H
+#define DISAS_RISCV_XTHEAD_H
+
+#include "disas/riscv.h"
+
+extern const rv_opcode_data xthead_opcode_data[];
+
+void decode_xtheadba(rv_decode *, rv_isa);
+void decode_xtheadbb(rv_decode *, rv_isa);
+void decode_xtheadbs(rv_decode *, rv_isa);
+void decode_xtheadcmo(rv_decode *, rv_isa);
+void decode_xtheadcondmov(rv_decode *, rv_isa);
+void decode_xtheadfmemidx(rv_decode *, rv_isa);
+void decode_xtheadfmv(rv_decode *, rv_isa);
+void decode_xtheadmac(rv_decode *, rv_isa);
+void decode_xtheadmemidx(rv_decode *, rv_isa);
+void decode_xtheadmempair(rv_decode *, rv_isa);
+void decode_xtheadsync(rv_decode *, rv_isa);
+
+#endif /* DISAS_RISCV_XTHEAD_H */
--- /dev/null
+/*
+ * QEMU RISC-V Disassembler for xventana.
+ *
+ * Copyright (c) 2023 VRULL GmbH
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "disas/riscv.h"
+#include "disas/riscv-xventana.h"
+
+typedef enum {
+ /* 0 is reserved for rv_op_illegal. */
+ ventana_op_vt_maskc = 1,
+ ventana_op_vt_maskcn = 2,
+} rv_ventana_op;
+
+const rv_opcode_data ventana_opcode_data[] = {
+ { "vt.illegal", rv_codec_illegal, rv_fmt_none, NULL, 0, 0, 0 },
+ { "vt.maskc", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "vt.maskcn", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+};
+
+void decode_xventanacondops(rv_decode *dec, rv_isa isa)
+{
+ rv_inst inst = dec->inst;
+ rv_opcode op = rv_op_illegal;
+
+ switch (((inst >> 0) & 0b11)) {
+ case 3:
+ switch (((inst >> 2) & 0b11111)) {
+ case 30:
+            switch (((inst >> 22) & 0b1111111000) |
+                    ((inst >> 12) & 0b0000000111)) {
+ case 6: op = ventana_op_vt_maskc; break;
+ case 7: op = ventana_op_vt_maskcn; break;
+ }
+ break;
+ }
+ break;
+ }
+
+ dec->op = op;
+}
--- /dev/null
+/*
+ * QEMU disassembler -- RISC-V specific header (xventana*).
+ *
+ * Copyright (c) 2023 VRULL GmbH
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef DISAS_RISCV_XVENTANA_H
+#define DISAS_RISCV_XVENTANA_H
+
+#include "disas/riscv.h"
+
+extern const rv_opcode_data ventana_opcode_data[];
+
+void decode_xventanacondops(rv_decode*, rv_isa);
+
+#endif /* DISAS_RISCV_XVENTANA_H */
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
#include "disas/dis-asm.h"
#include "target/riscv/cpu_cfg.h"
+#include "disas/riscv.h"
-/* types */
-
-typedef uint64_t rv_inst;
-typedef uint16_t rv_opcode;
-
-/* enums */
-
-typedef enum {
- rv32,
- rv64,
- rv128
-} rv_isa;
-
-typedef enum {
- rv_rm_rne = 0,
- rv_rm_rtz = 1,
- rv_rm_rdn = 2,
- rv_rm_rup = 3,
- rv_rm_rmm = 4,
- rv_rm_dyn = 7,
-} rv_rm;
-
-typedef enum {
- rv_fence_i = 8,
- rv_fence_o = 4,
- rv_fence_r = 2,
- rv_fence_w = 1,
-} rv_fence;
-
-typedef enum {
- rv_ireg_zero,
- rv_ireg_ra,
- rv_ireg_sp,
- rv_ireg_gp,
- rv_ireg_tp,
- rv_ireg_t0,
- rv_ireg_t1,
- rv_ireg_t2,
- rv_ireg_s0,
- rv_ireg_s1,
- rv_ireg_a0,
- rv_ireg_a1,
- rv_ireg_a2,
- rv_ireg_a3,
- rv_ireg_a4,
- rv_ireg_a5,
- rv_ireg_a6,
- rv_ireg_a7,
- rv_ireg_s2,
- rv_ireg_s3,
- rv_ireg_s4,
- rv_ireg_s5,
- rv_ireg_s6,
- rv_ireg_s7,
- rv_ireg_s8,
- rv_ireg_s9,
- rv_ireg_s10,
- rv_ireg_s11,
- rv_ireg_t3,
- rv_ireg_t4,
- rv_ireg_t5,
- rv_ireg_t6,
-} rv_ireg;
-
-typedef enum {
- rvc_end,
- rvc_rd_eq_ra,
- rvc_rd_eq_x0,
- rvc_rs1_eq_x0,
- rvc_rs2_eq_x0,
- rvc_rs2_eq_rs1,
- rvc_rs1_eq_ra,
- rvc_imm_eq_zero,
- rvc_imm_eq_n1,
- rvc_imm_eq_p1,
- rvc_csr_eq_0x001,
- rvc_csr_eq_0x002,
- rvc_csr_eq_0x003,
- rvc_csr_eq_0xc00,
- rvc_csr_eq_0xc01,
- rvc_csr_eq_0xc02,
- rvc_csr_eq_0xc80,
- rvc_csr_eq_0xc81,
- rvc_csr_eq_0xc82,
-} rvc_constraint;
+/* Vendor extensions */
+#include "disas/riscv-xthead.h"
+#include "disas/riscv-xventana.h"
typedef enum {
- rv_codec_illegal,
- rv_codec_none,
- rv_codec_u,
- rv_codec_uj,
- rv_codec_i,
- rv_codec_i_sh5,
- rv_codec_i_sh6,
- rv_codec_i_sh7,
- rv_codec_i_csr,
- rv_codec_s,
- rv_codec_sb,
- rv_codec_r,
- rv_codec_r_m,
- rv_codec_r4_m,
- rv_codec_r_a,
- rv_codec_r_l,
- rv_codec_r_f,
- rv_codec_cb,
- rv_codec_cb_imm,
- rv_codec_cb_sh5,
- rv_codec_cb_sh6,
- rv_codec_ci,
- rv_codec_ci_sh5,
- rv_codec_ci_sh6,
- rv_codec_ci_16sp,
- rv_codec_ci_lwsp,
- rv_codec_ci_ldsp,
- rv_codec_ci_lqsp,
- rv_codec_ci_li,
- rv_codec_ci_lui,
- rv_codec_ci_none,
- rv_codec_ciw_4spn,
- rv_codec_cj,
- rv_codec_cj_jal,
- rv_codec_cl_lw,
- rv_codec_cl_ld,
- rv_codec_cl_lq,
- rv_codec_cr,
- rv_codec_cr_mv,
- rv_codec_cr_jalr,
- rv_codec_cr_jr,
- rv_codec_cs,
- rv_codec_cs_sw,
- rv_codec_cs_sd,
- rv_codec_cs_sq,
- rv_codec_css_swsp,
- rv_codec_css_sdsp,
- rv_codec_css_sqsp,
- rv_codec_k_bs,
- rv_codec_k_rnum,
- rv_codec_v_r,
- rv_codec_v_ldst,
- rv_codec_v_i,
- rv_codec_vsetvli,
- rv_codec_vsetivli,
- rv_codec_zcb_ext,
- rv_codec_zcb_mul,
- rv_codec_zcb_lb,
- rv_codec_zcb_lh,
- rv_codec_zcmp_cm_pushpop,
- rv_codec_zcmp_cm_mv,
- rv_codec_zcmt_jt,
-} rv_codec;
-
-typedef enum {
- rv_op_illegal = 0,
+ /* 0 is reserved for rv_op_illegal. */
rv_op_lui = 1,
rv_op_auipc = 2,
rv_op_jal = 3,
rv_op_cm_jalt = 788,
rv_op_czero_eqz = 789,
rv_op_czero_nez = 790,
+ rv_op_fcvt_bf16_s = 791,
+ rv_op_fcvt_s_bf16 = 792,
+ rv_op_vfncvtbf16_f_f_w = 793,
+ rv_op_vfwcvtbf16_f_f_v = 794,
+ rv_op_vfwmaccbf16_vv = 795,
+ rv_op_vfwmaccbf16_vf = 796,
+ rv_op_flh = 797,
+ rv_op_fsh = 798,
+ rv_op_fmv_h_x = 799,
+ rv_op_fmv_x_h = 800,
+ rv_op_fli_s = 801,
+ rv_op_fli_d = 802,
+ rv_op_fli_q = 803,
+ rv_op_fli_h = 804,
+ rv_op_fminm_s = 805,
+ rv_op_fmaxm_s = 806,
+ rv_op_fminm_d = 807,
+ rv_op_fmaxm_d = 808,
+ rv_op_fminm_q = 809,
+ rv_op_fmaxm_q = 810,
+ rv_op_fminm_h = 811,
+ rv_op_fmaxm_h = 812,
+ rv_op_fround_s = 813,
+ rv_op_froundnx_s = 814,
+ rv_op_fround_d = 815,
+ rv_op_froundnx_d = 816,
+ rv_op_fround_q = 817,
+ rv_op_froundnx_q = 818,
+ rv_op_fround_h = 819,
+ rv_op_froundnx_h = 820,
+ rv_op_fcvtmod_w_d = 821,
+ rv_op_fmvh_x_d = 822,
+ rv_op_fmvp_d_x = 823,
+ rv_op_fmvh_x_q = 824,
+ rv_op_fmvp_q_x = 825,
+ rv_op_fleq_s = 826,
+ rv_op_fltq_s = 827,
+ rv_op_fleq_d = 828,
+ rv_op_fltq_d = 829,
+ rv_op_fleq_q = 830,
+ rv_op_fltq_q = 831,
+ rv_op_fleq_h = 832,
+ rv_op_fltq_h = 833,
} rv_op;
-/* structures */
-
-typedef struct {
- RISCVCPUConfig *cfg;
- uint64_t pc;
- uint64_t inst;
- int32_t imm;
- uint16_t op;
- uint8_t codec;
- uint8_t rd;
- uint8_t rs1;
- uint8_t rs2;
- uint8_t rs3;
- uint8_t rm;
- uint8_t pred;
- uint8_t succ;
- uint8_t aq;
- uint8_t rl;
- uint8_t bs;
- uint8_t rnum;
- uint8_t vm;
- uint32_t vzimm;
- uint8_t rlist;
-} rv_decode;
-
-typedef struct {
- const int op;
- const rvc_constraint *constraints;
-} rv_comp_data;
-
-enum {
- rvcd_imm_nz = 0x1
-};
-
-typedef struct {
- const char * const name;
- const rv_codec codec;
- const char * const format;
- const rv_comp_data *pseudo;
- const short decomp_rv32;
- const short decomp_rv64;
- const short decomp_rv128;
- const short decomp_data;
-} rv_opcode_data;
-
/* register names */
static const char rv_ireg_name_sym[32][5] = {
"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
};
-/* instruction formats */
-
-#define rv_fmt_none "O\t"
-#define rv_fmt_rs1 "O\t1"
-#define rv_fmt_offset "O\to"
-#define rv_fmt_pred_succ "O\tp,s"
-#define rv_fmt_rs1_rs2 "O\t1,2"
-#define rv_fmt_rd_imm "O\t0,i"
-#define rv_fmt_rd_offset "O\t0,o"
-#define rv_fmt_rd_rs1_rs2 "O\t0,1,2"
-#define rv_fmt_frd_rs1 "O\t3,1"
-#define rv_fmt_frd_frs1 "O\t3,4"
-#define rv_fmt_rd_frs1 "O\t0,4"
-#define rv_fmt_rd_frs1_frs2 "O\t0,4,5"
-#define rv_fmt_frd_frs1_frs2 "O\t3,4,5"
-#define rv_fmt_rm_frd_frs1 "O\tr,3,4"
-#define rv_fmt_rm_frd_rs1 "O\tr,3,1"
-#define rv_fmt_rm_rd_frs1 "O\tr,0,4"
-#define rv_fmt_rm_frd_frs1_frs2 "O\tr,3,4,5"
-#define rv_fmt_rm_frd_frs1_frs2_frs3 "O\tr,3,4,5,6"
-#define rv_fmt_rd_rs1_imm "O\t0,1,i"
-#define rv_fmt_rd_rs1_offset "O\t0,1,i"
-#define rv_fmt_rd_offset_rs1 "O\t0,i(1)"
-#define rv_fmt_frd_offset_rs1 "O\t3,i(1)"
-#define rv_fmt_rd_csr_rs1 "O\t0,c,1"
-#define rv_fmt_rd_csr_zimm "O\t0,c,7"
-#define rv_fmt_rs2_offset_rs1 "O\t2,i(1)"
-#define rv_fmt_frs2_offset_rs1 "O\t5,i(1)"
-#define rv_fmt_rs1_rs2_offset "O\t1,2,o"
-#define rv_fmt_rs2_rs1_offset "O\t2,1,o"
-#define rv_fmt_aqrl_rd_rs2_rs1 "OAR\t0,2,(1)"
-#define rv_fmt_aqrl_rd_rs1 "OAR\t0,(1)"
-#define rv_fmt_rd "O\t0"
-#define rv_fmt_rd_zimm "O\t0,7"
-#define rv_fmt_rd_rs1 "O\t0,1"
-#define rv_fmt_rd_rs2 "O\t0,2"
-#define rv_fmt_rs1_offset "O\t1,o"
-#define rv_fmt_rs2_offset "O\t2,o"
-#define rv_fmt_rs1_rs2_bs "O\t1,2,b"
-#define rv_fmt_rd_rs1_rnum "O\t0,1,n"
-#define rv_fmt_ldst_vd_rs1_vm "O\tD,(1)m"
-#define rv_fmt_ldst_vd_rs1_rs2_vm "O\tD,(1),2m"
-#define rv_fmt_ldst_vd_rs1_vs2_vm "O\tD,(1),Fm"
-#define rv_fmt_vd_vs2_vs1 "O\tD,F,E"
-#define rv_fmt_vd_vs2_vs1_vl "O\tD,F,El"
-#define rv_fmt_vd_vs2_vs1_vm "O\tD,F,Em"
-#define rv_fmt_vd_vs2_rs1_vl "O\tD,F,1l"
-#define rv_fmt_vd_vs2_fs1_vl "O\tD,F,4l"
-#define rv_fmt_vd_vs2_rs1_vm "O\tD,F,1m"
-#define rv_fmt_vd_vs2_fs1_vm "O\tD,F,4m"
-#define rv_fmt_vd_vs2_imm_vl "O\tD,F,il"
-#define rv_fmt_vd_vs2_imm_vm "O\tD,F,im"
-#define rv_fmt_vd_vs2_uimm_vm "O\tD,F,um"
-#define rv_fmt_vd_vs1_vs2_vm "O\tD,E,Fm"
-#define rv_fmt_vd_rs1_vs2_vm "O\tD,1,Fm"
-#define rv_fmt_vd_fs1_vs2_vm "O\tD,4,Fm"
-#define rv_fmt_vd_vs1 "O\tD,E"
-#define rv_fmt_vd_rs1 "O\tD,1"
-#define rv_fmt_vd_fs1 "O\tD,4"
-#define rv_fmt_vd_imm "O\tD,i"
-#define rv_fmt_vd_vs2 "O\tD,F"
-#define rv_fmt_vd_vs2_vm "O\tD,Fm"
-#define rv_fmt_rd_vs2_vm "O\t0,Fm"
-#define rv_fmt_rd_vs2 "O\t0,F"
-#define rv_fmt_fd_vs2 "O\t3,F"
-#define rv_fmt_vd_vm "O\tDm"
-#define rv_fmt_vsetvli "O\t0,1,v"
-#define rv_fmt_vsetivli "O\t0,u,v"
-#define rv_fmt_rs1_rs2_zce_ldst "O\t2,i(1)"
-#define rv_fmt_push_rlist "O\tx,-i"
-#define rv_fmt_pop_rlist "O\tx,i"
-#define rv_fmt_zcmt_index "O\ti"
+/* The FLI.[HSDQ] numeric constants (0.0 for symbolic constants).
+ * The constants use the hex floating-point literal representation
+ * that is printed when using the printf %a format specifier,
+ * which matches the output that is generated by the disassembler.
+ */
+static const char rv_fli_name_const[32][9] =
+{
+    "-1.0", "min", "0x1p-16", "0x1p-15",
+ "0x1p-8", "0x1p-7", "0x1p-4", "0x1p-3",
+ "0x1p-2", "0x1.4p-2", "0x1.8p-2", "0x1.cp-2",
+ "0x1p-1", "0x1.4p-1", "0x1.8p-1", "0x1.cp-1",
+ "0x1p+0", "0x1.4p+0", "0x1.8p+0", "0x1.cp+0",
+ "0x1p+1", "0x1.4p+1", "0x1.8p+1", "0x1p+2",
+ "0x1p+3", "0x1p+4", "0x1p+7", "0x1p+8",
+ "0x1p+15", "0x1p+16", "inf", "nan"
+};
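+
+/*
+ * Quick sanity check for the spellings above (entries 9 and 16): with
+ * glibc, printf("%a", 0.3125) prints "0x1.4p-2" and printf("%a", 1.0)
+ * prints "0x1p+0".
+ */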
/* pseudo-instruction constraints */
/* instruction metadata */
-const rv_opcode_data opcode_data[] = {
+const rv_opcode_data rvi_opcode_data[] = {
{ "illegal", rv_codec_illegal, rv_fmt_none, NULL, 0, 0, 0 },
- { "lui", rv_codec_u, rv_fmt_rd_imm, NULL, 0, 0, 0 },
- { "auipc", rv_codec_u, rv_fmt_rd_offset, NULL, 0, 0, 0 },
+ { "lui", rv_codec_u, rv_fmt_rd_uimm, NULL, 0, 0, 0 },
+ { "auipc", rv_codec_u, rv_fmt_rd_uoffset, NULL, 0, 0, 0 },
{ "jal", rv_codec_uj, rv_fmt_rd_offset, rvcp_jal, 0, 0, 0 },
{ "jalr", rv_codec_i, rv_fmt_rd_rs1_offset, rvcp_jalr, 0, 0, 0 },
{ "beq", rv_codec_sb, rv_fmt_rs1_rs2_offset, rvcp_beq, 0, 0, 0 },
rv_op_addi },
{ "c.addi16sp", rv_codec_ci_16sp, rv_fmt_rd_rs1_imm, NULL, rv_op_addi,
rv_op_addi, rv_op_addi, rvcd_imm_nz },
- { "c.lui", rv_codec_ci_lui, rv_fmt_rd_imm, NULL, rv_op_lui, rv_op_lui,
+ { "c.lui", rv_codec_ci_lui, rv_fmt_rd_uimm, NULL, rv_op_lui, rv_op_lui,
rv_op_lui, rvcd_imm_nz },
{ "c.srli", rv_codec_cb_sh6, rv_fmt_rd_rs1_imm, NULL, rv_op_srli,
rv_op_srli, rv_op_srli, rvcd_imm_nz },
{ "cm.jalt", rv_codec_zcmt_jt, rv_fmt_zcmt_index, NULL, 0 },
{ "czero.eqz", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "czero.nez", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+ { "fcvt.bf16.s", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "fcvt.s.bf16", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "vfncvtbf16.f.f.w", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
+ { "vfwcvtbf16.f.f.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
+ { "vfwmaccbf16.vv", rv_codec_v_r, rv_fmt_vd_vs1_vs2_vm, NULL, 0, 0, 0 },
+ { "vfwmaccbf16.vf", rv_codec_v_r, rv_fmt_vd_fs1_vs2_vm, NULL, 0, 0, 0 },
+ { "flh", rv_codec_i, rv_fmt_frd_offset_rs1, NULL, 0, 0, 0 },
+ { "fsh", rv_codec_s, rv_fmt_frs2_offset_rs1, NULL, 0, 0, 0 },
+ { "fmv.h.x", rv_codec_r, rv_fmt_frd_rs1, NULL, 0, 0, 0 },
+ { "fmv.x.h", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 },
+ { "fli.s", rv_codec_fli, rv_fmt_fli, NULL, 0, 0, 0 },
+ { "fli.d", rv_codec_fli, rv_fmt_fli, NULL, 0, 0, 0 },
+ { "fli.q", rv_codec_fli, rv_fmt_fli, NULL, 0, 0, 0 },
+ { "fli.h", rv_codec_fli, rv_fmt_fli, NULL, 0, 0, 0 },
+ { "fminm.s", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fmaxm.s", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fminm.d", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fmaxm.d", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fminm.q", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fmaxm.q", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fminm.h", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fmaxm.h", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fround.s", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "froundnx.s", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "fround.d", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "froundnx.d", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "fround.q", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "froundnx.q", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "fround.h", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "froundnx.h", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 },
+ { "fcvtmod.w.d", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 },
+ { "fmvh.x.d", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 },
+ { "fmvp.d.x", rv_codec_r, rv_fmt_frd_rs1_rs2, NULL, 0, 0, 0 },
+ { "fmvh.x.q", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 },
+ { "fmvp.q.x", rv_codec_r, rv_fmt_frd_rs1_rs2, NULL, 0, 0, 0 },
+ { "fleq.s", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fltq.s", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fleq.d", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fltq.d", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fleq.q", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fltq.q", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fleq.h", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
+ { "fltq.h", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
};
/* CSR names */
case 3: op = rv_op_vloxei8_v; break;
}
break;
+ case 1: op = rv_op_flh; break;
case 2: op = rv_op_flw; break;
case 3: op = rv_op_fld; break;
case 4: op = rv_op_flq; break;
case 3: op = rv_op_vsoxei8_v; break;
}
break;
+ case 1: op = rv_op_fsh; break;
case 2: op = rv_op_fsw; break;
case 3: op = rv_op_fsd; break;
case 4: op = rv_op_fsq; break;
switch ((inst >> 12) & 0b111) {
case 0: op = rv_op_fmin_s; break;
case 1: op = rv_op_fmax_s; break;
+ case 2: op = rv_op_fminm_s; break;
+ case 3: op = rv_op_fmaxm_s; break;
}
break;
case 21:
switch ((inst >> 12) & 0b111) {
case 0: op = rv_op_fmin_d; break;
case 1: op = rv_op_fmax_d; break;
+ case 2: op = rv_op_fminm_d; break;
+ case 3: op = rv_op_fmaxm_d; break;
+ }
+ break;
+ case 22:
+ switch (((inst >> 12) & 0b111)) {
+ case 2: op = rv_op_fminm_h; break;
+ case 3: op = rv_op_fmaxm_h; break;
}
break;
case 23:
switch ((inst >> 12) & 0b111) {
case 0: op = rv_op_fmin_q; break;
case 1: op = rv_op_fmax_q; break;
+ case 2: op = rv_op_fminm_q; break;
+ case 3: op = rv_op_fmaxm_q; break;
}
break;
case 32:
switch ((inst >> 20) & 0b11111) {
case 1: op = rv_op_fcvt_s_d; break;
case 3: op = rv_op_fcvt_s_q; break;
+ case 4: op = rv_op_fround_s; break;
+ case 5: op = rv_op_froundnx_s; break;
+ case 6: op = rv_op_fcvt_s_bf16; break;
}
break;
case 33:
switch ((inst >> 20) & 0b11111) {
case 0: op = rv_op_fcvt_d_s; break;
case 3: op = rv_op_fcvt_d_q; break;
+ case 4: op = rv_op_fround_d; break;
+ case 5: op = rv_op_froundnx_d; break;
+ }
+ break;
+ case 34:
+ switch (((inst >> 20) & 0b11111)) {
+ case 4: op = rv_op_fround_h; break;
+ case 5: op = rv_op_froundnx_h; break;
+ case 8: op = rv_op_fcvt_bf16_s; break;
}
break;
case 35:
switch ((inst >> 20) & 0b11111) {
case 0: op = rv_op_fcvt_q_s; break;
case 1: op = rv_op_fcvt_q_d; break;
+ case 4: op = rv_op_fround_q; break;
+ case 5: op = rv_op_froundnx_q; break;
}
break;
case 44:
case 0: op = rv_op_fle_s; break;
case 1: op = rv_op_flt_s; break;
case 2: op = rv_op_feq_s; break;
+ case 4: op = rv_op_fleq_s; break;
+ case 5: op = rv_op_fltq_s; break;
}
break;
case 81:
case 0: op = rv_op_fle_d; break;
case 1: op = rv_op_flt_d; break;
case 2: op = rv_op_feq_d; break;
+ case 4: op = rv_op_fleq_d; break;
+ case 5: op = rv_op_fltq_d; break;
+ }
+ break;
+ case 82:
+ switch (((inst >> 12) & 0b111)) {
+ case 4: op = rv_op_fleq_h; break;
+ case 5: op = rv_op_fltq_h; break;
}
break;
case 83:
case 0: op = rv_op_fle_q; break;
case 1: op = rv_op_flt_q; break;
case 2: op = rv_op_feq_q; break;
+ case 4: op = rv_op_fleq_q; break;
+ case 5: op = rv_op_fltq_q; break;
+ }
+ break;
+ case 89:
+ switch (((inst >> 12) & 0b111)) {
+ case 0: op = rv_op_fmvp_d_x; break;
+ }
+ break;
+ case 91:
+ switch (((inst >> 12) & 0b111)) {
+ case 0: op = rv_op_fmvp_q_x; break;
}
break;
case 96:
case 1: op = rv_op_fcvt_wu_d; break;
case 2: op = rv_op_fcvt_l_d; break;
case 3: op = rv_op_fcvt_lu_d; break;
+ case 8: op = rv_op_fcvtmod_w_d; break;
}
break;
case 99:
((inst >> 12) & 0b00000111)) {
case 0: op = rv_op_fmv_x_d; break;
case 1: op = rv_op_fclass_d; break;
+ case 8: op = rv_op_fmvh_x_d; break;
+ }
+ break;
+ case 114:
+ switch (((inst >> 17) & 0b11111000) |
+ ((inst >> 12) & 0b00000111)) {
+ case 0: op = rv_op_fmv_x_h; break;
}
break;
case 115:
((inst >> 12) & 0b00000111)) {
case 0: op = rv_op_fmv_x_q; break;
case 1: op = rv_op_fclass_q; break;
+ case 8: op = rv_op_fmvh_x_q; break;
}
break;
case 120:
switch (((inst >> 17) & 0b11111000) |
((inst >> 12) & 0b00000111)) {
case 0: op = rv_op_fmv_s_x; break;
+ case 8: op = rv_op_fli_s; break;
}
break;
case 121:
switch (((inst >> 17) & 0b11111000) |
((inst >> 12) & 0b00000111)) {
case 0: op = rv_op_fmv_d_x; break;
+ case 8: op = rv_op_fli_d; break;
+ }
+ break;
+ case 122:
+ switch (((inst >> 17) & 0b11111000) |
+ ((inst >> 12) & 0b00000111)) {
+ case 0: op = rv_op_fmv_h_x; break;
+ case 8: op = rv_op_fli_h; break;
}
break;
case 123:
switch (((inst >> 17) & 0b11111000) |
((inst >> 12) & 0b00000111)) {
case 0: op = rv_op_fmv_q_x; break;
+ case 8: op = rv_op_fli_q; break;
}
break;
}
case 10: op = rv_op_vfwcvt_f_xu_v; break;
case 11: op = rv_op_vfwcvt_f_x_v; break;
case 12: op = rv_op_vfwcvt_f_f_v; break;
+ case 13: op = rv_op_vfwcvtbf16_f_f_v; break;
case 14: op = rv_op_vfwcvt_rtz_xu_f_v; break;
case 15: op = rv_op_vfwcvt_rtz_x_f_v; break;
case 16: op = rv_op_vfncvt_xu_f_w; break;
case 21: op = rv_op_vfncvt_rod_f_f_w; break;
case 22: op = rv_op_vfncvt_rtz_xu_f_w; break;
case 23: op = rv_op_vfncvt_rtz_x_f_w; break;
+ case 29: op = rv_op_vfncvtbf16_f_f_w; break;
}
break;
case 19:
case 52: op = rv_op_vfwadd_wv; break;
case 54: op = rv_op_vfwsub_wv; break;
case 56: op = rv_op_vfwmul_vv; break;
+ case 59: op = rv_op_vfwmaccbf16_vv; break;
case 60: op = rv_op_vfwmacc_vv; break;
case 61: op = rv_op_vfwnmacc_vv; break;
case 62: op = rv_op_vfwmsac_vv; break;
case 52: op = rv_op_vfwadd_wf; break;
case 54: op = rv_op_vfwsub_wf; break;
case 56: op = rv_op_vfwmul_vf; break;
+ case 59: op = rv_op_vfwmaccbf16_vf; break;
case 60: op = rv_op_vfwmacc_vf; break;
case 61: op = rv_op_vfwnmacc_vf; break;
case 62: op = rv_op_vfwmsac_vf; break;
return ((inst << 56) >> 60);
}
+static uint32_t operand_imm6(rv_inst inst)
+{
+    return (inst << 38) >> 58;
+}
+
+static uint32_t operand_imm2(rv_inst inst)
+{
+ return (inst << 37) >> 62;
+}
+
+static uint32_t operand_immh(rv_inst inst)
+{
+ return (inst << 32) >> 58;
+}
+
+static uint32_t operand_imml(rv_inst inst)
+{
+ return (inst << 38) >> 58;
+}
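+
+/*
+ * The shift pairs above extract bit fields from the 64-bit inst without
+ * an explicit mask: << (63 - msb) followed by a logical >> (64 - width)
+ * keeps inst[msb:lsb]. E.g. operand_imm6() and operand_imml() both keep
+ * inst[25:20], since 38 == 63 - 25 and 58 == 64 - 6.
+ */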
+
static uint32_t calculate_stack_adj(rv_isa isa, uint32_t rlist, uint32_t spimm)
{
int xlen_bytes_log2 = isa == rv64 ? 3 : 2;
static void decode_inst_operands(rv_decode *dec, rv_isa isa)
{
+ const rv_opcode_data *opcode_data = dec->opcode_data;
rv_inst inst = dec->inst;
dec->codec = opcode_data[dec->op].codec;
switch (dec->codec) {
break;
case rv_codec_zcmt_jt:
dec->imm = operand_tbl_index(inst);
+ break;
+ case rv_codec_fli:
+ dec->rd = operand_rd(inst);
+ dec->imm = operand_rs1(inst);
+ break;
+ case rv_codec_r2_imm5:
+ dec->rd = operand_rd(inst);
+ dec->rs1 = operand_rs1(inst);
+ dec->imm = operand_rs2(inst);
+ break;
+ case rv_codec_r2:
+ dec->rd = operand_rd(inst);
+ dec->rs1 = operand_rs1(inst);
+ break;
+ case rv_codec_r2_imm6:
+ dec->rd = operand_rd(inst);
+ dec->rs1 = operand_rs1(inst);
+ dec->imm = operand_imm6(inst);
+ break;
+ case rv_codec_r_imm2:
+ dec->rd = operand_rd(inst);
+ dec->rs1 = operand_rs1(inst);
+ dec->rs2 = operand_rs2(inst);
+ dec->imm = operand_imm2(inst);
+ break;
+ case rv_codec_r2_immhl:
+ dec->rd = operand_rd(inst);
+ dec->rs1 = operand_rs1(inst);
+ dec->imm = operand_immh(inst);
+ dec->imm1 = operand_imml(inst);
+ break;
+ case rv_codec_r2_imm2_imm5:
+ dec->rd = operand_rd(inst);
+ dec->rs1 = operand_rs1(inst);
+ dec->imm = sextract32(operand_rs2(inst), 0, 5);
+ dec->imm1 = operand_imm2(inst);
break;
};
}
static void format_inst(char *buf, size_t buflen, size_t tab, rv_decode *dec)
{
+ const rv_opcode_data *opcode_data = dec->opcode_data;
char tmp[64];
const char *fmt;
snprintf(tmp, sizeof(tmp), "%u", ((uint32_t)dec->imm & 0b11111));
append(buf, tmp, buflen);
break;
+ case 'j':
+ snprintf(tmp, sizeof(tmp), "%d", dec->imm1);
+ append(buf, tmp, buflen);
+ break;
case 'o':
snprintf(tmp, sizeof(tmp), "%d", dec->imm);
append(buf, tmp, buflen);
dec->pc + dec->imm);
append(buf, tmp, buflen);
break;
+ case 'U':
+ fmt++;
+ snprintf(tmp, sizeof(tmp), "%d", dec->imm >> 12);
+ append(buf, tmp, buflen);
+ if (*fmt == 'o') {
+ while (strlen(buf) < tab * 2) {
+ append(buf, " ", buflen);
+ }
+ snprintf(tmp, sizeof(tmp), "# 0x%" PRIx64,
+ dec->pc + dec->imm);
+ append(buf, tmp, buflen);
+ }
+ break;
case 'c': {
const char *name = csr_name(dec->imm & 0xfff);
if (name) {
append(buf, tmp, buflen);
break;
}
+ case 'h':
+ append(buf, rv_fli_name_const[dec->imm], buflen);
+ break;
default:
break;
}
static void decode_inst_lift_pseudo(rv_decode *dec)
{
+ const rv_opcode_data *opcode_data = dec->opcode_data;
const rv_comp_data *comp_data = opcode_data[dec->op].pseudo;
if (!comp_data) {
return;
static void decode_inst_decompress_rv32(rv_decode *dec)
{
+ const rv_opcode_data *opcode_data = dec->opcode_data;
int decomp_op = opcode_data[dec->op].decomp_rv32;
if (decomp_op != rv_op_illegal) {
if ((opcode_data[dec->op].decomp_data & rvcd_imm_nz)
static void decode_inst_decompress_rv64(rv_decode *dec)
{
+ const rv_opcode_data *opcode_data = dec->opcode_data;
int decomp_op = opcode_data[dec->op].decomp_rv64;
if (decomp_op != rv_op_illegal) {
if ((opcode_data[dec->op].decomp_data & rvcd_imm_nz)
static void decode_inst_decompress_rv128(rv_decode *dec)
{
+ const rv_opcode_data *opcode_data = dec->opcode_data;
int decomp_op = opcode_data[dec->op].decomp_rv128;
if (decomp_op != rv_op_illegal) {
if ((opcode_data[dec->op].decomp_data & rvcd_imm_nz)
dec.pc = pc;
dec.inst = inst;
dec.cfg = cfg;
- decode_inst_opcode(&dec, isa);
+
+ static const struct {
+ bool (*guard_func)(const RISCVCPUConfig *);
+ const rv_opcode_data *opcode_data;
+ void (*decode_func)(rv_decode *, rv_isa);
+ } decoders[] = {
+ { always_true_p, rvi_opcode_data, decode_inst_opcode },
+ { has_xtheadba_p, xthead_opcode_data, decode_xtheadba },
+ { has_xtheadbb_p, xthead_opcode_data, decode_xtheadbb },
+ { has_xtheadbs_p, xthead_opcode_data, decode_xtheadbs },
+ { has_xtheadcmo_p, xthead_opcode_data, decode_xtheadcmo },
+ { has_xtheadcondmov_p, xthead_opcode_data, decode_xtheadcondmov },
+ { has_xtheadfmemidx_p, xthead_opcode_data, decode_xtheadfmemidx },
+ { has_xtheadfmv_p, xthead_opcode_data, decode_xtheadfmv },
+ { has_xtheadmac_p, xthead_opcode_data, decode_xtheadmac },
+ { has_xtheadmemidx_p, xthead_opcode_data, decode_xtheadmemidx },
+ { has_xtheadmempair_p, xthead_opcode_data, decode_xtheadmempair },
+ { has_xtheadsync_p, xthead_opcode_data, decode_xtheadsync },
+ { has_XVentanaCondOps_p, ventana_opcode_data, decode_xventanacondops },
+ };
+
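+    /*
+     * Try the decoders in order: the first decoder whose guard accepts
+     * the CPU configuration and that recognizes the instruction sets
+     * dec.op and provides the matching opcode_data table.
+     */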
+ for (size_t i = 0; i < ARRAY_SIZE(decoders); i++) {
+ bool (*guard_func)(const RISCVCPUConfig *) = decoders[i].guard_func;
+ const rv_opcode_data *opcode_data = decoders[i].opcode_data;
+ void (*decode_func)(rv_decode *, rv_isa) = decoders[i].decode_func;
+
+ if (guard_func(cfg)) {
+ dec.opcode_data = opcode_data;
+ decode_func(&dec, isa);
+            if (dec.op != rv_op_illegal) {
+                break;
+            }
+ }
+ }
+
+ if (dec.op == rv_op_illegal) {
+ dec.opcode_data = rvi_opcode_data;
+ }
+
decode_inst_operands(&dec, isa);
decode_inst_decompress(&dec, isa);
decode_inst_lift_pseudo(&dec);
--- /dev/null
+/*
+ * QEMU disassembler -- RISC-V specific header.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef DISAS_RISCV_H
+#define DISAS_RISCV_H
+
+#include "qemu/osdep.h"
+#include "target/riscv/cpu_cfg.h"
+
+/* types */
+
+typedef uint64_t rv_inst;
+typedef uint16_t rv_opcode;
+
+/* enums */
+
+typedef enum {
+ rv32,
+ rv64,
+ rv128
+} rv_isa;
+
+typedef enum {
+ rv_rm_rne = 0,
+ rv_rm_rtz = 1,
+ rv_rm_rdn = 2,
+ rv_rm_rup = 3,
+ rv_rm_rmm = 4,
+ rv_rm_dyn = 7,
+} rv_rm;
+
+typedef enum {
+ rv_fence_i = 8,
+ rv_fence_o = 4,
+ rv_fence_r = 2,
+ rv_fence_w = 1,
+} rv_fence;
+
+typedef enum {
+ rv_ireg_zero,
+ rv_ireg_ra,
+ rv_ireg_sp,
+ rv_ireg_gp,
+ rv_ireg_tp,
+ rv_ireg_t0,
+ rv_ireg_t1,
+ rv_ireg_t2,
+ rv_ireg_s0,
+ rv_ireg_s1,
+ rv_ireg_a0,
+ rv_ireg_a1,
+ rv_ireg_a2,
+ rv_ireg_a3,
+ rv_ireg_a4,
+ rv_ireg_a5,
+ rv_ireg_a6,
+ rv_ireg_a7,
+ rv_ireg_s2,
+ rv_ireg_s3,
+ rv_ireg_s4,
+ rv_ireg_s5,
+ rv_ireg_s6,
+ rv_ireg_s7,
+ rv_ireg_s8,
+ rv_ireg_s9,
+ rv_ireg_s10,
+ rv_ireg_s11,
+ rv_ireg_t3,
+ rv_ireg_t4,
+ rv_ireg_t5,
+ rv_ireg_t6,
+} rv_ireg;
+
+typedef enum {
+ rvc_end,
+ rvc_rd_eq_ra,
+ rvc_rd_eq_x0,
+ rvc_rs1_eq_x0,
+ rvc_rs2_eq_x0,
+ rvc_rs2_eq_rs1,
+ rvc_rs1_eq_ra,
+ rvc_imm_eq_zero,
+ rvc_imm_eq_n1,
+ rvc_imm_eq_p1,
+ rvc_csr_eq_0x001,
+ rvc_csr_eq_0x002,
+ rvc_csr_eq_0x003,
+ rvc_csr_eq_0xc00,
+ rvc_csr_eq_0xc01,
+ rvc_csr_eq_0xc02,
+ rvc_csr_eq_0xc80,
+ rvc_csr_eq_0xc81,
+ rvc_csr_eq_0xc82,
+} rvc_constraint;
+
+typedef enum {
+ rv_codec_illegal,
+ rv_codec_none,
+ rv_codec_u,
+ rv_codec_uj,
+ rv_codec_i,
+ rv_codec_i_sh5,
+ rv_codec_i_sh6,
+ rv_codec_i_sh7,
+ rv_codec_i_csr,
+ rv_codec_s,
+ rv_codec_sb,
+ rv_codec_r,
+ rv_codec_r_m,
+ rv_codec_r4_m,
+ rv_codec_r_a,
+ rv_codec_r_l,
+ rv_codec_r_f,
+ rv_codec_cb,
+ rv_codec_cb_imm,
+ rv_codec_cb_sh5,
+ rv_codec_cb_sh6,
+ rv_codec_ci,
+ rv_codec_ci_sh5,
+ rv_codec_ci_sh6,
+ rv_codec_ci_16sp,
+ rv_codec_ci_lwsp,
+ rv_codec_ci_ldsp,
+ rv_codec_ci_lqsp,
+ rv_codec_ci_li,
+ rv_codec_ci_lui,
+ rv_codec_ci_none,
+ rv_codec_ciw_4spn,
+ rv_codec_cj,
+ rv_codec_cj_jal,
+ rv_codec_cl_lw,
+ rv_codec_cl_ld,
+ rv_codec_cl_lq,
+ rv_codec_cr,
+ rv_codec_cr_mv,
+ rv_codec_cr_jalr,
+ rv_codec_cr_jr,
+ rv_codec_cs,
+ rv_codec_cs_sw,
+ rv_codec_cs_sd,
+ rv_codec_cs_sq,
+ rv_codec_css_swsp,
+ rv_codec_css_sdsp,
+ rv_codec_css_sqsp,
+ rv_codec_k_bs,
+ rv_codec_k_rnum,
+ rv_codec_v_r,
+ rv_codec_v_ldst,
+ rv_codec_v_i,
+ rv_codec_vsetvli,
+ rv_codec_vsetivli,
+ rv_codec_zcb_ext,
+ rv_codec_zcb_mul,
+ rv_codec_zcb_lb,
+ rv_codec_zcb_lh,
+ rv_codec_zcmp_cm_pushpop,
+ rv_codec_zcmp_cm_mv,
+ rv_codec_zcmt_jt,
+ rv_codec_r2_imm5,
+ rv_codec_r2,
+ rv_codec_r2_imm6,
+ rv_codec_r_imm2,
+ rv_codec_r2_immhl,
+ rv_codec_r2_imm2_imm5,
+ rv_codec_fli,
+} rv_codec;
+
+/* structures */
+
+typedef struct {
+ const int op;
+ const rvc_constraint *constraints;
+} rv_comp_data;
+
+typedef struct {
+ const char * const name;
+ const rv_codec codec;
+ const char * const format;
+ const rv_comp_data *pseudo;
+ const short decomp_rv32;
+ const short decomp_rv64;
+ const short decomp_rv128;
+ const short decomp_data;
+} rv_opcode_data;
+
+typedef struct {
+ RISCVCPUConfig *cfg;
+ uint64_t pc;
+ uint64_t inst;
+ const rv_opcode_data *opcode_data;
+ int32_t imm;
+ int32_t imm1;
+ uint16_t op;
+ uint8_t codec;
+ uint8_t rd;
+ uint8_t rs1;
+ uint8_t rs2;
+ uint8_t rs3;
+ uint8_t rm;
+ uint8_t pred;
+ uint8_t succ;
+ uint8_t aq;
+ uint8_t rl;
+ uint8_t bs;
+ uint8_t rnum;
+ uint8_t vm;
+ uint32_t vzimm;
+ uint8_t rlist;
+} rv_decode;
+
+enum {
+ rv_op_illegal = 0
+};
+
+enum {
+ rvcd_imm_nz = 0x1
+};
+
+/* instruction formats */
+
+#define rv_fmt_none "O\t"
+#define rv_fmt_rs1 "O\t1"
+#define rv_fmt_offset "O\to"
+#define rv_fmt_pred_succ "O\tp,s"
+#define rv_fmt_rs1_rs2 "O\t1,2"
+#define rv_fmt_rd_imm "O\t0,i"
+#define rv_fmt_rd_uimm "O\t0,Ui"
+#define rv_fmt_rd_offset "O\t0,o"
+#define rv_fmt_rd_uoffset "O\t0,Uo"
+#define rv_fmt_rd_rs1_rs2 "O\t0,1,2"
+#define rv_fmt_frd_rs1 "O\t3,1"
+#define rv_fmt_frd_rs1_rs2 "O\t3,1,2"
+#define rv_fmt_frd_frs1 "O\t3,4"
+#define rv_fmt_rd_frs1 "O\t0,4"
+#define rv_fmt_rd_frs1_frs2 "O\t0,4,5"
+#define rv_fmt_frd_frs1_frs2 "O\t3,4,5"
+#define rv_fmt_rm_frd_frs1 "O\tr,3,4"
+#define rv_fmt_rm_frd_rs1 "O\tr,3,1"
+#define rv_fmt_rm_rd_frs1 "O\tr,0,4"
+#define rv_fmt_rm_frd_frs1_frs2 "O\tr,3,4,5"
+#define rv_fmt_rm_frd_frs1_frs2_frs3 "O\tr,3,4,5,6"
+#define rv_fmt_rd_rs1_imm "O\t0,1,i"
+#define rv_fmt_rd_rs1_offset "O\t0,1,i"
+#define rv_fmt_rd_offset_rs1 "O\t0,i(1)"
+#define rv_fmt_frd_offset_rs1 "O\t3,i(1)"
+#define rv_fmt_rd_csr_rs1 "O\t0,c,1"
+#define rv_fmt_rd_csr_zimm "O\t0,c,7"
+#define rv_fmt_rs2_offset_rs1 "O\t2,i(1)"
+#define rv_fmt_frs2_offset_rs1 "O\t5,i(1)"
+#define rv_fmt_rs1_rs2_offset "O\t1,2,o"
+#define rv_fmt_rs2_rs1_offset "O\t2,1,o"
+#define rv_fmt_aqrl_rd_rs2_rs1 "OAR\t0,2,(1)"
+#define rv_fmt_aqrl_rd_rs1 "OAR\t0,(1)"
+#define rv_fmt_rd "O\t0"
+#define rv_fmt_rd_zimm "O\t0,7"
+#define rv_fmt_rd_rs1 "O\t0,1"
+#define rv_fmt_rd_rs2 "O\t0,2"
+#define rv_fmt_rs1_offset "O\t1,o"
+#define rv_fmt_rs2_offset "O\t2,o"
+#define rv_fmt_rs1_rs2_bs "O\t1,2,b"
+#define rv_fmt_rd_rs1_rnum "O\t0,1,n"
+#define rv_fmt_ldst_vd_rs1_vm "O\tD,(1)m"
+#define rv_fmt_ldst_vd_rs1_rs2_vm "O\tD,(1),2m"
+#define rv_fmt_ldst_vd_rs1_vs2_vm "O\tD,(1),Fm"
+#define rv_fmt_vd_vs2_vs1 "O\tD,F,E"
+#define rv_fmt_vd_vs2_vs1_vl "O\tD,F,El"
+#define rv_fmt_vd_vs2_vs1_vm "O\tD,F,Em"
+#define rv_fmt_vd_vs2_rs1_vl "O\tD,F,1l"
+#define rv_fmt_vd_vs2_fs1_vl "O\tD,F,4l"
+#define rv_fmt_vd_vs2_rs1_vm "O\tD,F,1m"
+#define rv_fmt_vd_vs2_fs1_vm "O\tD,F,4m"
+#define rv_fmt_vd_vs2_imm_vl "O\tD,F,il"
+#define rv_fmt_vd_vs2_imm_vm "O\tD,F,im"
+#define rv_fmt_vd_vs2_uimm_vm "O\tD,F,um"
+#define rv_fmt_vd_vs1_vs2_vm "O\tD,E,Fm"
+#define rv_fmt_vd_rs1_vs2_vm "O\tD,1,Fm"
+#define rv_fmt_vd_fs1_vs2_vm "O\tD,4,Fm"
+#define rv_fmt_vd_vs1 "O\tD,E"
+#define rv_fmt_vd_rs1 "O\tD,1"
+#define rv_fmt_vd_fs1 "O\tD,4"
+#define rv_fmt_vd_imm "O\tD,i"
+#define rv_fmt_vd_vs2 "O\tD,F"
+#define rv_fmt_vd_vs2_vm "O\tD,Fm"
+#define rv_fmt_rd_vs2_vm "O\t0,Fm"
+#define rv_fmt_rd_vs2 "O\t0,F"
+#define rv_fmt_fd_vs2 "O\t3,F"
+#define rv_fmt_vd_vm "O\tDm"
+#define rv_fmt_vsetvli "O\t0,1,v"
+#define rv_fmt_vsetivli "O\t0,u,v"
+#define rv_fmt_rs1_rs2_zce_ldst "O\t2,i(1)"
+#define rv_fmt_push_rlist "O\tx,-i"
+#define rv_fmt_pop_rlist "O\tx,i"
+#define rv_fmt_zcmt_index "O\ti"
+#define rv_fmt_rd_rs1_rs2_imm "O\t0,1,2,i"
+#define rv_fmt_frd_rs1_rs2_imm "O\t3,1,2,i"
+#define rv_fmt_rd_rs1_immh_imml "O\t0,1,i,j"
+#define rv_fmt_rd_rs1_immh_imml_addr "O\t0,(1),i,j"
+#define rv_fmt_rd2_imm "O\t0,2,(1),i"
+#define rv_fmt_fli "O\t3,h"
+
+#endif /* DISAS_RISCV_H */
Use ``-run-with async-teardown=on`` instead.
+``-chroot`` (since 8.1)
+'''''''''''''''''''''''
+
+Use ``-run-with chroot=dir`` instead.
+
``-singlestep`` (since 8.1)
'''''''''''''''''''''''''''
.. code-block:: c
- int somefunc(void) {
+ int somefunc(void)
+ {
int ret = -1;
char *foo = g_strdup_printf("foo%", "wibble");
GList *bar = .....
.. code-block:: c
- int somefunc(void) {
+ int somefunc(void)
+ {
g_autofree char *foo = g_strdup_printf("foo%", "wibble");
g_autoptr (GList) bar = .....
.. code-block:: c
- char *somefunc(void) {
+ char *somefunc(void)
+ {
g_autofree char *foo = g_strdup_printf("foo%", "wibble");
g_autoptr (GList) bar = .....
def test(self):
do_something()
+QEMU_TEST_FLAKY_TESTS
+^^^^^^^^^^^^^^^^^^^^^
+Some tests are not working reliably and thus are disabled by default.
+Set this environment variable to enable them.
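+
+For instance (assuming the usual qtest invocation), a run that also
+exercises the flaky tests looks like::
+
+  QEMU_TEST_FLAKY_TESTS=1 make check-qtest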
+
Uninstalling Avocado
~~~~~~~~~~~~~~~~~~~~
Comma-separated list of RPCs to disable (no spaces, use ``help`` to
list available RPCs).
+.. option:: -a, --allow-rpcs=LIST
+
+ Comma-separated list of RPCs to enable (no spaces, use ``help`` to
+ list available RPCs).
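+
+  For example (RPC names are from the standard guest agent schema), to
+  expose only a ping and basic guest info::
+
+    qemu-ga --allow-rpcs=guest-ping,guest-info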
+
.. option:: -D, --dump-conf
Dump the configuration in a format compatible with ``qemu-ga.conf``
:fourcc: ``i32``, the DMABUF fourcc
+VhostUserGpuEdidRequest
+^^^^^^^^^^^^^^^^^^^^^^^
+
++------------+
+| scanout-id |
++------------+
+
+:scanout-id: ``u32``, the scanout to get the EDID from
+
+
C structure
-----------
VhostUserGpuScanout scanout;
VhostUserGpuUpdate update;
VhostUserGpuDMABUFScanout dmabuf_scanout;
+ VhostUserGpuEdidRequest edid_req;
+ struct virtio_gpu_resp_edid resp_edid;
struct virtio_gpu_resp_display_info display_info;
uint64_t u64;
} payload;
Protocol features
-----------------
-None yet.
+.. code:: c
+
+ #define VHOST_USER_GPU_PROTOCOL_F_EDID 0
-As the protocol may need to evolve, new messages and communication
-changes are negotiated thanks to preliminary
+New messages and communication changes are negotiated via the
``VHOST_USER_GPU_GET_PROTOCOL_FEATURES`` and
``VHOST_USER_GPU_SET_PROTOCOL_FEATURES`` requests.
Note: there is no data payload, since the scanout is shared via
DMABUF, which must have been set previously with
``VHOST_USER_GPU_DMABUF_SCANOUT``.
+
+``VHOST_USER_GPU_GET_EDID``
+ :id: 11
+ :request payload: ``struct VhostUserGpuEdidRequest``
+ :reply payload: ``struct virtio_gpu_resp_edid`` (from virtio specification)
+
+ Retrieve the EDID data for a given scanout.
+ This message requires the ``VHOST_USER_GPU_PROTOCOL_F_EDID`` protocol
+ feature to be supported.
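+
+  For example, a frontend might fill the request as follows (a sketch
+  based on the C structures shown above)::
+
+    VhostUserGpuMsg msg = {
+        .request = VHOST_USER_GPU_GET_EDID,
+        .size = sizeof(VhostUserGpuEdidRequest),
+        .payload.edid_req = { .scanout_id = 0 },
+    };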
...
int ret = pcie_endpoint_cap_init(d, 0x70);
...
- pcie_ari_init(d, 0x100, 1);
+ pcie_ari_init(d, 0x100);
...
/* Add and initialize the SR/IOV capability */
...
int ret = pcie_endpoint_cap_init(d, 0x60);
...
- pcie_ari_init(d, 0x100, 1);
+ pcie_ari_init(d, 0x100);
...
memory_region_init(mr, ... )
pcie_sriov_vf_register_bar(d, bar_nr, mr);
When this option is "on", ACLINT devices will be emulated instead of
SiFive CLINT. When not specified, this option is assumed to be "off".
+ This option is restricted to the TCG accelerator.
- aia=[none|aplic|aplic-imsic]
When using the ``sifive_u`` or ``virt`` machine there are three different
firmware boot options:
-1. ``-bios default`` - This is the default behaviour if no -bios option
-is included. This option will load the default OpenSBI firmware automatically.
-The firmware is included with the QEMU release and no user interaction is
-required. All a user needs to do is specify the kernel they want to boot
-with the -kernel option
-2. ``-bios none`` - QEMU will not automatically load any firmware. It is up
-to the user to load all the images they need.
-3. ``-bios <file>`` - Tells QEMU to load the specified file as the firmware.
+
+* ``-bios default``
+
+This is the default behaviour if no ``-bios`` option is included. This option
+will load the default OpenSBI firmware automatically. The firmware is included
+with the QEMU release and no user interaction is required. All a user needs to
+do is specify the kernel they want to boot with the ``-kernel`` option.
+
+* ``-bios none``
+
+QEMU will not automatically load any firmware. It is up to the user to load all
+the images they need.
+
+* ``-bios <file>``
+
+Tells QEMU to load the specified file as the firmware.
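+
+For example (illustrative paths), booting a kernel with the bundled
+OpenSBI firmware::
+
+  qemu-system-riscv64 -M virt -bios default -kernel /path/to/Image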
hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
int irq = sbsa_ref_irqmap[SBSA_XHCI];
DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
+ qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
#include "hw/arm/smmuv3.h"
#include "hw/acpi/acpi.h"
#include "target/arm/internals.h"
-#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#include "hw/acpi/generic_event_device.h"
-#include "hw/virtio/virtio-mem-pci.h"
+#include "hw/virtio/virtio-md-pci.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/char/pl011.h"
#include "qemu/guest-random.h"
virt_set_high_memmap(vms, base, pa_bits);
if (device_memory_size > 0) {
- ms->device_memory = g_malloc0(sizeof(*ms->device_memory));
- ms->device_memory->base = device_memory_base;
- memory_region_init(&ms->device_memory->mr, OBJECT(vms),
- "device-memory", device_memory_size);
+ machine_memory_devices_init(ms, device_memory_base, device_memory_size);
}
}
memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base,
machine->ram);
- if (machine->device_memory) {
- memory_region_add_subregion(sysmem, machine->device_memory->base,
- &machine->device_memory->mr);
- }
virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);
dev, &error_abort);
}
-static void virt_virtio_md_pci_pre_plug(HotplugHandler *hotplug_dev,
- DeviceState *dev, Error **errp)
-{
- HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
- Error *local_err = NULL;
-
- if (!hotplug_dev2 && dev->hotplugged) {
- /*
- * Without a bus hotplug handler, we cannot control the plug/unplug
- * order. We should never reach this point when hotplugging on ARM.
- * However, it's nice to add a safety net, similar to what we have
- * on x86.
- */
- error_setg(errp, "hotplug of virtio based memory devices not supported"
- " on this bus.");
- return;
- }
- /*
- * First, see if we can plug this memory device at all. If that
- * succeeds, branch of to the actual hotplug handler.
- */
- memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL,
- &local_err);
- if (!local_err && hotplug_dev2) {
- hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err);
- }
- error_propagate(errp, local_err);
-}
-
-static void virt_virtio_md_pci_plug(HotplugHandler *hotplug_dev,
- DeviceState *dev, Error **errp)
-{
- HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
- Error *local_err = NULL;
-
- /*
- * Plug the memory device first and then branch off to the actual
- * hotplug handler. If that one fails, we can easily undo the memory
- * device bits.
- */
- memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
- if (hotplug_dev2) {
- hotplug_handler_plug(hotplug_dev2, dev, &local_err);
- if (local_err) {
- memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
- }
- }
- error_propagate(errp, local_err);
-}
-
-static void virt_virtio_md_pci_unplug_request(HotplugHandler *hotplug_dev,
- DeviceState *dev, Error **errp)
-{
- /* We don't support hot unplug of virtio based memory devices */
- error_setg(errp, "virtio based memory devices cannot be unplugged.");
-}
-
-
static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_memory_pre_plug(hotplug_dev, dev, errp);
- } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
- virt_virtio_md_pci_pre_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_pre_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
hwaddr db_start = 0, db_end = 0;
char *resv_prop_str;
SYS_BUS_DEVICE(dev));
}
}
+
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_memory_plug(hotplug_dev, dev, errp);
- }
-
- if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
- virt_virtio_md_pci_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
}
if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_dimm_unplug_request(hotplug_dev, dev, errp);
- } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
- virt_virtio_md_pci_unplug_request(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_unplug_request(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev),
+ errp);
} else {
error_setg(errp, "device unplug request for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_dimm_unplug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_unplug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
} else {
error_setg(errp, "virt: device unplug for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
if (device_is_dynamic_sysbus(mc, dev) ||
object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
return HOTPLUG_HANDLER(machine);
}
memory_region_transaction_commit();
- /*
- * These fields are visible to the IOThread so we rely on implicit barriers
- * in aio_context_acquire() on the write side and aio_notify_accept() on
- * the read side.
- */
- s->starting = false;
- vblk->dataplane_started = true;
trace_virtio_blk_data_plane_start(s);
old_context = blk_get_aio_context(s->conf->conf.blk);
event_notifier_set(virtio_queue_get_host_notifier(vq));
}
+ /*
+ * These fields must be visible to the IOThread when it processes the
+ * virtqueue, otherwise it will think dataplane has not started yet.
+ *
+ * Make sure ->dataplane_started is false when blk_set_aio_context() is
+ * called above so that draining does not cause the host notifier to be
+ * detached/attached prematurely.
+ */
+ s->starting = false;
+ vblk->dataplane_started = true;
+ smp_wmb(); /* paired with aio_notify_accept() on the read side */
+
/* Get this show started by hooking up our callbacks */
if (!blk_in_drain(s->conf->conf.blk)) {
aio_context_acquire(s->ctx);
fail_guest_notifiers:
vblk->dataplane_disabled = true;
s->starting = false;
- vblk->dataplane_started = true;
return -ENOSYS;
}
aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
}
- aio_context_acquire(s->ctx);
-
- /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
- blk_drain(s->conf->conf.blk);
-
- /*
- * Try to switch bs back to the QEMU main loop. If other users keep the
- * BlockBackend in the iothread, that's ok
- */
- blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
-
- aio_context_release(s->ctx);
-
/*
* Batch all the host notifiers in a single transaction to avoid
* quadratic time complexity in address_space_update_ioeventfds().
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}
+ /*
+ * Set ->dataplane_started to false before draining so that host notifiers
+ * are not detached/attached anymore.
+ */
+ vblk->dataplane_started = false;
+
+ aio_context_acquire(s->ctx);
+
+ /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
+ blk_drain(s->conf->conf.blk);
+
+ /*
+ * Try to switch bs back to the QEMU main loop. If other users keep the
+ * BlockBackend in the iothread, that's ok
+ */
+ blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
+
+ aio_context_release(s->ctx);
+
qemu_bh_cancel(s->bh);
notify_guest_bh(s); /* final chance to notify guest */
/* Clean up guest notifier (irq) */
k->set_guest_notifiers(qbus->parent, nvqs, false);
- vblk->dataplane_started = false;
s->stopping = false;
}
return;
}
}
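
Editor's note: the start path above publishes s->starting and dataplane_started with an explicit smp_wmb(), while the stop path clears dataplane_started before draining. A minimal self-contained sketch of the same publish/consume shape, using C11 atomics rather than QEMU's smp_wmb()/aio_notify_accept() pairing (all names here are illustrative only):

    #include <stdatomic.h>
    #include <stdbool.h>

    static int payload;            /* state published before the flag */
    static atomic_bool started;

    static void producer(void)     /* cf. the main-loop start path */
    {
        payload = 42;                                /* plain stores first */
        atomic_store_explicit(&started, true,
                              memory_order_release); /* release = store + write barrier */
    }

    static void consumer(void)     /* cf. the IOThread virtqueue handler */
    {
        if (atomic_load_explicit(&started, memory_order_acquire)) {
            /* acquire pairs with release: payload is guaranteed visible here */
            (void)payload;
        }
    }

    int main(void) { producer(); consumer(); return 0; }
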
+
+unsigned int machine_topo_get_cores_per_socket(const MachineState *ms)
+{
+ return ms->smp.cores * ms->smp.clusters * ms->smp.dies;
+}
+
+unsigned int machine_topo_get_threads_per_socket(const MachineState *ms)
+{
+ return ms->smp.threads * machine_topo_get_cores_per_socket(ms);
+}
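
Editor's note: the two helpers simply fold the smp hierarchy (threads per core, cores per cluster, clusters per die, dies per socket). A hypothetical caller, assuming a MachineState *ms configured with -smp sockets=2,dies=2,clusters=1,cores=4,threads=2:

    unsigned int cores   = machine_topo_get_cores_per_socket(ms);   /* 4 * 1 * 2 = 8  */
    unsigned int threads = machine_topo_get_threads_per_socket(ms); /* 2 * 8     = 16 */
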
GlobalProperty hw_compat_8_0[] = {
{ "migration", "multifd-flush-after-each-section", "on"},
+ { TYPE_PCI_DEVICE, "x-pcie-ari-nextfn-1", "on" },
};
const size_t hw_compat_8_0_len = G_N_ELEMENTS(hw_compat_8_0);
VHOST_USER_GPU_UPDATE,
VHOST_USER_GPU_DMABUF_SCANOUT,
VHOST_USER_GPU_DMABUF_UPDATE,
+ VHOST_USER_GPU_GET_EDID,
} VhostUserGpuRequest;
typedef struct VhostUserGpuDisplayInfoReply {
int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;
+typedef struct VhostUserGpuEdidRequest {
+ uint32_t scanout_id;
+} QEMU_PACKED VhostUserGpuEdidRequest;
+
typedef struct VhostUserGpuMsg {
uint32_t request; /* VhostUserGpuRequest */
uint32_t flags;
VhostUserGpuScanout scanout;
VhostUserGpuUpdate update;
VhostUserGpuDMABUFScanout dmabuf_scanout;
+ VhostUserGpuEdidRequest edid_req;
+ struct virtio_gpu_resp_edid resp_edid;
struct virtio_gpu_resp_display_info display_info;
uint64_t u64;
} payload;
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
+#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
+
static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
static void
.request = msg->request,
.flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
.size = sizeof(uint64_t),
+ .payload = {
+ .u64 = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID)
+ }
};
vhost_user_gpu_send_msg(g, &reply);
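
Editor's note: protocol features travel as a u64 bitmask in payload.u64, so EDID support is simply bit VHOST_USER_GPU_PROTOCOL_F_EDID of the mask. A sketch of the test a backend might perform on the returned value, given the definitions above (the helper name is made up for illustration):

    static bool backend_has_edid(uint64_t protocol_features)
    {
        return protocol_features & (1ull << VHOST_USER_GPU_PROTOCOL_F_EDID);
    }
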
vhost_user_gpu_send_msg(g, &reply);
break;
}
+ case VHOST_USER_GPU_GET_EDID: {
+ VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
+ struct virtio_gpu_resp_edid resp = { {} };
+ VhostUserGpuMsg reply = {
+ .request = msg->request,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ .size = sizeof(reply.payload.resp_edid),
+ };
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ error_report("invalid scanout: %d", m->scanout_id);
+ break;
+ }
+
+ resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
+ virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
+ memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
+ vhost_user_gpu_send_msg(g, &reply);
+ break;
+ }
case VHOST_USER_GPU_SCANOUT: {
VhostUserGpuScanout *m = &msg->payload.scanout;
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of the configure interrupt's IDX. If this driver does not
* support, the function will return
*/
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of the configure interrupt's IDX. If this driver does not
* support, the function will return
*/
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
+#include "hw/display/edid.h"
#include "trace.h"
void
}
}
+void
+virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
+ struct virtio_gpu_resp_edid *edid)
+{
+ qemu_edid_info info = {
+ .width_mm = g->req_state[scanout].width_mm,
+ .height_mm = g->req_state[scanout].height_mm,
+ .prefx = g->req_state[scanout].width,
+ .prefy = g->req_state[scanout].height,
+ .refresh_rate = g->req_state[scanout].refresh_rate,
+ };
+
+ edid->size = cpu_to_le32(sizeof(edid->edid));
+ qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
+}
+
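
Editor's note: the helper packs the per-scanout preferred resolution, physical size, and refresh rate into a qemu_edid_info and renders a standard EDID blob; size is stored little-endian because the structure crosses the virtio/vhost-user wire. A minimal standalone use of the same generator (the 1024-byte buffer mirrors the edid array in struct virtio_gpu_resp_edid; the mode values are assumptions):

    uint8_t blob[1024];
    qemu_edid_info info = {
        .prefx = 1024,   /* preferred width  */
        .prefy = 768,    /* preferred height */
    };
    qemu_edid_generate(blob, sizeof(blob), &info);
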
static void virtio_gpu_invalidate_display(void *opaque)
{
}
}
dmabuf = g_new0(VGPUDMABuf, 1);
- dmabuf->buf.width = fb->width;
- dmabuf->buf.height = fb->height;
+ dmabuf->buf.width = r->width;
+ dmabuf->buf.height = r->height;
dmabuf->buf.stride = fb->stride;
dmabuf->buf.x = r->x;
dmabuf->buf.y = r->y;
- dmabuf->buf.scanout_width = r->width;
- dmabuf->buf.scanout_height = r->height;
+ dmabuf->buf.backing_width = fb->width;
+ dmabuf->buf.backing_height = fb->height;
dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
dmabuf->buf.fd = res->dmabuf_fd;
dmabuf->buf.allow_fences = true;
g->dmabuf.primary[scanout_id] = new_primary;
qemu_console_resize(scanout->con,
- new_primary->buf.scanout_width,
- new_primary->buf.scanout_height);
+ new_primary->buf.width,
+ new_primary->buf.height);
dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf);
if (old_primary) {
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
sizeof(display_info));
}
-static void
-virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
- struct virtio_gpu_resp_edid *edid)
-{
- VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
- qemu_edid_info info = {
- .width_mm = b->req_state[scanout].width_mm,
- .height_mm = b->req_state[scanout].height_mm,
- .prefx = b->req_state[scanout].width,
- .prefy = b->req_state[scanout].height,
- .refresh_rate = b->req_state[scanout].refresh_rate,
- };
-
- edid->size = cpu_to_le32(sizeof(edid->edid));
- qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
-}
-
void virtio_gpu_get_edid(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
memset(&edid, 0, sizeof(edid));
edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
- virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
+ virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
goto end;
}
#endif
- res->image = pixman_image_create_bits(pformat,
- c2d.width,
- c2d.height,
- bits, res->hostmem / c2d.height);
+ res->image = pixman_image_create_bits(
+ pformat,
+ c2d.width,
+ c2d.height,
+ bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
if (res->image) {
pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
return -EINVAL;
}
#endif
- res->image = pixman_image_create_bits(pformat,
- res->width, res->height,
- bits, res->hostmem / res->height);
+ res->image = pixman_image_create_bits(
+ pformat,
+ res->width, res->height,
+ bits, res->height ? res->hostmem / res->height : 0);
if (!res->image) {
g_free(res);
return -EINVAL;
VirtIOGPU *g = VIRTIO_GPU(vdev);
struct virtio_gpu_simple_resource *res, *tmp;
struct virtio_gpu_ctrl_command *cmd;
+ int i = 0;
QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
virtio_gpu_resource_destroy(g, res);
g_free(cmd);
}
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
+ dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
+ }
+
virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
MachineClass *mc = MACHINE_GET_CLASS(machine);
X86MachineState *x86ms = X86_MACHINE(machine);
const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
- PCMachineState *pcms = PC_MACHINE(machine);
int nb_numa_nodes = machine->numa_state->num_nodes;
NodeInfo *numa_info = machine->numa_state->nodes;
- ram_addr_t hotpluggable_address_space_size =
- object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE,
- NULL);
AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = x86ms->oem_id,
.oem_table_id = x86ms->oem_table_id };
* Memory devices may override proximity set by this entry,
* providing _PXM method if necessary.
*/
- if (hotpluggable_address_space_size) {
+ if (machine->device_memory) {
build_srat_memory(table_data, machine->device_memory->base,
- hotpluggable_address_space_size, nb_numa_nodes - 1,
+ memory_region_size(&machine->device_memory->mr),
+ nb_numa_nodes - 1,
MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
}
#include "hw/net/ne2000-isa.h"
#include "standard-headers/asm-x86/bootparam.h"
#include "hw/virtio/virtio-iommu.h"
-#include "hw/virtio/virtio-pmem-pci.h"
-#include "hw/virtio/virtio-mem-pci.h"
+#include "hw/virtio/virtio-md-pci.h"
#include "hw/i386/kvm/xen_overlay.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/kvm/xen_gnttab.h"
#include "hw/i386/kvm/xen_xenstore.h"
-#include "hw/mem/memory-device.h"
#include "sysemu/replay.h"
#include "target/i386/cpu.h"
#include "e820_memory_layout.h"
exit(EXIT_FAILURE);
}
- /* always allocate the device memory information */
- machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
-
/* initialize device memory address space */
if (pcmc->has_reserved_memory &&
(machine->ram_size < machine->maxram_size)) {
ram_addr_t device_mem_size;
+ hwaddr device_mem_base;
if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) {
error_report("unsupported amount of memory slots: %"PRIu64,
exit(EXIT_FAILURE);
}
- pc_get_device_memory_range(pcms, &machine->device_memory->base, &device_mem_size);
+ pc_get_device_memory_range(pcms, &device_mem_base, &device_mem_size);
- if ((machine->device_memory->base + device_mem_size) <
- device_mem_size) {
+ if (device_mem_base + device_mem_size < device_mem_size) {
error_report("unsupported amount of maximum memory: " RAM_ADDR_FMT,
machine->maxram_size);
exit(EXIT_FAILURE);
}
-
- memory_region_init(&machine->device_memory->mr, OBJECT(pcms),
- "device-memory", device_mem_size);
- memory_region_add_subregion(system_memory, machine->device_memory->base,
- &machine->device_memory->mr);
+ machine_memory_devices_init(machine, device_mem_base, device_mem_size);
}
if (pcms->cxl_devices_state.is_enabled) {
rom_set_fw(fw_cfg);
- if (pcmc->has_reserved_memory && machine->device_memory->base) {
+ if (machine->device_memory) {
uint64_t *val = g_malloc(sizeof(*val));
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
uint64_t res_mem_end = machine->device_memory->base;
error_propagate(errp, local_err);
}
-static void pc_virtio_md_pci_pre_plug(HotplugHandler *hotplug_dev,
- DeviceState *dev, Error **errp)
-{
- HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
- Error *local_err = NULL;
-
- if (!hotplug_dev2 && dev->hotplugged) {
- /*
- * Without a bus hotplug handler, we cannot control the plug/unplug
- * order. We should never reach this point when hotplugging on x86,
- * however, better add a safety net.
- */
- error_setg(errp, "hotplug of virtio based memory devices not supported"
- " on this bus.");
- return;
- }
- /*
- * First, see if we can plug this memory device at all. If that
- * succeeds, branch of to the actual hotplug handler.
- */
- memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL,
- &local_err);
- if (!local_err && hotplug_dev2) {
- hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err);
- }
- error_propagate(errp, local_err);
-}
-
-static void pc_virtio_md_pci_plug(HotplugHandler *hotplug_dev,
- DeviceState *dev, Error **errp)
-{
- HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
- Error *local_err = NULL;
-
- /*
- * Plug the memory device first and then branch off to the actual
- * hotplug handler. If that one fails, we can easily undo the memory
- * device bits.
- */
- memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
- if (hotplug_dev2) {
- hotplug_handler_plug(hotplug_dev2, dev, &local_err);
- if (local_err) {
- memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
- }
- }
- error_propagate(errp, local_err);
-}
-
-static void pc_virtio_md_pci_unplug_request(HotplugHandler *hotplug_dev,
- DeviceState *dev, Error **errp)
-{
- /* We don't support hot unplug of virtio based memory devices */
- error_setg(errp, "virtio based memory devices cannot be unplugged.");
-}
-
-static void pc_virtio_md_pci_unplug(HotplugHandler *hotplug_dev,
- DeviceState *dev, Error **errp)
-{
- /* We don't support hot unplug of virtio based memory devices */
-}
-
static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
pc_memory_pre_plug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
x86_cpu_pre_plug(hotplug_dev, dev, errp);
- } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
- pc_virtio_md_pci_pre_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_pre_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
/* Declare the APIC range as the reserved MSI region */
char *resv_prop_str = g_strdup_printf("0xfee00000:0xfeefffff:%d",
pc_memory_plug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
x86_cpu_plug(hotplug_dev, dev, errp);
- } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
- pc_virtio_md_pci_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
}
}
pc_memory_unplug_request(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
x86_cpu_unplug_request_cb(hotplug_dev, dev, errp);
- } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
- pc_virtio_md_pci_unplug_request(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_unplug_request(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev),
+ errp);
} else {
error_setg(errp, "acpi: device unplug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
pc_memory_unplug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
x86_cpu_unplug_cb(hotplug_dev, dev, errp);
- } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
- pc_virtio_md_pci_unplug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_unplug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
} else {
error_setg(errp, "acpi: device unplug for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
object_dynamic_cast(OBJECT(dev), TYPE_CPU) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE)) {
return HOTPLUG_HANDLER(machine);
return NULL;
}
-static void
-pc_machine_get_device_memory_region_size(Object *obj, Visitor *v,
- const char *name, void *opaque,
- Error **errp)
-{
- MachineState *ms = MACHINE(obj);
- int64_t value = 0;
-
- if (ms->device_memory) {
- value = memory_region_size(&ms->device_memory->mr);
- }
-
- visit_type_int(v, name, &value, errp);
-}
-
static void pc_machine_get_vmport(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
object_class_property_set_description(oc, PC_MACHINE_MAX_RAM_BELOW_4G,
"Maximum ram below the 4G boundary (32bit boundary)");
- object_class_property_add(oc, PC_MACHINE_DEVMEM_REGION_SIZE, "int",
- pc_machine_get_device_memory_region_size, NULL,
- NULL, NULL);
-
object_class_property_add(oc, PC_MACHINE_VMPORT, "OnOffAuto",
pc_machine_get_vmport, pc_machine_set_vmport,
NULL, NULL);
X86MachineState *x86ms = X86_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *system_io = get_system_io();
- PCIBus *pci_bus;
+ PCIBus *pci_bus = NULL;
ISABus *isa_bus;
int piix3_devfn = -1;
qemu_irq smi_irq;
BusState *idebus[MAX_IDE_BUS];
ISADevice *rtc_state;
MemoryRegion *ram_memory;
- MemoryRegion *pci_memory;
- MemoryRegion *rom_memory;
+ MemoryRegion *pci_memory = NULL;
+ MemoryRegion *rom_memory = system_memory;
ram_addr_t lowmem;
- uint64_t hole64_size;
- DeviceState *i440fx_host;
+ uint64_t hole64_size = 0;
/*
* Calculate ram split, for memory below and above 4G. It's a bit
}
if (pcmc->pci_enabled) {
+ Object *phb;
+
pci_memory = g_new(MemoryRegion, 1);
memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
rom_memory = pci_memory;
- i440fx_host = qdev_new(host_type);
- hole64_size = object_property_get_uint(OBJECT(i440fx_host),
+
+ phb = OBJECT(qdev_new(host_type));
+ object_property_add_child(OBJECT(machine), "i440fx", phb);
+ object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM,
+ OBJECT(ram_memory), &error_fatal);
+ object_property_set_link(phb, PCI_HOST_PROP_PCI_MEM,
+ OBJECT(pci_memory), &error_fatal);
+ object_property_set_link(phb, PCI_HOST_PROP_SYSTEM_MEM,
+ OBJECT(system_memory), &error_fatal);
+ object_property_set_link(phb, PCI_HOST_PROP_IO_MEM,
+ OBJECT(system_io), &error_fatal);
+ object_property_set_uint(phb, PCI_HOST_BELOW_4G_MEM_SIZE,
+ x86ms->below_4g_mem_size, &error_fatal);
+ object_property_set_uint(phb, PCI_HOST_ABOVE_4G_MEM_SIZE,
+ x86ms->above_4g_mem_size, &error_fatal);
+ object_property_set_str(phb, I440FX_HOST_PROP_PCI_TYPE, pci_type,
+ &error_fatal);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal);
+
+ pci_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pci.0"));
+ pci_bus_map_irqs(pci_bus,
+ xen_enabled() ? xen_pci_slot_get_pirq
+ : pc_pci_slot_get_pirq);
+ pcms->bus = pci_bus;
+
+ hole64_size = object_property_get_uint(phb,
PCI_HOST_PROP_PCI_HOLE64_SIZE,
&error_abort);
- } else {
- pci_memory = NULL;
- rom_memory = system_memory;
- i440fx_host = NULL;
- hole64_size = 0;
}
pc_guest_info_init(pcms);
if (!xen_enabled()) {
pc_memory_init(pcms, system_memory, rom_memory, hole64_size);
} else {
+ assert(machine->ram_size == x86ms->below_4g_mem_size +
+ x86ms->above_4g_mem_size);
+
pc_system_flash_cleanup_unused(pcms);
if (machine->kernel_filename != NULL) {
/* For xen HVM direct kernel boot, load linux here */
PIIX3State *piix3;
PCIDevice *pci_dev;
- pci_bus = i440fx_init(pci_type,
- i440fx_host,
- system_memory, system_io, machine->ram_size,
- x86ms->below_4g_mem_size,
- x86ms->above_4g_mem_size,
- pci_memory, ram_memory);
- pci_bus_map_irqs(pci_bus,
- xen_enabled() ? xen_pci_slot_get_pirq
- : pc_pci_slot_get_pirq);
- pcms->bus = pci_bus;
-
- pci_dev = pci_create_simple_multifunction(pci_bus, -1, true,
- TYPE_PIIX3_DEVICE);
+ pci_dev = pci_create_simple_multifunction(pci_bus, -1, TYPE_PIIX3_DEVICE);
if (xen_enabled()) {
pci_device_set_intx_routing_notifier(
rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev),
"rtc"));
} else {
- pci_bus = NULL;
isa_bus = isa_bus_new(NULL, system_memory, system_io,
&error_abort);
return -1;
}
- ehci = pci_new_multifunction(PCI_DEVFN(slot, 7), true, name);
+ ehci = pci_new_multifunction(PCI_DEVFN(slot, 7), name);
pci_realize_and_unref(ehci, bus, &error_fatal);
usbbus = QLIST_FIRST(&ehci->qdev.child_bus);
for (i = 0; i < 3; i++) {
- uhci = pci_new_multifunction(PCI_DEVFN(slot, comp[i].func), true,
+ uhci = pci_new_multifunction(PCI_DEVFN(slot, comp[i].func),
comp[i].name);
qdev_prop_set_string(&uhci->qdev, "masterbus", usbbus->name);
qdev_prop_set_uint32(&uhci->qdev, "firstport", comp[i].port);
PCMachineState *pcms = PC_MACHINE(machine);
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
X86MachineState *x86ms = X86_MACHINE(machine);
- Q35PCIHost *q35_host;
- PCIHostState *phb;
+ Object *phb;
PCIBus *host_bus;
PCIDevice *lpc;
DeviceState *lpc_dev;
}
/* create pci host bus */
- q35_host = Q35_HOST_DEVICE(qdev_new(TYPE_Q35_HOST_DEVICE));
+ phb = OBJECT(qdev_new(TYPE_Q35_HOST_DEVICE));
if (pcmc->pci_enabled) {
- pci_hole64_size = object_property_get_uint(OBJECT(q35_host),
+ pci_hole64_size = object_property_get_uint(phb,
PCI_HOST_PROP_PCI_HOLE64_SIZE,
&error_abort);
}
/* allocate ram and load rom/bios */
pc_memory_init(pcms, system_memory, rom_memory, pci_hole64_size);
- object_property_add_child(OBJECT(machine), "q35", OBJECT(q35_host));
- object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_RAM_MEM,
+ object_property_add_child(OBJECT(machine), "q35", phb);
+ object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM,
OBJECT(machine->ram), NULL);
- object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_PCI_MEM,
+ object_property_set_link(phb, PCI_HOST_PROP_PCI_MEM,
OBJECT(pci_memory), NULL);
- object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_SYSTEM_MEM,
+ object_property_set_link(phb, PCI_HOST_PROP_SYSTEM_MEM,
OBJECT(system_memory), NULL);
- object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_IO_MEM,
+ object_property_set_link(phb, PCI_HOST_PROP_IO_MEM,
OBJECT(system_io), NULL);
- object_property_set_int(OBJECT(q35_host), PCI_HOST_BELOW_4G_MEM_SIZE,
+ object_property_set_int(phb, PCI_HOST_BELOW_4G_MEM_SIZE,
x86ms->below_4g_mem_size, NULL);
- object_property_set_int(OBJECT(q35_host), PCI_HOST_ABOVE_4G_MEM_SIZE,
+ object_property_set_int(phb, PCI_HOST_ABOVE_4G_MEM_SIZE,
x86ms->above_4g_mem_size, NULL);
+ object_property_set_bool(phb, PCI_HOST_BYPASS_IOMMU,
+ pcms->default_bus_bypass_iommu, NULL);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal);
+
/* pci */
- sysbus_realize_and_unref(SYS_BUS_DEVICE(q35_host), &error_fatal);
- phb = PCI_HOST_BRIDGE(q35_host);
- host_bus = phb->bus;
+ host_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pcie.0"));
+ pcms->bus = host_bus;
+
/* create ISA bus */
- lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), true,
+ lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC),
TYPE_ICH9_LPC_DEVICE);
qdev_prop_set_bit(DEVICE(lpc), "smm-enabled",
x86_machine_is_smm_enabled(x86ms));
ahci = pci_create_simple_multifunction(host_bus,
PCI_DEVFN(ICH9_SATA1_DEV,
ICH9_SATA1_FUNC),
- true, "ich9-ahci");
+ "ich9-ahci");
idebus[0] = qdev_get_child_bus(&ahci->qdev, "ide.0");
idebus[1] = qdev_get_child_bus(&ahci->qdev, "ide.1");
g_assert(MAX_SATA_PORTS == ahci_get_num_ports(ahci));
smb = pci_create_simple_multifunction(host_bus,
PCI_DEVFN(ICH9_SMB_DEV,
ICH9_SMB_FUNC),
- true, TYPE_ICH9_SMB_DEVICE);
+ TYPE_ICH9_SMB_DEVICE);
pcms->smbus = I2C_BUS(qdev_get_child_bus(DEVICE(smb), "i2c"));
smbus_eeprom_init(pcms->smbus, 8, NULL, 0);
cmd646_update_irq(pci_dev);
break;
case 2:
- bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ bmdma_status_writeb(bm, val);
break;
case 3:
if (bm == &bm->pci_dev->bmdma[0]) {
ide_bus_init_output_irq(&d->bus[i], qdev_get_gpio_in(ds, i));
bmdma_init(&d->bus[i], &d->bmdma[i], d);
- d->bmdma[i].bus = &d->bus[i];
ide_bus_register_restart_cb(&d->bus[i]);
}
}
bm->cmd = val & 0x09;
}
+void bmdma_status_writeb(BMDMAState *bm, uint32_t val)
+{
+ bm->status = (val & 0x60) | (bm->status & BM_STATUS_DMAING)
+ | (bm->status & ~val & (BM_STATUS_ERROR | BM_STATUS_INT));
+}
+
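
Editor's note: the helper centralizes the BMDMA status register's mixed semantics: bits 5-6 are plain read/write, DMAING is read-only to the guest, and ERROR/INT are write-1-to-clear. A self-contained check of that behaviour (constants restated here under the assumption DMAING=0x01, ERROR=0x02, INT=0x04, matching the names used above):

    #include <assert.h>
    #include <stdint.h>

    enum { DMAING = 0x01, ERROR = 0x02, INT = 0x04 };

    static uint32_t status_next(uint32_t status, uint32_t val)
    {
        return (val & 0x60) | (status & DMAING) | (status & ~val & (ERROR | INT));
    }

    int main(void)
    {
        /* DMA running, error and interrupt pending; guest writes 1 to INT. */
        assert(status_next(0x07, INT) == (DMAING | ERROR));
        /* Writing 0 leaves the sticky bits untouched. */
        assert(status_next(0x07, 0) == 0x07);
        return 0;
    }
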
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
unsigned width)
{
bus->dma = &bm->dma;
bm->irq = bus->irq;
bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
+ bm->bus = bus;
bm->pci_dev = d;
}
+static void pci_ide_init(Object *obj)
+{
+ PCIIDEState *d = PCI_IDE(obj);
+
+ qdev_init_gpio_out_named(DEVICE(d), d->isa_irq, "isa-irq",
+ ARRAY_SIZE(d->isa_irq));
+}
+
static const TypeInfo pci_ide_type_info = {
.name = TYPE_PCI_IDE,
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIIDEState),
+ .instance_init = pci_ide_init,
.abstract = true,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
*/
#include "qemu/osdep.h"
-#include "migration/vmstate.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/ide/piix.h"
bmdma_cmd_writeb(bm, val);
break;
case 2:
- bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ bmdma_status_writeb(bm, val);
break;
}
}
pci_set_word(pci_conf + PCI_COMMAND, 0x0000);
pci_set_word(pci_conf + PCI_STATUS,
PCI_STATUS_DEVSEL_MEDIUM | PCI_STATUS_FAST_BACK);
- pci_set_byte(pci_conf + 0x20, 0x01); /* BMIBA: 20-23h */
+ pci_set_long(pci_conf + 0x20, 0x1); /* BMIBA: 20-23h */
}
static bool pci_piix_init_bus(PCIIDEState *d, unsigned i, Error **errp)
ide_bus_init_output_irq(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq));
bmdma_init(&d->bus[i], &d->bmdma[i], d);
- d->bmdma[i].bus = &d->bus[i];
ide_bus_register_restart_cb(&d->bus[i]);
return true;
bmdma_setup_bar(d);
pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);
- vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_pci, d);
-
for (unsigned i = 0; i < 2; i++) {
if (!pci_piix_init_bus(d, i, errp)) {
return;
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
dc->reset = piix_ide_reset;
+ dc->vmsd = &vmstate_ide_pci;
k->realize = pci_piix_ide_realize;
k->exit = pci_piix_ide_exitfn;
k->vendor_id = PCI_VENDOR_ID_INTEL;
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
dc->reset = piix_ide_reset;
+ dc->vmsd = &vmstate_ide_pci;
k->realize = pci_piix_ide_realize;
k->exit = pci_piix_ide_exitfn;
k->vendor_id = PCI_VENDOR_ID_INTEL;
break;
case 0x02:
case 0x12:
- d->i.bmdma[0].status = (val & 0x60) | (d->i.bmdma[0].status & 1) |
- (d->i.bmdma[0].status & ~val & 6);
+ bmdma_status_writeb(&d->i.bmdma[0], val);
break;
case 0x04 ... 0x07:
bmdma_addr_ioport_ops.write(&d->i.bmdma[0], addr - 4, val, size);
break;
case 0x0a:
case 0x1a:
- d->i.bmdma[1].status = (val & 0x60) | (d->i.bmdma[1].status & 1) |
- (d->i.bmdma[1].status & ~val & 6);
+ bmdma_status_writeb(&d->i.bmdma[1], val);
break;
case 0x0c ... 0x0f:
bmdma_addr_ioport_ops.write(&d->i.bmdma[1], addr - 12, val, size);
ide_bus_init_output_irq(&s->bus[i], qdev_get_gpio_in(ds, i));
bmdma_init(&s->bus[i], &s->bmdma[i], s);
- s->bmdma[i].bus = &s->bus[i];
ide_bus_register_restart_cb(&s->bus[i]);
}
}
#include "sysemu/dma.h"
#include "hw/isa/vt82c686.h"
#include "hw/ide/pci.h"
+#include "hw/irq.h"
#include "trace.h"
static uint64_t bmdma_read(void *opaque, hwaddr addr,
bmdma_cmd_writeb(bm, val);
break;
case 2:
- bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ bmdma_status_writeb(bm, val);
break;
default:;
}
static void via_ide_set_irq(void *opaque, int n, int level)
{
- PCIDevice *d = PCI_DEVICE(opaque);
+ PCIIDEState *s = opaque;
+ PCIDevice *d = PCI_DEVICE(s);
if (level) {
d->config[0x70 + n * 8] |= 0x80;
d->config[0x70 + n * 8] &= ~0x80;
}
- via_isa_set_irq(pci_get_function_0(d), 14 + n, level);
+ qemu_set_irq(s->isa_irq[n], level);
}
static void via_ide_reset(DeviceState *dev)
ide_bus_init_output_irq(&d->bus[i], qdev_get_gpio_in(ds, i));
bmdma_init(&d->bus[i], &d->bmdma[i], d);
- d->bmdma[i].bus = &d->bus[i];
ide_bus_register_restart_cb(&d->bus[i]);
}
}
config S390_FLIC_KVM
bool
- default y
depends on S390_FLIC && KVM
config OMPIC
},
};
-void via_isa_set_irq(PCIDevice *d, int n, int level)
-{
- ViaISAState *s = VIA_ISA(d);
- qemu_set_irq(s->isa_irqs_in[n], level);
-}
-
static void via_isa_request_i8259_irq(void *opaque, int irq, int level)
{
ViaISAState *s = opaque;
if (!qdev_realize(DEVICE(&s->ide), BUS(pci_bus), errp)) {
return;
}
+ for (i = 0; i < 2; i++) {
+ qdev_connect_gpio_out_named(DEVICE(&s->ide), "isa-irq", i,
+ s->isa_irqs_in[14 + i]);
+ }
/* Functions 2-3: USB Ports */
for (i = 0; i < ARRAY_SIZE(s->uhci); i++) {
PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL);
pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_DEVSEL_MEDIUM);
+ pci_conf[0x4c] = 0x04; /* IDE Interrupt Routing */
pci_conf[0x58] = 0x40; /* Miscellaneous Control 0 */
pci_conf[0x67] = 0x08; /* Fast IR Config */
pci_conf[0x6b] = 0x01; /* Fast IR I/O Base */
/* initialize device memory address space */
if (machine->ram_size < machine->maxram_size) {
- machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
+ hwaddr device_mem_base;
if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) {
error_report("unsupported amount of memory slots: %"PRIu64,
exit(EXIT_FAILURE);
}
/* device memory base is the top of high memory address. */
- machine->device_memory->base = 0x90000000 + highram_size;
- machine->device_memory->base =
- ROUND_UP(machine->device_memory->base, 1 * GiB);
-
- memory_region_init(&machine->device_memory->mr, OBJECT(lams),
- "device-memory", device_mem_size);
- memory_region_add_subregion(address_space_mem, machine->device_memory->base,
- &machine->device_memory->mr);
+ device_mem_base = ROUND_UP(VIRT_HIGHMEM_BASE + highram_size, 1 * GiB);
+ machine_memory_devices_init(machine, device_mem_base, device_mem_size);
}
/* Add isa io region */
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
#include "sysemu/kvm.h"
+#include "exec/address-spaces.h"
#include "trace.h"
static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
return 0;
}
-static int memory_device_used_region_size(Object *obj, void *opaque)
-{
- uint64_t *size = opaque;
-
- if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
- const DeviceState *dev = DEVICE(obj);
- const MemoryDeviceState *md = MEMORY_DEVICE(obj);
-
- if (dev->realized) {
- *size += memory_device_get_region_size(md, &error_abort);
- }
- }
-
- object_child_foreach(obj, memory_device_used_region_size, opaque);
- return 0;
-}
-
-static void memory_device_check_addable(MachineState *ms, uint64_t size,
+static void memory_device_check_addable(MachineState *ms, MemoryRegion *mr,
Error **errp)
{
- uint64_t used_region_size = 0;
+ const uint64_t used_region_size = ms->device_memory->used_region_size;
+ const uint64_t size = memory_region_size(mr);
/* we will need a new memory slot for kvm and vhost */
if (kvm_enabled() && !kvm_has_free_slot(ms)) {
}
/* will we exceed the total amount of memory specified */
- memory_device_used_region_size(OBJECT(ms), &used_region_size);
if (used_region_size + size < used_region_size ||
used_region_size + size > ms->maxram_size - ms->ram_size) {
error_setg(errp, "not enough space, currently 0x%" PRIx64
uint64_t align, uint64_t size,
Error **errp)
{
- Error *err = NULL;
GSList *list = NULL, *item;
Range as, new = range_empty;
- if (!ms->device_memory) {
- error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
- "supported by the machine");
- return 0;
- }
-
- if (!memory_region_size(&ms->device_memory->mr)) {
- error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
- "enabled, please specify the maxmem option");
- return 0;
- }
range_init_nofail(&as, ms->device_memory->base,
memory_region_size(&ms->device_memory->mr));
align);
}
- memory_device_check_addable(ms, size, &err);
- if (err) {
- error_propagate(errp, err);
- return 0;
- }
-
if (hint && !QEMU_IS_ALIGNED(*hint, align)) {
error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
align);
uint64_t addr, align = 0;
MemoryRegion *mr;
+ if (!ms->device_memory) {
+ error_setg(errp, "the configuration is not prepared for memory devices"
+ " (e.g., for memory hotplug), consider specifying the"
+ " maxmem option");
+ return;
+ }
+
mr = mdc->get_memory_region(md, &local_err);
if (local_err) {
goto out;
}
+ memory_device_check_addable(ms, mr, &local_err);
+ if (local_err) {
+ goto out;
+ }
+
if (legacy_align) {
align = *legacy_align;
} else {
mr = mdc->get_memory_region(md, &error_abort);
g_assert(ms->device_memory);
+ ms->device_memory->used_region_size += memory_region_size(mr);
memory_region_add_subregion(&ms->device_memory->mr,
addr - ms->device_memory->base, mr);
trace_memory_device_plug(DEVICE(md)->id ? DEVICE(md)->id : "", addr);
g_assert(ms->device_memory);
memory_region_del_subregion(&ms->device_memory->mr, mr);
+ ms->device_memory->used_region_size -= memory_region_size(mr);
trace_memory_device_unplug(DEVICE(md)->id ? DEVICE(md)->id : "",
mdc->get_addr(md));
}
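
Editor's note: tracking used_region_size at plug/unplug time replaces the old walk over the entire QOM tree on every pre-plug, making the addable check O(1). The check itself still guards the 64-bit sum against wrap-around; a self-contained restatement of that test:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of the wrap-around test in memory_device_check_addable(). */
    static int sum_overflows(uint64_t used, uint64_t size)
    {
        return used + size < used;   /* true iff the unsigned sum wrapped */
    }

    int main(void)
    {
        assert(!sum_overflows(1ull << 40, 1ull << 40));
        assert(sum_overflows(UINT64_MAX - 1, 2));
        return 0;
    }
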
return memory_region_size(mr);
}
+void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size)
+{
+ g_assert(size);
+ g_assert(!ms->device_memory);
+ ms->device_memory = g_new0(DeviceMemoryState, 1);
+ ms->device_memory->base = base;
+
+ memory_region_init(&ms->device_memory->mr, OBJECT(ms), "device-memory",
+ size);
+ memory_region_add_subregion(get_system_memory(), ms->device_memory->base,
+ &ms->device_memory->mr);
+}
+
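
Editor's note: with this helper in place, boards no longer open-code the device-memory container; the pc and loongarch hunks above reduce to a range computation plus one call. A sketch of the usual pattern (the base address expression is board-specific and assumed here):

    ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
    hwaddr device_mem_base = ROUND_UP(board_top_of_ram, 1 * GiB); /* assumption */
    machine_memory_devices_init(machine, device_mem_base, device_mem_size);
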
static const TypeInfo memory_device_info = {
.name = TYPE_MEMORY_DEVICE,
.parent = TYPE_INTERFACE,
boston_lcd_event, NULL, s, NULL, true);
ahci = pci_create_simple_multifunction(&PCI_BRIDGE(&pcie2->root)->sec_bus,
- PCI_DEVFN(0, 0),
- true, TYPE_ICH9_AHCI);
+ PCI_DEVFN(0, 0), TYPE_ICH9_AHCI);
g_assert(ARRAY_SIZE(hd) == ahci_get_num_ports(ahci));
ide_drive_get(hd, ahci_get_num_ports(ahci));
ahci_ide_create_devs(ahci, hd);
/* South bridge -> IP5 */
pci_dev = pci_create_simple_multifunction(pci_bus,
PCI_DEVFN(FULOONG2E_VIA_SLOT, 0),
- true, TYPE_VT82C686B_ISA);
+ TYPE_VT82C686B_ISA);
object_property_add_alias(OBJECT(machine), "rtc-time",
object_resolve_path_component(OBJECT(pci_dev),
"rtc"),
if (!machine->cpu_type) {
machine->cpu_type = MIPS_CPU_TYPE_NAME("Loongson-3A1000");
}
- if (!strstr(machine->cpu_type, "Loongson-3A1000")) {
- error_report("Loongson-3/TCG needs cpu type Loongson-3A1000");
+ if (!cpu_type_supports_isa(machine->cpu_type, INSN_LOONGSON3A)) {
+ error_report("Loongson-3/TCG needs a Loongson-3 series cpu");
exit(1);
}
} else {
pci_bus_map_irqs(pci_bus, malta_pci_slot_get_pirq);
/* Southbridge */
- piix4 = pci_create_simple_multifunction(pci_bus, PIIX4_PCI_DEVFN, true,
+ piix4 = pci_create_simple_multifunction(pci_bus, PIIX4_PCI_DEVFN,
TYPE_PIIX4_PCI_DEVICE);
isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix4), "isa.0"));
config SIFIVE_E_PRCI
bool
+config SIFIVE_E_AON
+ bool
+
config SIFIVE_U_OTP
bool
system_ss.add(when: 'CONFIG_MCHP_PFSOC_SYSREG', if_true: files('mchp_pfsoc_sysreg.c'))
system_ss.add(when: 'CONFIG_SIFIVE_TEST', if_true: files('sifive_test.c'))
system_ss.add(when: 'CONFIG_SIFIVE_E_PRCI', if_true: files('sifive_e_prci.c'))
+system_ss.add(when: 'CONFIG_SIFIVE_E_AON', if_true: files('sifive_e_aon.c'))
system_ss.add(when: 'CONFIG_SIFIVE_U_OTP', if_true: files('sifive_u_otp.c'))
system_ss.add(when: 'CONFIG_SIFIVE_U_PRCI', if_true: files('sifive_u_prci.c'))
--- /dev/null
+/*
+ * SiFive HiFive1 AON (Always On Domain) for QEMU.
+ *
+ * Copyright (c) 2022 SiFive, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "hw/registerfields.h"
+#include "hw/misc/sifive_e_aon.h"
+#include "qapi/visitor.h"
+#include "qapi/error.h"
+#include "sysemu/watchdog.h"
+#include "hw/qdev-properties.h"
+
+REG32(AON_WDT_WDOGCFG, 0x0)
+ FIELD(AON_WDT_WDOGCFG, SCALE, 0, 4)
+ FIELD(AON_WDT_WDOGCFG, RSVD0, 4, 4)
+ FIELD(AON_WDT_WDOGCFG, RSTEN, 8, 1)
+ FIELD(AON_WDT_WDOGCFG, ZEROCMP, 9, 1)
+ FIELD(AON_WDT_WDOGCFG, RSVD1, 10, 2)
+ FIELD(AON_WDT_WDOGCFG, EN_ALWAYS, 12, 1)
+ FIELD(AON_WDT_WDOGCFG, EN_CORE_AWAKE, 13, 1)
+ FIELD(AON_WDT_WDOGCFG, RSVD2, 14, 14)
+ FIELD(AON_WDT_WDOGCFG, IP0, 28, 1)
+ FIELD(AON_WDT_WDOGCFG, RSVD3, 29, 3)
+REG32(AON_WDT_WDOGCOUNT, 0x8)
+ FIELD(AON_WDT_WDOGCOUNT, VALUE, 0, 31)
+REG32(AON_WDT_WDOGS, 0x10)
+REG32(AON_WDT_WDOGFEED, 0x18)
+REG32(AON_WDT_WDOGKEY, 0x1c)
+REG32(AON_WDT_WDOGCMP0, 0x20)
+
+static void sifive_e_aon_wdt_update_wdogcount(SiFiveEAONState *r)
+{
+ int64_t now;
+ if (FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, EN_ALWAYS) == 0 &&
+ FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE) == 0) {
+ return;
+ }
+
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ r->wdogcount += muldiv64(now - r->wdog_restart_time,
+ r->wdogclk_freq, NANOSECONDS_PER_SECOND);
+
+ /* Clear the most significant bit. */
+ r->wdogcount &= R_AON_WDT_WDOGCOUNT_VALUE_MASK;
+ r->wdog_restart_time = now;
+}
+
+static void sifive_e_aon_wdt_update_state(SiFiveEAONState *r)
+{
+ uint16_t wdogs;
+ bool cmp_signal = false;
+ sifive_e_aon_wdt_update_wdogcount(r);
+ wdogs = (uint16_t)(r->wdogcount >>
+ FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, SCALE));
+
+ if (wdogs >= r->wdogcmp0) {
+ cmp_signal = true;
+ if (FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, ZEROCMP) == 1) {
+ r->wdogcount = 0;
+ wdogs = 0;
+ }
+ }
+
+ if (cmp_signal) {
+ if (FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, RSTEN) == 1) {
+ watchdog_perform_action();
+ }
+ r->wdogcfg = FIELD_DP32(r->wdogcfg, AON_WDT_WDOGCFG, IP0, 1);
+ }
+
+ qemu_set_irq(r->wdog_irq, FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, IP0));
+
+ if (wdogs < r->wdogcmp0 &&
+ (FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, EN_ALWAYS) == 1 ||
+ FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE) == 1)) {
+ int64_t next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ next += muldiv64((r->wdogcmp0 - wdogs) <<
+ FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, SCALE),
+ NANOSECONDS_PER_SECOND, r->wdogclk_freq);
+ timer_mod(r->wdog_timer, next);
+ } else {
+ timer_mod(r->wdog_timer, INT64_MAX);
+ }
+}
+
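
Editor's note: taken together, the two helpers model a free-running counter that advances at wdogclk_freq and is compared after a right shift by SCALE. A worked example, assuming the default LFCLK frequency of 32768 Hz: with the reset values wdogcmp0 = 0xbeef (48879) and scale = 0, the comparator fires roughly 48879 / 32768 ≈ 1.49 s after the watchdog is enabled; with scale = 4 each comparator tick covers 16 counter ticks, stretching the same compare value to ≈ 23.9 s. The deadline computation is exactly the muldiv64() above:

    int64_t ns = muldiv64((uint64_t)(0xbeef - 0) << 4,      /* (cmp - wdogs) << scale */
                          NANOSECONDS_PER_SECOND, 32768);   /* ticks -> ns at 32768 Hz */
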
+/*
+ * Callback used when the timer set using timer_mod expires.
+ */
+static void sifive_e_aon_wdt_expired_cb(void *opaque)
+{
+ SiFiveEAONState *r = SIFIVE_E_AON(opaque);
+ sifive_e_aon_wdt_update_state(r);
+}
+
+static uint64_t
+sifive_e_aon_wdt_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ SiFiveEAONState *r = SIFIVE_E_AON(opaque);
+
+ switch (addr) {
+ case A_AON_WDT_WDOGCFG:
+ return r->wdogcfg;
+ case A_AON_WDT_WDOGCOUNT:
+ sifive_e_aon_wdt_update_wdogcount(r);
+ return r->wdogcount;
+ case A_AON_WDT_WDOGS:
+ sifive_e_aon_wdt_update_wdogcount(r);
+ return r->wdogcount >> FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, SCALE);
+ case A_AON_WDT_WDOGFEED:
+ return 0;
+ case A_AON_WDT_WDOGKEY:
+ return r->wdogunlock;
+ case A_AON_WDT_WDOGCMP0:
+ return r->wdogcmp0;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: bad read: addr=0x%x\n",
+ __func__, (int)addr);
+ }
+
+ return 0;
+}
+
+static void
+sifive_e_aon_wdt_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ SiFiveEAONState *r = SIFIVE_E_AON(opaque);
+ uint32_t value = val64;
+
+ switch (addr) {
+ case A_AON_WDT_WDOGCFG: {
+ uint8_t new_en_always;
+ uint8_t new_en_core_awake;
+ uint8_t old_en_always;
+ uint8_t old_en_core_awake;
+ if (r->wdogunlock == 0) {
+ return;
+ }
+
+ new_en_always = FIELD_EX32(value, AON_WDT_WDOGCFG, EN_ALWAYS);
+ new_en_core_awake = FIELD_EX32(value, AON_WDT_WDOGCFG, EN_CORE_AWAKE);
+ old_en_always = FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG, EN_ALWAYS);
+ old_en_core_awake = FIELD_EX32(r->wdogcfg, AON_WDT_WDOGCFG,
+ EN_CORE_AWAKE);
+
+ if ((old_en_always || old_en_core_awake) &&
+ !(new_en_always || new_en_core_awake)) {
+ sifive_e_aon_wdt_update_wdogcount(r);
+ } else if (!(old_en_always || old_en_core_awake) &&
+ (new_en_always || new_en_core_awake)) {
+ r->wdog_restart_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ }
+ r->wdogcfg = value;
+ r->wdogunlock = 0;
+ break;
+ }
+ case A_AON_WDT_WDOGCOUNT:
+ if (r->wdogunlock == 0) {
+ return;
+ }
+ r->wdogcount = value & R_AON_WDT_WDOGCOUNT_VALUE_MASK;
+ r->wdog_restart_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ r->wdogunlock = 0;
+ break;
+ case A_AON_WDT_WDOGS:
+ return;
+ case A_AON_WDT_WDOGFEED:
+ if (r->wdogunlock == 0) {
+ return;
+ }
+ if (value == SIFIVE_E_AON_WDOGFEED) {
+ r->wdogcount = 0;
+ r->wdog_restart_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ }
+ r->wdogunlock = 0;
+ break;
+ case A_AON_WDT_WDOGKEY:
+ if (value == SIFIVE_E_AON_WDOGKEY) {
+ r->wdogunlock = 1;
+ }
+ break;
+ case A_AON_WDT_WDOGCMP0:
+ if (r->wdogunlock == 0) {
+ return;
+ }
+ r->wdogcmp0 = (uint16_t) value;
+ r->wdogunlock = 0;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%x v=0x%x\n",
+ __func__, (int)addr, (int)value);
+ }
+ sifive_e_aon_wdt_update_state(r);
+}
+
+static uint64_t
+sifive_e_aon_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ if (addr < SIFIVE_E_AON_RTC) {
+ return sifive_e_aon_wdt_read(opaque, addr, size);
+ } else if (addr < SIFIVE_E_AON_MAX) {
+ qemu_log_mask(LOG_UNIMP, "%s: Unimplemented read: addr=0x%x\n",
+ __func__, (int)addr);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: bad read: addr=0x%x\n",
+ __func__, (int)addr);
+ }
+ return 0;
+}
+
+static void
+sifive_e_aon_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ if (addr < SIFIVE_E_AON_RTC) {
+ sifive_e_aon_wdt_write(opaque, addr, val64, size);
+ } else if (addr < SIFIVE_E_AON_MAX) {
+ qemu_log_mask(LOG_UNIMP, "%s: Unimplemented write: addr=0x%x\n",
+ __func__, (int)addr);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%x\n",
+ __func__, (int)addr);
+ }
+}
+
+static const MemoryRegionOps sifive_e_aon_ops = {
+ .read = sifive_e_aon_read,
+ .write = sifive_e_aon_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4
+ }
+};
+
+static void sifive_e_aon_reset(DeviceState *dev)
+{
+ SiFiveEAONState *r = SIFIVE_E_AON(dev);
+
+ r->wdogcfg = FIELD_DP32(r->wdogcfg, AON_WDT_WDOGCFG, RSTEN, 0);
+ r->wdogcfg = FIELD_DP32(r->wdogcfg, AON_WDT_WDOGCFG, EN_ALWAYS, 0);
+ r->wdogcfg = FIELD_DP32(r->wdogcfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE, 0);
+ r->wdogcmp0 = 0xbeef;
+
+ sifive_e_aon_wdt_update_state(r);
+}
+
+static void sifive_e_aon_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ SiFiveEAONState *r = SIFIVE_E_AON(obj);
+
+ memory_region_init_io(&r->mmio, OBJECT(r), &sifive_e_aon_ops, r,
+ TYPE_SIFIVE_E_AON, SIFIVE_E_AON_MAX);
+ sysbus_init_mmio(sbd, &r->mmio);
+
+ /* watchdog timer */
+ r->wdog_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ sifive_e_aon_wdt_expired_cb, r);
+ r->wdogclk_freq = SIFIVE_E_LFCLK_DEFAULT_FREQ;
+ sysbus_init_irq(sbd, &r->wdog_irq);
+}
+
+static Property sifive_e_aon_properties[] = {
+ DEFINE_PROP_UINT64("wdogclk-frequency", SiFiveEAONState, wdogclk_freq,
+ SIFIVE_E_LFCLK_DEFAULT_FREQ),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void sifive_e_aon_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->reset = sifive_e_aon_reset;
+ device_class_set_props(dc, sifive_e_aon_properties);
+}
+
+static const TypeInfo sifive_e_aon_info = {
+ .name = TYPE_SIFIVE_E_AON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SiFiveEAONState),
+ .instance_init = sifive_e_aon_init,
+ .class_init = sifive_e_aon_class_init,
+};
+
+static void sifive_e_aon_register_types(void)
+{
+ type_register_static(&sifive_e_aon_info);
+}
+
+type_init(sifive_e_aon_register_types)
config E1000E_PCI_EXPRESS
bool
- default y if PCI_DEVICES
+ default y if PCI_DEVICES || PCIE_DEVICES
depends on PCI_EXPRESS && MSI_NONBROKEN
config IGB_PCI_EXPRESS
bool
- default y if PCI_DEVICES
+ default y if PCI_DEVICES || PCIE_DEVICES
depends on PCI_EXPRESS && MSI_NONBROKEN
config RTL8139_PCI
hw_error("Failed to initialize AER capability");
}
- pcie_ari_init(pci_dev, 0x150, 1);
+ pcie_ari_init(pci_dev, 0x150);
pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF,
IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
hw_error("Failed to initialize AER capability");
}
- pcie_ari_init(dev, 0x150, 1);
+ pcie_ari_init(dev, 0x150);
}
static void igbvf_pci_uninit(PCIDevice *dev)
}
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of the configure interrupt's IDX. If this driver does not
* support, the function will return false
*/
}
/*
*Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of the configure interrupt's IDX. If this driver does not
* support, the function will return
*/
vdc->vmsd = &vmstate_virtio_net_device;
vdc->primary_unplug_pending = primary_unplug_pending;
vdc->get_vhost = virtio_net_get_vhost;
+ vdc->toggle_device_iotlb = vhost_toggle_device_iotlb;
}
static const TypeInfo virtio_net_info = {
PCIDevice *pci = PCI_DEVICE(n);
uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1);
uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2);
+ uint32_t v;
int i;
/* Address should be page aligned */
NvmeCQueue *cq = n->cq[i];
if (sq) {
+ v = cpu_to_le32(sq->tail);
+
/*
* CAP.DSTRD is 0, so offset of ith sq db_addr is (i<<3)
* nvme_process_db() uses this hard-coded way to calculate
*/
sq->db_addr = dbs_addr + (i << 3);
sq->ei_addr = eis_addr + (i << 3);
- pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
+ pci_dma_write(pci, sq->db_addr, &v, sizeof(sq->tail));
if (n->params.ioeventfd && sq->sqid != 0) {
if (!nvme_init_sq_ioeventfd(sq)) {
}
if (cq) {
+ v = cpu_to_le32(cq->head);
+
/* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */
cq->db_addr = dbs_addr + (i << 3) + (1 << 2);
cq->ei_addr = eis_addr + (i << 3) + (1 << 2);
- pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
+ pci_dma_write(pci, cq->db_addr, &v, sizeof(cq->head));
if (n->params.ioeventfd && cq->cqid != 0) {
if (!nvme_init_cq_ioeventfd(cq)) {
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
PCIDevice *pci = PCI_DEVICE(n);
- uint32_t qid;
+ uint32_t qid, v;
if (unlikely(addr & ((1 << 2) - 1))) {
NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
start_sqs = nvme_cq_full(cq) ? 1 : 0;
cq->head = new_head;
if (!qid && n->dbbuf_enabled) {
- pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
+ v = cpu_to_le32(cq->head);
+ pci_dma_write(pci, cq->db_addr, &v, sizeof(cq->head));
}
if (start_sqs) {
NvmeSQueue *sq;
sq->tail = new_tail;
if (!qid && n->dbbuf_enabled) {
+ v = cpu_to_le32(sq->tail);
+
/*
* The spec states "the host shall also update the controller's
* corresponding doorbell property to match the value of that entry
* including ones that run on Linux, are not updating Admin Queues,
* so we can't trust reading it for an appropriate sq tail.
*/
- pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
+ pci_dma_write(pci, sq->db_addr, &v, sizeof(sq->tail));
}
qemu_bh_schedule(sq->bh);
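
Editor's note: the shadow doorbell and eventidx buffers live in guest memory, so the stored values must be little-endian regardless of host byte order; on big-endian hosts the old code wrote byte-swapped tails and heads. The pattern throughout these hunks is convert-then-DMA:

    uint32_t v = cpu_to_le32(sq->tail);              /* fix byte order first       */
    pci_dma_write(pci, sq->db_addr, &v, sizeof(v));  /* then copy the 4-byte value */
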
pcie_endpoint_cap_init(pci_dev, 0x80);
pcie_cap_flr_init(pci_dev);
if (n->params.sriov_max_vfs) {
- pcie_ari_init(pci_dev, 0x100, 1);
+ pcie_ari_init(pci_dev, 0x100);
}
/* add one to max_ioqpairs to account for the admin queue pair */
bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
{
+ uint32_t set, *row;
+
if (efuse_ro_bits_find(s, bit)) {
g_autofree char *path = object_get_canonical_path(OBJECT(s));
return false;
}
- s->fuse32[bit / 32] |= 1 << (bit % 32);
- efuse_bdrv_sync(s, bit);
+ /* Avoid back-end write unless there is a real update */
+ row = &s->fuse32[bit / 32];
+ set = 1 << (bit % 32);
+ if (!(set & *row)) {
+ *row |= set;
+ efuse_bdrv_sync(s, bit);
+ }
return true;
}
#include "qemu/range.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
-#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/pci-host/i440fx.h"
#include "hw/qdev-properties.h"
struct I440FXState {
PCIHostState parent_obj;
+
+ MemoryRegion *system_memory;
+ MemoryRegion *io_memory;
+ MemoryRegion *pci_address_space;
+ MemoryRegion *ram_memory;
Range pci_hole;
+ uint64_t below_4g_mem_size;
+ uint64_t above_4g_mem_size;
uint64_t pci_hole64_size;
bool pci_hole64_fix;
uint32_t short_root_bus;
+
+ char *pci_type;
};
#define I440FX_PAM 0x59
*/
#define I440FX_COREBOOT_RAM_SIZE 0x57
+static void i440fx_realize(PCIDevice *dev, Error **errp)
+{
+ dev->config[I440FX_SMRAM] = 0x02;
+
+ if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) {
+ warn_report("i440fx doesn't support emulated iommu");
+ }
+}
+
static void i440fx_update_memory_mappings(PCII440FXState *d)
{
int i;
static void i440fx_pcihost_initfn(Object *obj)
{
- PCIHostState *s = PCI_HOST_BRIDGE(obj);
+ I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj);
+ PCIHostState *phb = PCI_HOST_BRIDGE(obj);
- memory_region_init_io(&s->conf_mem, obj, &pci_host_conf_le_ops, s,
+ memory_region_init_io(&phb->conf_mem, obj, &pci_host_conf_le_ops, phb,
"pci-conf-idx", 4);
- memory_region_init_io(&s->data_mem, obj, &pci_host_data_le_ops, s,
+ memory_region_init_io(&phb->data_mem, obj, &pci_host_data_le_ops, phb,
"pci-conf-data", 4);
-}
-static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
-{
- PCIHostState *s = PCI_HOST_BRIDGE(dev);
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ object_property_add_link(obj, PCI_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->ram_memory,
+ qdev_prop_allow_set_link_before_realize, 0);
- memory_region_add_subregion(s->bus->address_space_io, 0xcf8, &s->conf_mem);
- sysbus_init_ioports(sbd, 0xcf8, 4);
+ object_property_add_link(obj, PCI_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->pci_address_space,
+ qdev_prop_allow_set_link_before_realize, 0);
- memory_region_add_subregion(s->bus->address_space_io, 0xcfc, &s->data_mem);
- sysbus_init_ioports(sbd, 0xcfc, 4);
+ object_property_add_link(obj, PCI_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->system_memory,
+ qdev_prop_allow_set_link_before_realize, 0);
- /* register i440fx 0xcf8 port as coalesced pio */
- memory_region_set_flush_coalesced(&s->data_mem);
- memory_region_add_coalescing(&s->conf_mem, 0, 4);
+ object_property_add_link(obj, PCI_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->io_memory,
+ qdev_prop_allow_set_link_before_realize, 0);
}
-static void i440fx_realize(PCIDevice *dev, Error **errp)
-{
- dev->config[I440FX_SMRAM] = 0x02;
-
- if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) {
- warn_report("i440fx doesn't support emulated iommu");
- }
-}
-
-PCIBus *i440fx_init(const char *pci_type,
- DeviceState *dev,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
- ram_addr_t ram_size,
- ram_addr_t below_4g_mem_size,
- ram_addr_t above_4g_mem_size,
- MemoryRegion *pci_address_space,
- MemoryRegion *ram_memory)
+static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
{
+ ERRP_GUARD();
+ I440FXState *s = I440FX_PCI_HOST_BRIDGE(dev);
+ PCIHostState *phb = PCI_HOST_BRIDGE(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
PCIBus *b;
PCIDevice *d;
- PCIHostState *s;
PCII440FXState *f;
unsigned i;
- I440FXState *i440fx;
- s = PCI_HOST_BRIDGE(dev);
- b = pci_root_bus_new(dev, NULL, pci_address_space,
- address_space_io, 0, TYPE_PCI_BUS);
- s->bus = b;
- object_property_add_child(qdev_get_machine(), "i440fx", OBJECT(dev));
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ memory_region_add_subregion(s->io_memory, 0xcf8, &phb->conf_mem);
+ sysbus_init_ioports(sbd, 0xcf8, 4);
+
+ memory_region_add_subregion(s->io_memory, 0xcfc, &phb->data_mem);
+ sysbus_init_ioports(sbd, 0xcfc, 4);
- d = pci_create_simple(b, 0, pci_type);
+ /* register i440fx 0xcf8 port as coalesced pio */
+ memory_region_set_flush_coalesced(&phb->data_mem);
+ memory_region_add_coalescing(&phb->conf_mem, 0, 4);
+
+ b = pci_root_bus_new(dev, NULL, s->pci_address_space,
+ s->io_memory, 0, TYPE_PCI_BUS);
+ phb->bus = b;
+
+ d = pci_create_simple(b, 0, s->pci_type);
f = I440FX_PCI_DEVICE(d);
- f->system_memory = address_space_mem;
- f->pci_address_space = pci_address_space;
- f->ram_memory = ram_memory;
- i440fx = I440FX_PCI_HOST_BRIDGE(dev);
- range_set_bounds(&i440fx->pci_hole, below_4g_mem_size,
+ range_set_bounds(&s->pci_hole, s->below_4g_mem_size,
IO_APIC_DEFAULT_ADDRESS - 1);
/* setup pci memory mapping */
- pc_pci_as_mapping_init(f->system_memory, f->pci_address_space);
+ pc_pci_as_mapping_init(s->system_memory, s->pci_address_space);
/* if *disabled* show SMRAM to all CPUs */
memory_region_init_alias(&f->smram_region, OBJECT(d), "smram-region",
- f->pci_address_space, 0xa0000, 0x20000);
- memory_region_add_subregion_overlap(f->system_memory, 0xa0000,
+ s->pci_address_space, SMRAM_C_BASE, SMRAM_C_SIZE);
+ memory_region_add_subregion_overlap(s->system_memory, SMRAM_C_BASE,
&f->smram_region, 1);
memory_region_set_enabled(&f->smram_region, true);
memory_region_init(&f->smram, OBJECT(d), "smram", 4 * GiB);
memory_region_set_enabled(&f->smram, true);
memory_region_init_alias(&f->low_smram, OBJECT(d), "smram-low",
- f->ram_memory, 0xa0000, 0x20000);
+ s->ram_memory, SMRAM_C_BASE, SMRAM_C_SIZE);
memory_region_set_enabled(&f->low_smram, true);
- memory_region_add_subregion(&f->smram, 0xa0000, &f->low_smram);
+ memory_region_add_subregion(&f->smram, SMRAM_C_BASE, &f->low_smram);
object_property_add_const_link(qdev_get_machine(), "smram",
OBJECT(&f->smram));
- init_pam(&f->pam_regions[0], OBJECT(d), f->ram_memory, f->system_memory,
- f->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE);
+ init_pam(&f->pam_regions[0], OBJECT(d), s->ram_memory, s->system_memory,
+ s->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE);
for (i = 0; i < ARRAY_SIZE(f->pam_regions) - 1; ++i) {
- init_pam(&f->pam_regions[i + 1], OBJECT(d), f->ram_memory,
- f->system_memory, f->pci_address_space,
+ init_pam(&f->pam_regions[i + 1], OBJECT(d), s->ram_memory,
+ s->system_memory, s->pci_address_space,
PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
}
+ ram_addr_t ram_size = s->below_4g_mem_size + s->above_4g_mem_size;
ram_size = ram_size / 8 / 1024 / 1024;
if (ram_size > 255) {
ram_size = 255;
d->config[I440FX_COREBOOT_RAM_SIZE] = ram_size;
i440fx_update_memory_mappings(f);
-
- return b;
}
static void i440fx_class_init(ObjectClass *klass, void *data)
DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, I440FXState,
pci_hole64_size, I440FX_PCI_HOST_HOLE64_SIZE_DEFAULT),
DEFINE_PROP_UINT32("short_root_bus", I440FXState, short_root_bus, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, I440FXState,
+ below_4g_mem_size, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, I440FXState,
+ above_4g_mem_size, 0),
DEFINE_PROP_BOOL("x-pci-hole64-fix", I440FXState, pci_hole64_fix, true),
+ DEFINE_PROP_STRING(I440FX_HOST_PROP_PCI_TYPE, I440FXState, pci_type),
DEFINE_PROP_END_OF_LIST(),
};
s->mch.pci_address_space,
s->mch.address_space_io,
0, TYPE_PCIE_BUS);
- PC_MACHINE(qdev_get_machine())->bus = pci->bus;
- pci->bypass_iommu =
- PC_MACHINE(qdev_get_machine())->default_bus_bypass_iommu;
+
qdev_realize(DEVICE(&s->mch), BUS(pci->bus), &error_fatal);
}
object_property_add_uint64_ptr(obj, PCIE_HOST_MCFG_SIZE,
&pehb->size, OBJ_PROP_FLAG_READ);
- object_property_add_link(obj, MCH_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
+ object_property_add_link(obj, PCI_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
(Object **) &s->mch.ram_memory,
qdev_prop_allow_set_link_before_realize, 0);
- object_property_add_link(obj, MCH_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
+ object_property_add_link(obj, PCI_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
(Object **) &s->mch.pci_address_space,
qdev_prop_allow_set_link_before_realize, 0);
- object_property_add_link(obj, MCH_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
+ object_property_add_link(obj, PCI_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
(Object **) &s->mch.system_memory,
qdev_prop_allow_set_link_before_realize, 0);
- object_property_add_link(obj, MCH_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
+ object_property_add_link(obj, PCI_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
(Object **) &s->mch.address_space_io,
qdev_prop_allow_set_link_before_realize, 0);
}
static const MemoryRegionOps blackhole_ops = {
.read = blackhole_read,
.write = blackhole_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
.valid.min_access_size = 1,
.valid.max_access_size = 4,
.impl.min_access_size = 4,
pci_setup_iommu(phb->bus, sabre_pci_dma_iommu, s->iommu);
/* APB secondary busses */
- pci_dev = pci_new_multifunction(PCI_DEVFN(1, 0), true,
- TYPE_SIMBA_PCI_BRIDGE);
+ pci_dev = pci_new_multifunction(PCI_DEVFN(1, 0), TYPE_SIMBA_PCI_BRIDGE);
s->bridgeB = PCI_BRIDGE(pci_dev);
pci_bridge_map_irq(s->bridgeB, "pciB", pci_simbaB_map_irq);
pci_realize_and_unref(pci_dev, phb->bus, &error_fatal);
- pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), true,
- TYPE_SIMBA_PCI_BRIDGE);
+ pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), TYPE_SIMBA_PCI_BRIDGE);
s->bridgeA = PCI_BRIDGE(pci_dev);
pci_bridge_map_irq(s->bridgeA, "pciA", pci_simbaA_map_irq);
pci_realize_and_unref(pci_dev, phb->bus, &error_fatal);
config PCI_DEVICES
bool
+config PCIE_DEVICES
+ bool
+
config MSI_NONBROKEN
# selected by interrupt controllers that do not support MSI,
# or support it and have a good implementation. See commit
static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset(BusState *qbus);
+static bool pcie_has_upstream_port(PCIDevice *dev);
static Property pci_props[] = {
DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
+ DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
+ QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
DEFINE_PROP_END_OF_LIST()
};
}
}
+ /*
+ * A PCIe Downstream Port that does not have ARI Forwarding enabled must
+ * associate only Device 0 with the device attached to the bus
+ * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
+ * sec 7.3.1).
+ * With ARI, PCI_SLOT() can return a non-zero value, because the
+ * traditional 5-bit Device Number and 3-bit Function Number fields in the
+ * associated Routing IDs, Requester IDs and Completer IDs are interpreted
+ * as a single 8-bit Function Number. Hence, ignore ARI-capable devices.
+ */
+ if (pci_is_express(pci_dev) &&
+ !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
+ pcie_has_upstream_port(pci_dev) &&
+ PCI_SLOT(pci_dev->devfn)) {
+ warn_report("PCI: slot %d is not valid for %s,"
+ " parent device only allows plugging into slot 0.",
+ PCI_SLOT(pci_dev->devfn), pci_dev->name);
+ }
+
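A small self-contained sketch of the routing-ID interpretation the comment above describes; the macros mirror the standard PCI definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) /* 5-bit Device Number */
    #define PCI_FUNC(devfn) ((devfn) & 0x07)        /* 3-bit Function Number */

    int main(void)
    {
        uint8_t devfn = 0x28;      /* conventional: device 5, function 0 */

        printf("non-ARI: dev %d fn %d\n", PCI_SLOT(devfn), PCI_FUNC(devfn));
        /* Under ARI the whole byte is one Function Number: function 40. */
        printf("ARI:     fn %d\n", devfn);
        return 0;
    }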
if (pci_dev->failover_pair_id) {
if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
error_setg(errp, "failover primary device must be on "
pci_dev->msi_trigger = pci_msi_trigger;
}
-PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
- const char *name)
+static PCIDevice *pci_new_internal(int devfn, bool multifunction,
+ const char *name)
{
DeviceState *dev;
return PCI_DEVICE(dev);
}
+PCIDevice *pci_new_multifunction(int devfn, const char *name)
+{
+ return pci_new_internal(devfn, true, name);
+}
+
PCIDevice *pci_new(int devfn, const char *name)
{
- return pci_new_multifunction(devfn, false, name);
+ return pci_new_internal(devfn, false, name);
}
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
}
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
- bool multifunction,
const char *name)
{
- PCIDevice *dev = pci_new_multifunction(devfn, multifunction, name);
+ PCIDevice *dev = pci_new_multifunction(devfn, name);
pci_realize_and_unref(dev, bus, &error_fatal);
return dev;
}
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
{
- return pci_create_simple_multifunction(bus, devfn, false, name);
+ PCIDevice *dev = pci_new(devfn, name);
+ pci_realize_and_unref(dev, bus, &error_fatal);
+ return dev;
}
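With the bool parameter dropped, single- versus multifunction creation is now expressed by the function name alone. A brief sketch of the updated call pattern (device type names and the bus variable are illustrative):

    /* Function 0 of a multifunction slot, plus a plain single-function
     * device; both are realized with the usual helper. */
    PCIDevice *f0 = pci_new_multifunction(PCI_DEVFN(3, 0), "some-mf-device");
    PCIDevice *f1 = pci_new(PCI_DEVFN(4, 0), "some-device");

    pci_realize_and_unref(f0, bus, &error_fatal);
    pci_realize_and_unref(f1, bus, &error_fatal);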
static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
static Property pci_host_properties_common[] = {
DEFINE_PROP_BOOL("x-config-reg-migration-enabled", PCIHostState,
mig_enabled, true),
- DEFINE_PROP_BOOL("bypass-iommu", PCIHostState, bypass_iommu, false),
+ DEFINE_PROP_BOOL(PCI_HOST_BYPASS_IOMMU, PCIHostState, bypass_iommu, false),
DEFINE_PROP_END_OF_LIST(),
};
PCI_EXP_FLAGS_TYPE) >> PCI_EXP_FLAGS_TYPE_SHIFT;
}
+uint8_t pcie_cap_get_version(const PCIDevice *dev)
+{
+ uint32_t pos = dev->exp.exp_cap;
+ assert(pos > 0);
+ return pci_get_word(dev->config + pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_VERS;
+}
+
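The new accessor reads the Capability Version field of PCI_EXP_FLAGS; a typical use, matching how vfio_pci_enable_rp_atomics() later in this series consumes it, is guarding registers that only exist from a given capability version onward:

    /* Sketch: DEVCAP2 is only defined for PCIe capability version 2+. */
    if (dev->exp.exp_cap &&
        pcie_cap_get_version(dev) >= PCI_EXP_FLAGS_VER2) {
        uint32_t devcap2 = pci_get_long(dev->config + dev->exp.exp_cap +
                                        PCI_EXP_DEVCAP2);
        /* ... inspect devcap2 ... */
    }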
/* MSI/MSI-X */
/* pci express interrupt message number */
/* 7.8.2 PCI Express Capabilities Register: Interrupt Message Number */
pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
PCI_EXP_HP_EV_SUPPORTED);
+ /* Avoid aborting migration when this device is hot-removed by the guest */
+ pci_word_test_and_clear_mask(dev->cmask + pos + PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDS);
+
dev->exp.hpev_notified = false;
qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
*/
/* ARI */
-void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn)
+void pcie_ari_init(PCIDevice *dev, uint16_t offset)
{
+ uint16_t nextfn = dev->cap_present & QEMU_PCIE_ARI_NEXTFN_1 ? 1 : 0;
+
pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER,
offset, PCI_ARI_SIZEOF);
pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8);
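Since pcie_ari_init() no longer takes a nextfn argument, a device that needs a next-function pointer of 1 (typically function 0 of a chain) sets the new capability bit instead. A minimal sketch, assuming dev is a not-yet-realized PCIDevice:

    /* Request an ARI next-function pointer of 1 before realize. */
    object_property_set_bool(OBJECT(dev), "x-pcie-ari-nextfn-1", true,
                             &error_abort);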
error_free(local_err);
}
object_unparent(OBJECT(vf));
+ object_unref(OBJECT(vf));
}
g_free(dev->exp.sriov_pf.vf);
dev->exp.sriov_pf.vf = NULL;
/* VIA VT8231 South Bridge (multifunction PCI device) */
via = OBJECT(pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0),
- true, TYPE_VT8231_ISA));
+ TYPE_VT8231_ISA));
for (i = 0; i < PCI_NUM_PINS; i++) {
pm->via_pirq[i] = qdev_get_gpio_in_named(DEVICE(via), "pirq", i);
}
cpu_to_be32(lmb_size & 0xffffffff)};
MemoryDeviceInfoList *dimms = NULL;
- /*
- * Don't create the node if there is no device memory
- */
- if (machine->ram_size == machine->maxram_size) {
+ /* Don't create the node if there is no device memory. */
+ if (!machine->device_memory) {
return 0;
}
int rtas;
GString *hypertas = g_string_sized_new(256);
GString *qemu_hypertas = g_string_sized_new(256);
- uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
- memory_region_size(&MACHINE(spapr)->device_memory->mr);
uint32_t lrdr_capacity[] = {
- cpu_to_be32(max_device_addr >> 32),
- cpu_to_be32(max_device_addr & 0xffffffff),
+ 0,
+ 0,
cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
};
+ /* Do we have device memory? */
+ if (MACHINE(spapr)->device_memory) {
+ uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
+ memory_region_size(&MACHINE(spapr)->device_memory->mr);
+
+ lrdr_capacity[0] = cpu_to_be32(max_device_addr >> 32);
+ lrdr_capacity[1] = cpu_to_be32(max_device_addr & 0xffffffff);
+ }
+
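The two lrdr_capacity cells hold the 64-bit maximum address split big-endian high/low; a worked example with device memory ending at the 10 GiB boundary:

    #include <stdint.h>

    /* Worked example: max_device_addr = 0x280000000 (10 GiB). */
    static void split_addr(uint64_t max_device_addr, uint32_t cells[2])
    {
        cells[0] = max_device_addr >> 32;        /* 0x2 */
        cells[1] = max_device_addr & 0xffffffff; /* 0x80000000 */
        /* The FDT stores cpu_to_be32(cells[0]), cpu_to_be32(cells[1]). */
    }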
_FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
/* hypertas */
uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
int i;
+ g_assert(!nr_lmbs || machine->device_memory);
for (i = 0; i < nr_lmbs; i++) {
uint64_t addr;
/* map RAM */
memory_region_add_subregion(sysmem, 0, machine->ram);
- /* always allocate the device memory information */
- machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
-
/* initialize hotplug memory address space */
if (machine->ram_size < machine->maxram_size) {
ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
+ hwaddr device_mem_base;
+
/*
* Limit the number of hotpluggable memory slots to half the number
* slots that KVM supports, leaving the other half for PCI and other
exit(1);
}
- machine->device_memory->base = ROUND_UP(machine->ram_size,
- SPAPR_DEVICE_MEM_ALIGN);
- memory_region_init(&machine->device_memory->mr, OBJECT(spapr),
- "device-memory", device_mem_size);
- memory_region_add_subregion(sysmem, machine->device_memory->base,
- &machine->device_memory->mr);
+ device_mem_base = ROUND_UP(machine->ram_size, SPAPR_DEVICE_MEM_ALIGN);
+ machine_memory_devices_init(machine, device_mem_base, device_mem_size);
}
if (smc->dr_lmb_enabled) {
int i;
/* Do we have device memory? */
- if (MACHINE(spapr)->maxram_size > ram_top) {
+ if (MACHINE(spapr)->device_memory) {
/* Can't just use maxram_size, because there may be an
* alignment gap between normal and device memory regions
*/
if (addr < machine->ram_size) {
return true;
}
- if ((addr >= dms->base)
+ if (dms && (addr >= dms->base)
&& ((addr - dms->base) < memory_region_size(&dms->mr))) {
return true;
}
select SIFIVE_PLIC
select SIFIVE_UART
select SIFIVE_E_PRCI
+ select SIFIVE_E_AON
select UNIMP
config SIFIVE_U
if (ms->numa_state->num_nodes > ms->smp.cpus) {
error_report("Number of NUMA nodes (%d)"
- " cannot exceed the number of available CPUs (%d).",
- ms->numa_state->num_nodes, ms->smp.max_cpus);
+ " cannot exceed the number of available CPUs (%u).",
+ ms->numa_state->num_nodes, ms->smp.cpus);
exit(EXIT_FAILURE);
}
if (ms->numa_state->num_nodes) {
#include "hw/intc/riscv_aclint.h"
#include "hw/intc/sifive_plic.h"
#include "hw/misc/sifive_e_prci.h"
+#include "hw/misc/sifive_e_aon.h"
#include "chardev/char.h"
#include "sysemu/sysemu.h"
object_property_set_int(OBJECT(&s->cpus), "resetvec", 0x1004, &error_abort);
object_initialize_child(obj, "riscv.sifive.e.gpio0", &s->gpio,
TYPE_SIFIVE_GPIO);
+ object_initialize_child(obj, "riscv.sifive.e.aon", &s->aon,
+ TYPE_SIFIVE_E_AON);
}
static void sifive_e_soc_realize(DeviceState *dev, Error **errp)
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false);
- create_unimplemented_device("riscv.sifive.e.aon",
- memmap[SIFIVE_E_DEV_AON].base, memmap[SIFIVE_E_DEV_AON].size);
sifive_e_prci_create(memmap[SIFIVE_E_DEV_PRCI].base);
+ /* AON */
+
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->aon), errp)) {
+ return;
+ }
+
+ /* Map AON registers */
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->aon), 0, memmap[SIFIVE_E_DEV_AON].base);
+
/* GPIO */
if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) {
qdev_get_gpio_in(DEVICE(s->plic),
SIFIVE_E_GPIO0_IRQ0 + i));
}
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->aon), 0,
+ qdev_get_gpio_in(DEVICE(s->plic),
+ SIFIVE_E_AON_WDT_IRQ));
sifive_uart_create(sys_mem, memmap[SIFIVE_E_DEV_UART0].base,
serial_hd(0), qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_E_UART0_IRQ));
#include "chardev/char.h"
#include "sysemu/device_tree.h"
#include "sysemu/sysemu.h"
+#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/tpm.h"
#include "hw/pci/pci.h"
s->soc[socket].hartid_base + cpu);
qemu_fdt_add_subnode(ms->fdt, cpu_name);
- satp_mode_max = satp_mode_max_from_map(
- s->soc[socket].harts[cpu].cfg.satp_mode.map);
- sv_name = g_strdup_printf("riscv,%s",
- satp_mode_str(satp_mode_max, is_32_bit));
- qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name);
- g_free(sv_name);
-
+ if (cpu_ptr->cfg.satp_mode.supported != 0) {
+ satp_mode_max = satp_mode_max_from_map(cpu_ptr->cfg.satp_mode.map);
+ sv_name = g_strdup_printf("riscv,%s",
+ satp_mode_str(satp_mode_max, is_32_bit));
+ qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name);
+ g_free(sv_name);
+ }
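The guard's effect, under assumed hart configurations:

    /* Illustrative outcomes:
     *   RV64, satp map allows up to Sv48  ->  mmu-type = "riscv,sv48"
     *   RV32, satp map allows Sv32        ->  mmu-type = "riscv,sv32"
     *   no satp modes supported           ->  property omitted entirely
     */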
name = riscv_isa_string(cpu_ptr);
qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name);
g_free(clust_name);
- if (!kvm_enabled()) {
+ if (tcg_enabled()) {
if (s->have_aclint) {
create_fdt_socket_aclint(s, memmap, socket,
&intc_phandles[phandle_pos]);
target_ulong start_addr = memmap[VIRT_DRAM].base;
target_ulong firmware_end_addr, kernel_start_addr;
const char *firmware_name = riscv_default_firmware_name(&s->soc[0]);
- uint32_t fdt_load_addr;
+ uint64_t fdt_load_addr;
uint64_t kernel_entry = 0;
BlockBackend *pflash_blk0;
+ /* load/create device tree */
+ if (machine->dtb) {
+ machine->fdt = load_device_tree(machine->dtb, &s->fdt_size);
+ if (!machine->fdt) {
+ error_report("load_device_tree() failed");
+ exit(1);
+ }
+ } else {
+ create_fdt(s, memmap);
+ }
+
/*
* Only direct boot kernel is currently supported for KVM VM,
* so the "-bios" parameter is not supported when KVM is enabled.
hart_count, &error_abort);
sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_fatal);
- if (!kvm_enabled()) {
+ if (tcg_enabled()) {
if (s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
}
virt_flash_map(s, system_memory);
- /* load/create device tree */
- if (machine->dtb) {
- machine->fdt = load_device_tree(machine->dtb, &s->fdt_size);
- if (!machine->fdt) {
- error_report("load_device_tree() failed");
- exit(1);
- }
- } else {
- create_fdt(s, memmap);
- }
-
s->machine_done.notify = virt_machine_done;
qemu_add_machine_init_done_notifier(&s->machine_done);
}
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif
- object_class_property_add_bool(oc, "aclint", virt_get_aclint,
- virt_set_aclint);
- object_class_property_set_description(oc, "aclint",
- "Set on/off to enable/disable "
- "emulating ACLINT devices");
-
+ if (tcg_enabled()) {
+ object_class_property_add_bool(oc, "aclint", virt_get_aclint,
+ virt_set_aclint);
+ object_class_property_set_description(oc, "aclint",
+ "Set on/off to enable/disable "
+ "emulating ACLINT devices");
+ }
object_class_property_add_str(oc, "aia", virt_get_aia,
virt_set_aia);
object_class_property_set_description(oc, "aia",
imply VFIO_AP
imply VFIO_CCW
imply WDT_DIAG288
- select PCI
+ imply PCIE_DEVICES
+ select PCI_EXPRESS
select S390_FLIC
+ select S390_FLIC_KVM if KVM
select SCLPCONSOLE
select VIRTIO_CCW
select MSI_NONBROKEN
#include "hw/s390x/vfio-ccw.h"
#include "hw/s390x/css.h"
#include "hw/s390x/ebcdic.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-net.h"
#include "ipl.h"
'tod-kvm.c',
's390-skeys-kvm.c',
's390-stattrib-kvm.c',
- 'pv.c',
's390-pci-kvm.c',
))
s390x_ss.add(when: 'CONFIG_TCG', if_true: files(
+++ /dev/null
-/*
- * Protected Virtualization functions
- *
- * Copyright IBM Corp. 2020
- * Author(s):
- * Janosch Frank <frankja@linux.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or (at
- * your option) any later version. See the COPYING file in the top-level
- * directory.
- */
-#include "qemu/osdep.h"
-
-#include <linux/kvm.h>
-
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
-#include "sysemu/cpus.h"
-#include "qom/object_interfaces.h"
-#include "exec/confidential-guest-support.h"
-#include "hw/s390x/ipl.h"
-#include "hw/s390x/pv.h"
-#include "hw/s390x/sclp.h"
-#include "target/s390x/kvm/kvm_s390x.h"
-
-static bool info_valid;
-static struct kvm_s390_pv_info_vm info_vm;
-static struct kvm_s390_pv_info_dump info_dump;
-
-static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data)
-{
- struct kvm_pv_cmd pv_cmd = {
- .cmd = cmd,
- .data = (uint64_t)data,
- };
- int rc;
-
- do {
- rc = kvm_vm_ioctl(kvm_state, KVM_S390_PV_COMMAND, &pv_cmd);
- } while (rc == -EINTR);
-
- if (rc) {
- error_report("KVM PV command %d (%s) failed: header rc %x rrc %x "
- "IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc,
- rc);
- }
- return rc;
-}
-
-/*
- * This macro lets us pass the command as a string to the function so
- * we can print it on an error.
- */
-#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data)
-#define s390_pv_cmd_exit(cmd, data) \
-{ \
- int rc; \
- \
- rc = __s390_pv_cmd(cmd, #cmd, data);\
- if (rc) { \
- exit(1); \
- } \
-}
-
-int s390_pv_query_info(void)
-{
- struct kvm_s390_pv_info info = {
- .header.id = KVM_PV_INFO_VM,
- .header.len_max = sizeof(info.header) + sizeof(info.vm),
- };
- int rc;
-
- /* Info API's first user is dump so they are bundled */
- if (!kvm_s390_get_protected_dump()) {
- return 0;
- }
-
- rc = s390_pv_cmd(KVM_PV_INFO, &info);
- if (rc) {
- error_report("KVM PV INFO cmd %x failed: %s",
- info.header.id, strerror(-rc));
- return rc;
- }
- memcpy(&info_vm, &info.vm, sizeof(info.vm));
-
- info.header.id = KVM_PV_INFO_DUMP;
- info.header.len_max = sizeof(info.header) + sizeof(info.dump);
- rc = s390_pv_cmd(KVM_PV_INFO, &info);
- if (rc) {
- error_report("KVM PV INFO cmd %x failed: %s",
- info.header.id, strerror(-rc));
- return rc;
- }
-
- memcpy(&info_dump, &info.dump, sizeof(info.dump));
- info_valid = true;
-
- return rc;
-}
-
-int s390_pv_vm_enable(void)
-{
- return s390_pv_cmd(KVM_PV_ENABLE, NULL);
-}
-
-void s390_pv_vm_disable(void)
-{
- s390_pv_cmd_exit(KVM_PV_DISABLE, NULL);
-}
-
-static void *s390_pv_do_unprot_async_fn(void *p)
-{
- s390_pv_cmd_exit(KVM_PV_ASYNC_CLEANUP_PERFORM, NULL);
- return NULL;
-}
-
-bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms)
-{
- /*
- * t is only needed to create the thread; once qemu_thread_create
- * returns, it can safely be discarded.
- */
- QemuThread t;
-
- /*
- * If the feature is not present or if the VM is not larger than 2 GiB,
- * KVM_PV_ASYNC_CLEANUP_PREPARE fill fail; no point in attempting it.
- */
- if ((MACHINE(ms)->maxram_size <= 2 * GiB) ||
- !kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) {
- return false;
- }
- if (s390_pv_cmd(KVM_PV_ASYNC_CLEANUP_PREPARE, NULL) != 0) {
- return false;
- }
-
- qemu_thread_create(&t, "async_cleanup", s390_pv_do_unprot_async_fn, NULL,
- QEMU_THREAD_DETACHED);
-
- return true;
-}
-
-int s390_pv_set_sec_parms(uint64_t origin, uint64_t length)
-{
- struct kvm_s390_pv_sec_parm args = {
- .origin = origin,
- .length = length,
- };
-
- return s390_pv_cmd(KVM_PV_SET_SEC_PARMS, &args);
-}
-
-/*
- * Called for each component in the SE type IPL parameter block 0.
- */
-int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak)
-{
- struct kvm_s390_pv_unp args = {
- .addr = addr,
- .size = size,
- .tweak = tweak,
- };
-
- return s390_pv_cmd(KVM_PV_UNPACK, &args);
-}
-
-void s390_pv_prep_reset(void)
-{
- s390_pv_cmd_exit(KVM_PV_PREP_RESET, NULL);
-}
-
-int s390_pv_verify(void)
-{
- return s390_pv_cmd(KVM_PV_VERIFY, NULL);
-}
-
-void s390_pv_unshare(void)
-{
- s390_pv_cmd_exit(KVM_PV_UNSHARE_ALL, NULL);
-}
-
-void s390_pv_inject_reset_error(CPUState *cs)
-{
- int r1 = (cs->kvm_run->s390_sieic.ipa & 0x00f0) >> 4;
- CPUS390XState *env = &S390_CPU(cs)->env;
-
- /* Report that we are unable to enter protected mode */
- env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV;
-}
-
-uint64_t kvm_s390_pv_dmp_get_size_cpu(void)
-{
- return info_dump.dump_cpu_buffer_len;
-}
-
-uint64_t kvm_s390_pv_dmp_get_size_completion_data(void)
-{
- return info_dump.dump_config_finalize_len;
-}
-
-uint64_t kvm_s390_pv_dmp_get_size_mem_state(void)
-{
- return info_dump.dump_config_mem_buffer_per_1m;
-}
-
-bool kvm_s390_pv_info_basic_valid(void)
-{
- return info_valid;
-}
-
-static int s390_pv_dump_cmd(uint64_t subcmd, uint64_t uaddr, uint64_t gaddr,
- uint64_t len)
-{
- struct kvm_s390_pv_dmp dmp = {
- .subcmd = subcmd,
- .buff_addr = uaddr,
- .buff_len = len,
- .gaddr = gaddr,
- };
- int ret;
-
- ret = s390_pv_cmd(KVM_PV_DUMP, (void *)&dmp);
- if (ret) {
- error_report("KVM DUMP command %ld failed", subcmd);
- }
- return ret;
-}
-
-int kvm_s390_dump_cpu(S390CPU *cpu, void *buff)
-{
- struct kvm_s390_pv_dmp dmp = {
- .subcmd = KVM_PV_DUMP_CPU,
- .buff_addr = (uint64_t)buff,
- .gaddr = 0,
- .buff_len = info_dump.dump_cpu_buffer_len,
- };
- struct kvm_pv_cmd pv = {
- .cmd = KVM_PV_DUMP,
- .data = (uint64_t)&dmp,
- };
-
- return kvm_vcpu_ioctl(CPU(cpu), KVM_S390_PV_CPU_COMMAND, &pv);
-}
-
-int kvm_s390_dump_init(void)
-{
- return s390_pv_dump_cmd(KVM_PV_DUMP_INIT, 0, 0, 0);
-}
-
-int kvm_s390_dump_mem_state(uint64_t gaddr, size_t len, void *dest)
-{
- return s390_pv_dump_cmd(KVM_PV_DUMP_CONFIG_STOR_STATE, (uint64_t)dest,
- gaddr, len);
-}
-
-int kvm_s390_dump_completion_data(void *buff)
-{
- return s390_pv_dump_cmd(KVM_PV_DUMP_COMPLETE, (uint64_t)buff, 0,
- info_dump.dump_config_finalize_len);
-}
-
-#define TYPE_S390_PV_GUEST "s390-pv-guest"
-OBJECT_DECLARE_SIMPLE_TYPE(S390PVGuest, S390_PV_GUEST)
-
-/**
- * S390PVGuest:
- *
- * The S390PVGuest object is basically a dummy used to tell the
- * confidential guest support system to use s390's PV mechanism.
- *
- * # $QEMU \
- * -object s390-pv-guest,id=pv0 \
- * -machine ...,confidential-guest-support=pv0
- */
-struct S390PVGuest {
- ConfidentialGuestSupport parent_obj;
-};
-
-typedef struct S390PVGuestClass S390PVGuestClass;
-
-struct S390PVGuestClass {
- ConfidentialGuestSupportClass parent_class;
-};
-
-/*
- * If protected virtualization is enabled, the amount of data that the
- * Read SCP Info Service Call can use is limited to one page. The
- * available space also depends on the Extended-Length SCCB (ELS)
- * feature which can take more buffer space to store feature
- * information. This impacts the maximum number of CPUs supported in
- * the machine.
- */
-static uint32_t s390_pv_get_max_cpus(void)
-{
- int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ?
- offsetof(ReadInfo, entries) : SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET;
-
- return (TARGET_PAGE_SIZE - offset_cpu) / sizeof(CPUEntry);
-}
-
-static bool s390_pv_check_cpus(Error **errp)
-{
- MachineState *ms = MACHINE(qdev_get_machine());
- uint32_t pv_max_cpus = s390_pv_get_max_cpus();
-
- if (ms->smp.max_cpus > pv_max_cpus) {
- error_setg(errp, "Protected VMs support a maximum of %d CPUs",
- pv_max_cpus);
- return false;
- }
-
- return true;
-}
-
-static bool s390_pv_guest_check(ConfidentialGuestSupport *cgs, Error **errp)
-{
- return s390_pv_check_cpus(errp);
-}
-
-int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
-{
- if (!object_dynamic_cast(OBJECT(cgs), TYPE_S390_PV_GUEST)) {
- return 0;
- }
-
- if (!s390_has_feat(S390_FEAT_UNPACK)) {
- error_setg(errp,
- "CPU model does not support Protected Virtualization");
- return -1;
- }
-
- if (!s390_pv_guest_check(cgs, errp)) {
- return -1;
- }
-
- cgs->ready = true;
-
- return 0;
-}
-
-OBJECT_DEFINE_TYPE_WITH_INTERFACES(S390PVGuest,
- s390_pv_guest,
- S390_PV_GUEST,
- CONFIDENTIAL_GUEST_SUPPORT,
- { TYPE_USER_CREATABLE },
- { NULL })
-
-static void s390_pv_guest_class_init(ObjectClass *oc, void *data)
-{
-}
-
-static void s390_pv_guest_init(Object *obj)
-{
-}
-
-static void s390_pv_guest_finalize(Object *obj)
-{
-}
#include <linux/kvm.h>
#include "kvm/kvm_s390x.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/tod.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#include "migration/blocker.h"
#include "qapi/visitor.h"
#include "qemu/module.h"
#include "sysemu/runstate.h"
#include "hw/s390x/tod.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#include "kvm/kvm_s390x.h"
static void kvm_s390_get_tod_raw(S390TOD *tod, Error **errp)
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);
+static void scsi_clear_reported_luns_changed(SCSIRequest *req);
static int next_scsi_bus;
/* SCSIReqOps implementation for unit attention conditions. */
-static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
+static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
{
+ SCSISense *ua = NULL;
+
if (req->dev->unit_attention.key == UNIT_ATTENTION) {
- scsi_req_build_sense(req, req->dev->unit_attention);
+ ua = &req->dev->unit_attention;
} else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
- scsi_req_build_sense(req, req->bus->unit_attention);
+ ua = &req->bus->unit_attention;
}
+
+ /*
+ * Fetch the unit attention sense immediately so that another
+ * scsi_req_new does not use reqops_unit_attention.
+ */
+ if (ua) {
+ scsi_req_build_sense(req, *ua);
+ *ua = SENSE_CODE(NO_SENSE);
+ }
+}
+
+static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
+{
scsi_req_complete(req, CHECK_CONDITION);
return 0;
}
static const struct SCSIReqOps reqops_unit_attention = {
.size = sizeof(SCSIRequest),
+ .init_req = scsi_fetch_unit_attention_sense,
.send_command = scsi_unit_attention
};
/* store the LUN list length */
stl_be_p(&r->buf[0], len - 8);
+
+ /*
+ * If a REPORT LUNS command enters the enabled command state, [...]
+ * the device server shall clear any pending unit attention condition
+ * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
+ */
+ scsi_clear_reported_luns_changed(&r->req);
+
return true;
}
object_ref(OBJECT(d));
object_ref(OBJECT(qbus->parent));
notifier_list_init(&req->cancel_notifiers);
+
+ if (reqops->init_req) {
+ reqops->init_req(req);
+ }
+
trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
return req;
}
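Taken together, the hunks above yield the following control flow; this is a sketch of the ordering, not verbatim code:

    /* Flow after this change:
     *
     *   scsi_req_new()
     *     -> sees a pending UA, picks reqops_unit_attention
     *   scsi_req_alloc()
     *     -> reqops->init_req() == scsi_fetch_unit_attention_sense()
     *        builds the sense and clears the pending UA immediately,
     *        so a second scsi_req_new() no longer sees it
     *   scsi_unit_attention()
     *     -> completes the request with CHECK_CONDITION
     */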
return req->ops->get_buf(req);
}
-static void scsi_clear_unit_attention(SCSIRequest *req)
+static void scsi_clear_reported_luns_changed(SCSIRequest *req)
{
SCSISense *ua;
- if (req->dev->unit_attention.key != UNIT_ATTENTION &&
- req->bus->unit_attention.key != UNIT_ATTENTION) {
- return;
- }
-
- /*
- * If an INQUIRY command enters the enabled command state,
- * the device server shall [not] clear any unit attention condition;
- * See also MMC-6, paragraphs 6.5 and 6.6.2.
- */
- if (req->cmd.buf[0] == INQUIRY ||
- req->cmd.buf[0] == GET_CONFIGURATION ||
- req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
- return;
- }
if (req->dev->unit_attention.key == UNIT_ATTENTION) {
ua = &req->dev->unit_attention;
- } else {
+ } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
ua = &req->bus->unit_attention;
- }
-
- /*
- * If a REPORT LUNS command enters the enabled command state, [...]
- * the device server shall clear any pending unit attention condition
- * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
- */
- if (req->cmd.buf[0] == REPORT_LUNS &&
- !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
- ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
+ } else {
return;
}
- *ua = SENSE_CODE(NO_SENSE);
+ if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
+ ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
+ *ua = SENSE_CODE(NO_SENSE);
+ }
}
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
req->dev->sense_is_ua = false;
}
- /*
- * Unit attention state is now stored in the device's sense buffer
- * if the HBA didn't do autosense. Clear the pending unit attention
- * flags.
- */
- scsi_clear_unit_attention(req);
-
scsi_req_ref(req);
scsi_req_dequeue(req);
req->bus->info->complete(req, req->residual);
{
char sock_str[128];
size_t tbl_len = SMBIOS_TYPE_4_LEN_V28;
+ unsigned threads_per_socket;
+ unsigned cores_per_socket;
if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_64) {
tbl_len = SMBIOS_TYPE_4_LEN_V30;
SMBIOS_TABLE_SET_STR(4, asset_tag_number_str, type4.asset);
SMBIOS_TABLE_SET_STR(4, part_number_str, type4.part);
- t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores;
+ threads_per_socket = machine_topo_get_threads_per_socket(ms);
+ cores_per_socket = machine_topo_get_cores_per_socket(ms);
+
+ t->core_count = (cores_per_socket > 255) ? 0xFF : cores_per_socket;
t->core_enabled = t->core_count;
- t->thread_count = (ms->smp.threads > 255) ? 0xFF : ms->smp.threads;
+ t->thread_count = (threads_per_socket > 255) ? 0xFF : threads_per_socket;
t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */
t->processor_family2 = cpu_to_le16(0x01); /* Other */
if (tbl_len == SMBIOS_TYPE_4_LEN_V30) {
- t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores);
- t->thread_count2 = cpu_to_le16(ms->smp.threads);
+ t->core_count2 = t->core_enabled2 = cpu_to_le16(cores_per_socket);
+ t->thread_count2 = cpu_to_le16(threads_per_socket);
}
SMBIOS_BUILD_TABLE_POST;
smbios_build_type_2_table();
smbios_build_type_3_table();
- smbios_smp_sockets = DIV_ROUND_UP(ms->smp.cpus,
- ms->smp.cores * ms->smp.threads);
+ smbios_smp_sockets = ms->smp.sockets;
assert(smbios_smp_sockets >= 1);
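A worked example of the saturation behavior, under an assumed topology of -smp 512,sockets=2,cores=128,threads=2 (128 cores and 256 threads per socket):

    #include <stdint.h>

    static void type4_counts(void)
    {
        unsigned cores_per_socket = 128;    /* fits in 8 bits */
        unsigned threads_per_socket = 256;  /* does not */

        uint8_t core_count = cores_per_socket > 255 ? 0xFF : cores_per_socket;
        uint8_t thread_count =
            threads_per_socket > 255 ? 0xFF : threads_per_socket;
        /* core_count = 128, thread_count = 0xFF; the 16-bit *_count2
         * fields carry the exact 128 and 256 for SMBIOS 3.0 readers. */
        (void)core_count; (void)thread_count;
    }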
for (i = 0; i < smbios_smp_sockets; i++) {
pci_bus_set_slot_reserved_mask(pci_busA, 0xfffffff1);
pci_bus_set_slot_reserved_mask(pci_busB, 0xfffffff0);
- ebus = pci_new_multifunction(PCI_DEVFN(1, 0), true, TYPE_EBUS);
+ ebus = pci_new_multifunction(PCI_DEVFN(1, 0), TYPE_EBUS);
qdev_prop_set_uint64(DEVICE(ebus), "console-serial-base",
hwdef->console_serial_base);
pci_realize_and_unref(ebus, pci_busA, &error_fatal);
if (!nd->model || strcmp(nd->model, mc->default_nic) == 0) {
if (!onboard_nic) {
- pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1),
- true, mc->default_nic);
+ pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), mc->default_nic);
bus = pci_busA;
memcpy(&macaddr, &nd->macaddr.a, sizeof(MACAddr));
onboard_nic = true;
static Property tpm_tis_sysbus_properties[] = {
DEFINE_PROP_UINT32("irq", TPMStateSysBus, state.irq_num, TPM_TIS_IRQ),
DEFINE_PROP_TPMBE("tpmdev", TPMStateSysBus, state.be_driver),
- DEFINE_PROP_BOOL("ppi", TPMStateSysBus, state.ppi_enabled, false),
DEFINE_PROP_END_OF_LIST(),
};
config USB_XHCI_PCI
bool
- default y if PCI_DEVICES
+ default y if PCI_DEVICES || PCIE_DEVICES
depends on PCI
select USB_XHCI
#include "hw/vfio/vfio-common.h"
#include "hw/s390x/ap-device.h"
#include "qemu/error-report.h"
+#include "qemu/event_notifier.h"
+#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
struct VFIOAPDevice {
APDevice apdev;
VFIODevice vdev;
+ EventNotifier req_notifier;
};
OBJECT_DECLARE_SIMPLE_TYPE(VFIOAPDevice, VFIO_AP_DEVICE)
return vfio_get_group(groupid, &address_space_memory, errp);
}
+static void vfio_ap_req_notifier_handler(void *opaque)
+{
+ VFIOAPDevice *vapdev = opaque;
+ Error *err = NULL;
+
+ if (!event_notifier_test_and_clear(&vapdev->req_notifier)) {
+ return;
+ }
+
+ qdev_unplug(DEVICE(vapdev), &err);
+
+ if (err) {
+ warn_reportf_err(err, VFIO_MSG_PREFIX, vapdev->vdev.name);
+ }
+}
+
+static void vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
+ unsigned int irq, Error **errp)
+{
+ int fd;
+ size_t argsz;
+ IOHandler *fd_read;
+ EventNotifier *notifier;
+ struct vfio_irq_info *irq_info;
+ VFIODevice *vdev = &vapdev->vdev;
+
+ switch (irq) {
+ case VFIO_AP_REQ_IRQ_INDEX:
+ notifier = &vapdev->req_notifier;
+ fd_read = vfio_ap_req_notifier_handler;
+ break;
+ default:
+ error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
+ return;
+ }
+
+ if (vdev->num_irqs < irq + 1) {
+ error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
+ irq, vdev->num_irqs);
+ return;
+ }
+
+ argsz = sizeof(*irq_info);
+ irq_info = g_malloc0(argsz);
+ irq_info->index = irq;
+ irq_info->argsz = argsz;
+
+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
+ irq_info) < 0 || irq_info->count < 1) {
+ error_setg_errno(errp, errno, "vfio: Error getting irq info");
+ goto out_free_info;
+ }
+
+ if (event_notifier_init(notifier, 0)) {
+ error_setg_errno(errp, errno,
+ "vfio: Unable to init event notifier for irq (%d)",
+ irq);
+ goto out_free_info;
+ }
+
+ fd = event_notifier_get_fd(notifier);
+ qemu_set_fd_handler(fd, fd_read, NULL, vapdev);
+
+ if (vfio_set_irq_signaling(vdev, irq, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
+ errp)) {
+ qemu_set_fd_handler(fd, NULL, NULL, vapdev);
+ event_notifier_cleanup(notifier);
+ }
+
+out_free_info:
+ g_free(irq_info);
+}
+
+static void vfio_ap_unregister_irq_notifier(VFIOAPDevice *vapdev,
+ unsigned int irq)
+{
+ Error *err = NULL;
+ EventNotifier *notifier;
+
+ switch (irq) {
+ case VFIO_AP_REQ_IRQ_INDEX:
+ notifier = &vapdev->req_notifier;
+ break;
+ default:
+ error_report("vfio: Unsupported device irq(%d)", irq);
+ return;
+ }
+
+ if (vfio_set_irq_signaling(&vapdev->vdev, irq, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ warn_reportf_err(err, VFIO_MSG_PREFIX, vapdev->vdev.name);
+ }
+
+ qemu_set_fd_handler(event_notifier_get_fd(notifier),
+ NULL, NULL, vapdev);
+ event_notifier_cleanup(notifier);
+}
+
static void vfio_ap_realize(DeviceState *dev, Error **errp)
{
int ret;
char *mdevid;
+ Error *err = NULL;
VFIOGroup *vfio_group;
APDevice *apdev = AP_DEVICE(dev);
VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
goto out_get_dev_err;
}
+ vfio_ap_register_irq_notifier(vapdev, VFIO_AP_REQ_IRQ_INDEX, &err);
+ if (err) {
+ /*
+ * Report this error, but do not make it a failing condition.
+ * Lack of this IRQ in the host does not prevent normal operation.
+ */
+ error_report_err(err);
+ }
+
return;
out_get_dev_err:
VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
VFIOGroup *group = vapdev->vdev.group;
+ vfio_ap_unregister_irq_notifier(vapdev, VFIO_AP_REQ_IRQ_INDEX);
vfio_ap_put_device(vapdev);
vfio_put_group(group);
}
}
static Error *multiple_devices_migration_blocker;
-static Error *giommu_migration_blocker;
static unsigned int vfio_migratable_device_num(void)
{
multiple_devices_migration_blocker = NULL;
}
-static bool vfio_viommu_preset(void)
+bool vfio_viommu_preset(VFIODevice *vbasedev)
{
- VFIOAddressSpace *space;
-
- QLIST_FOREACH(space, &vfio_address_spaces, list) {
- if (space->as != &address_space_memory) {
- return true;
- }
- }
-
- return false;
-}
-
-int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp)
-{
- int ret;
-
- if (giommu_migration_blocker ||
- !vfio_viommu_preset()) {
- return 0;
- }
-
- if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
- error_setg(errp,
- "Migration is currently not supported with vIOMMU enabled");
- return -EINVAL;
- }
-
- error_setg(&giommu_migration_blocker,
- "Migration is currently not supported with vIOMMU enabled");
- ret = migrate_add_blocker(giommu_migration_blocker, errp);
- if (ret < 0) {
- error_free(giommu_migration_blocker);
- giommu_migration_blocker = NULL;
- }
-
- return ret;
-}
-
-void vfio_migration_finalize(void)
-{
- if (!giommu_migration_blocker ||
- vfio_viommu_preset()) {
- return;
- }
-
- migrate_del_blocker(giommu_migration_blocker);
- error_free(giommu_migration_blocker);
- giommu_migration_blocker = NULL;
+ return vbasedev->group->container->space->as != &address_space_memory;
}
static void vfio_set_migration_error(int err)
return 0;
}
+static void vfio_migration_deinit(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ remove_migration_state_change_notifier(&migration->migration_state);
+ qemu_del_vm_change_state_handler(migration->vm_state);
+ unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
+ vfio_migration_free(vbasedev);
+ vfio_unblock_multiple_devices_migration();
+}
+
static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
{
int ret;
bytes_transferred = 0;
}
-int vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
+/*
+ * Return true when either migration is initialized or a blocker is
+ * registered. Currently we return false only when adding the blocker
+ * fails, which will de-register the vfio device.
+ */
+bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
{
Error *err = NULL;
int ret;
if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
error_setg(&err, "%s: Migration is disabled for VFIO device",
vbasedev->name);
- return vfio_block_migration(vbasedev, err, errp);
+ return !vfio_block_migration(vbasedev, err, errp);
}
ret = vfio_migration_init(vbasedev);
vbasedev->name, ret, strerror(-ret));
}
- return vfio_block_migration(vbasedev, err, errp);
+ return !vfio_block_migration(vbasedev, err, errp);
}
if (!vbasedev->dirty_pages_supported) {
error_setg(&err,
"%s: VFIO device doesn't support device dirty tracking",
vbasedev->name);
- return vfio_block_migration(vbasedev, err, errp);
+ goto add_blocker;
}
warn_report("%s: VFIO device doesn't support device dirty tracking",
ret = vfio_block_multiple_devices_migration(vbasedev, errp);
if (ret) {
- return ret;
+ goto out_deinit;
}
- ret = vfio_block_giommu_migration(vbasedev, errp);
- if (ret) {
- return ret;
+ if (vfio_viommu_preset(vbasedev)) {
+ error_setg(&err, "%s: Migration is currently not supported "
+ "with vIOMMU enabled", vbasedev->name);
+ goto add_blocker;
}
trace_vfio_migration_realize(vbasedev->name);
- return 0;
+ return true;
+
+add_blocker:
+ ret = vfio_block_migration(vbasedev, err, errp);
+out_deinit:
+ if (ret) {
+ vfio_migration_deinit(vbasedev);
+ }
+ return !ret;
}
void vfio_migration_exit(VFIODevice *vbasedev)
{
if (vbasedev->migration) {
- VFIOMigration *migration = vbasedev->migration;
-
- remove_migration_state_change_notifier(&migration->migration_state);
- qemu_del_vm_change_state_handler(migration->vm_state);
- unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
- vfio_migration_free(vbasedev);
- vfio_unblock_multiple_devices_migration();
+ vfio_migration_deinit(vbasedev);
}
if (vbasedev->migration_blocker) {
.set = set_nv_gpudirect_clique_id,
};
+static bool is_valid_std_cap_offset(uint8_t pos)
+{
+ return (pos >= PCI_STD_HEADER_SIZEOF &&
+ pos <= (PCI_CFG_SPACE_SIZE - PCI_CAP_SIZEOF));
+}
+
static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
{
PCIDevice *pdev = &vdev->pdev;
*/
ret = pread(vdev->vbasedev.fd, &tmp, 1,
vdev->config_offset + PCI_CAPABILITY_LIST);
- if (ret != 1 || !tmp) {
+ if (ret != 1 || !is_valid_std_cap_offset(tmp)) {
error_setg(errp, "NVIDIA GPUDirect Clique ID: error getting cap list");
return -EINVAL;
}
d4_conflict = true;
}
tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT];
- } while (tmp);
+ } while (is_valid_std_cap_offset(tmp));
if (!c8_conflict) {
pos = 0xC8;
vfio_bar_quirk_finalize(vdev, i);
vfio_region_finalize(&bar->region);
- if (bar->size) {
+ if (bar->mr) {
+ assert(bar->size);
object_unparent(OBJECT(bar->mr));
g_free(bar->mr);
+ bar->mr = NULL;
}
}
vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
+static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev)
+{
+ struct vfio_device_info_cap_pci_atomic_comp *cap;
+ g_autofree struct vfio_device_info *info = NULL;
+ PCIBus *bus = pci_get_bus(&vdev->pdev);
+ PCIDevice *parent = bus->parent_dev;
+ struct vfio_info_cap_header *hdr;
+ uint32_t mask = 0;
+ uint8_t *pos;
+
+ /*
+ * PCIe Atomic Ops completer support is only added automatically for single
+ * function devices downstream of a root port supporting DEVCAP2. Support
+ * is added during realize and, if added, removed during device exit. The
+ * single function requirement avoids conflicting requirements should a
+ * slot be composed of multiple devices with differing capabilities.
+ */
+ if (pci_bus_is_root(bus) || !parent || !parent->exp.exp_cap ||
+ pcie_cap_get_type(parent) != PCI_EXP_TYPE_ROOT_PORT ||
+ pcie_cap_get_version(parent) != PCI_EXP_FLAGS_VER2 ||
+ vdev->pdev.devfn ||
+ vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
+ return;
+ }
+
+ pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;
+
+ /* Abort if there's already an Atomic Ops configuration on the root port */
+ if (pci_get_long(pos) & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP128)) {
+ return;
+ }
+
+ info = vfio_get_device_info(vdev->vbasedev.fd);
+ if (!info) {
+ return;
+ }
+
+ hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP);
+ if (!hdr) {
+ return;
+ }
+
+ cap = (void *)hdr;
+ if (cap->flags & VFIO_PCI_ATOMIC_COMP32) {
+ mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP32;
+ }
+ if (cap->flags & VFIO_PCI_ATOMIC_COMP64) {
+ mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP64;
+ }
+ if (cap->flags & VFIO_PCI_ATOMIC_COMP128) {
+ mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP128;
+ }
+
+ if (!mask) {
+ return;
+ }
+
+ pci_long_test_and_set_mask(pos, mask);
+ vdev->clear_parent_atomics_on_exit = true;
+}
+
+static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev)
+{
+ if (vdev->clear_parent_atomics_on_exit) {
+ PCIDevice *parent = pci_get_bus(&vdev->pdev)->parent_dev;
+ uint8_t *pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;
+
+ pci_long_test_and_clear_mask(pos, PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP128);
+ }
+}
+
static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
Error **errp)
{
QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
}
+
+ vfio_pci_enable_rp_atomics(vdev);
}
/*
}
if (!pdev->failover_pair_id) {
- ret = vfio_migration_realize(vbasedev, errp);
- if (ret) {
- error_report("%s: Migration disabled", vbasedev->name);
+ if (!vfio_migration_realize(vbasedev, errp)) {
+ goto out_deregister;
}
}
return;
out_deregister:
+ if (vdev->interrupt == VFIO_INT_INTx) {
+ vfio_intx_disable(vdev);
+ }
pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
if (vdev->irqchip_change_notifier.notify) {
kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
*/
vfio_put_device(vdev);
vfio_put_group(group);
- vfio_migration_finalize();
}
static void vfio_exitfn(PCIDevice *pdev)
timer_free(vdev->intx.mmap_timer);
}
vfio_teardown_msi(vdev);
+ vfio_pci_disable_rp_atomics(vdev);
vfio_bars_exit(vdev);
vfio_migration_exit(&vdev->vbasedev);
}
bool no_vfio_ioeventfd;
bool enable_ramfb;
bool defer_kvm_irq_routing;
+ bool clear_parent_atomics_on_exit;
VFIODisplay *dpy;
Notifier irqchip_change_notifier;
};
default y
depends on VIRTIO
+config VIRTIO_MD
+ bool
+ select MEM_DEVICE
+
config VIRTIO_PMEM_SUPPORTED
bool
default y
depends on VIRTIO
depends on VIRTIO_PMEM_SUPPORTED
- select MEM_DEVICE
+ select VIRTIO_MD
config VIRTIO_MEM_SUPPORTED
bool
depends on VIRTIO
depends on LINUX
depends on VIRTIO_MEM_SUPPORTED
- select MEM_DEVICE
+ select VIRTIO_MD
config VHOST_VSOCK_COMMON
bool
bool
default y
depends on VIRTIO && VHOST_VDPA && LINUX
+
+config VHOST_USER_SCMI
+ bool
+ default y
+ depends on VIRTIO && VHOST_USER
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c'))
+specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c'))
+specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c'))
virtio_pci_ss = ss.source_set()
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev-pci.c'))
+virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MD', if_true: files('virtio-md-pci.c'))
specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss)
vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
+vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
+vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding) "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64" userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32
-vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
+vhost_vdpa_reset_device(void *dev) "dev: %p"
vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d"
vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s"
virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s"
virtio_iommu_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
+virtio_iommu_freeze_granule(uint64_t page_size_mask) "granule set to 0x%"PRIx64
# virtio-mem.c
virtio_mem_send_response(uint16_t type) "type=%" PRIu16
addrs[i] = map->iova + off;
needle_last = int128_add(int128_make64(needle.translated_addr),
- int128_make64(iovec[i].iov_len));
+ int128_makes64(iovec[i].iov_len - 1));
map_last = int128_make64(map->translated_addr + map->size);
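The subtraction makes needle_last the address of the last byte covered by the iovec rather than one past it; a quick check of why the old form could misfire:

    #include <stdint.h>

    /* iovec of 0x1000 bytes translated to address 0x1000. */
    static void inclusive_end_example(void)
    {
        uint64_t translated_addr = 0x1000, iov_len = 0x1000;

        uint64_t old_last = translated_addr + iov_len;       /* 0x2000 */
        uint64_t new_last = translated_addr + (iov_len - 1); /* 0x1fff */
        /* Comparing the one-past value against an inclusive map end
         * could reject a mapping that covers the whole iovec. */
        (void)old_last; (void)new_last;
    }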
if (unlikely(int128_gt(needle_last, map_last))) {
qemu_log_mask(LOG_GUEST_ERROR,
void vhost_user_cleanup(VhostUserState *user)
{
}
+
+void vhost_toggle_device_iotlb(VirtIODevice *vdev)
+{
+}
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of configure interrupt's IDX, If this driver does not
* support, the function will return
*/
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of configure interrupt's IDX, If this driver does not
* support, the function will return
*/
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of configure interrupt's IDX, If this driver does not
* support, the function will return
*/
--- /dev/null
+/*
+ * Vhost-user SCMI virtio device PCI glue
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/vhost-user-scmi.h"
+#include "hw/virtio/virtio-pci.h"
+
+struct VHostUserSCMIPCI {
+ VirtIOPCIProxy parent_obj;
+ VHostUserSCMI vdev;
+};
+
+typedef struct VHostUserSCMIPCI VHostUserSCMIPCI;
+
+#define TYPE_VHOST_USER_SCMI_PCI "vhost-user-scmi-pci-base"
+
+DECLARE_INSTANCE_CHECKER(VHostUserSCMIPCI, VHOST_USER_SCMI_PCI,
+ TYPE_VHOST_USER_SCMI_PCI)
+
+static void vhost_user_scmi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VHostUserSCMIPCI *dev = VHOST_USER_SCMI_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ vpci_dev->nvectors = 1;
+ qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
+}
+
+static void vhost_user_scmi_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = vhost_user_scmi_pci_realize;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
+ pcidev_k->revision = 0x00;
+ pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
+}
+
+static void vhost_user_scmi_pci_instance_init(Object *obj)
+{
+ VHostUserSCMIPCI *dev = VHOST_USER_SCMI_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_SCMI);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_scmi_pci_info = {
+ .base_name = TYPE_VHOST_USER_SCMI_PCI,
+ .non_transitional_name = "vhost-user-scmi-pci",
+ .instance_size = sizeof(VHostUserSCMIPCI),
+ .instance_init = vhost_user_scmi_pci_instance_init,
+ .class_init = vhost_user_scmi_pci_class_init,
+};
+
+static void vhost_user_scmi_pci_register(void)
+{
+ virtio_pci_types_register(&vhost_user_scmi_pci_info);
+}
+
+type_init(vhost_user_scmi_pci_register);
--- /dev/null
+/*
+ * Vhost-user SCMI virtio device
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Implementation based on other vhost-user devices in QEMU.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/vhost-user-scmi.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_scmi.h"
+#include "trace.h"
+
+/*
+ * In this version, we don't support VIRTIO_SCMI_F_SHARED_MEMORY.
+ * Note that VIRTIO_SCMI_F_SHARED_MEMORY is currently not supported by
+ * the Linux VirtIO SCMI guest driver.
+ */
+static const int feature_bits[] = {
+ VIRTIO_F_VERSION_1,
+ VIRTIO_F_NOTIFY_ON_EMPTY,
+ VIRTIO_RING_F_INDIRECT_DESC,
+ VIRTIO_RING_F_EVENT_IDX,
+ VIRTIO_F_RING_RESET,
+ VIRTIO_SCMI_F_P2A_CHANNELS,
+ VHOST_INVALID_FEATURE_BIT
+};
+
+static int vu_scmi_start(VirtIODevice *vdev)
+{
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ struct vhost_dev *vhost_dev = &scmi->vhost_dev;
+ int ret, i;
+
+ if (!k->set_guest_notifiers) {
+ error_report("binding does not support guest notifiers");
+ return -ENOSYS;
+ }
+
+ ret = vhost_dev_enable_notifiers(vhost_dev, vdev);
+ if (ret < 0) {
+ error_report("Error enabling host notifiers: %d", ret);
+ return ret;
+ }
+
+ ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true);
+ if (ret < 0) {
+ error_report("Error binding guest notifier: %d", ret);
+ goto err_host_notifiers;
+ }
+
+ vhost_ack_features(&scmi->vhost_dev, feature_bits, vdev->guest_features);
+
+ ret = vhost_dev_start(&scmi->vhost_dev, vdev, true);
+ if (ret < 0) {
+ error_report("Error starting vhost-user-scmi: %d", ret);
+ goto err_guest_notifiers;
+ }
+
+ /*
+ * guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < scmi->vhost_dev.nvqs; i++) {
+ vhost_virtqueue_mask(&scmi->vhost_dev, vdev, i, false);
+ }
+ return 0;
+
+err_guest_notifiers:
+ k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
+err_host_notifiers:
+ vhost_dev_disable_notifiers(vhost_dev, vdev);
+
+ return ret;
+}
+
+static void vu_scmi_stop(VirtIODevice *vdev)
+{
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ struct vhost_dev *vhost_dev = &scmi->vhost_dev;
+ int ret;
+
+ if (!k->set_guest_notifiers) {
+ return;
+ }
+
+ vhost_dev_stop(vhost_dev, vdev, true);
+
+ ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
+ if (ret < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", ret);
+ return;
+ }
+ vhost_dev_disable_notifiers(vhost_dev, vdev);
+}
+
+static void vu_scmi_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+ bool should_start = virtio_device_should_start(vdev, status);
+
+ if (!scmi->connected) {
+ return;
+ }
+ if (vhost_dev_is_started(&scmi->vhost_dev) == should_start) {
+ return;
+ }
+
+ if (should_start) {
+ vu_scmi_start(vdev);
+ } else {
+ vu_scmi_stop(vdev);
+ }
+}
+
+static uint64_t vu_scmi_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp)
+{
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+
+ return vhost_get_features(&scmi->vhost_dev, feature_bits, features);
+}
+
+static void vu_scmi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /*
+ * Not normally called; it's the daemon that handles the queue;
+ * however virtio's cleanup path can call this.
+ */
+}
+
+static void vu_scmi_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
+{
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+
+ if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+ return;
+ }
+
+ vhost_virtqueue_mask(&scmi->vhost_dev, vdev, idx, mask);
+}
+
+static bool vu_scmi_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+
+ return vhost_virtqueue_pending(&scmi->vhost_dev, idx);
+}
+
+static void vu_scmi_connect(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+
+ if (scmi->connected) {
+ return;
+ }
+ scmi->connected = true;
+
+ /* restore vhost state */
+ if (virtio_device_started(vdev, vdev->status)) {
+ vu_scmi_start(vdev);
+ }
+}
+
+static void vu_scmi_disconnect(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
+
+ if (!scmi->connected) {
+ return;
+ }
+ scmi->connected = false;
+
+ if (vhost_dev_is_started(&scmi->vhost_dev)) {
+ vu_scmi_stop(vdev);
+ }
+}
+
+static void vu_scmi_event(void *opaque, QEMUChrEvent event)
+{
+ DeviceState *dev = opaque;
+
+ switch (event) {
+ case CHR_EVENT_OPENED:
+ vu_scmi_connect(dev);
+ break;
+ case CHR_EVENT_CLOSED:
+ vu_scmi_disconnect(dev);
+ break;
+ case CHR_EVENT_BREAK:
+ case CHR_EVENT_MUX_IN:
+ case CHR_EVENT_MUX_OUT:
+ /* Ignore */
+ break;
+ }
+}
+
+static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserSCMI *scmi)
+{
+ virtio_delete_queue(scmi->cmd_vq);
+ virtio_delete_queue(scmi->event_vq);
+ g_free(scmi->vhost_dev.vqs);
+ virtio_cleanup(vdev);
+ vhost_user_cleanup(&scmi->vhost_user);
+}
+
+static void vu_scmi_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(dev);
+ int ret;
+
+ if (!scmi->chardev.chr) {
+ error_setg(errp, "vhost-user-scmi: chardev is mandatory");
+ return;
+ }
+
+ vdev->host_features |= (1ULL << VIRTIO_SCMI_F_P2A_CHANNELS);
+
+ if (!vhost_user_init(&scmi->vhost_user, &scmi->chardev, errp)) {
+ return;
+ }
+
+ virtio_init(vdev, VIRTIO_ID_SCMI, 0);
+
+ scmi->cmd_vq = virtio_add_queue(vdev, 256, vu_scmi_handle_output);
+ scmi->event_vq = virtio_add_queue(vdev, 256, vu_scmi_handle_output);
+ scmi->vhost_dev.nvqs = 2;
+ scmi->vhost_dev.vqs = g_new0(struct vhost_virtqueue, scmi->vhost_dev.nvqs);
+
+ ret = vhost_dev_init(&scmi->vhost_dev, &scmi->vhost_user,
+ VHOST_BACKEND_TYPE_USER, 0, errp);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "vhost-user-scmi: vhost_dev_init() failed");
+ do_vhost_user_cleanup(vdev, scmi);
+ return;
+ }
+
+ qemu_chr_fe_set_handlers(&scmi->chardev, NULL, NULL, vu_scmi_event, NULL,
+ dev, NULL, true);
+}
+
+static void vu_scmi_device_unrealize(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCMI *scmi = VHOST_USER_SCMI(dev);
+
+ vu_scmi_set_status(vdev, 0);
+ vhost_dev_cleanup(&scmi->vhost_dev);
+ do_vhost_user_cleanup(vdev, scmi);
+}
+
+static const VMStateDescription vu_scmi_vmstate = {
+ .name = "vhost-user-scmi",
+ .unmigratable = 1,
+};
+
+static Property vu_scmi_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserSCMI, chardev),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vu_scmi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, vu_scmi_properties);
+ dc->vmsd = &vu_scmi_vmstate;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+ vdc->realize = vu_scmi_device_realize;
+ vdc->unrealize = vu_scmi_device_unrealize;
+ vdc->get_features = vu_scmi_get_features;
+ vdc->set_status = vu_scmi_set_status;
+ vdc->guest_notifier_mask = vu_scmi_guest_notifier_mask;
+ vdc->guest_notifier_pending = vu_scmi_guest_notifier_pending;
+}
+
+static const TypeInfo vu_scmi_info = {
+ .name = TYPE_VHOST_USER_SCMI,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VHostUserSCMI),
+ .class_init = vu_scmi_class_init,
+};
+
+static void vu_scmi_register_types(void)
+{
+ type_register_static(&vu_scmi_info);
+}
+
+type_init(vu_scmi_register_types)
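For reference, a plausible invocation wiring the new device to a vhost-user backend over a UNIX socket (socket path and ids are illustrative):

    /*
     * # $QEMU ... \
     *     -chardev socket,id=scmi0,path=/tmp/scmi.sock \
     *     -device vhost-user-scmi-pci,chardev=scmi0
     */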
return msg_reply.payload.u64 ? -EIO : 0;
}
-static bool vhost_user_one_time_request(VhostUserRequest request)
+static bool vhost_user_per_device_request(VhostUserRequest request)
{
switch (request) {
case VHOST_USER_SET_OWNER:
case VHOST_USER_SET_MEM_TABLE:
case VHOST_USER_GET_QUEUE_NUM:
case VHOST_USER_NET_SET_MTU:
+ case VHOST_USER_RESET_DEVICE:
case VHOST_USER_ADD_MEM_REG:
case VHOST_USER_REM_MEM_REG:
return true;
int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
/*
- * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
- * we just need send it once in the first time. For later such
- * request, we just ignore it.
+ * Some devices, like virtio-scsi, are implemented as a single vhost_dev,
+ * while others, like virtio-net, contain multiple vhost_devs. For
+ * operations such as configuring device memory mappings or issuing device
+ * resets, which affect the whole device instead of individual VQs,
+ * vhost-user messages should only be sent once.
+ *
+ * Devices with multiple vhost_devs are given an associated dev->vq_index
+ * so per_device requests are only sent if vq_index is 0.
*/
- if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
+ if (vhost_user_per_device_request(msg->hdr.request)
+ && dev->vq_index != 0) {
msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
return 0;
}
.hdr.flags = VHOST_USER_VERSION,
};
- if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
+ if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
return 0;
}
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
- error_report("%s received unaligned region", __func__);
+ trace_vhost_vdpa_listener_region_add_unaligned(v, section->mr->name,
+ section->offset_within_address_space & ~TARGET_PAGE_MASK,
+ section->offset_within_region & ~TARGET_PAGE_MASK);
return;
}
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
- error_report("%s received unaligned region", __func__);
+ trace_vhost_vdpa_listener_region_del_unaligned(v, section->mr->name,
+ section->offset_within_address_space & ~TARGET_PAGE_MASK,
+ section->offset_within_region & ~TARGET_PAGE_MASK);
return;
}
uint8_t status = 0;
ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
- trace_vhost_vdpa_reset_device(dev, status);
+ trace_vhost_vdpa_reset_device(dev);
v->suspended = false;
return ret;
}
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of configure interrupt's IDX. If this driver does not
* support, the function will return
*/
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of configure interrupt's IDX. If this driver does not
* support, the function will return
*/
Int128 end;
int iommu_idx;
IOMMUMemoryRegion *iommu_mr;
- int ret;
if (!memory_region_is_iommu(section->mr)) {
return;
iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
MEMTXATTRS_UNSPECIFIED);
iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
- IOMMU_NOTIFIER_DEVIOTLB_UNMAP,
+ dev->vdev->device_iotlb_enabled ?
+ IOMMU_NOTIFIER_DEVIOTLB_UNMAP :
+ IOMMU_NOTIFIER_UNMAP,
section->offset_within_region,
int128_get64(end),
iommu_idx);
iommu->iommu_offset = section->offset_within_address_space -
section->offset_within_region;
iommu->hdev = dev;
- ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
- if (ret) {
- /*
- * Some vIOMMUs do not support dev-iotlb yet. If so, try to use the
- * UNMAP legacy message
- */
- iommu->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
- memory_region_register_iommu_notifier(section->mr, &iommu->n,
- &error_fatal);
- }
+ memory_region_register_iommu_notifier(section->mr, &iommu->n,
+ &error_fatal);
QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
/* TODO: can replay help performance here? */
}
}
}
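+/*
+ * Re-register the vIOMMU notifiers for all regions tracked by @vdev's
+ * vhost backend, switching between device-IOTLB (ATS) unmap notifications
+ * and plain IOTLB unmap notifications according to
+ * vdev->device_iotlb_enabled.
+ */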
+void vhost_toggle_device_iotlb(VirtIODevice *vdev)
+{
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ struct vhost_dev *dev;
+ struct vhost_iommu *iommu;
+
+    if (!vdev->vhost_started) {
+        return;
+    }
+    dev = vdc->get_vhost(vdev);
+
+ QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
+ memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n);
+ iommu->n.notifier_flags = vdev->device_iotlb_enabled ?
+ IOMMU_NOTIFIER_DEVIOTLB_UNMAP : IOMMU_NOTIFIER_UNMAP;
+ memory_region_register_iommu_notifier(iommu->mr, &iommu->n,
+ &error_fatal);
+ }
+}
+
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
unsigned idx, bool enable_log)
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of configure interrupt's IDX. If this driver does not
* support, the function will return
*/
/*
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
- * as the Marco of configure interrupt's IDX, If this driver does not
+ * as the macro of configure interrupt's IDX. If this driver does not
* support, the function will return
*/
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
+#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"
new_mask);
if ((cur_mask & new_mask) == 0) {
- error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
- " is incompatible with mask 0x%"PRIx64, cur_mask, new_mask);
+ error_setg(errp, "virtio-iommu %s reports a page size mask 0x%"PRIx64
+ " incompatible with currently supported mask 0x%"PRIx64,
+ mr->parent_obj.name, new_mask, cur_mask);
return -1;
}
/*
- * After the machine is finalized, we can't change the mask anymore. If by
+ * Once the granule is frozen we can't change the mask anymore. If by
* chance the hotplugged device supports the same granule, we can still
- * accept it. Having a different masks is possible but the guest will use
- * sub-optimal block sizes, so warn about it.
+ * accept it.
*/
- if (phase_check(PHASE_MACHINE_READY)) {
- int new_granule = ctz64(new_mask);
+ if (s->granule_frozen) {
int cur_granule = ctz64(cur_mask);
- if (new_granule != cur_granule) {
- error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
- " is incompatible with mask 0x%"PRIx64, cur_mask,
- new_mask);
+        if (!(BIT_ULL(cur_granule) & new_mask)) {
+ error_setg(errp, "virtio-iommu %s does not support frozen granule 0x%llx",
+ mr->parent_obj.name, BIT_ULL(cur_granule));
return -1;
- } else if (new_mask != cur_mask) {
- warn_report("virtio-iommu page mask 0x%"PRIx64
- " does not match 0x%"PRIx64, cur_mask, new_mask);
}
return 0;
}
}
+static void virtio_iommu_freeze_granule(Notifier *notifier, void *data)
+{
+ VirtIOIOMMU *s = container_of(notifier, VirtIOIOMMU, machine_done);
+ int granule;
+
+ if (likely(s->config.bypass)) {
+        /*
+         * Transiently enable the IOMMU MR to collect page_size_mask
+         * requirements through memory_region_iommu_set_page_size_mask(),
+         * called by the VFIO region_add() callback.
+         */
+ s->config.bypass = false;
+ virtio_iommu_switch_address_space_all(s);
+ /* restore default */
+ s->config.bypass = true;
+ virtio_iommu_switch_address_space_all(s);
+ }
+ s->granule_frozen = true;
+ granule = ctz64(s->config.page_size_mask);
+    trace_virtio_iommu_freeze_granule(BIT_ULL(granule));
+}
+
static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
}
+ s->machine_done.notify = virtio_iommu_freeze_granule;
+ qemu_add_machine_init_done_notifier(&s->machine_done);
+
qemu_register_reset(virtio_iommu_system_reset, s);
}
VirtIOIOMMU *s = VIRTIO_IOMMU(dev);
qemu_unregister_reset(virtio_iommu_system_reset, s);
+ qemu_remove_machine_init_done_notifier(&s->machine_done);
g_hash_table_destroy(s->as_by_busptr);
if (s->domains) {
--- /dev/null
+/*
+ * Abstract virtio based memory device
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/virtio/virtio-md-pci.h"
+#include "hw/mem/memory-device.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+
+void virtio_md_pci_pre_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp)
+{
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ MemoryDeviceState *md = MEMORY_DEVICE(vmd);
+ Error *local_err = NULL;
+
+ if (!bus_handler && dev->hotplugged) {
+ /*
+ * Without a bus hotplug handler, we cannot control the plug/unplug
+         * order. We should never reach this point when hotplugging on x86;
+         * however, it is better to add a safety net.
+ */
+ error_setg(errp, "hotplug of virtio based memory devices not supported"
+ " on this bus.");
+ return;
+ }
+ /*
+ * First, see if we can plug this memory device at all. If that
+     * succeeds, branch off to the actual hotplug handler.
+ */
+ memory_device_pre_plug(md, ms, NULL, &local_err);
+ if (!local_err && bus_handler) {
+ hotplug_handler_pre_plug(bus_handler, dev, &local_err);
+ }
+ error_propagate(errp, local_err);
+}
+
+void virtio_md_pci_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp)
+{
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ MemoryDeviceState *md = MEMORY_DEVICE(vmd);
+ Error *local_err = NULL;
+
+ /*
+ * Plug the memory device first and then branch off to the actual
+ * hotplug handler. If that one fails, we can easily undo the memory
+ * device bits.
+ */
+ memory_device_plug(md, ms);
+ if (bus_handler) {
+ hotplug_handler_plug(bus_handler, dev, &local_err);
+ if (local_err) {
+ memory_device_unplug(md, ms);
+ }
+ }
+ error_propagate(errp, local_err);
+}
+
+void virtio_md_pci_unplug_request(VirtIOMDPCI *vmd, MachineState *ms,
+ Error **errp)
+{
+ VirtIOMDPCIClass *vmdc = VIRTIO_MD_PCI_GET_CLASS(vmd);
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ HotplugHandlerClass *hdc;
+ Error *local_err = NULL;
+
+ if (!vmdc->unplug_request_check) {
+ error_setg(errp, "this virtio based memory devices cannot be unplugged");
+ return;
+ }
+
+ if (!bus_handler) {
+ error_setg(errp, "hotunplug of virtio based memory devices not"
+ "supported on this bus");
+ return;
+ }
+
+ vmdc->unplug_request_check(vmd, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /*
+ * Forward the async request or turn it into a sync request (handling it
+ * like qdev_unplug()).
+ */
+ hdc = HOTPLUG_HANDLER_GET_CLASS(bus_handler);
+ if (hdc->unplug_request) {
+ hotplug_handler_unplug_request(bus_handler, dev, &local_err);
+ } else {
+ virtio_md_pci_unplug(vmd, ms, &local_err);
+ if (!local_err) {
+ object_unparent(OBJECT(dev));
+ }
+ }
+}
+
+void virtio_md_pci_unplug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp)
+{
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ MemoryDeviceState *md = MEMORY_DEVICE(vmd);
+ Error *local_err = NULL;
+
+ /* Unplug the memory device while it is still realized. */
+ memory_device_unplug(md, ms);
+
+ if (bus_handler) {
+ hotplug_handler_unplug(bus_handler, dev, &local_err);
+ if (local_err) {
+ /* Not expected to fail ... but still try to recover. */
+ memory_device_plug(md, ms);
+ error_propagate(errp, local_err);
+ return;
+ }
+ } else {
+ /* Very unexpected, but let's just try to do the right thing. */
+ warn_report("Unexpected unplug of virtio based memory device");
+ qdev_unrealize(dev);
+ }
+}
+
+static const TypeInfo virtio_md_pci_info = {
+ .name = TYPE_VIRTIO_MD_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOMDPCI),
+ .class_size = sizeof(VirtIOMDPCIClass),
+ .abstract = true,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_MEMORY_DEVICE },
+ { }
+ },
+};
+
+static void virtio_md_pci_register(void)
+{
+ type_register_static(&virtio_md_pci_info);
+}
+type_init(virtio_md_pci_register)
g_free(qom_path);
}
+static void virtio_mem_pci_unplug_request_check(VirtIOMDPCI *vmd, Error **errp)
+{
+ VirtIOMEMPCI *pci_mem = VIRTIO_MEM_PCI(vmd);
+ VirtIOMEM *vmem = &pci_mem->vdev;
+ VirtIOMEMClass *vpc = VIRTIO_MEM_GET_CLASS(vmem);
+
+ vpc->unplug_request_check(vmem, errp);
+}
+
+static void virtio_mem_pci_get_requested_size(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ VirtIOMEMPCI *pci_mem = VIRTIO_MEM_PCI(obj);
+
+ object_property_get(OBJECT(&pci_mem->vdev), name, v, errp);
+}
+
+static void virtio_mem_pci_set_requested_size(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ VirtIOMEMPCI *pci_mem = VIRTIO_MEM_PCI(obj);
+ DeviceState *dev = DEVICE(obj);
+
+ /*
+ * If we passed virtio_mem_pci_unplug_request_check(), making sure that
+ * the requested size is 0, don't allow modifying the requested size
+ * anymore, otherwise the VM might end up hotplugging memory before
+ * handling the unplug request.
+ */
+ if (dev->pending_deleted_event) {
+        error_setg(errp, "'%s' cannot be changed while the device is being"
+                   " unplugged", name);
+ return;
+ }
+
+ object_property_set(OBJECT(&pci_mem->vdev), name, v, errp);
+}
+
static void virtio_mem_pci_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass);
+ VirtIOMDPCIClass *vmdc = VIRTIO_MD_PCI_CLASS(klass);
k->realize = virtio_mem_pci_realize;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
mdc->get_memory_region = virtio_mem_pci_get_memory_region;
mdc->fill_device_info = virtio_mem_pci_fill_device_info;
mdc->get_min_alignment = virtio_mem_pci_get_min_alignment;
+
+ vmdc->unplug_request_check = virtio_mem_pci_unplug_request_check;
}
static void virtio_mem_pci_instance_init(Object *obj)
OBJECT(&dev->vdev), VIRTIO_MEM_BLOCK_SIZE_PROP);
object_property_add_alias(obj, VIRTIO_MEM_SIZE_PROP, OBJECT(&dev->vdev),
VIRTIO_MEM_SIZE_PROP);
- object_property_add_alias(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP,
- OBJECT(&dev->vdev),
- VIRTIO_MEM_REQUESTED_SIZE_PROP);
+ object_property_add(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP, "size",
+ virtio_mem_pci_get_requested_size,
+ virtio_mem_pci_set_requested_size, NULL, NULL);
}
static const VirtioPCIDeviceTypeInfo virtio_mem_pci_info = {
.base_name = TYPE_VIRTIO_MEM_PCI,
+ .parent = TYPE_VIRTIO_MD_PCI,
.generic_name = "virtio-mem-pci",
.instance_size = sizeof(VirtIOMEMPCI),
.instance_init = virtio_mem_pci_instance_init,
.class_init = virtio_mem_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
- { TYPE_MEMORY_DEVICE },
- { }
- },
};
static void virtio_mem_pci_register_types(void)
#ifndef QEMU_VIRTIO_MEM_PCI_H
#define QEMU_VIRTIO_MEM_PCI_H
-#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-md-pci.h"
#include "hw/virtio/virtio-mem.h"
#include "qom/object.h"
typedef struct VirtIOMEMPCI VirtIOMEMPCI;
/*
- * virtio-mem-pci: This extends VirtioPCIProxy.
+ * virtio-mem-pci: This extends VirtIOMDPCI.
*/
#define TYPE_VIRTIO_MEM_PCI "virtio-mem-pci-base"
DECLARE_INSTANCE_CHECKER(VirtIOMEMPCI, VIRTIO_MEM_PCI,
TYPE_VIRTIO_MEM_PCI)
struct VirtIOMEMPCI {
- VirtIOPCIProxy parent_obj;
+ VirtIOMDPCI parent_obj;
VirtIOMEM vdev;
Notifier size_change_notifier;
};
#include "sysemu/numa.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
+#include "sysemu/runstate.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-mem.h"
{
RAMBlock *rb = vmem->memdev->mr.ram_block;
- if (virtio_mem_is_busy()) {
- return -EBUSY;
- }
-
- if (ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb))) {
- return -EBUSY;
- }
- virtio_mem_notify_unplug_all(vmem);
-
- bitmap_clear(vmem->bitmap, 0, vmem->bitmap_size);
if (vmem->size) {
+ if (virtio_mem_is_busy()) {
+ return -EBUSY;
+ }
+ if (ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb))) {
+ return -EBUSY;
+ }
+ virtio_mem_notify_unplug_all(vmem);
+
+ bitmap_clear(vmem->bitmap, 0, vmem->bitmap_size);
vmem->size = 0;
notifier_list_notify(&vmem->size_change_notifiers, &vmem->size);
}
+
trace_virtio_mem_unplugged_all();
virtio_mem_resize_usable_region(vmem, vmem->requested_size, true);
return 0;
return;
}
- ret = ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));
- if (ret) {
- error_setg_errno(errp, -ret, "Unexpected error discarding RAM");
- ram_block_coordinated_discard_require(false);
- return;
+ /*
+ * We don't know at this point whether shared RAM is migrated using
+ * QEMU or migrated using the file content. "x-ignore-shared" will be
+ * configured after realizing the device. So in case we have an
+ * incoming migration, simply always skip the discard step.
+ *
+ * Otherwise, make sure that we start with a clean slate: either the
+ * memory backend might get reused or the shared file might still have
+ * memory allocated.
+ */
+ if (!runstate_check(RUN_STATE_INMIGRATE)) {
+ ret = ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));
+ if (ret) {
+ error_setg_errno(errp, -ret, "Unexpected error discarding RAM");
+ ram_block_coordinated_discard_require(false);
+ return;
+ }
}
virtio_mem_resize_usable_region(vmem, vmem->requested_size, true);
RamDiscardListener *rdl;
int ret;
- if (vmem->prealloc && !vmem->early_migration) {
- warn_report("Proper preallocation with migration requires a newer QEMU machine");
- }
-
/*
* We started out with all memory discarded and our memory region is mapped
* into an address space. Replay, now that we updated the bitmap.
}
}
+ /*
+ * If shared RAM is migrated using the file content and not using QEMU,
+ * don't mess with preallocation and postcopy.
+ */
+ if (migrate_ram_is_ignored(vmem->memdev->mr.ram_block)) {
+ return 0;
+ }
+
+ if (vmem->prealloc && !vmem->early_migration) {
+ warn_report("Proper preallocation with migration requires a newer QEMU machine");
+ }
+
if (migration_in_incoming_postcopy()) {
return 0;
}
return 0;
}
+ /*
+ * If shared RAM is migrated using the file content and not using QEMU,
+ * don't mess with preallocation and postcopy.
+ */
+ if (migrate_ram_is_ignored(rb)) {
+ return 0;
+ }
+
/*
* We restored the bitmap and verified that the basic properties
* match on source and destination, so we can go ahead and preallocate
QLIST_REMOVE(rdl, next);
}
+static void virtio_mem_unplug_request_check(VirtIOMEM *vmem, Error **errp)
+{
+ if (vmem->unplugged_inaccessible == ON_OFF_AUTO_OFF) {
+ /*
+ * We could allow it with a usable region size of 0, but let's just
+ * not care about that legacy setting.
+ */
+ error_setg(errp, "virtio-mem device cannot get unplugged while"
+ " '" VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP "' != 'on'");
+ return;
+ }
+
+ if (vmem->size) {
+ error_setg(errp, "virtio-mem device cannot get unplugged while"
+ " '" VIRTIO_MEM_SIZE_PROP "' != '0'");
+ return;
+ }
+ if (vmem->requested_size) {
+ error_setg(errp, "virtio-mem device cannot get unplugged while"
+ " '" VIRTIO_MEM_REQUESTED_SIZE_PROP "' != '0'");
+ return;
+ }
+}
+
static void virtio_mem_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
vmc->get_memory_region = virtio_mem_get_memory_region;
vmc->add_size_change_notifier = virtio_mem_add_size_change_notifier;
vmc->remove_size_change_notifier = virtio_mem_remove_size_change_notifier;
+ vmc->unplug_request_check = virtio_mem_unplug_request_check;
rdmc->get_min_granularity = virtio_mem_rdm_get_min_granularity;
rdmc->is_populated = virtio_mem_rdm_is_populated;
static const VirtioPCIDeviceTypeInfo virtio_pmem_pci_info = {
.base_name = TYPE_VIRTIO_PMEM_PCI,
.generic_name = "virtio-pmem-pci",
+ .parent = TYPE_VIRTIO_MD_PCI,
.instance_size = sizeof(VirtIOPMEMPCI),
.instance_init = virtio_pmem_pci_instance_init,
.class_init = virtio_pmem_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
- { TYPE_MEMORY_DEVICE },
- { }
- },
};
static void virtio_pmem_pci_register_types(void)
#ifndef QEMU_VIRTIO_PMEM_PCI_H
#define QEMU_VIRTIO_PMEM_PCI_H
-#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-md-pci.h"
#include "hw/virtio/virtio-pmem.h"
#include "qom/object.h"
typedef struct VirtIOPMEMPCI VirtIOPMEMPCI;
/*
- * virtio-pmem-pci: This extends VirtioPCIProxy.
+ * virtio-pmem-pci: This extends VirtIOMDPCI.
*/
#define TYPE_VIRTIO_PMEM_PCI "virtio-pmem-pci-base"
DECLARE_INSTANCE_CHECKER(VirtIOPMEMPCI, VIRTIO_PMEM_PCI,
TYPE_VIRTIO_PMEM_PCI)
struct VirtIOPMEMPCI {
- VirtIOPCIProxy parent_obj;
+ VirtIOMDPCI parent_obj;
VirtIOPMEM vdev;
};
/*
- * Copyright (C) 2016-2022 Red Hat, Inc.
+ * Copyright Red Hat
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
*
* Network Block Device
#include "qapi/error.h"
#include "qemu/bswap.h"
+typedef struct NBDExport NBDExport;
+typedef struct NBDClient NBDClient;
+typedef struct NBDClientConnection NBDClientConnection;
+
extern const BlockExportDriver blk_exp_nbd;
/* Handshake phase structs - this struct is passed on the wire */
-struct NBDOption {
+typedef struct NBDOption {
uint64_t magic; /* NBD_OPTS_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t length;
-} QEMU_PACKED;
-typedef struct NBDOption NBDOption;
+} QEMU_PACKED NBDOption;
-struct NBDOptionReply {
+typedef struct NBDOptionReply {
uint64_t magic; /* NBD_REP_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t type; /* NBD_REP_* */
uint32_t length;
-} QEMU_PACKED;
-typedef struct NBDOptionReply NBDOptionReply;
+} QEMU_PACKED NBDOptionReply;
typedef struct NBDOptionReplyMetaContext {
NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
/* metadata context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
+/* Track results of negotiation */
+typedef enum NBDMode {
+ /* Keep this list in a continuum of increasing features. */
+ NBD_MODE_OLDSTYLE, /* server lacks newstyle negotiation */
+ NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
+ NBD_MODE_SIMPLE, /* newstyle but only simple replies */
+ NBD_MODE_STRUCTURED, /* newstyle, structured replies enabled */
+ /* TODO add NBD_MODE_EXTENDED */
+} NBDMode;
+
/* Transmission phase structs
*
* Note: these are _NOT_ the same as the network representation of an NBD
* request and reply!
*/
-struct NBDRequest {
- uint64_t handle;
+typedef struct NBDRequest {
+ uint64_t cookie;
uint64_t from;
uint32_t len;
uint16_t flags; /* NBD_CMD_FLAG_* */
uint16_t type; /* NBD_CMD_* */
-};
-typedef struct NBDRequest NBDRequest;
+} NBDRequest;
typedef struct NBDSimpleReply {
uint32_t magic; /* NBD_SIMPLE_REPLY_MAGIC */
uint32_t error;
- uint64_t handle;
+ uint64_t cookie;
} QEMU_PACKED NBDSimpleReply;
/* Header of all structured replies */
uint32_t magic; /* NBD_STRUCTURED_REPLY_MAGIC */
uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
uint16_t type; /* NBD_REPLY_TYPE_* */
- uint64_t handle; /* request handle */
+ uint64_t cookie; /* request handle */
uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
NBDSimpleReply simple;
NBDStructuredReplyChunk structured;
struct {
- /* @magic and @handle fields have the same offset and size both in
+ /*
+ * @magic and @cookie fields have the same offset and size both in
* simple reply and structured reply chunk, so let them be accessible
* without ".simple." or ".structured." specification
*/
uint32_t magic;
uint32_t _skip;
- uint64_t handle;
+ uint64_t cookie;
} QEMU_PACKED;
} NBDReply;
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
- NBDStructuredReplyChunk h; /* h.length >= 9 */
+ /* header's .length >= 9 */
uint64_t offset;
-    /* At least one byte of data payload follows, calculated from h.length */
+    /* At least one byte of data payload follows, calculated from the
+     * header's length */
} QEMU_PACKED NBDStructuredReadData;
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
typedef struct NBDStructuredReadHole {
- NBDStructuredReplyChunk h; /* h.length == 12 */
+ /* header's length == 12 */
uint64_t offset;
uint32_t length;
} QEMU_PACKED NBDStructuredReadHole;
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
- NBDStructuredReplyChunk h; /* h.length >= 6 */
+ /* header's length >= 6 */
uint32_t error;
uint16_t message_length;
} QEMU_PACKED NBDStructuredError;
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
- NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
+ /* header's length >= 12 (at least one extent) */
uint32_t context_id;
/* extents follows */
} QEMU_PACKED NBDStructuredMeta;
#define NBD_ESHUTDOWN 108
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
-struct NBDExportInfo {
+typedef struct NBDExportInfo {
/* Set by client before nbd_receive_negotiate() */
bool request_sizes;
char *x_dirty_bitmap;
char *description;
int n_contexts;
char **contexts;
-};
-typedef struct NBDExportInfo NBDExportInfo;
+} NBDExportInfo;
int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
QCryptoTLSCreds *tlscreds,
int nbd_disconnect(int fd);
int nbd_errno_to_system_errno(int err);
-typedef struct NBDExport NBDExport;
-typedef struct NBDClient NBDClient;
-
void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);
AioContext *nbd_export_aio_context(NBDExport *exp);
const char *nbd_info_lookup(uint16_t info);
const char *nbd_cmd_lookup(uint16_t info);
const char *nbd_err_lookup(int err);
+const char *nbd_mode_lookup(NBDMode mode);
/* nbd/client-connection.c */
-typedef struct NBDClientConnection NBDClientConnection;
-
void nbd_client_connection_enable_retry(NBDClientConnection *conn);
NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
/* Bits present in AT_HWCAP for s390. */
-#define HWCAP_S390_ESAN3 1
-#define HWCAP_S390_ZARCH 2
-#define HWCAP_S390_STFLE 4
-#define HWCAP_S390_MSA 8
-#define HWCAP_S390_LDISP 16
-#define HWCAP_S390_EIMM 32
-#define HWCAP_S390_DFP 64
-#define HWCAP_S390_HPAGE 128
-#define HWCAP_S390_ETF3EH 256
-#define HWCAP_S390_HIGH_GPRS 512
-#define HWCAP_S390_TE 1024
-#define HWCAP_S390_VXRS 2048
-#define HWCAP_S390_VXRS_BCD 4096
-#define HWCAP_S390_VXRS_EXT 8192
-#define HWCAP_S390_GS 16384
-#define HWCAP_S390_VXRS_EXT2 32768
-#define HWCAP_S390_VXRS_PDE 65536
-#define HWCAP_S390_SORT 131072
-#define HWCAP_S390_DFLT 262144
+#define HWCAP_S390_NR_ESAN3 0
+#define HWCAP_S390_NR_ZARCH 1
+#define HWCAP_S390_NR_STFLE 2
+#define HWCAP_S390_NR_MSA 3
+#define HWCAP_S390_NR_LDISP 4
+#define HWCAP_S390_NR_EIMM 5
+#define HWCAP_S390_NR_DFP 6
+#define HWCAP_S390_NR_HPAGE 7
+#define HWCAP_S390_NR_ETF3EH 8
+#define HWCAP_S390_NR_HIGH_GPRS 9
+#define HWCAP_S390_NR_TE 10
+#define HWCAP_S390_NR_VXRS 11
+#define HWCAP_S390_NR_VXRS_BCD 12
+#define HWCAP_S390_NR_VXRS_EXT 13
+#define HWCAP_S390_NR_GS 14
+#define HWCAP_S390_NR_VXRS_EXT2 15
+#define HWCAP_S390_NR_VXRS_PDE 16
+#define HWCAP_S390_NR_SORT 17
+#define HWCAP_S390_NR_DFLT 18
+#define HWCAP_S390_NR_VXRS_PDE2 19
+#define HWCAP_S390_NR_NNPA 20
+#define HWCAP_S390_NR_PCI_MIO 21
+#define HWCAP_S390_NR_SIE 22
+
+#define HWCAP_S390_ESAN3 (1 << HWCAP_S390_NR_ESAN3)
+#define HWCAP_S390_ZARCH (1 << HWCAP_S390_NR_ZARCH)
+#define HWCAP_S390_STFLE (1 << HWCAP_S390_NR_STFLE)
+#define HWCAP_S390_MSA (1 << HWCAP_S390_NR_MSA)
+#define HWCAP_S390_LDISP (1 << HWCAP_S390_NR_LDISP)
+#define HWCAP_S390_EIMM (1 << HWCAP_S390_NR_EIMM)
+#define HWCAP_S390_DFP (1 << HWCAP_S390_NR_DFP)
+#define HWCAP_S390_HPAGE (1 << HWCAP_S390_NR_HPAGE)
+#define HWCAP_S390_ETF3EH (1 << HWCAP_S390_NR_ETF3EH)
+#define HWCAP_S390_HIGH_GPRS (1 << HWCAP_S390_NR_HIGH_GPRS)
+#define HWCAP_S390_TE (1 << HWCAP_S390_NR_TE)
+#define HWCAP_S390_VXRS (1 << HWCAP_S390_NR_VXRS)
+#define HWCAP_S390_VXRS_BCD (1 << HWCAP_S390_NR_VXRS_BCD)
+#define HWCAP_S390_VXRS_EXT (1 << HWCAP_S390_NR_VXRS_EXT)
+#define HWCAP_S390_GS (1 << HWCAP_S390_NR_GS)
+#define HWCAP_S390_VXRS_EXT2 (1 << HWCAP_S390_NR_VXRS_EXT2)
+#define HWCAP_S390_VXRS_PDE (1 << HWCAP_S390_NR_VXRS_PDE)
+#define HWCAP_S390_SORT (1 << HWCAP_S390_NR_SORT)
+#define HWCAP_S390_DFLT (1 << HWCAP_S390_NR_DFLT)
+#define HWCAP_S390_VXRS_PDE2 (1 << HWCAP_S390_NR_VXRS_PDE2)
+#define HWCAP_S390_NNPA (1 << HWCAP_S390_NR_NNPA)
+#define HWCAP_S390_PCI_MIO (1 << HWCAP_S390_NR_PCI_MIO)
+#define HWCAP_S390_SIE (1 << HWCAP_S390_NR_SIE)
/* M68K specific definitions. */
/* We use the top 24 bits to encode information about the
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);
-int page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
+ *
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped. Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ * Context: holding mmap lock
+ *
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held so that the caller can ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(target_ulong start, target_ulong last);
+
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x. Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
+ */
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+ target_ulong len, target_ulong align);
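+
+/*
+ * A typical pairing, sketched with illustrative names:
+ *
+ *     target_ulong addr = page_find_range_empty(mmap_min, mmap_max,
+ *                                               len, align);
+ *     if (addr != (target_ulong)-1) {
+ *         ... still holding the mmap lock, the hole cannot be taken,
+ *         so the new mapping can be installed at [addr, addr + len) ...
+ *     }
+ */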
/**
* page_get_target_data(address)
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);
+static inline void mmap_unlock_guard(void *unused)
+{
+ mmap_unlock();
+}
+
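+/*
+ * WITH_MMAP_LOCK_GUARD() holds the mmap lock around the statement or
+ * block that follows, releasing it on every exit path (including an
+ * early return) via the cleanup attribute on the loop variable.
+ * A minimal usage sketch, with page_ok() standing in for any
+ * caller-provided predicate:
+ *
+ *     WITH_MMAP_LOCK_GUARD() {
+ *         if (!page_ok(addr)) {
+ *             return false;    <- mmap_unlock() still runs here
+ *         }
+ *     }
+ */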
+#define WITH_MMAP_LOCK_GUARD() \
+ for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
+ = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
+
/**
* adjust_signal_pc:
* @pc: raw pc from the host signal ucontext_t.
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
+#define WITH_MMAP_LOCK_GUARD()
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, vaddr addr);
#ifndef HELPER_PROTO_COMMON_H
#define HELPER_PROTO_COMMON_H
+#include "qemu/atomic128.h" /* for HAVE_CMPXCHG128 */
+
#define HELPER_H "accel/tcg/tcg-runtime.h"
#include "exec/helper-proto.h.inc"
#undef HELPER_H
#define ABI_LLONG_ALIGNMENT 2
#endif
-#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) || defined(TARGET_SH4)
+#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) \
+ || defined(TARGET_SH4) \
+ || defined(TARGET_MICROBLAZE) \
+ || defined(TARGET_NIOS2)
#define ABI_LLONG_ALIGNMENT 4
#endif
Error **errp);
void machine_parse_smp_config(MachineState *ms,
const SMPConfiguration *config, Error **errp);
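+
+/*
+ * Helpers deriving per-socket counts from the parsed -smp configuration,
+ * accounting for the dies/clusters/cores/threads levels below the socket.
+ */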
+unsigned int machine_topo_get_cores_per_socket(const MachineState *ms);
+unsigned int machine_topo_get_threads_per_socket(const MachineState *ms);
+void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size);
/**
* machine_class_allow_dynamic_sysbus_dev: Add type to list of valid devices
* address space for memory devices starts
* @mr: address space container for memory devices
* @dimm_size: the sum of plugged DIMMs' sizes
+ * @used_region_size: the part of @mr already used by memory devices
*/
typedef struct DeviceMemoryState {
hwaddr base;
MemoryRegion mr;
uint64_t dimm_size;
+ uint64_t used_region_size;
} DeviceMemoryState;
/**
#define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device"
#define PC_MACHINE_MAX_RAM_BELOW_4G "max-ram-below-4g"
-#define PC_MACHINE_DEVMEM_REGION_SIZE "device-memory-region-size"
#define PC_MACHINE_VMPORT "vmport"
#define PC_MACHINE_SMBUS "smbus"
#define PC_MACHINE_SATA "sata"
void pc_guest_info_init(PCMachineState *pcms);
+#define PCI_HOST_PROP_RAM_MEM "ram-mem"
+#define PCI_HOST_PROP_PCI_MEM "pci-mem"
+#define PCI_HOST_PROP_SYSTEM_MEM "system-mem"
+#define PCI_HOST_PROP_IO_MEM "io-mem"
#define PCI_HOST_PROP_PCI_HOLE_START "pci-hole-start"
#define PCI_HOST_PROP_PCI_HOLE_END "pci-hole-end"
#define PCI_HOST_PROP_PCI_HOLE64_START "pci-hole64-start"
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d);
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val);
+void bmdma_status_writeb(BMDMAState *bm, uint32_t val);
extern MemoryRegionOps bmdma_addr_ioport_ops;
void pci_ide_create_devs(PCIDevice *dev);
uint32_t ac97_cmd;
};
-void via_isa_set_irq(PCIDevice *d, int n, int level);
-
#endif
--- /dev/null
+/*
+ * SiFive HiFive1 AON (Always On Domain) interface.
+ *
+ * Copyright (c) 2022 SiFive, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_AON_H
+#define HW_SIFIVE_AON_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_SIFIVE_E_AON "riscv.sifive.e.aon"
+OBJECT_DECLARE_SIMPLE_TYPE(SiFiveEAONState, SIFIVE_E_AON)
+
+#define SIFIVE_E_AON_WDOGKEY (0x51F15E)
+#define SIFIVE_E_AON_WDOGFEED (0xD09F00D)
+#define SIFIVE_E_LFCLK_DEFAULT_FREQ (32768)
+
+enum {
+ SIFIVE_E_AON_WDT = 0x0,
+ SIFIVE_E_AON_RTC = 0x40,
+ SIFIVE_E_AON_LFROSC = 0x70,
+ SIFIVE_E_AON_BACKUP = 0x80,
+ SIFIVE_E_AON_PMU = 0x100,
+ SIFIVE_E_AON_MAX = 0x150
+};
+
+struct SiFiveEAONState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+
+ /*< watchdog timer >*/
+ QEMUTimer *wdog_timer;
+ qemu_irq wdog_irq;
+ uint64_t wdog_restart_time;
+ uint64_t wdogclk_freq;
+
+ uint32_t wdogcfg;
+ uint16_t wdogcmp0;
+ uint32_t wdogcount;
+ uint8_t wdogunlock;
+};
+
+#endif
#include "hw/pci-host/pam.h"
#include "qom/object.h"
+#define I440FX_HOST_PROP_PCI_TYPE "pci-type"
+
#define TYPE_I440FX_PCI_HOST_BRIDGE "i440FX-pcihost"
#define TYPE_I440FX_PCI_DEVICE "i440FX"
PCIDevice parent_obj;
/*< public >*/
- MemoryRegion *system_memory;
- MemoryRegion *pci_address_space;
- MemoryRegion *ram_memory;
PAMMemoryRegion pam_regions[PAM_REGIONS_COUNT];
MemoryRegion smram_region;
MemoryRegion smram, low_smram;
#define TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE "igd-passthrough-i440FX"
-PCIBus *i440fx_init(const char *pci_type,
- DeviceState *dev,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
- ram_addr_t ram_size,
- ram_addr_t below_4g_mem_size,
- ram_addr_t above_4g_mem_size,
- MemoryRegion *pci_memory,
- MemoryRegion *ram_memory);
-
-
#endif
* gmch part
*/
-#define MCH_HOST_PROP_RAM_MEM "ram-mem"
-#define MCH_HOST_PROP_PCI_MEM "pci-mem"
-#define MCH_HOST_PROP_SYSTEM_MEM "system-mem"
-#define MCH_HOST_PROP_IO_MEM "io-mem"
-
/* PCI configuration */
#define MCH_HOST_BRIDGE "MCH"
QEMU_PCIE_CAP_CXL = (1 << QEMU_PCIE_CXL_BITNR),
#define QEMU_PCIE_ERR_UNC_MASK_BITNR 11
QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR),
+#define QEMU_PCIE_ARI_NEXTFN_1_BITNR 12
+ QEMU_PCIE_ARI_NEXTFN_1 = (1 << QEMU_PCIE_ARI_NEXTFN_1_BITNR),
};
typedef struct PCIINTxRoute {
pci_set_quad(config, (~mask & val) | (mask & rval));
}
-PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
- const char *name);
+PCIDevice *pci_new_multifunction(int devfn, const char *name);
PCIDevice *pci_new(int devfn, const char *name);
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp);
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
- bool multifunction,
const char *name);
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name);
#include "hw/sysbus.h"
#include "qom/object.h"
+#define PCI_HOST_BYPASS_IOMMU "bypass-iommu"
+
#define TYPE_PCI_HOST_BRIDGE "pci-host-bridge"
OBJECT_DECLARE_TYPE(PCIHostState, PCIHostBridgeClass, PCI_HOST_BRIDGE)
int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset);
void pcie_cap_v1_exit(PCIDevice *dev);
uint8_t pcie_cap_get_type(const PCIDevice *dev);
+uint8_t pcie_cap_get_version(const PCIDevice *dev);
void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector);
uint8_t pcie_cap_flags_get_vector(PCIDevice *dev);
void pcie_acs_init(PCIDevice *dev, uint16_t offset);
void pcie_acs_reset(PCIDevice *dev);
-void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn);
+void pcie_ari_init(PCIDevice *dev, uint16_t offset);
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num);
void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned);
char *qdev_get_fw_dev_path(DeviceState *dev);
char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev);
+/**
+ * device_class_set_props(): add a set of properties to a device class
+ * @dc: the parent DeviceClass all devices inherit
+ * @props: an array of properties, terminated by DEFINE_PROP_END_OF_LIST()
+ *
+ * This will add a set of properties to the object. It will fault if
+ * you attempt to add an existing property defined by a parent class.
+ */
void device_class_set_props(DeviceClass *dc, Property *props);
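+
+/*
+ * Typical usage, sketched with hypothetical FooState/foo_properties names:
+ *
+ *     static Property foo_properties[] = {
+ *         DEFINE_PROP_BOOL("enabled", FooState, enabled, true),
+ *         DEFINE_PROP_END_OF_LIST(),
+ *     };
+ *
+ *     device_class_set_props(dc, foo_properties);
+ */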
/**
void device_class_set_parent_reset(DeviceClass *dc,
DeviceReset dev_reset,
DeviceReset *parent_reset);
+
+/**
+ * device_class_set_parent_realize() - set up for chaining realize fns
+ * @dc: The device class
+ * @dev_realize: the device realize function
+ * @parent_realize: somewhere to save the parent's realize function
+ *
+ * This is intended to be used when the new realize function will
+ * eventually call its parent realization function during creation.
+ * This requires storing the function pointer somewhere (usually in the
+ * class structure) so you can eventually call
+ * dc->parent_realize(dev, errp).
+ */
void device_class_set_parent_realize(DeviceClass *dc,
DeviceRealize dev_realize,
DeviceRealize *parent_realize);
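+
+/*
+ * A chaining sketch, with a hypothetical FooClass storing the parent's
+ * realize pointer:
+ *
+ *     static void foo_realize(DeviceState *dev, Error **errp)
+ *     {
+ *         FooClass *fc = FOO_GET_CLASS(dev);
+ *
+ *         ... device-specific setup, then chain to the parent ...
+ *         fc->parent_realize(dev, errp);
+ *     }
+ *
+ *     static void foo_class_init(ObjectClass *oc, void *data)
+ *     {
+ *         FooClass *fc = FOO_CLASS(oc);
+ *
+ *         device_class_set_parent_realize(DEVICE_CLASS(oc), foo_realize,
+ *                                         &fc->parent_realize);
+ *     }
+ */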
+
+/**
+ * device_class_set_parent_unrealize() - set up for chaining unrealize fns
+ * @dc: The device class
+ * @dev_unrealize: the device unrealize function
+ * @parent_unrealize: somewhere to save the parent's unrealize function
+ *
+ * This is intended to be used when the new unrealize function will
+ * eventually call its parent unrealization function during the
+ * unrealize phase. This requires storing the function pointer somewhere
+ * (usually in the class structure) so you can eventually call
+ * dc->parent_unrealize(dev);
+ */
void device_class_set_parent_unrealize(DeviceClass *dc,
DeviceUnrealize dev_unrealize,
DeviceUnrealize *parent_unrealize);
#include "hw/riscv/riscv_hart.h"
#include "hw/riscv/sifive_cpu.h"
#include "hw/gpio/sifive_gpio.h"
+#include "hw/misc/sifive_e_aon.h"
#include "hw/boards.h"
#define TYPE_RISCV_E_SOC "riscv.sifive.e.soc"
/*< public >*/
RISCVHartArrayState cpus;
DeviceState *plic;
+ SiFiveEAONState aon;
SIFIVEGPIOState gpio;
MemoryRegion xip_mem;
MemoryRegion mask_rom;
};
enum {
- SIFIVE_E_UART0_IRQ = 3,
- SIFIVE_E_UART1_IRQ = 4,
- SIFIVE_E_GPIO0_IRQ0 = 8
+ SIFIVE_E_AON_WDT_IRQ = 1,
+ SIFIVE_E_UART0_IRQ = 3,
+ SIFIVE_E_UART1_IRQ = 4,
+ SIFIVE_E_GPIO0_IRQ0 = 8
};
#define SIFIVE_E_PLIC_HART_CONFIG "M"
+++ /dev/null
-/*
- * Protected Virtualization header
- *
- * Copyright IBM Corp. 2020
- * Author(s):
- * Janosch Frank <frankja@linux.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or (at
- * your option) any later version. See the COPYING file in the top-level
- * directory.
- */
-#ifndef HW_S390_PV_H
-#define HW_S390_PV_H
-
-#include "qapi/error.h"
-#include "sysemu/kvm.h"
-#include "hw/s390x/s390-virtio-ccw.h"
-
-#ifdef CONFIG_KVM
-#include "cpu.h"
-
-static inline bool s390_is_pv(void)
-{
- static S390CcwMachineState *ccw;
- Object *obj;
-
- if (ccw) {
- return ccw->pv;
- }
-
- /* we have to bail out for the "none" machine */
- obj = object_dynamic_cast(qdev_get_machine(),
- TYPE_S390_CCW_MACHINE);
- if (!obj) {
- return false;
- }
- ccw = S390_CCW_MACHINE(obj);
- return ccw->pv;
-}
-
-int s390_pv_query_info(void);
-int s390_pv_vm_enable(void);
-void s390_pv_vm_disable(void);
-bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms);
-int s390_pv_set_sec_parms(uint64_t origin, uint64_t length);
-int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak);
-void s390_pv_prep_reset(void);
-int s390_pv_verify(void);
-void s390_pv_unshare(void);
-void s390_pv_inject_reset_error(CPUState *cs);
-uint64_t kvm_s390_pv_dmp_get_size_cpu(void);
-uint64_t kvm_s390_pv_dmp_get_size_mem_state(void);
-uint64_t kvm_s390_pv_dmp_get_size_completion_data(void);
-bool kvm_s390_pv_info_basic_valid(void);
-int kvm_s390_dump_init(void);
-int kvm_s390_dump_cpu(S390CPU *cpu, void *buff);
-int kvm_s390_dump_mem_state(uint64_t addr, size_t len, void *dest);
-int kvm_s390_dump_completion_data(void *buff);
-#else /* CONFIG_KVM */
-static inline bool s390_is_pv(void) { return false; }
-static inline int s390_pv_query_info(void) { return 0; }
-static inline int s390_pv_vm_enable(void) { return 0; }
-static inline void s390_pv_vm_disable(void) {}
-static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; }
-static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; }
-static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; }
-static inline void s390_pv_prep_reset(void) {}
-static inline int s390_pv_verify(void) { return 0; }
-static inline void s390_pv_unshare(void) {}
-static inline void s390_pv_inject_reset_error(CPUState *cs) {};
-static inline uint64_t kvm_s390_pv_dmp_get_size_cpu(void) { return 0; }
-static inline uint64_t kvm_s390_pv_dmp_get_size_mem_state(void) { return 0; }
-static inline uint64_t kvm_s390_pv_dmp_get_size_completion_data(void) { return 0; }
-static inline bool kvm_s390_pv_info_basic_valid(void) { return false; }
-static inline int kvm_s390_dump_init(void) { return 0; }
-static inline int kvm_s390_dump_cpu(S390CPU *cpu, void *buff) { return 0; }
-static inline int kvm_s390_dump_mem_state(uint64_t addr, size_t len,
- void *dest) { return 0; }
-static inline int kvm_s390_dump_completion_data(void *buff) { return 0; }
-#endif /* CONFIG_KVM */
-
-int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp);
-static inline int s390_pv_init(ConfidentialGuestSupport *cgs, Error **errp)
-{
- if (!cgs) {
- return 0;
- }
- if (kvm_enabled()) {
- return s390_pv_kvm_init(cgs, errp);
- }
-
- error_setg(errp, "Protected Virtualization requires KVM");
- return -1;
-}
-
-#endif /* HW_S390_PV_H */
/* scsi-bus.c */
struct SCSIReqOps {
size_t size;
+ void (*init_req)(SCSIRequest *req);
void (*free_req)(SCSIRequest *req);
int32_t (*send_command)(SCSIRequest *req, uint8_t *buf);
void (*read_data)(SCSIRequest *req);
bool vfio_mig_active(void);
int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp);
void vfio_unblock_multiple_devices_migration(void);
-int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp);
+bool vfio_viommu_preset(VFIODevice *vbasedev);
int64_t vfio_mig_bytes_transferred(void);
void vfio_reset_bytes_transferred(void);
int vfio_spapr_remove_window(VFIOContainer *container,
hwaddr offset_within_address_space);
-int vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
+bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
void vfio_migration_exit(VFIODevice *vbasedev);
-void vfio_migration_finalize(void);
#endif /* HW_VFIO_VFIO_COMMON_H */
--- /dev/null
+/*
+ * Vhost-user SCMI virtio device
+ *
+ * Copyright (c) 2023 Red Hat, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _QEMU_VHOST_USER_SCMI_H
+#define _QEMU_VHOST_USER_SCMI_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+
+#define TYPE_VHOST_USER_SCMI "vhost-user-scmi"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCMI, VHOST_USER_SCMI);
+
+struct VHostUserSCMI {
+ VirtIODevice parent;
+ CharBackend chardev;
+ struct vhost_virtqueue *vhost_vqs;
+ struct vhost_dev vhost_dev;
+ VhostUserState vhost_user;
+ VirtQueue *cmd_vq;
+ VirtQueue *event_vq;
+ bool connected;
+};
+
+#endif /* _QEMU_VHOST_USER_SCMI_H */
int vhost_net_set_backend(struct vhost_dev *hdev,
struct vhost_vring_file *file);
+void vhost_toggle_device_iotlb(VirtIODevice *vdev);
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
struct virtio_gpu_resp_display_info *dpy_info);
+void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
+ struct virtio_gpu_resp_edid *edid);
/* virtio-gpu.c */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
QemuRecMutex mutex;
GTree *endpoints;
bool boot_bypass;
+ Notifier machine_done;
+ bool granule_frozen;
};
#endif
--- /dev/null
+/*
+ * Abstract virtio based memory device
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_MD_PCI_H
+#define HW_VIRTIO_MD_PCI_H
+
+#include "hw/virtio/virtio-pci.h"
+#include "qom/object.h"
+
+/*
+ * virtio-md-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_MD_PCI "virtio-md-pci"
+
+OBJECT_DECLARE_TYPE(VirtIOMDPCI, VirtIOMDPCIClass, VIRTIO_MD_PCI)
+
+struct VirtIOMDPCIClass {
+ /* private */
+ VirtioPCIClass parent;
+
+ /* public */
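+    /*
+     * Check whether an unplug request can currently be handled; set @errp
+     * to reject the request (e.g., while the device still has memory
+     * plugged).
+     */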
+ void (*unplug_request_check)(VirtIOMDPCI *vmd, Error **errp);
+};
+
+struct VirtIOMDPCI {
+ VirtIOPCIProxy parent_obj;
+};
+
+void virtio_md_pci_pre_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+void virtio_md_pci_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+void virtio_md_pci_unplug_request(VirtIOMDPCI *vmd, MachineState *ms,
+ Error **errp);
+void virtio_md_pci_unplug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+
+#endif
MemoryRegion *(*get_memory_region)(VirtIOMEM *vmem, Error **errp);
void (*add_size_change_notifier)(VirtIOMEM *vmem, Notifier *notifier);
void (*remove_size_change_notifier)(VirtIOMEM *vmem, Notifier *notifier);
+ void (*unplug_request_check)(VirtIOMEM *vmem, Error **errp);
};
#endif
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
+ /**
+     * @use_guest_notifier_mask: gate usage of ->guest_notifier_mask() callback.
+ * This is used to suppress the masking of guest updates for
+ * vhost-user devices which are asynchronous by design.
+ */
bool use_guest_notifier_mask;
AddressSpace *dma_as;
QLIST_HEAD(, VirtQueue) *vector_queues;
QTAILQ_ENTRY(VirtIODevice) next;
+ /**
+ * @config_notifier: the event notifier that handles config events
+ */
EventNotifier config_notifier;
bool device_iotlb_enabled;
};
void virtio_instance_init_common(Object *proxy_obj, void *data,
size_t vdev_size, const char *vdev_name);
+/**
+ * virtio_init() - initialise the common VirtIODevice structure
+ * @vdev: pointer to VirtIODevice
+ * @device_id: the VirtIO device ID (see virtio_ids.h)
+ * @config_size: size of the config space
+ */
void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size);
void virtio_cleanup(VirtIODevice *vdev);
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id);
+/**
+ * virtio_notify_config() - signal a change to device config
+ * @vdev: the virtio device
+ *
+ * Assuming the virtio device is up (VIRTIO_CONFIG_S_DRIVER_OK) this
+ * will trigger a guest interrupt and update the config version.
+ */
void virtio_notify_config(VirtIODevice *vdev);
bool virtio_queue_get_notification(VirtQueue *vq);
void ram_mig_init(void);
void qemu_guest_free_page_hint(void *addr, size_t len);
+bool migrate_ram_is_ignored(RAMBlock *block);
/* migration/block.c */
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 media compression
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
+ * compression.
+ *
+ * The main surface is tile4 and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
+
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
uint64_t log_guest_addr;
};
+struct vhost_worker_state {
+ /*
+ * For VHOST_NEW_WORKER the kernel will return the new vhost_worker id.
+ * For VHOST_FREE_WORKER this must be set to the id of the vhost_worker
+ * to free.
+ */
+ unsigned int worker_id;
+};
+
+struct vhost_vring_worker {
+ /* vring index */
+ unsigned int index;
+ /* The id of the vhost_worker returned from VHOST_NEW_WORKER */
+ unsigned int worker_id;
+};
+
/* no alignment requirement */
struct vhost_iotlb_msg {
uint64_t iova;
/* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
struct virtio_blk_zoned_characteristics {
- uint32_t zone_sectors;
- uint32_t max_open_zones;
- uint32_t max_active_zones;
- uint32_t max_append_sectors;
- uint32_t write_granularity;
+ __virtio32 zone_sectors;
+ __virtio32 max_open_zones;
+ __virtio32 max_active_zones;
+ __virtio32 max_append_sectors;
+ __virtio32 write_granularity;
uint8_t model;
uint8_t unused2[3];
} zoned;
*/
struct virtio_blk_zone_descriptor {
/* Zone capacity */
- uint64_t z_cap;
+ __virtio64 z_cap;
/* The starting sector of the zone */
- uint64_t z_start;
+ __virtio64 z_start;
/* Zone write pointer position in sectors */
- uint64_t z_wp;
+ __virtio64 z_wp;
/* Zone type */
uint8_t z_type;
/* Zone state */
};
struct virtio_blk_zone_report {
- uint64_t nr_zones;
+ __virtio64 nr_zones;
uint8_t reserved[56];
struct virtio_blk_zone_descriptor zones[];
};
*/
#define VIRTIO_F_SR_IOV 37
+/*
+ * This feature indicates that the driver passes extra data (besides
+ * identifying the virtqueue) in its device notifications.
+ */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+
/*
* This feature indicates that the driver can reset a queue individually.
*/
#define VIRTIO_NET_F_GUEST_USO6 55 /* Guest can handle USOv6 in. */
#define VIRTIO_NET_F_HOST_USO 56 /* Host can handle USO in. */
#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
+#define VIRTIO_NET_F_GUEST_HDRLEN 59 /* Guest provides the exact hdr_len value. */
#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
#define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */
#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device
uint32_t texture;
uint32_t x;
uint32_t y;
- uint32_t scanout_width;
- uint32_t scanout_height;
+ uint32_t backing_width;
+ uint32_t backing_height;
bool y0_top;
void *sync;
int fence_fd;
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_BITSPERLONG_H
-#define __ASM_BITSPERLONG_H
-
-#define __BITS_PER_LONG 64
-
#include <asm-generic/bitsperlong.h>
-
-#endif /* __ASM_BITSPERLONG_H */
__u64 reserved[2];
};
+/*
+ * Counter/Timer offset structure. Describe the virtual/physical offset.
+ * To be used with KVM_ARM_SET_COUNTER_OFFSET.
+ */
+struct kvm_arm_counter_offset {
+ __u64 counter_offset;
+ __u64 reserved;
+};
+
#define KVM_ARM_TAGS_TO_GUEST 0
#define KVM_ARM_TAGS_FROM_GUEST 1
KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
};
+/* Device Control API on vm fd */
+#define KVM_ARM_VM_SMCCC_CTRL 0
+#define KVM_ARM_VM_SMCCC_FILTER 0
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
+#define KVM_ARM_VCPU_TIMER_IRQ_HVTIMER 2
+#define KVM_ARM_VCPU_TIMER_IRQ_HPTIMER 3
#define KVM_ARM_VCPU_PVTIME_CTRL 2
#define KVM_ARM_VCPU_PVTIME_IPA 0
/* run->fail_entry.hardware_entry_failure_reason codes. */
#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0)
+enum kvm_smccc_filter_action {
+ KVM_SMCCC_FILTER_HANDLE = 0,
+ KVM_SMCCC_FILTER_DENY,
+ KVM_SMCCC_FILTER_FWD_TO_USER,
+};
+
+struct kvm_smccc_filter {
+ __u32 base;
+ __u32 nr_functions;
+ __u8 action;
+ __u8 pad[15];
+};
+
+/* arm64-specific KVM_EXIT_HYPERCALL flags */
+#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
+#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
+
#endif
#endif /* __ARM_KVM_H__ */
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
+#ifndef __BITS_PER_LONG
+/*
+ * In order to keep safe and avoid regression, only unify uapi
+ * bitsperlong.h for some archs which are using newer toolchains
+ * that have the definitions of __CHAR_BIT__ and __SIZEOF_LONG__.
+ * See the following link for more info:
+ * https://lore.kernel.org/linux-arch/b9624545-2c80-49a1-ac3c-39264a591f7b@app.fastmail.com/
+ */
+#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
+#define __BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
-#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
+#endif
#endif /* __ASM_GENERIC_BITS_PER_LONG */
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
#endif
-/* fs/xattr.c */
#define __NR_setxattr 5
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 6
__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
#define __NR_fremovexattr 16
__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-
-/* fs/dcache.c */
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
-
-/* fs/cookies.c */
#define __NR_lookup_dcookie 18
__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
-
-/* fs/eventfd.c */
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
-
-/* fs/eventpoll.c */
#define __NR_epoll_create1 20
__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
#define __NR_epoll_ctl 21
__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
#define __NR_epoll_pwait 22
__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
-
-/* fs/fcntl.c */
#define __NR_dup 23
__SYSCALL(__NR_dup, sys_dup)
#define __NR_dup3 24
__SYSCALL(__NR_dup3, sys_dup3)
#define __NR3264_fcntl 25
__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
-
-/* fs/inotify_user.c */
#define __NR_inotify_init1 26
__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
#define __NR_inotify_add_watch 27
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 28
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-
-/* fs/ioctl.c */
#define __NR_ioctl 29
__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
-
-/* fs/ioprio.c */
#define __NR_ioprio_set 30
__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
#define __NR_ioprio_get 31
__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-
-/* fs/locks.c */
#define __NR_flock 32
__SYSCALL(__NR_flock, sys_flock)
-
-/* fs/namei.c */
#define __NR_mknodat 33
__SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_mkdirat 34
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
+
#ifdef __ARCH_WANT_RENAMEAT
/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
#endif /* __ARCH_WANT_RENAMEAT */
-/* fs/namespace.c */
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-/* fs/nfsctl.c */
#define __NR_nfsservctl 42
__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-
-/* fs/open.c */
#define __NR3264_statfs 43
__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
compat_sys_statfs64)
#define __NR3264_ftruncate 46
__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
compat_sys_ftruncate64)
-
#define __NR_fallocate 47
__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
#define __NR_faccessat 48
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
__SYSCALL(__NR_vhangup, sys_vhangup)
-
-/* fs/pipe.c */
#define __NR_pipe2 59
__SYSCALL(__NR_pipe2, sys_pipe2)
-
-/* fs/quota.c */
#define __NR_quotactl 60
__SYSCALL(__NR_quotactl, sys_quotactl)
-
-/* fs/readdir.c */
#define __NR_getdents64 61
__SYSCALL(__NR_getdents64, sys_getdents64)
-
-/* fs/read_write.c */
#define __NR3264_lseek 62
__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
#define __NR_read 63
__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
#define __NR_pwritev 70
__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
-
-/* fs/sendfile.c */
#define __NR3264_sendfile 71
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
-/* fs/select.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
#endif
-/* fs/signalfd.c */
#define __NR_signalfd4 74
__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
-
-/* fs/splice.c */
#define __NR_vmsplice 75
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
__SYSCALL(__NR_tee, sys_tee)
-
-/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+
#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
#endif
-/* fs/sync.c */
#define __NR_sync 81
__SYSCALL(__NR_sync, sys_sync)
#define __NR_fsync 82
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_fdatasync 83
__SYSCALL(__NR_fdatasync, sys_fdatasync)
+
#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
#define __NR_sync_file_range2 84
__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
compat_sys_sync_file_range)
#endif
-/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
sys_timerfd_gettime)
#endif
-/* fs/utimes.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
#endif
-/* kernel/acct.c */
#define __NR_acct 89
__SYSCALL(__NR_acct, sys_acct)
-
-/* kernel/capability.c */
#define __NR_capget 90
__SYSCALL(__NR_capget, sys_capget)
#define __NR_capset 91
__SYSCALL(__NR_capset, sys_capset)
-
-/* kernel/exec_domain.c */
#define __NR_personality 92
__SYSCALL(__NR_personality, sys_personality)
-
-/* kernel/exit.c */
#define __NR_exit 93
__SYSCALL(__NR_exit, sys_exit)
#define __NR_exit_group 94
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_waitid 95
__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
-
-/* kernel/fork.c */
#define __NR_set_tid_address 96
__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_unshare 97
__SYSCALL(__NR_unshare, sys_unshare)
-/* kernel/futex.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
#endif
+
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
-/* kernel/hrtimer.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
#endif
-/* kernel/itimer.c */
#define __NR_getitimer 102
__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
#define __NR_setitimer 103
__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
-
-/* kernel/kexec.c */
#define __NR_kexec_load 104
__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
-
-/* kernel/module.c */
#define __NR_init_module 105
__SYSCALL(__NR_init_module, sys_init_module)
#define __NR_delete_module 106
__SYSCALL(__NR_delete_module, sys_delete_module)
-
-/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
#endif
+
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
#endif
+
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
sys_clock_nanosleep)
#endif
-/* kernel/printk.c */
#define __NR_syslog 116
__SYSCALL(__NR_syslog, sys_syslog)
-
-/* kernel/ptrace.c */
#define __NR_ptrace 117
__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
-
-/* kernel/sched/core.c */
#define __NR_sched_setparam 118
__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
#define __NR_sched_setscheduler 119
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
sys_sched_rr_get_interval)
#endif
-/* kernel/signal.c */
#define __NR_restart_syscall 128
__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
#define __NR_kill 129
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#endif
+
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigreturn 139
__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
-
-/* kernel/sys.c */
#define __NR_setpriority 140
__SYSCALL(__NR_setpriority, sys_setpriority)
#define __NR_getpriority 141
#define __NR_getcpu 168
__SYSCALL(__NR_getcpu, sys_getcpu)
-/* kernel/time.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
-/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_sysinfo 179
__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
-
-/* ipc/mqueue.c */
#define __NR_mq_open 180
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
sys_mq_timedreceive)
#endif
+
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
-
-/* ipc/msg.c */
#define __NR_msgget 186
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 187
__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
#define __NR_msgsnd 189
__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
-
-/* ipc/sem.c */
#define __NR_semget 190
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
+
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
-
-/* ipc/shm.c */
#define __NR_shmget 194
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 195
__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
#define __NR_shmdt 197
__SYSCALL(__NR_shmdt, sys_shmdt)
-
-/* net/socket.c */
#define __NR_socket 198
__SYSCALL(__NR_socket, sys_socket)
#define __NR_socketpair 199
__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
#define __NR_recvmsg 212
__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
-
-/* mm/filemap.c */
#define __NR_readahead 213
__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
-
-/* mm/nommu.c, also with MMU */
#define __NR_brk 214
__SYSCALL(__NR_brk, sys_brk)
#define __NR_munmap 215
__SYSCALL(__NR_munmap, sys_munmap)
#define __NR_mremap 216
__SYSCALL(__NR_mremap, sys_mremap)
-
-/* security/keys/keyctl.c */
#define __NR_add_key 217
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 218
__SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 219
__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
-
-/* arch/example/kernel/sys_example.c */
#define __NR_clone 220
__SYSCALL(__NR_clone, sys_clone)
#define __NR_execve 221
__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
-
#define __NR3264_mmap 222
__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
-/* mm/fadvise.c */
#define __NR3264_fadvise64 223
__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
#ifndef __ARCH_NOMMU
#define __NR_swapon 224
__SYSCALL(__NR_swapon, sys_swapon)
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
#endif
+
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
#endif
+
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
#endif
+
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+
#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+
#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
-
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
#define __NR_quotactl_fd 443
__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
-
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
#define __NR_memfd_secret 447
__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#endif
+
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
-
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
-
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+#define __NR_cachestat 451
+__SYSCALL(__NR_cachestat, sys_cachestat)
+
#undef __NR_syscalls
-#define __NR_syscalls 451
+#define __NR_syscalls 452
/*
* 32 bit systems traditionally used different
#define __NR_process_mrelease (__NR_Linux + 448)
#define __NR_futex_waitv (__NR_Linux + 449)
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
+#define __NR_cachestat (__NR_Linux + 451)
#endif /* _ASM_UNISTD_N32_H */
#define __NR_process_mrelease (__NR_Linux + 448)
#define __NR_futex_waitv (__NR_Linux + 449)
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
+#define __NR_cachestat (__NR_Linux + 451)
#endif /* _ASM_UNISTD_N64_H */
#define __NR_process_mrelease (__NR_Linux + 448)
#define __NR_futex_waitv (__NR_Linux + 449)
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
+#define __NR_cachestat (__NR_Linux + 451)
#endif /* _ASM_UNISTD_O32_H */
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
#endif /* _ASM_UNISTD_32_H */
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
#endif /* _ASM_UNISTD_64_H */
-/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Copyright (C) 2015 Regents of the University of California
- */
-
-#ifndef _ASM_RISCV_BITSPERLONG_H
-#define _ASM_RISCV_BITSPERLONG_H
-
-#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
-
#include <asm-generic/bitsperlong.h>
-
-#endif /* _ASM_RISCV_BITSPERLONG_H */
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/bitsperlong.h>
#include <asm/ptrace.h>
+#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
unsigned long mvendorid;
unsigned long marchid;
unsigned long mimpid;
+ unsigned long zicboz_block_size;
};
/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
#define KVM_RISCV_MODE_S 1
#define KVM_RISCV_MODE_U 0
-/* CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+/* General CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
struct kvm_riscv_csr {
unsigned long sstatus;
unsigned long sie;
unsigned long scounteren;
};
+/* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_aia_csr {
+ unsigned long siselect;
+ unsigned long iprio1;
+ unsigned long iprio2;
+ unsigned long sieh;
+ unsigned long siph;
+ unsigned long iprio1h;
+ unsigned long iprio2h;
+};
+
/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
struct kvm_riscv_timer {
__u64 frequency;
KVM_RISCV_ISA_EXT_SVINVAL,
KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
KVM_RISCV_ISA_EXT_ZICBOM,
+ KVM_RISCV_ISA_EXT_ZICBOZ,
+ KVM_RISCV_ISA_EXT_ZBB,
+ KVM_RISCV_ISA_EXT_SSAIA,
+ KVM_RISCV_ISA_EXT_V,
+ KVM_RISCV_ISA_EXT_SVNAPOT,
KVM_RISCV_ISA_EXT_MAX,
};
+/*
+ * SBI extension IDs specific to KVM. This is not the same as the SBI
+ * extension IDs defined by the RISC-V SBI specification.
+ */
+enum KVM_RISCV_SBI_EXT_ID {
+ KVM_RISCV_SBI_EXT_V01 = 0,
+ KVM_RISCV_SBI_EXT_TIME,
+ KVM_RISCV_SBI_EXT_IPI,
+ KVM_RISCV_SBI_EXT_RFENCE,
+ KVM_RISCV_SBI_EXT_SRST,
+ KVM_RISCV_SBI_EXT_HSM,
+ KVM_RISCV_SBI_EXT_PMU,
+ KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+ KVM_RISCV_SBI_EXT_VENDOR,
+ KVM_RISCV_SBI_EXT_MAX,
+};
+
/* Possible states for kvm_riscv_timer */
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
#define KVM_REG_RISCV_TYPE_SHIFT 24
+#define KVM_REG_RISCV_SUBTYPE_MASK 0x0000000000FF0000
+#define KVM_REG_RISCV_SUBTYPE_SHIFT 16
/* Config registers are mapped as type 1 */
#define KVM_REG_RISCV_CONFIG (0x01 << KVM_REG_RISCV_TYPE_SHIFT)
/* Control and status registers are mapped as type 3 */
#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_CSR_REG(name) \
(offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_CSR_AIA_REG(name) \
+ (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long))
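With the new subtype field, a ONE_REG id is now composed from type, subtype and register index. An illustrative sketch for the AIA siselect CSR (64-bit register size shown; KVM_REG_RISCV and KVM_REG_SIZE_U64 come from the generic ONE_REG encoding in <linux/kvm.h>):

#include <linux/kvm.h>

/* Register id for the AIA siselect CSR, for KVM_GET/SET_ONE_REG. */
static __u64 aia_siselect_id(void)
{
    return KVM_REG_RISCV | KVM_REG_SIZE_U64 |
           KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA |
           KVM_REG_RISCV_CSR_AIA_REG(siselect);
}
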
/* Timer registers are mapped as type 4 */
#define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT)
/* ISA Extension registers are mapped as type 7 */
#define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT)
+/* SBI extension registers are mapped as type 8 */
+#define KVM_REG_RISCV_SBI_EXT (0x08 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_SINGLE (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_MULTI_EN (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_MULTI_DIS (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_MULTI_REG(__ext_id) \
+ ((__ext_id) / __BITS_PER_LONG)
+#define KVM_REG_RISCV_SBI_MULTI_MASK(__ext_id) \
+ (1UL << ((__ext_id) % __BITS_PER_LONG))
+#define KVM_REG_RISCV_SBI_MULTI_REG_LAST \
+ KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_MAX - 1)
+
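The MULTI_REG/MULTI_MASK helpers spread the extension IDs across an array of enable registers, one bit per extension. A short worked example; the values follow from the enum above:

/* For KVM_RISCV_SBI_EXT_PMU (== 6) on a 64-bit host this yields
 * multi-enable register 0 with bit 6 set. */
unsigned long reg_index = KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_PMU);
unsigned long reg_mask  = KVM_REG_RISCV_SBI_MULTI_MASK(KVM_RISCV_SBI_EXT_PMU);
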
+/* V extension registers are mapped as type 9 */
+#define KVM_REG_RISCV_VECTOR (0x09 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_VECTOR_CSR_REG(name) \
+ (offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_VECTOR_REG(n) \
+ ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
+
+/* Device Control API: RISC-V AIA */
+#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
+#define KVM_DEV_RISCV_APLIC_SIZE 0x4000
+#define KVM_DEV_RISCV_APLIC_MAX_HARTS 0x4000
+#define KVM_DEV_RISCV_IMSIC_ALIGN 0x1000
+#define KVM_DEV_RISCV_IMSIC_SIZE 0x1000
+
+#define KVM_DEV_RISCV_AIA_GRP_CONFIG 0
+#define KVM_DEV_RISCV_AIA_CONFIG_MODE 0
+#define KVM_DEV_RISCV_AIA_CONFIG_IDS 1
+#define KVM_DEV_RISCV_AIA_CONFIG_SRCS 2
+#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS 3
+#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT 4
+#define KVM_DEV_RISCV_AIA_CONFIG_HART_BITS 5
+#define KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS 6
+
+/*
+ * Modes of RISC-V AIA device:
+ * 1) EMUL (aka Emulation): Trap-n-emulate IMSIC
+ * 2) HWACCEL (aka HW Acceleration): Virtualize IMSIC using IMSIC guest files
+ * 3) AUTO (aka Automatic): Virtualize IMSIC using IMSIC guest files whenever
+ * available, otherwise fall back to trap-n-emulation
+ */
+#define KVM_DEV_RISCV_AIA_MODE_EMUL 0
+#define KVM_DEV_RISCV_AIA_MODE_HWACCEL 1
+#define KVM_DEV_RISCV_AIA_MODE_AUTO 2
+
+#define KVM_DEV_RISCV_AIA_IDS_MIN 63
+#define KVM_DEV_RISCV_AIA_IDS_MAX 2048
+#define KVM_DEV_RISCV_AIA_SRCS_MAX 1024
+#define KVM_DEV_RISCV_AIA_GROUP_BITS_MAX 8
+#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN 24
+#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX 56
+#define KVM_DEV_RISCV_AIA_HART_BITS_MAX 16
+#define KVM_DEV_RISCV_AIA_GUEST_BITS_MAX 8
+
+#define KVM_DEV_RISCV_AIA_GRP_ADDR 1
+#define KVM_DEV_RISCV_AIA_ADDR_APLIC 0
+#define KVM_DEV_RISCV_AIA_ADDR_IMSIC(__vcpu) (1 + (__vcpu))
+#define KVM_DEV_RISCV_AIA_ADDR_MAX \
+ (1 + KVM_DEV_RISCV_APLIC_MAX_HARTS)
+
+#define KVM_DEV_RISCV_AIA_GRP_CTRL 2
+#define KVM_DEV_RISCV_AIA_CTRL_INIT 0
+
+/*
+ * The device attribute type contains the memory mapped offset of the
+ * APLIC register (range 0x0000-0x3FFF) and it must be 4-byte aligned.
+ */
+#define KVM_DEV_RISCV_AIA_GRP_APLIC 3
+
+/*
+ * The lower 12 bits of the device attribute type contain the iselect
+ * value of the IMSIC register (range 0x70-0xFF) whereas the higher order
+ * bits contain the VCPU id.
+ */
+#define KVM_DEV_RISCV_AIA_GRP_IMSIC 4
+#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS 12
+#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK \
+ ((1U << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) - 1)
+#define KVM_DEV_RISCV_AIA_IMSIC_MKATTR(__vcpu, __isel) \
+ (((__vcpu) << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) | \
+ ((__isel) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK))
+#define KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(__attr) \
+ ((__attr) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK)
+#define KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(__attr) \
+ ((__attr) >> KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS)
+
+/* A single KVM irqchip, i.e. the AIA */
+#define KVM_NR_IRQCHIPS 1
+
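The MKATTR/GET helpers pack a vcpu id and an iselect value into a single attribute word and back. A small worked example; the concrete values are illustrative only:

unsigned long attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(3, 0x70);
unsigned long vcpu = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(attr); /* == 3    */
unsigned long isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(attr); /* == 0x70 */
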
#endif
#endif /* __LINUX_KVM_RISCV_H */
#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
#endif
__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+/*
+ * Allows userspace to query the kernel for CPU architecture and
+ * microarchitecture details across a given set of CPUs.
+ */
+#ifndef __NR_riscv_hwprobe
+#define __NR_riscv_hwprobe (__NR_arch_specific_syscall + 14)
+#endif
+__SYSCALL(__NR_riscv_hwprobe, sys_riscv_hwprobe)
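A minimal userspace sketch of the new syscall, not part of the patch; it assumes <asm/hwprobe.h> provides struct riscv_hwprobe and the RISCV_HWPROBE_KEY_MVENDORID key, and uses the raw syscall number since libc wrappers may not exist yet:

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

/* Query the machine vendor ID common to all online CPUs. */
static long probe_mvendorid(__u64 *out)
{
    struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_MVENDORID };
    long ret = syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);

    if (ret == 0) {
        *out = pair.value;
    }
    return ret;
}
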
#define __NR_landlock_create_ruleset 444
#define __NR_landlock_add_rule 445
#define __NR_landlock_restrict_self 446
+#define __NR_memfd_secret 447
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
#endif /* _ASM_S390_UNISTD_32_H */
#define __NR_landlock_create_ruleset 444
#define __NR_landlock_add_rule 445
#define __NR_landlock_restrict_self 446
+#define __NR_memfd_secret 447
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
#endif /* _ASM_S390_UNISTD_64_H */
#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
+/* x86-specific KVM_EXIT_HYPERCALL flags. */
+#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
+
#endif /* _ASM_X86_KVM_H */
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
#endif /* _ASM_UNISTD_32_H */
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
#endif /* _ASM_UNISTD_64_H */
#define __NR_process_mrelease (__X32_SYSCALL_BIT + 448)
#define __NR_futex_waitv (__X32_SYSCALL_BIT + 449)
#define __NR_set_mempolicy_home_node (__X32_SYSCALL_BIT + 450)
+#define __NR_cachestat (__X32_SYSCALL_BIT + 451)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)
#define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513)
#define __NR_ioctl (__X32_SYSCALL_BIT + 514)
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
__u64 nr;
__u64 args[6];
__u64 ret;
- __u32 longmode;
- __u32 pad;
+
+ union {
+ __u32 longmode;
+ __u64 flags;
+ };
} hypercall;
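Userspace that negotiates flag-based hypercall exits should read the new 64-bit field rather than the legacy longmode word. A hedged fragment, assuming run points at the mapped struct kvm_run:

#include <stdbool.h>
#include <linux/kvm.h>

/* True if the exiting hypercall was made from 64-bit (long) mode. */
static bool hypercall_in_long_mode(const struct kvm_run *run)
{
    return run->hypercall.flags & KVM_EXIT_HYPERCALL_LONG_MODE;
}
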
/* KVM_EXIT_TPR_ACCESS */
struct {
#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
+#define KVM_CAP_COUNTER_OFFSET 227
+#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
+#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
#ifdef KVM_CAP_IRQ_ROUTING
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_ARM_PV_TIME,
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
+ KVM_DEV_TYPE_RISCV_AIA,
+#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
KVM_DEV_TYPE_MAX,
};
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
-#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
+#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) /* deprecated */
#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
+/* Available with KVM_CAP_COUNTER_OFFSET */
+#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
/*
- * vcpu version available with KVM_ENABLE_CAP
+ * vcpu version available with KVM_CAP_ENABLE_CAP
* vm version available with KVM_CAP_ENABLE_CAP_VM
*/
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
+#include <linux/types.h>
#define MREMAP_MAYMOVE 1
#define MREMAP_FIXED 2
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
+struct cachestat_range {
+ __u64 off;
+ __u64 len;
+};
+
+struct cachestat {
+ __u64 nr_cache;
+ __u64 nr_dirty;
+ __u64 nr_writeback;
+ __u64 nr_evicted;
+ __u64 nr_recently_evicted;
+};
+
#endif /* _LINUX_MMAN_H */
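A minimal sketch of the matching cachestat() call, not part of the patch; raw syscall(2) is shown since libc wrappers may not exist yet, and fd is assumed to be an open file descriptor:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>

/* Page-cache residency of the first 64KiB of the file behind fd. */
static int query_cachestat(int fd, struct cachestat *cs)
{
    struct cachestat_range range = { .off = 0, .len = 64 * 1024 };

    return syscall(__NR_cachestat, fd, &range, cs, 0);
}
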
* SEV Firmware status code
*/
typedef enum {
+ /*
+ * This error code is not in the SEV spec. Its purpose is to convey that
+ * there was an error that prevented the SEV firmware from being called.
+ * The SEV API error codes are 16 bits, so the -1 value will not overlap
+ * with possible values from the specification.
+ */
+ SEV_RET_NO_FW_CALL = -1,
SEV_RET_SUCCESS = 0,
SEV_RET_INVALID_PLATFORM_STATE,
SEV_RET_INVALID_GUEST_STATE,
UFFD_FEATURE_MINOR_HUGETLBFS | \
UFFD_FEATURE_MINOR_SHMEM | \
UFFD_FEATURE_EXACT_ADDRESS | \
- UFFD_FEATURE_WP_HUGETLBFS_SHMEM)
+ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
+ UFFD_FEATURE_WP_UNPOPULATED)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
*
* UFFD_FEATURE_WP_HUGETLBFS_SHMEM indicates that userfaultfd
* write-protection mode is supported on both shmem and hugetlbfs.
+ *
+ * UFFD_FEATURE_WP_UNPOPULATED indicates that userfaultfd
+ * write-protection mode will always apply to unpopulated pages
+ * (i.e. empty ptes). This will be the default behavior for shmem
+ * & hugetlbfs, so this flag only affects anonymous memory behavior
+ * when userfault write-protection mode is registered.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
#define UFFD_FEATURE_MINOR_SHMEM (1<<10)
#define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
+#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
__u64 features;
__u64 ioctls;
struct uffdio_continue {
struct uffdio_range range;
#define UFFDIO_CONTINUE_MODE_DONTWAKE ((__u64)1<<0)
+ /*
+ * UFFDIO_CONTINUE_MODE_WP will map the page write protected on
+ * the fly. UFFDIO_CONTINUE_MODE_WP is available only if the
+ * write protected ioctl is implemented for the range
+ * according to the uffdio_register.ioctls.
+ */
+#define UFFDIO_CONTINUE_MODE_WP ((__u64)1<<1)
__u64 mode;
/*
#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6) /* vfio-fsl-mc device */
#define VFIO_DEVICE_FLAGS_CAPS (1 << 7) /* Info supports caps */
+#define VFIO_DEVICE_FLAGS_CDX (1 << 8) /* vfio-cdx device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
__u32 cap_offset; /* Offset within info struct of first cap */
#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL 3
#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP 4
+/*
+ * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
+ * completion to the root bus with supported widths provided via flags.
+ */
+#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP 5
+struct vfio_device_info_cap_pci_atomic_comp {
+ struct vfio_info_cap_header header;
+ __u32 flags;
+#define VFIO_PCI_ATOMIC_COMP32 (1 << 0)
+#define VFIO_PCI_ATOMIC_COMP64 (1 << 1)
+#define VFIO_PCI_ATOMIC_COMP128 (1 << 2)
+ __u32 reserved;
+};
+
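Consumers locate the capability by walking the standard vfio cap chain. A sketch, assuming info points at a struct vfio_device_info already filled in by VFIO_DEVICE_GET_INFO with a sufficiently large argsz:

#include <linux/vfio.h>

/* Return the AtomicOp completion width flags, or 0 if not reported. */
static __u32 atomic_comp_flags(struct vfio_device_info *info)
{
    __u32 off = (info->flags & VFIO_DEVICE_FLAGS_CAPS) ? info->cap_offset : 0;

    while (off) {
        struct vfio_info_cap_header *hdr = (void *)((char *)info + off);

        if (hdr->id == VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP) {
            struct vfio_device_info_cap_pci_atomic_comp *cap = (void *)hdr;
            return cap->flags;
        }
        off = hdr->next;
    }
    return 0;
}
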
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
* struct vfio_region_info)
* then add and unmask vectors, it's up to userspace to make the decision
* whether to allocate the maximum supported number of vectors or tear
* down setup and incrementally increase the vectors as each is enabled.
+ * Absence of the NORESIZE flag indicates that vectors can be enabled
+ * and disabled dynamically without impacting other vectors within the
+ * index.
*/
struct vfio_irq_info {
__u32 argsz;
VFIO_CCW_NUM_IRQS
};
+/*
+ * The vfio-ap bus driver makes use of the following IRQ index mapping.
+ * Unimplemented IRQ types return a count of zero.
+ */
+enum {
+ VFIO_AP_REQ_IRQ_INDEX,
+ VFIO_AP_NUM_IRQS
+};
+
/**
* VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
/* Specify an eventfd file descriptor to signal on log write. */
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+/* By default, a device gets one vhost_worker that its virtqueues share. This
+ * command allows the owner of the device to create an additional vhost_worker
+ * for the device. It can later be bound to 1 or more of its virtqueues using
+ * the VHOST_ATTACH_VRING_WORKER command.
+ *
+ * This must be called after VHOST_SET_OWNER and the caller must be the owner
+ * of the device. The new thread will inherit caller's cgroups and namespaces,
+ * and will share the caller's memory space. The new thread will also be
+ * counted against the caller's RLIMIT_NPROC value.
+ *
+ * The worker's ID used in other commands will be returned in
+ * vhost_worker_state.
+ */
+#define VHOST_NEW_WORKER _IOR(VHOST_VIRTIO, 0x8, struct vhost_worker_state)
+/* Free a worker created with VHOST_NEW_WORKER if it's not attached to any
+ * virtqueue. If userspace is not able to call this for workers it has created,
+ * the kernel will free all the device's workers when the device is closed.
+ */
+#define VHOST_FREE_WORKER _IOW(VHOST_VIRTIO, 0x9, struct vhost_worker_state)
/* Ring setup. */
/* Set number of descriptors in ring. This parameter can not
#define VHOST_VRING_BIG_ENDIAN 1
#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
+/* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
+ * virtqueues.
+ *
+ * This will replace the virtqueue's existing worker. If the replaced worker
+ * is no longer attached to any virtqueues, it can be freed with
+ * VHOST_FREE_WORKER.
+ */
+#define VHOST_ATTACH_VRING_WORKER _IOW(VHOST_VIRTIO, 0x15, \
+ struct vhost_vring_worker)
+/* Return the vring worker's ID */
+#define VHOST_GET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x16, \
+ struct vhost_vring_worker)
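Putting the two worker ioctls together: a hedged sketch that creates a second worker and rebinds one virtqueue to it. It assumes dev_fd is a vhost device fd after VHOST_SET_OWNER, and that struct vhost_worker_state / struct vhost_vring_worker come from <linux/vhost_types.h>:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Move virtqueue 1 onto a freshly created vhost worker. */
static int move_vq1_to_new_worker(int dev_fd)
{
    struct vhost_worker_state state = { 0 };
    struct vhost_vring_worker w;

    if (ioctl(dev_fd, VHOST_NEW_WORKER, &state) < 0) {
        return -1;
    }
    w.index = 1;                    /* virtqueue to rebind */
    w.worker_id = state.worker_id;  /* id returned by the kernel */
    return ioctl(dev_fd, VHOST_ATTACH_VRING_WORKER, &w);
}
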
/* The following ioctls use eventfd file descriptors to signal and poll
* for events. */
+#ifndef AARCH64_TARGET_MMAN_H
+#define AARCH64_TARGET_MMAN_H
+
+#define TARGET_PROT_BTI 0x10
+#define TARGET_PROT_MTE 0x20
+
#include "../generic/target_mman.h"
+
+#endif
#ifndef ALPHA_TARGET_MMAN_H
#define ALPHA_TARGET_MMAN_H
+#define TARGET_MAP_ANONYMOUS 0x10
+#define TARGET_MAP_FIXED 0x100
+#define TARGET_MAP_GROWSDOWN 0x01000
+#define TARGET_MAP_DENYWRITE 0x02000
+#define TARGET_MAP_EXECUTABLE 0x04000
+#define TARGET_MAP_LOCKED 0x08000
+#define TARGET_MAP_NORESERVE 0x10000
+#define TARGET_MAP_POPULATE 0x20000
+#define TARGET_MAP_NONBLOCK 0x40000
+#define TARGET_MAP_STACK 0x80000
+#define TARGET_MAP_HUGETLB 0x100000
+#define TARGET_MAP_FIXED_NOREPLACE 0x200000
+
#define TARGET_MADV_DONTNEED 6
#define TARGET_MS_ASYNC 1
static bool init_guest_commpage(void)
{
- abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
- void *want = g2h_untagged(commpage);
- void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ abi_ptr commpage;
+ void *want;
+ void *addr;
+
+ /*
+ * M-profile allocates a maximum of 2GB of address space, so it can
+ * never allocate the commpage. Skip it.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ return true;
+ }
+
+ commpage = HI_COMMPAGE & -qemu_host_page_size;
+ want = g2h_untagged(commpage);
+ addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (addr == MAP_FAILED) {
perror("Allocating guest commpage");
const char *elf_hwcap_str(uint32_t bit)
{
static const char *hwcap_str[] = {
- [HWCAP_S390_ESAN3] = "esan3",
- [HWCAP_S390_ZARCH] = "zarch",
- [HWCAP_S390_STFLE] = "stfle",
- [HWCAP_S390_MSA] = "msa",
- [HWCAP_S390_LDISP] = "ldisp",
- [HWCAP_S390_EIMM] = "eimm",
- [HWCAP_S390_DFP] = "dfp",
- [HWCAP_S390_HPAGE] = "edat",
- [HWCAP_S390_ETF3EH] = "etf3eh",
- [HWCAP_S390_HIGH_GPRS] = "highgprs",
- [HWCAP_S390_TE] = "te",
- [HWCAP_S390_VXRS] = "vx",
- [HWCAP_S390_VXRS_BCD] = "vxd",
- [HWCAP_S390_VXRS_EXT] = "vxe",
- [HWCAP_S390_GS] = "gs",
- [HWCAP_S390_VXRS_EXT2] = "vxe2",
- [HWCAP_S390_VXRS_PDE] = "vxp",
- [HWCAP_S390_SORT] = "sort",
- [HWCAP_S390_DFLT] = "dflt",
+ [HWCAP_S390_NR_ESAN3] = "esan3",
+ [HWCAP_S390_NR_ZARCH] = "zarch",
+ [HWCAP_S390_NR_STFLE] = "stfle",
+ [HWCAP_S390_NR_MSA] = "msa",
+ [HWCAP_S390_NR_LDISP] = "ldisp",
+ [HWCAP_S390_NR_EIMM] = "eimm",
+ [HWCAP_S390_NR_DFP] = "dfp",
+ [HWCAP_S390_NR_HPAGE] = "edat",
+ [HWCAP_S390_NR_ETF3EH] = "etf3eh",
+ [HWCAP_S390_NR_HIGH_GPRS] = "highgprs",
+ [HWCAP_S390_NR_TE] = "te",
+ [HWCAP_S390_NR_VXRS] = "vx",
+ [HWCAP_S390_NR_VXRS_BCD] = "vxd",
+ [HWCAP_S390_NR_VXRS_EXT] = "vxe",
+ [HWCAP_S390_NR_GS] = "gs",
+ [HWCAP_S390_NR_VXRS_EXT2] = "vxe2",
+ [HWCAP_S390_NR_VXRS_PDE] = "vxp",
+ [HWCAP_S390_NR_SORT] = "sort",
+ [HWCAP_S390_NR_DFLT] = "dflt",
+ [HWCAP_S390_NR_NNPA] = "nnpa",
+ [HWCAP_S390_NR_PCI_MIO] = "pcimio",
+ [HWCAP_S390_NR_SIE] = "sie",
};
return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
regs->psw.addr = infop->entry;
- regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
+ regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+ PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \
+ PSW_MASK_32;
regs->gprs[15] = infop->start_stack;
}
#ifndef LINUX_USER_TARGET_MMAN_H
#define LINUX_USER_TARGET_MMAN_H
+/* These are defined in asm-generic/mman-common.h */
+#define TARGET_MAP_SHARED 0x01
+#define TARGET_MAP_PRIVATE 0x02
+#define TARGET_MAP_SHARED_VALIDATE 0x03
+
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#ifndef TARGET_MAP_GROWSDOWN
+#define TARGET_MAP_GROWSDOWN 0x0100
+#endif
+#ifndef TARGET_MAP_DENYWRITE
+#define TARGET_MAP_DENYWRITE 0x0800
+#endif
+#ifndef TARGET_MAP_EXECUTABLE
+#define TARGET_MAP_EXECUTABLE 0x1000
+#endif
+#ifndef TARGET_MAP_LOCKED
+#define TARGET_MAP_LOCKED 0x2000
+#endif
+#ifndef TARGET_MAP_NORESERVE
+#define TARGET_MAP_NORESERVE 0x4000
+#endif
+
+/* Defined in asm-generic/mman-common.h */
+#ifndef TARGET_PROT_SEM
+#define TARGET_PROT_SEM 0x08
+#endif
+
+#ifndef TARGET_MAP_TYPE
+#define TARGET_MAP_TYPE 0x0f
+#endif
+#ifndef TARGET_MAP_FIXED
+#define TARGET_MAP_FIXED 0x10
+#endif
+#ifndef TARGET_MAP_ANONYMOUS
+#define TARGET_MAP_ANONYMOUS 0x20
+#endif
+#ifndef TARGET_MAP_POPULATE
+#define TARGET_MAP_POPULATE 0x008000
+#endif
+#ifndef TARGET_MAP_NONBLOCK
+#define TARGET_MAP_NONBLOCK 0x010000
+#endif
+#ifndef TARGET_MAP_STACK
+#define TARGET_MAP_STACK 0x020000
+#endif
+#ifndef TARGET_MAP_HUGETLB
+#define TARGET_MAP_HUGETLB 0x040000
+#endif
+#ifndef TARGET_MAP_SYNC
+#define TARGET_MAP_SYNC 0x080000
+#endif
+#ifndef TARGET_MAP_FIXED_NOREPLACE
+#define TARGET_MAP_FIXED_NOREPLACE 0x100000
+#endif
+#ifndef TARGET_MAP_UNINITIALIZED
+#define TARGET_MAP_UNINITIALIZED 0x4000000
+#endif
+
#ifndef TARGET_MADV_NORMAL
#define TARGET_MADV_NORMAL 0
#endif
#ifndef HPPA_TARGET_MMAN_H
#define HPPA_TARGET_MMAN_H
+#define TARGET_MAP_TYPE 0x2b
+#define TARGET_MAP_FIXED 0x04
+#define TARGET_MAP_ANONYMOUS 0x10
+#define TARGET_MAP_GROWSDOWN 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+#define TARGET_MAP_UNINITIALIZED 0
+
#define TARGET_MADV_MERGEABLE 65
#define TARGET_MADV_UNMERGEABLE 66
#define TARGET_MADV_HUGEPAGE 67
+#ifndef MIPS_TARGET_MMAN_H
+#define MIPS_TARGET_MMAN_H
+
+#define TARGET_PROT_SEM 0x10
+
+#define TARGET_MAP_NORESERVE 0x0400
+#define TARGET_MAP_ANONYMOUS 0x0800
+#define TARGET_MAP_GROWSDOWN 0x1000
+#define TARGET_MAP_DENYWRITE 0x2000
+#define TARGET_MAP_EXECUTABLE 0x4000
+#define TARGET_MAP_LOCKED 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+
#include "../generic/target_mman.h"
+
+#endif
-#include "../generic/target_mman.h"
+#include "../mips/target_mman.h"
void mmap_unlock(void)
{
+ assert(mmap_lock_count > 0);
if (--mmap_lock_count == 0) {
pthread_mutex_unlock(&mmap_mutex);
}
void mmap_fork_end(int child)
{
- if (child)
+ if (child) {
pthread_mutex_init(&mmap_mutex, NULL);
- else
+ } else {
pthread_mutex_unlock(&mmap_mutex);
+ }
}
/*
* Return 0 if the target prot bitmask is invalid, otherwise
* the internal qemu page_flags (which will include PAGE_VALID).
*/
-static int validate_prot_to_pageflags(int *host_prot, int prot)
+static int validate_prot_to_pageflags(int prot)
{
int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
int page_flags = (prot & PAGE_BITS) | PAGE_VALID;
- /*
- * For the host, we need not pass anything except read/write/exec.
- * While PROT_SEM is allowed by all hosts, it is also ignored, so
- * don't bother transforming guest bit to host bit. Any other
- * target-specific prot bits will not be understood by the host
- * and will need to be encoded into page_flags for qemu emulation.
- *
- * Pages that are executable by the guest will never be executed
- * by the host, but the host will need to be able to read them.
- */
- *host_prot = (prot & (PROT_READ | PROT_WRITE))
- | (prot & PROT_EXEC ? PROT_READ : 0);
-
#ifdef TARGET_AARCH64
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
return prot & ~valid ? 0 : page_flags;
}
+/*
+ * For the host, we need not pass anything except read/write/exec.
+ * While PROT_SEM is allowed by all hosts, it is also ignored, so
+ * don't bother transforming guest bit to host bit. Any other
+ * target-specific prot bits will not be understood by the host
+ * and will need to be encoded into page_flags for qemu emulation.
+ *
+ * Pages that are executable by the guest will never be executed
+ * by the host, but the host will need to be able to read them.
+ */
+static int target_to_host_prot(int prot)
+{
+ return (prot & (PROT_READ | PROT_WRITE)) |
+ (prot & PROT_EXEC ? PROT_READ : 0);
+}
+
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
- abi_ulong end, host_start, host_end, addr;
- int prot1, ret, page_flags, host_prot;
+ abi_ulong starts[3];
+ abi_ulong lens[3];
+ int prots[3];
+ abi_ulong host_start, host_last, last;
+ int prot1, ret, page_flags, nranges;
trace_target_mprotect(start, len, target_prot);
if ((start & ~TARGET_PAGE_MASK) != 0) {
return -TARGET_EINVAL;
}
- page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
+ page_flags = validate_prot_to_pageflags(target_prot);
if (!page_flags) {
return -TARGET_EINVAL;
}
+ if (len == 0) {
+ return 0;
+ }
len = TARGET_PAGE_ALIGN(len);
- end = start + len;
if (!guest_range_valid_untagged(start, len)) {
return -TARGET_ENOMEM;
}
- if (len == 0) {
- return 0;
- }
- mmap_lock();
+ last = start + len - 1;
host_start = start & qemu_host_page_mask;
- host_end = HOST_PAGE_ALIGN(end);
- if (start > host_start) {
- /* handle host page containing start */
- prot1 = host_prot;
- for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
- }
- if (host_end == host_start + qemu_host_page_size) {
- for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
- }
- end = host_end;
+ host_last = HOST_PAGE_ALIGN(last) - 1;
+ nranges = 0;
+
+ mmap_lock();
+
+ if (host_last - host_start < qemu_host_page_size) {
+ /* Single host page contains all guest pages: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a);
}
- ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
- prot1 & PAGE_BITS);
- if (ret != 0) {
- goto error;
+ for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a + 1);
}
- host_start += qemu_host_page_size;
- }
- if (end < host_end) {
- prot1 = host_prot;
- for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
+ starts[nranges] = host_start;
+ lens[nranges] = qemu_host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ } else {
+ if (host_start < start) {
+ /* Host page contains more than one guest page: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a);
+ }
+ /* If the resulting sum differs, create a new range. */
+ if (prot1 != target_prot) {
+ starts[nranges] = host_start;
+ lens[nranges] = qemu_host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ host_start += qemu_host_page_size;
+ }
}
- ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
- qemu_host_page_size, prot1 & PAGE_BITS);
- if (ret != 0) {
- goto error;
+
+ if (last < host_last) {
+ /* Host page contains more than one guest page: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a + 1);
+ }
+ /* If the resulting sum differs, create a new range. */
+ if (prot1 != target_prot) {
+ host_last -= qemu_host_page_size;
+ starts[nranges] = host_last + 1;
+ lens[nranges] = qemu_host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ }
+ }
+
+ /* Create a range for the middle, if any remains. */
+ if (host_start < host_last) {
+ starts[nranges] = host_start;
+ lens[nranges] = host_last - host_start + 1;
+ prots[nranges] = target_prot;
+ nranges++;
}
- host_end -= qemu_host_page_size;
}
- /* handle the pages in the middle */
- if (host_start < host_end) {
- ret = mprotect(g2h_untagged(host_start),
- host_end - host_start, host_prot);
+ for (int i = 0; i < nranges; ++i) {
+ ret = mprotect(g2h_untagged(starts[i]), lens[i],
+ target_to_host_prot(prots[i]));
if (ret != 0) {
goto error;
}
}
- page_set_flags(start, start + len - 1, page_flags);
+ page_set_flags(start, last, page_flags);
ret = 0;
-error:
+ error:
mmap_unlock();
return ret;
}
/* map an incomplete host page */
-static int mmap_frag(abi_ulong real_start,
- abi_ulong start, abi_ulong end,
- int prot, int flags, int fd, abi_ulong offset)
+static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
+ int prot, int flags, int fd, off_t offset)
{
- abi_ulong real_end, addr;
+ abi_ulong real_last;
void *host_start;
- int prot1, prot_new;
+ int prot_old, prot_new;
+ int host_prot_old, host_prot_new;
- real_end = real_start + qemu_host_page_size;
+ if (!(flags & MAP_ANONYMOUS)
+ && (flags & MAP_TYPE) == MAP_SHARED
+ && (prot & PROT_WRITE)) {
+ /*
+ * msync() won't work with the partial page, so we return an
+ * error if write is possible while it is a shared mapping.
+ */
+ errno = EINVAL;
+ return false;
+ }
+
+ real_last = real_start + qemu_host_page_size - 1;
host_start = g2h_untagged(real_start);
- /* get the protection of the target pages outside the mapping */
- prot1 = 0;
- for(addr = real_start; addr < real_end; addr++) {
- if (addr < start || addr >= end)
- prot1 |= page_get_flags(addr);
+ /* Get the protection of the target pages outside the mapping. */
+ prot_old = 0;
+ for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot_old |= page_get_flags(a);
+ }
+ for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
+ prot_old |= page_get_flags(a);
}
- if (prot1 == 0) {
- /* no page was there, so we allocate one */
- void *p = mmap(host_start, qemu_host_page_size, prot,
+ if (prot_old == 0) {
+ /*
+ * Since !(prot_old & PAGE_VALID), there were no guest pages
+ * outside of the fragment we need to map. Allocate a new host
+ * page to cover, discarding whatever else may have been present.
+ */
+ void *p = mmap(host_start, qemu_host_page_size,
+ target_to_host_prot(prot),
flags | MAP_ANONYMOUS, -1, 0);
- if (p == MAP_FAILED)
- return -1;
- prot1 = prot;
- }
- prot1 &= PAGE_BITS;
-
- prot_new = prot | prot1;
- if (!(flags & MAP_ANONYMOUS)) {
- /* msync() won't work here, so we return an error if write is
- possible while it is a shared mapping */
- if ((flags & MAP_TYPE) == MAP_SHARED &&
- (prot & PROT_WRITE))
- return -1;
-
- /* adjust protection to be able to read */
- if (!(prot1 & PROT_WRITE))
- mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
-
- /* read the corresponding file data */
- if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
- return -1;
-
- /* put final protection */
- if (prot_new != (prot1 | PROT_WRITE))
- mprotect(host_start, qemu_host_page_size, prot_new);
- } else {
- if (prot_new != prot1) {
- mprotect(host_start, qemu_host_page_size, prot_new);
+ if (p == MAP_FAILED) {
+ return false;
}
- if (prot_new & PROT_WRITE) {
- memset(g2h_untagged(start), 0, end - start);
+ prot_old = prot;
+ }
+ prot_new = prot | prot_old;
+
+ host_prot_old = target_to_host_prot(prot_old);
+ host_prot_new = target_to_host_prot(prot_new);
+
+ /* Adjust protection to be able to write. */
+ if (!(host_prot_old & PROT_WRITE)) {
+ host_prot_old |= PROT_WRITE;
+ mprotect(host_start, qemu_host_page_size, host_prot_old);
+ }
+
+ /* Read or zero the new guest pages. */
+ if (flags & MAP_ANONYMOUS) {
+ memset(g2h_untagged(start), 0, last - start + 1);
+ } else {
+ if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
+ return false;
}
}
- return 0;
+
+ /* Put final protection */
+ if (host_prot_new != host_prot_old) {
+ mprotect(host_start, qemu_host_page_size, host_prot_new);
+ }
+ return true;
}
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
unsigned long last_brk;
-/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
- of guest address space. */
+/*
+ * Subroutine of mmap_find_vma, used when we have pre-allocated
+ * a chunk of guest address space.
+ */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong align)
{
- abi_ulong addr, end_addr, incr = qemu_host_page_size;
- int prot;
- bool looped = false;
+ target_ulong ret;
- if (size > reserved_va) {
- return (abi_ulong)-1;
+ ret = page_find_range_empty(start, reserved_va, size, align);
+ if (ret == -1 && start > mmap_min_addr) {
+ /* Restart at the beginning of the address space. */
+ ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
}
- /* Note that start and size have already been aligned by mmap_find_vma. */
-
- end_addr = start + size;
- /*
- * Start at the top of the address space, ignoring the last page.
- * If reserved_va == UINT32_MAX, then end_addr wraps to 0,
- * throwing the rest of the calculations off.
- * TODO: rewrite using last_addr instead.
- * TODO: use the interval tree instead of probing every page.
- */
- if (start > reserved_va - size) {
- end_addr = ((reserved_va - size) & -align) + size;
- looped = true;
- }
-
- /* Search downward from END_ADDR, checking to see if a page is in use. */
- addr = end_addr;
- while (1) {
- addr -= incr;
- if (addr > end_addr) {
- if (looped) {
- /* Failure. The entire address space has been searched. */
- return (abi_ulong)-1;
- }
- /* Re-start at the top of the address space (see above). */
- addr = end_addr = ((reserved_va - size) & -align) + size;
- looped = true;
- } else {
- prot = page_get_flags(addr);
- if (prot) {
- /* Page in use. Restart below this page. */
- addr = end_addr = ((addr - size) & -align) + size;
- } else if (addr && addr + size == end_addr) {
- /* Success! All pages between ADDR and END_ADDR are free. */
- if (start == mmap_next_start) {
- mmap_next_start = addr;
- }
- return addr;
- }
- }
- }
+ return ret;
}
/*
* - shmat() with SHM_REMAP flag
*/
ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
- MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
/* ENOMEM, if host address space has no memory */
if (ptr == MAP_FAILED) {
return (abi_ulong)-1;
}
- /* Count the number of sequential returns of the same address.
- This is used to modify the search algorithm below. */
+ /*
+ * Count the number of sequential returns of the same address.
+ * This is used to modify the search algorithm below.
+ */
repeat = (ptr == prev ? repeat + 1 : 0);
if (h2g_valid(ptr + size - 1)) {
/* The address is not properly aligned for the target. */
switch (repeat) {
case 0:
- /* Assume the result that the kernel gave us is the
- first with enough free space, so start again at the
- next higher target page. */
+ /*
+ * Assume the result that the kernel gave us is the
+ * first with enough free space, so start again at the
+ * next higher target page.
+ */
addr = ROUND_UP(addr, align);
break;
case 1:
- /* Sometimes the kernel decides to perform the allocation
- at the top end of memory instead. */
+ /*
+ * Sometimes the kernel decides to perform the allocation
+ * at the top end of memory instead.
+ */
addr &= -align;
break;
case 2:
break;
}
} else {
- /* Since the result the kernel gave didn't fit, start
- again at low memory. If any repetition, fail. */
+ /*
+ * Since the result the kernel gave didn't fit, start
+ * again at low memory. If any repetition, fail.
+ */
addr = (repeat ? -1 : 0);
}
return (abi_ulong)-1;
}
wrapped = 1;
- /* Don't actually use 0 when wrapping, instead indicate
- that we'd truly like an allocation in low memory. */
+ /*
+ * Don't actually use 0 when wrapping, instead indicate
+ * that we'd truly like an allocation in low memory.
+ */
addr = (mmap_min_addr > TARGET_PAGE_SIZE
? TARGET_PAGE_ALIGN(mmap_min_addr)
: TARGET_PAGE_SIZE);
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
- int flags, int fd, abi_ulong offset)
+ int flags, int fd, off_t offset)
{
- abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len,
- passthrough_start = -1, passthrough_end = -1;
- int page_flags, host_prot;
+ abi_ulong ret, last, real_start, real_last, retaddr, host_len;
+ abi_ulong passthrough_start = -1, passthrough_last = 0;
+ int page_flags;
+ off_t host_offset;
mmap_lock();
trace_target_mmap(start, len, target_prot, flags, fd, offset);
goto fail;
}
- page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
+ page_flags = validate_prot_to_pageflags(target_prot);
if (!page_flags) {
errno = EINVAL;
goto fail;
real_start = start & qemu_host_page_mask;
host_offset = offset & qemu_host_page_mask;
- /* If the user is asking for the kernel to find a location, do that
- before we truncate the length for mapping files below. */
- if (!(flags & MAP_FIXED)) {
+ /*
+ * If the user is asking for the kernel to find a location, do that
+ * before we truncate the length for mapping files below.
+ */
+ if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
}
}
- /* When mapping files into a memory area larger than the file, accesses
- to pages beyond the file size will cause a SIGBUS.
-
- For example, if mmaping a file of 100 bytes on a host with 4K pages
- emulating a target with 8K pages, the target expects to be able to
- access the first 8K. But the host will trap us on any access beyond
- 4K.
-
- When emulating a target with a larger page-size than the hosts, we
- may need to truncate file maps at EOF and add extra anonymous pages
- up to the targets page boundary. */
-
+ /*
+ * When mapping files into a memory area larger than the file, accesses
+ * to pages beyond the file size will cause a SIGBUS.
+ *
+ * For example, if mmaping a file of 100 bytes on a host with 4K pages
+ * emulating a target with 8K pages, the target expects to be able to
+ * access the first 8K. But the host will trap us on any access beyond
+ * 4K.
+ *
+ * When emulating a target with a larger page-size than the hosts, we
+ * may need to truncate file maps at EOF and add extra anonymous pages
+ * up to the targets page boundary.
+ */
if ((qemu_real_host_page_size() < qemu_host_page_size) &&
!(flags & MAP_ANONYMOUS)) {
struct stat sb;
- if (fstat (fd, &sb) == -1)
- goto fail;
+ if (fstat(fd, &sb) == -1) {
+ goto fail;
+ }
- /* Are we trying to create a map beyond EOF?. */
- if (offset + len > sb.st_size) {
- /* If so, truncate the file map at eof aligned with
- the hosts real pagesize. Additional anonymous maps
- will be created beyond EOF. */
- len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
- }
+ /* Are we trying to create a map beyond EOF? */
+ if (offset + len > sb.st_size) {
+ /*
+ * If so, truncate the file map at eof aligned with
+ * the hosts real pagesize. Additional anonymous maps
+ * will be created beyond EOF.
+ */
+ len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
+ }
}
- if (!(flags & MAP_FIXED)) {
- unsigned long host_start;
+ if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
+ uintptr_t host_start;
+ int host_prot;
void *p;
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
+ host_prot = target_to_host_prot(target_prot);
- /* Note: we prefer to control the mapping address. It is
- especially important if qemu_host_page_size >
- qemu_real_host_page_size */
+ /*
+ * Note: we prefer to control the mapping address. It is
+ * especially important if qemu_host_page_size >
+ * qemu_real_host_page_size.
+ */
p = mmap(g2h_untagged(start), host_len, host_prot,
flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
goto fail;
}
/* update start so that it points to the file position at 'offset' */
- host_start = (unsigned long)p;
+ host_start = (uintptr_t)p;
if (!(flags & MAP_ANONYMOUS)) {
p = mmap(g2h_untagged(start), len, host_prot,
flags | MAP_FIXED, fd, host_offset);
host_start += offset - host_offset;
}
start = h2g(host_start);
+ last = start + len - 1;
passthrough_start = start;
- passthrough_end = start + len;
+ passthrough_last = last;
} else {
if (start & ~TARGET_PAGE_MASK) {
errno = EINVAL;
goto fail;
}
- end = start + len;
- real_end = HOST_PAGE_ALIGN(end);
+ last = start + len - 1;
+ real_last = HOST_PAGE_ALIGN(last) - 1;
/*
* Test if requested memory area fits target address space
* It can fail only on 64-bit host with 32-bit target.
* On any other target/host host mmap() handles this error correctly.
*/
- if (end < start || !guest_range_valid_untagged(start, len)) {
+ if (last < start || !guest_range_valid_untagged(start, len)) {
errno = ENOMEM;
goto fail;
}
- /* worst case: we cannot map the file because the offset is not
- aligned, so we read it */
+ /* Validate that the chosen range is empty. */
+ if ((flags & MAP_FIXED_NOREPLACE)
+ && !page_check_range_empty(start, last)) {
+ errno = EEXIST;
+ goto fail;
+ }
+
+ /*
+ * Worst case: we cannot map the file because the offset is not
+ * aligned, so we read it instead.
+ */
if (!(flags & MAP_ANONYMOUS) &&
(offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
- /* msync() won't work here, so we return an error if write is
- possible while it is a shared mapping */
- if ((flags & MAP_TYPE) == MAP_SHARED &&
- (host_prot & PROT_WRITE)) {
+ /*
+ * msync() won't work here, so we return an error if writes are
+ * possible on a shared mapping.
+ */
+ if ((flags & MAP_TYPE) == MAP_SHARED
+ && (target_prot & PROT_WRITE)) {
errno = EINVAL;
goto fail;
}
retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
+ (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
+ | MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
- if (retaddr == -1)
+ if (retaddr == -1) {
goto fail;
- if (pread(fd, g2h_untagged(start), len, offset) == -1)
+ }
+ if (pread(fd, g2h_untagged(start), len, offset) == -1) {
goto fail;
- if (!(host_prot & PROT_WRITE)) {
+ }
+ if (!(target_prot & PROT_WRITE)) {
ret = target_mprotect(start, len, target_prot);
assert(ret == 0);
}
goto the_end;
}
-
+
/* handle the start of the mapping */
if (start > real_start) {
- if (real_end == real_start + qemu_host_page_size) {
+ if (real_last == real_start + qemu_host_page_size - 1) {
/* one single host page */
- ret = mmap_frag(real_start, start, end,
- host_prot, flags, fd, offset);
- if (ret == -1)
+ if (!mmap_frag(real_start, start, last,
+ target_prot, flags, fd, offset)) {
goto fail;
+ }
goto the_end1;
}
- ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
- host_prot, flags, fd, offset);
- if (ret == -1)
+ if (!mmap_frag(real_start, start,
+ real_start + qemu_host_page_size - 1,
+ target_prot, flags, fd, offset)) {
goto fail;
+ }
real_start += qemu_host_page_size;
}
/* handle the end of the mapping */
- if (end < real_end) {
- ret = mmap_frag(real_end - qemu_host_page_size,
- real_end - qemu_host_page_size, end,
- host_prot, flags, fd,
- offset + real_end - qemu_host_page_size - start);
- if (ret == -1)
+ if (last < real_last) {
+ abi_ulong real_page = real_last - qemu_host_page_size + 1;
+ if (!mmap_frag(real_page, real_page, last,
+ target_prot, flags, fd,
+ offset + real_page - start)) {
goto fail;
- real_end -= qemu_host_page_size;
+ }
+ real_last -= qemu_host_page_size;
}
/* map the middle (easier) */
- if (real_start < real_end) {
+ if (real_start < real_last) {
void *p;
- unsigned long offset1;
- if (flags & MAP_ANONYMOUS)
+ off_t offset1;
+
+ if (flags & MAP_ANONYMOUS) {
offset1 = 0;
- else
+ } else {
offset1 = offset + real_start - start;
- p = mmap(g2h_untagged(real_start), real_end - real_start,
- host_prot, flags, fd, offset1);
- if (p == MAP_FAILED)
+ }
+ p = mmap(g2h_untagged(real_start), real_last - real_start + 1,
+ target_to_host_prot(target_prot), flags, fd, offset1);
+ if (p == MAP_FAILED) {
goto fail;
+ }
passthrough_start = real_start;
- passthrough_end = real_end;
+ passthrough_last = real_last;
}
}
the_end1:
page_flags |= PAGE_ANON;
}
page_flags |= PAGE_RESET;
- if (passthrough_start == passthrough_end) {
- page_set_flags(start, start + len - 1, page_flags);
+ if (passthrough_start > passthrough_last) {
+ page_set_flags(start, last, page_flags);
} else {
if (start < passthrough_start) {
page_set_flags(start, passthrough_start - 1, page_flags);
}
- page_set_flags(passthrough_start, passthrough_end - 1,
+ page_set_flags(passthrough_start, passthrough_last,
page_flags | PAGE_PASSTHROUGH);
- if (passthrough_end < start + len) {
- page_set_flags(passthrough_end, start + len - 1, page_flags);
+ if (passthrough_last < last) {
+ page_set_flags(passthrough_last + 1, last, page_flags);
}
}
the_end:
return -1;
}
-static void mmap_reserve(abi_ulong start, abi_ulong size)
+static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
abi_ulong real_start;
- abi_ulong real_end;
- abi_ulong addr;
- abi_ulong end;
+ abi_ulong real_last;
+ abi_ulong real_len;
+ abi_ulong last;
+ abi_ulong a;
+ void *host_start;
int prot;
+ last = start + len - 1;
real_start = start & qemu_host_page_mask;
- real_end = HOST_PAGE_ALIGN(start + size);
- end = start + size;
- if (start > real_start) {
- /* handle host page containing start */
+ real_last = HOST_PAGE_ALIGN(last) - 1;
+
+ /*
+ * If guest pages remain on the first or last host pages,
+ * adjust the deallocation to retain those guest pages.
+ * The single page special case is required for the last page,
+ * lest real_start overflow to zero.
+ */
+ if (real_last - real_start < qemu_host_page_size) {
prot = 0;
- for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
+ for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a);
}
- if (real_end == real_start + qemu_host_page_size) {
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- end = real_end;
+ for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a + 1);
+ }
+ if (prot != 0) {
+ return;
}
- if (prot != 0)
+ } else {
+ for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a);
+ }
+ if (prot != 0) {
real_start += qemu_host_page_size;
- }
- if (end < real_end) {
- prot = 0;
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
}
- if (prot != 0)
- real_end -= qemu_host_page_size;
+
+ for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a + 1);
+ }
+ if (prot != 0) {
+ real_last -= qemu_host_page_size;
+ }
+
+ if (real_last < real_start) {
+ return;
+ }
}
- if (real_start != real_end) {
- mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
- MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
- -1, 0);
+
+ real_len = real_last - real_start + 1;
+ host_start = g2h_untagged(real_start);
+
+ if (reserved_va) {
+ void *ptr = mmap(host_start, real_len, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS
+ | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+ assert(ptr == host_start);
+ } else {
+ int ret = munmap(host_start, real_len);
+ assert(ret == 0);
}
}
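The two branches above implement different deallocation strategies: with a reserved VA window the freed range must remain owned by QEMU so later guest allocations cannot collide with host libraries, hence the PROT_NONE remap; without one, a plain munmap() suffices. A minimal illustration, assuming nothing beyond POSIX mmap semantics (this is not QEMU code):

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

static void reserve_or_unmap(void *host_start, size_t real_len, int reserved_va)
{
    if (reserved_va) {
        /* Keep the range owned by the process but inaccessible. */
        void *p = mmap(host_start, real_len, PROT_NONE,
                       MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                       -1, 0);
        assert(p == host_start);
    } else {
        int r = munmap(host_start, real_len);
        assert(r == 0);
    }
}

int main(void)
{
    size_t len = 1 << 20;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    assert(p != MAP_FAILED);
    reserve_or_unmap(p, len, 1);   /* reserved VA: re-reserve as PROT_NONE */
    reserve_or_unmap(p, len, 0);   /* no reservation: actually unmap */
    return 0;
}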
int target_munmap(abi_ulong start, abi_ulong len)
{
- abi_ulong end, real_start, real_end, addr;
- int prot, ret;
-
trace_target_munmap(start, len);
- if (start & ~TARGET_PAGE_MASK)
+ if (start & ~TARGET_PAGE_MASK) {
return -TARGET_EINVAL;
+ }
len = TARGET_PAGE_ALIGN(len);
if (len == 0 || !guest_range_valid_untagged(start, len)) {
return -TARGET_EINVAL;
}
mmap_lock();
- end = start + len;
- real_start = start & qemu_host_page_mask;
- real_end = HOST_PAGE_ALIGN(end);
-
- if (start > real_start) {
- /* handle host page containing start */
- prot = 0;
- for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- if (real_end == real_start + qemu_host_page_size) {
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- end = real_end;
- }
- if (prot != 0)
- real_start += qemu_host_page_size;
- }
- if (end < real_end) {
- prot = 0;
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- if (prot != 0)
- real_end -= qemu_host_page_size;
- }
-
- ret = 0;
- /* unmap what we can */
- if (real_start < real_end) {
- if (reserved_va) {
- mmap_reserve(real_start, real_end - real_start);
- } else {
- ret = munmap(g2h_untagged(real_start), real_end - real_start);
- }
- }
-
- if (ret == 0) {
- page_set_flags(start, start + len - 1, 0);
- }
+ mmap_reserve_or_unmap(start, len);
+ page_set_flags(start, start + len - 1, 0);
mmap_unlock();
- return ret;
+
+ return 0;
}
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
flags, g2h_untagged(new_addr));
if (reserved_va && host_addr != MAP_FAILED) {
- /* If new and old addresses overlap then the above mremap will
- already have failed with EINVAL. */
- mmap_reserve(old_addr, old_size);
+ /*
+ * If new and old addresses overlap then the above mremap will
+ * already have failed with EINVAL.
+ */
+ mmap_reserve_or_unmap(old_addr, old_size);
}
} else if (flags & MREMAP_MAYMOVE) {
abi_ulong mmap_start;
flags | MREMAP_FIXED,
g2h_untagged(mmap_start));
if (reserved_va) {
- mmap_reserve(old_addr, old_size);
+ mmap_reserve_or_unmap(old_addr, old_size);
}
}
} else {
errno = ENOMEM;
host_addr = MAP_FAILED;
} else if (reserved_va && old_size > new_size) {
- mmap_reserve(old_addr + old_size, old_size - new_size);
+ mmap_reserve_or_unmap(old_addr + old_size,
+ old_size - new_size);
}
}
} else {
return new_addr;
}
-static bool can_passthrough_madvise(abi_ulong start, abi_ulong end)
-{
- ulong addr;
-
- if ((start | end) & ~qemu_host_page_mask) {
- return false;
- }
-
- for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- if (!(page_get_flags(addr) & PAGE_PASSTHROUGH)) {
- return false;
- }
- }
-
- return true;
-}
-
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
- abi_ulong len, end;
+ abi_ulong len;
int ret = 0;
if (start & ~TARGET_PAGE_MASK) {
return -TARGET_EINVAL;
}
- len = TARGET_PAGE_ALIGN(len_in);
-
- if (len_in && !len) {
- return -TARGET_EINVAL;
- }
-
- end = start + len;
- if (end < start) {
- return -TARGET_EINVAL;
- }
-
- if (end == start) {
+ if (len_in == 0) {
return 0;
}
-
- if (!guest_range_valid_untagged(start, len)) {
+ len = TARGET_PAGE_ALIGN(len_in);
+ if (len == 0 || !guest_range_valid_untagged(start, len)) {
return -TARGET_EINVAL;
}
*
* A straight passthrough for those may not be safe because qemu sometimes
* turns private file-backed mappings into anonymous mappings.
- * can_passthrough_madvise() helps to check if a passthrough is possible by
- * comparing mappings that are known to have the same semantics in the host
- * and the guest. In this case passthrough is safe.
+ * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
+ * same semantics for the host as for the guest.
*
* We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
* return failure if not.
ret = -EINVAL;
/* fall through */
case MADV_DONTNEED:
- if (can_passthrough_madvise(start, end)) {
+ if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
ret = get_errno(madvise(g2h_untagged(start), len, advice));
if ((advice == MADV_DONTNEED) && (ret == 0)) {
page_reset_target_data(start, start + len - 1);
+#ifndef PPC_TARGET_MMAN_H
+#define PPC_TARGET_MMAN_H
+
+#define TARGET_MAP_NORESERVE 0x40
+#define TARGET_MAP_LOCKED 0x80
+
#include "../generic/target_mman.h"
+
+#endif
: !guest_range_valid_untagged(addr, size)) {
return false;
}
- return page_check_range((target_ulong)addr, size, type) == 0;
+ return page_check_range((target_ulong)addr, size, type);
}
static inline bool access_ok(CPUState *cpu, int type,
#define TARGET_NR_accept4 242
#define TARGET_NR_arch_specific_syscall 244
#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15)
+#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14)
#define TARGET_NR_prlimit64 261
#define TARGET_NR_fanotify_init 262
#define TARGET_NR_fanotify_mark 263
#define TARGET_NR_recvmmsg 243
#define TARGET_NR_arch_specific_syscall 244
#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15)
+#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14)
#define TARGET_NR_wait4 260
#define TARGET_NR_prlimit64 261
#define TARGET_NR_fanotify_init 262
+#ifndef SPARC_TARGET_MMAN_H
+#define SPARC_TARGET_MMAN_H
+
+#define TARGET_MAP_NORESERVE 0x40
+#define TARGET_MAP_LOCKED 0x100
+#define TARGET_MAP_GROWSDOWN 0x0200
+
#include "../generic/target_mman.h"
+
+#endif
*/
struct flags {
abi_long f_value; /* flag */
+ abi_long f_mask; /* mask */
const char *f_string; /* stringified flag */
};
+/* No 'struct flags' element should have a zero mask. */
+#define FLAG_BASIC(V, M, N) { V, M | QEMU_BUILD_BUG_ON_ZERO(!(M)), N }
+
/* common flags for all architectures */
-#define FLAG_GENERIC(name) { name, #name }
+#define FLAG_GENERIC_MASK(V, M) FLAG_BASIC(V, M, #V)
+#define FLAG_GENERIC(V) FLAG_BASIC(V, V, #V)
/* target specific flags (syscall_defs.h has TARGET_<flag>) */
-#define FLAG_TARGET(name) { TARGET_ ## name, #name }
+#define FLAG_TARGET_MASK(V, M) FLAG_BASIC(TARGET_##V, TARGET_##M, #V)
+#define FLAG_TARGET(V) FLAG_BASIC(TARGET_##V, TARGET_##V, #V)
/* end of flags array */
-#define FLAG_END { 0, NULL }
+#define FLAG_END { 0, 0, NULL }
/* Structure used to translate enumerated values into strings */
struct enums {
#endif
UNUSED static const struct flags access_flags[] = {
- FLAG_GENERIC(F_OK),
+ FLAG_GENERIC_MASK(F_OK, R_OK | W_OK | X_OK),
FLAG_GENERIC(R_OK),
FLAG_GENERIC(W_OK),
FLAG_GENERIC(X_OK),
};
UNUSED static const struct flags open_access_flags[] = {
- FLAG_TARGET(O_RDONLY),
- FLAG_TARGET(O_WRONLY),
- FLAG_TARGET(O_RDWR),
+ FLAG_TARGET_MASK(O_RDONLY, O_ACCMODE),
+ FLAG_TARGET_MASK(O_WRONLY, O_ACCMODE),
+ FLAG_TARGET_MASK(O_RDWR, O_ACCMODE),
FLAG_END,
};
FLAG_TARGET(O_CREAT),
FLAG_TARGET(O_DIRECTORY),
FLAG_TARGET(O_EXCL),
+#if TARGET_O_LARGEFILE != 0
FLAG_TARGET(O_LARGEFILE),
+#endif
FLAG_TARGET(O_NOCTTY),
FLAG_TARGET(O_NOFOLLOW),
FLAG_TARGET(O_NONBLOCK), /* also O_NDELAY */
};
UNUSED static const struct flags mmap_prot_flags[] = {
- FLAG_GENERIC(PROT_NONE),
+ FLAG_GENERIC_MASK(PROT_NONE, PROT_READ | PROT_WRITE | PROT_EXEC),
FLAG_GENERIC(PROT_EXEC),
FLAG_GENERIC(PROT_READ),
FLAG_GENERIC(PROT_WRITE),
};
UNUSED static const struct flags mmap_flags[] = {
- FLAG_TARGET(MAP_SHARED),
- FLAG_TARGET(MAP_PRIVATE),
+ FLAG_TARGET_MASK(MAP_SHARED, MAP_TYPE),
+ FLAG_TARGET_MASK(MAP_PRIVATE, MAP_TYPE),
+ FLAG_TARGET_MASK(MAP_SHARED_VALIDATE, MAP_TYPE),
FLAG_TARGET(MAP_ANONYMOUS),
FLAG_TARGET(MAP_DENYWRITE),
+ FLAG_TARGET(MAP_EXECUTABLE),
FLAG_TARGET(MAP_FIXED),
+ FLAG_TARGET(MAP_FIXED_NOREPLACE),
FLAG_TARGET(MAP_GROWSDOWN),
- FLAG_TARGET(MAP_EXECUTABLE),
-#ifdef MAP_LOCKED
+ FLAG_TARGET(MAP_HUGETLB),
FLAG_TARGET(MAP_LOCKED),
-#endif
-#ifdef MAP_NONBLOCK
FLAG_TARGET(MAP_NONBLOCK),
-#endif
FLAG_TARGET(MAP_NORESERVE),
-#ifdef MAP_POPULATE
FLAG_TARGET(MAP_POPULATE),
-#endif
-#ifdef TARGET_MAP_UNINITIALIZED
+ FLAG_TARGET(MAP_STACK),
+ FLAG_TARGET(MAP_SYNC),
+#if TARGET_MAP_UNINITIALIZED != 0
FLAG_TARGET(MAP_UNINITIALIZED),
#endif
- FLAG_TARGET(MAP_HUGETLB),
- FLAG_TARGET(MAP_STACK),
FLAG_END,
};
FLAG_GENERIC(AT_SYMLINK_NOFOLLOW),
#endif
#ifdef AT_STATX_SYNC_AS_STAT
- FLAG_GENERIC(AT_STATX_SYNC_AS_STAT),
+ FLAG_GENERIC_MASK(AT_STATX_SYNC_AS_STAT, AT_STATX_SYNC_TYPE),
#endif
#ifdef AT_STATX_FORCE_SYNC
- FLAG_GENERIC(AT_STATX_FORCE_SYNC),
+ FLAG_GENERIC_MASK(AT_STATX_FORCE_SYNC, AT_STATX_SYNC_TYPE),
#endif
#ifdef AT_STATX_DONT_SYNC
- FLAG_GENERIC(AT_STATX_DONT_SYNC),
+ FLAG_GENERIC_MASK(AT_STATX_DONT_SYNC, AT_STATX_SYNC_TYPE),
#endif
FLAG_END,
};
const char *sep = "";
int n;
- if ((flags == 0) && (f->f_value == 0)) {
- qemu_log("%s%s", f->f_string, get_comma(last));
- return;
- }
for (n = 0; f->f_string != NULL; f++) {
- if ((f->f_value != 0) && ((flags & f->f_value) == f->f_value)) {
+ if ((flags & f->f_mask) == f->f_value) {
qemu_log("%s%s", sep, f->f_string);
- flags &= ~f->f_value;
+ flags &= ~f->f_mask;
sep = "|";
n++;
}
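The new f_mask/f_value matching is what lets zero-valued flags such as O_RDONLY print correctly without the old special case: a zero value can still match as long as its mask is non-zero. A self-contained sketch with made-up flag values (the table shape mirrors struct flags above, but none of these constants are the real target values):

#include <stdio.h>

struct flag { int value; int mask; const char *name; };

#define ACCMODE 0x3
static const struct flag tbl[] = {
    { 0x0,  ACCMODE, "O_RDONLY" },   /* value 0, but the mask is non-zero */
    { 0x1,  ACCMODE, "O_WRONLY" },
    { 0x2,  ACCMODE, "O_RDWR" },
    { 0x40, 0x40,    "O_CREAT" },
    { 0, 0, NULL },
};

int main(void)
{
    int flags = 0x40;                      /* O_CREAT | O_RDONLY */
    const char *sep = "";
    for (const struct flag *f = tbl; f->name != NULL; f++) {
        if ((flags & f->mask) == f->value) {
            printf("%s%s", sep, f->name);  /* prints O_RDONLY|O_CREAT */
            flags &= ~f->mask;
            sep = "|";
        }
    }
    printf("\n");
    return 0;
}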
#if defined(TARGET_NR_mmap) || defined(TARGET_NR_mmap2)
static void
-print_mmap(CPUArchState *cpu_env, const struct syscallname *name,
+print_mmap_both(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
- abi_long arg3, abi_long arg4, abi_long arg5)
-{
+ abi_long arg3, abi_long arg4, abi_long arg5,
+ bool is_old_mmap)
+{
+ if (is_old_mmap) {
+ abi_ulong *v;
+ abi_ulong argp = arg0;
+ if (!(v = lock_user(VERIFY_READ, argp, 6 * sizeof(abi_ulong), 1))) {
+ return;
+ }
+ arg0 = tswapal(v[0]);
+ arg1 = tswapal(v[1]);
+ arg2 = tswapal(v[2]);
+ arg3 = tswapal(v[3]);
+ arg4 = tswapal(v[4]);
+ arg5 = tswapal(v[5]);
+ unlock_user(v, argp, 0);
+ }
print_syscall_prologue(name);
print_pointer(arg0, 0);
print_raw_param("%d", arg1, 0);
print_raw_param("%#x", arg5, 1);
print_syscall_epilogue(name);
}
-#define print_mmap2 print_mmap
+#endif
+
+#if defined(TARGET_NR_mmap)
+static void
+print_mmap(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ return print_mmap_both(cpu_env, name, arg0, arg1, arg2, arg3,
+ arg4, arg5,
+#if defined(TARGET_NR_mmap2)
+ true
+#else
+ false
+#endif
+ );
+}
+#endif
+
+#if defined(TARGET_NR_mmap2)
+static void
+print_mmap2(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ return print_mmap_both(cpu_env, name, arg0, arg1, arg2, arg3,
+ arg4, arg5, false);
+}
#endif
#ifdef TARGET_NR_mprotect
#endif
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
-_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
+_syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
#endif
#if (defined(TARGET_NR_getdents) && \
!defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
(defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
-_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
+_syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
-_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
- loff_t *, res, uint, wh);
+_syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
+ loff_t *, res, unsigned int, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
int, options, struct rusage *, rusage)
+safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
return target_type;
}
-static abi_ulong target_brk;
+static abi_ulong target_brk, initial_target_brk;
static abi_ulong brk_page;
void target_set_brk(abi_ulong new_brk)
{
- target_brk = new_brk;
+ target_brk = TARGET_PAGE_ALIGN(new_brk);
+ initial_target_brk = target_brk;
brk_page = HOST_PAGE_ALIGN(target_brk);
}
return target_brk;
}
+ /* Do not allow brk to shrink below its initial value. */
+ if (brk_val < initial_target_brk) {
+ brk_val = initial_target_brk;
+ }
+
new_brk = TARGET_PAGE_ALIGN(brk_val);
new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
/* brk_val and old target_brk might be on the same page */
if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
- if (brk_val > target_brk) {
- /* empty remaining bytes in (possibly larger) host page */
- memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
- }
+ /* empty remaining bytes in (possibly larger) host page */
+ memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
target_brk = brk_val;
return target_brk;
}
/* Release heap if necessary */
if (new_brk < target_brk) {
/* empty remaining bytes in (possibly larger) host page */
- memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);
+ memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
/* free unused host pages and set new brk_page */
target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
* itself); instead we treat "mapped but at wrong address" as
* a failure and unmap again.
*/
- new_alloc_size = new_host_brk_page - brk_page;
- if (new_alloc_size) {
+ if (new_host_brk_page > brk_page) {
+ new_alloc_size = new_host_brk_page - brk_page;
mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
PROT_READ|PROT_WRITE,
MAP_ANON|MAP_PRIVATE, 0, 0));
} else {
+ new_alloc_size = 0;
mapped_addr = brk_page;
}
* come from the remaining part of the previous page: it may
* contain garbage data due to a previous heap usage (grown
* then shrunken). */
- memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
+ memset(g2h_untagged(brk_page), 0, HOST_PAGE_ALIGN(brk_page) - brk_page);
target_brk = brk_val;
brk_page = new_host_brk_page;
}
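The brk changes above juggle two roundings of the same value: the target brk rounded to target pages and the host allocation boundary rounded to (possibly larger) host pages; everything between them must be zeroed so the guest never sees stale heap bytes. A sketch of the relations with illustrative 4K/16K page sizes (ALIGN_UP stands in for TARGET_PAGE_ALIGN / HOST_PAGE_ALIGN):

#include <stdio.h>

#define ALIGN_UP(v, a) (((v) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned long target_page = 0x1000, host_page = 0x4000;
    unsigned long brk_val = 0x40001234;

    unsigned long new_brk = ALIGN_UP(brk_val, target_page);         /* 0x40002000 */
    unsigned long new_host_brk_page = ALIGN_UP(brk_val, host_page); /* 0x40004000 */

    /* Bytes between the target brk and the end of its host page must
     * be zeroed, as the memset() calls in the patch do. */
    printf("zero [%#lx, %#lx)\n", new_brk, new_host_brk_page);
    return 0;
}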
#endif
-static inline abi_ulong do_shmat(CPUArchState *cpu_env,
- int shmid, abi_ulong shmaddr, int shmflg)
+static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
+ abi_ulong shmaddr, int shmflg)
{
CPUState *cpu = env_cpu(cpu_env);
- abi_long raddr;
+ abi_ulong raddr;
void *host_raddr;
struct shmid_ds shm_info;
- int i,ret;
+ int i, ret;
abi_ulong shmlba;
/* shmat pointers are always untagged */
if (host_raddr == (void *)-1) {
mmap_unlock();
- return get_errno((long)host_raddr);
+ return get_errno((intptr_t)host_raddr);
}
- raddr=h2g((unsigned long)host_raddr);
+ raddr = h2g((uintptr_t)host_raddr);
page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
PAGE_VALID | PAGE_RESET | PAGE_READ |
mmap_unlock();
return raddr;
-
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
.print = print_termios,
};
+/* If the host does not provide these bits, they may be safely discarded. */
+#ifndef MAP_SYNC
+#define MAP_SYNC 0
+#endif
+#ifndef MAP_UNINITIALIZED
+#define MAP_UNINITIALIZED 0
+#endif
+
static const bitmask_transtbl mmap_flags_tbl[] = {
- { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
- { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
+ { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
+ { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
+ { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
+ MAP_TYPE, MAP_SHARED_VALIDATE },
{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
MAP_ANONYMOUS, MAP_ANONYMOUS },
Recognize it for the target insofar as we do not want to pass
it through to the host. */
{ TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
+ { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
+ { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
+ { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
+ { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
+ MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
+ { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
+ MAP_UNINITIALIZED, MAP_UNINITIALIZED },
{ 0, 0, 0, 0 }
};
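Each table row is a {target_mask, target_bits, host_mask, host_bits} quadruple; matching on a mask rather than on the bits themselves is what lets a multi-bit field such as MAP_TYPE (shared vs. private vs. shared-validate) translate exactly. A sketch of how such a table can be consumed, with invented bit positions (this is not QEMU's target_to_host_bitmask, which also uses the host mask):

struct trans { unsigned tmask, tbits, hmask, hbits; };

static unsigned target_to_host_bits(unsigned target, const struct trans *t)
{
    unsigned host = 0;
    for (; t->tmask != 0; t++) {
        if ((target & t->tmask) == t->tbits) {
            host |= t->hbits;
        }
    }
    return host;
}

int main(void)
{
    /* Illustrative values: a 2-bit MAP_TYPE field plus one 1-bit flag
     * that sits at a different position on the host. */
    static const struct trans tbl[] = {
        { 0x3,  0x1,  0x3,  0x2 },   /* target SHARED  -> host SHARED */
        { 0x3,  0x2,  0x3,  0x1 },   /* target PRIVATE -> host PRIVATE */
        { 0x10, 0x10, 0x20, 0x20 },  /* FIXED, relocated bit */
        { 0, 0, 0, 0 },
    };
    return target_to_host_bits(0x1 | 0x10, tbl) == 0x22 ? 0 : 1;
}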
max = h2g_valid(max - 1) ?
max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
- if (page_check_range(h2g(min), max - min, flags) == -1) {
+ if (!page_check_range(h2g(min), max - min, flags)) {
continue;
}
return ret;
}
-static int do_execveat(CPUArchState *cpu_env, int dirfd,
- abi_long pathname, abi_long guest_argp,
- abi_long guest_envp, int flags)
+static int do_execv(CPUArchState *cpu_env, int dirfd,
+ abi_long pathname, abi_long guest_argp,
+ abi_long guest_envp, int flags, bool is_execveat)
{
int ret;
char **argp, **envp;
goto execve_efault;
}
+ const char *exe = p;
if (is_proc_myself(p, "exe")) {
- ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
- } else {
- ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
+ exe = exec_path;
}
+ ret = is_execveat
+ ? safe_execveat(dirfd, exe, argp, envp, flags)
+ : safe_execve(exe, argp, envp);
+ ret = get_errno(ret);
unlock_user(p, pathname, 0);
}
#endif /* TARGET_NR_getdents64 */
+#if defined(TARGET_NR_riscv_hwprobe)
+
+#define RISCV_HWPROBE_KEY_MVENDORID 0
+#define RISCV_HWPROBE_KEY_MARCHID 1
+#define RISCV_HWPROBE_KEY_MIMPID 2
+
+#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
+#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
+
+#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
+#define RISCV_HWPROBE_IMA_FD (1 << 0)
+#define RISCV_HWPROBE_IMA_C (1 << 1)
+
+#define RISCV_HWPROBE_KEY_CPUPERF_0 5
+#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
+#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
+#define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
+#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
+#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
+#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
+
+struct riscv_hwprobe {
+ abi_llong key;
+ abi_ullong value;
+};
+
+static void risc_hwprobe_fill_pairs(CPURISCVState *env,
+ struct riscv_hwprobe *pair,
+ size_t pair_count)
+{
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
+
+ for (; pair_count > 0; pair_count--, pair++) {
+ abi_llong key;
+ abi_ullong value;
+ __put_user(0, &pair->value);
+ __get_user(key, &pair->key);
+ switch (key) {
+ case RISCV_HWPROBE_KEY_MVENDORID:
+ __put_user(cfg->mvendorid, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_MARCHID:
+ __put_user(cfg->marchid, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_MIMPID:
+ __put_user(cfg->mimpid, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+ value = riscv_has_ext(env, RVI) &&
+ riscv_has_ext(env, RVM) &&
+ riscv_has_ext(env, RVA) ?
+ RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
+ __put_user(value, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_IMA_EXT_0:
+ value = riscv_has_ext(env, RVF) &&
+ riscv_has_ext(env, RVD) ?
+ RISCV_HWPROBE_IMA_FD : 0;
+ value |= riscv_has_ext(env, RVC) ?
+ RISCV_HWPROBE_IMA_C : pair->value;
+ __put_user(value, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_CPUPERF_0:
+ __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
+ break;
+ default:
+ __put_user(-1, &pair->key);
+ break;
+ }
+ }
+}
+
+static int cpu_set_valid(abi_long arg3, abi_long arg4)
+{
+ int ret, i, tmp;
+ size_t host_mask_size, target_mask_size;
+ unsigned long *host_mask;
+
+ /*
+ * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
+ * arg3 contains the cpu count.
+ */
+ tmp = (8 * sizeof(abi_ulong));
+ target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
+ host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
+ ~(sizeof(*host_mask) - 1);
+
+ host_mask = alloca(host_mask_size);
+
+ ret = target_to_host_cpu_mask(host_mask, host_mask_size,
+ arg4, target_mask_size);
+ if (ret != 0) {
+ return ret;
+ }
+
+ for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
+ if (host_mask[i] != 0) {
+ return 0;
+ }
+ }
+ return -TARGET_EINVAL;
+}
+
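The two roundings above size the mask differently for guest and host word widths. A worked example with assumed widths (32-bit abi_ulong guest, 64-bit host), showing the values cpu_set_valid would compute:

#include <stdio.h>

int main(void)
{
    unsigned long cpu_count = 40;            /* arg3 */
    unsigned long abi_bits = 8 * 4;          /* bits per 32-bit abi_ulong */
    unsigned long target_sz =
        ((cpu_count + abi_bits - 1) / abi_bits) * 4;  /* 2 words = 8 bytes */
    unsigned long host_sz =
        (target_sz + sizeof(unsigned long) - 1) & ~(sizeof(unsigned long) - 1);
    printf("target=%lu host=%lu\n", target_sz, host_sz);  /* 8 and 8 */
    return 0;
}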
+static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
+ abi_long arg2, abi_long arg3,
+ abi_long arg4, abi_long arg5)
+{
+ int ret;
+ struct riscv_hwprobe *host_pairs;
+
+ /* flags must be 0 */
+ if (arg5 != 0) {
+ return -TARGET_EINVAL;
+ }
+
+ /* check cpu_set */
+ if (arg3 != 0) {
+ ret = cpu_set_valid(arg3, arg4);
+ if (ret != 0) {
+ return ret;
+ }
+ } else if (arg4 != 0) {
+ return -TARGET_EINVAL;
+ }
+
+ /* no pairs */
+ if (arg2 == 0) {
+ return 0;
+ }
+
+ host_pairs = lock_user(VERIFY_WRITE, arg1,
+ sizeof(*host_pairs) * (size_t)arg2, 0);
+ if (host_pairs == NULL) {
+ return -TARGET_EFAULT;
+ }
+ risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
+ unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
+ return 0;
+}
+#endif /* TARGET_NR_riscv_hwprobe */
+
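For reference, a guest-side sketch of invoking the syscall emulated above. The syscall number is 244 + 14 per the TARGET_NR_* defines earlier, and the struct layout follows struct riscv_hwprobe above; this assumes a RISC-V Linux toolchain and is only illustrative:

#include <stdio.h>
#include <unistd.h>

struct riscv_hwprobe { long long key; unsigned long long value; };

int main(void)
{
#if defined(__riscv)
    struct riscv_hwprobe pair = { .key = 3 /* RISCV_HWPROBE_KEY_BASE_BEHAVIOR */ };
    /* riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags) */
    long ret = syscall(244 + 14, &pair, 1, 0, (void *)0, 0);
    if (ret == 0 && pair.key != -1) {
        printf("base behavior: %#llx\n", pair.value);
    }
#endif
    return 0;
}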
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
return ret;
#endif
case TARGET_NR_execveat:
- return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
+ return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
case TARGET_NR_execve:
- return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
+ return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
case TARGET_NR_chdir:
if (!(p = lock_user_string(arg1)))
return -TARGET_EFAULT;
#endif
ret = target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
- arg5, arg6 << MMAP_SHIFT);
+ arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
return get_errno(ret);
#endif
case TARGET_NR_munmap:
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
case TARGET_NR_clock_adjtime:
{
- struct timex htx, *phtx = &htx;
+ struct timex htx;
- if (target_to_host_timex(phtx, arg2) != 0) {
+ if (target_to_host_timex(&htx, arg2) != 0) {
return -TARGET_EFAULT;
}
- ret = get_errno(clock_adjtime(arg1, phtx));
- if (!is_error(ret) && phtx) {
- if (host_to_target_timex(arg2, phtx) != 0) {
- return -TARGET_EFAULT;
- }
+ ret = get_errno(clock_adjtime(arg1, &htx));
+ if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
+ return -TARGET_EFAULT;
}
}
return ret;
return ret;
#endif
+#if defined(TARGET_NR_riscv_hwprobe)
+ case TARGET_NR_riscv_hwprobe:
+ return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
+#endif
+
default:
qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
return -TARGET_ENOSYS;
#define TARGET_SYS_SENDMMSG 20 /* sendmmsg() */
#define IPCOP_CALL(VERSION, OP) ((VERSION) << 16 | (OP))
-#define IPCOP_semop 1
-#define IPCOP_semget 2
-#define IPCOP_semctl 3
-#define IPCOP_semtimedop 4
-#define IPCOP_msgsnd 11
-#define IPCOP_msgrcv 12
-#define IPCOP_msgget 13
-#define IPCOP_msgctl 14
-#define IPCOP_shmat 21
-#define IPCOP_shmdt 22
-#define IPCOP_shmget 23
-#define IPCOP_shmctl 24
+#define IPCOP_semop 1
+#define IPCOP_semget 2
+#define IPCOP_semctl 3
+#define IPCOP_semtimedop 4
+#define IPCOP_msgsnd 11
+#define IPCOP_msgrcv 12
+#define IPCOP_msgget 13
+#define IPCOP_msgctl 14
+#define IPCOP_shmat 21
+#define IPCOP_shmdt 22
+#define IPCOP_shmget 23
+#define IPCOP_shmctl 24
#define TARGET_SEMOPM 500
* this explicit here. Please be sure to use the decoding macros
* below from now on.
*/
-#define TARGET_IOC_NRBITS 8
-#define TARGET_IOC_TYPEBITS 8
+#define TARGET_IOC_NRBITS 8
+#define TARGET_IOC_TYPEBITS 8
-#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
- || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
- || (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \
+#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
+ || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
+ || (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \
|| defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
- /* 16 bit uid wrappers emulation */
+/* 16 bit uid wrappers emulation */
#define USE_UID16
#define target_id uint16_t
#else
-#define target_id uint32_t
+#define target_id abi_uint
#endif
-#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
- || defined(TARGET_M68K) || defined(TARGET_CRIS) \
- || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
- || defined(TARGET_NIOS2) || defined(TARGET_RISCV) \
+#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
+ || defined(TARGET_M68K) || defined(TARGET_CRIS) \
+ || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
+ || defined(TARGET_NIOS2) || defined(TARGET_RISCV) \
|| defined(TARGET_XTENSA) || defined(TARGET_LOONGARCH64)
-#define TARGET_IOC_SIZEBITS 14
-#define TARGET_IOC_DIRBITS 2
+#define TARGET_IOC_SIZEBITS 14
+#define TARGET_IOC_DIRBITS 2
-#define TARGET_IOC_NONE 0U
+#define TARGET_IOC_NONE 0U
#define TARGET_IOC_WRITE 1U
-#define TARGET_IOC_READ 2U
+#define TARGET_IOC_READ 2U
-#elif defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
- defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) || \
- defined(TARGET_MIPS)
+#elif defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
+ defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) || \
+ defined(TARGET_MIPS)
-#define TARGET_IOC_SIZEBITS 13
-#define TARGET_IOC_DIRBITS 3
+#define TARGET_IOC_SIZEBITS 13
+#define TARGET_IOC_DIRBITS 3
-#define TARGET_IOC_NONE 1U
-#define TARGET_IOC_READ 2U
+#define TARGET_IOC_NONE 1U
+#define TARGET_IOC_READ 2U
#define TARGET_IOC_WRITE 4U
#elif defined(TARGET_HPPA)
#error unsupported CPU
#endif
-#define TARGET_IOC_NRMASK ((1 << TARGET_IOC_NRBITS)-1)
-#define TARGET_IOC_TYPEMASK ((1 << TARGET_IOC_TYPEBITS)-1)
-#define TARGET_IOC_SIZEMASK ((1 << TARGET_IOC_SIZEBITS)-1)
-#define TARGET_IOC_DIRMASK ((1 << TARGET_IOC_DIRBITS)-1)
+#define TARGET_IOC_NRMASK ((1 << TARGET_IOC_NRBITS)-1)
+#define TARGET_IOC_TYPEMASK ((1 << TARGET_IOC_TYPEBITS)-1)
+#define TARGET_IOC_SIZEMASK ((1 << TARGET_IOC_SIZEBITS)-1)
+#define TARGET_IOC_DIRMASK ((1 << TARGET_IOC_DIRBITS)-1)
-#define TARGET_IOC_NRSHIFT 0
-#define TARGET_IOC_TYPESHIFT (TARGET_IOC_NRSHIFT+TARGET_IOC_NRBITS)
-#define TARGET_IOC_SIZESHIFT (TARGET_IOC_TYPESHIFT+TARGET_IOC_TYPEBITS)
-#define TARGET_IOC_DIRSHIFT (TARGET_IOC_SIZESHIFT+TARGET_IOC_SIZEBITS)
+#define TARGET_IOC_NRSHIFT 0
+#define TARGET_IOC_TYPESHIFT (TARGET_IOC_NRSHIFT+TARGET_IOC_NRBITS)
+#define TARGET_IOC_SIZESHIFT (TARGET_IOC_TYPESHIFT+TARGET_IOC_TYPEBITS)
+#define TARGET_IOC_DIRSHIFT (TARGET_IOC_SIZESHIFT+TARGET_IOC_SIZEBITS)
-#define TARGET_IOC(dir,type,nr,size) \
- (((dir) << TARGET_IOC_DIRSHIFT) | \
- ((type) << TARGET_IOC_TYPESHIFT) | \
- ((nr) << TARGET_IOC_NRSHIFT) | \
- ((size) << TARGET_IOC_SIZESHIFT))
+#define TARGET_IOC(dir,type,nr,size) \
+ (((dir) << TARGET_IOC_DIRSHIFT) | \
+ ((type) << TARGET_IOC_TYPESHIFT) | \
+ ((nr) << TARGET_IOC_NRSHIFT) | \
+ ((size) << TARGET_IOC_SIZESHIFT))
/* used to create numbers */
-#define TARGET_IO(type,nr) TARGET_IOC(TARGET_IOC_NONE,(type),(nr),0)
-#define TARGET_IOR(type,nr,size) TARGET_IOC(TARGET_IOC_READ,(type),(nr),sizeof(size))
-#define TARGET_IOW(type,nr,size) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),sizeof(size))
-#define TARGET_IOWR(type,nr,size) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),sizeof(size))
+#define TARGET_IO(type,nr) TARGET_IOC(TARGET_IOC_NONE,(type),(nr),0)
+#define TARGET_IOR(type,nr,size) TARGET_IOC(TARGET_IOC_READ,(type),(nr),sizeof(size))
+#define TARGET_IOW(type,nr,size) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),sizeof(size))
+#define TARGET_IOWR(type,nr,size) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),sizeof(size))
/* the size is automatically computed for these defines */
-#define TARGET_IORU(type,nr) TARGET_IOC(TARGET_IOC_READ,(type),(nr),TARGET_IOC_SIZEMASK)
-#define TARGET_IOWU(type,nr) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
-#define TARGET_IOWRU(type,nr) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
+#define TARGET_IORU(type,nr) TARGET_IOC(TARGET_IOC_READ,(type),(nr),TARGET_IOC_SIZEMASK)
+#define TARGET_IOWU(type,nr) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
+#define TARGET_IOWRU(type,nr) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
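A worked decoding of one request number under the shift/mask layout defined above, using the common 14-bit-size/2-bit-dir split (some targets differ, as the #ifs show); the field widths are copied from the defines, the request value mimics TARGET_TUNGETIFF:

#include <stdio.h>

#define NRBITS   8
#define TYPEBITS 8
#define SIZEBITS 14

#define NRSHIFT   0
#define TYPESHIFT (NRSHIFT + NRBITS)
#define SIZESHIFT (TYPESHIFT + TYPEBITS)
#define DIRSHIFT  (SIZESHIFT + SIZEBITS)

int main(void)
{
    /* dir=READ(2), type='T', nr=210, size=4: like TUNGETIFF above */
    unsigned req = (2u << DIRSHIFT) | ('T' << TYPESHIFT)
                 | (210u << NRSHIFT) | (4u << SIZESHIFT);
    printf("dir=%u type='%c' nr=%u size=%u\n",
           req >> DIRSHIFT,
           (req >> TYPESHIFT) & ((1 << TYPEBITS) - 1),
           (req >> NRSHIFT) & ((1 << NRBITS) - 1),
           (req >> SIZESHIFT) & ((1 << SIZEBITS) - 1));
    return 0;
}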
struct target_sockaddr {
abi_ushort sa_family;
};
struct target_sockaddr_in {
- abi_ushort sin_family;
- abi_short sin_port; /* big endian */
- struct target_in_addr sin_addr;
- uint8_t __pad[sizeof(struct target_sockaddr) -
- sizeof(abi_ushort) - sizeof(abi_short) -
- sizeof(struct target_in_addr)];
+ abi_ushort sin_family;
+ abi_short sin_port; /* big endian */
+ struct target_in_addr sin_addr;
+ uint8_t __pad[sizeof(struct target_sockaddr) -
+ sizeof(abi_ushort) - sizeof(abi_short) -
+ sizeof(struct target_in_addr)];
};
struct target_sockaddr_in6 {
struct target_ip_mreq_source {
/* big endian */
- uint32_t imr_multiaddr;
- uint32_t imr_interface;
- uint32_t imr_sourceaddr;
+ abi_uint imr_multiaddr;
+ abi_uint imr_interface;
+ abi_uint imr_sourceaddr;
};
struct target_linger {
};
struct target_msghdr {
- abi_long msg_name; /* Socket name */
- int msg_namelen; /* Length of name */
- abi_long msg_iov; /* Data blocks */
- abi_long msg_iovlen; /* Number of blocks */
- abi_long msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
- abi_long msg_controllen; /* Length of cmsg list */
- unsigned int msg_flags;
+ abi_long msg_name; /* Socket name */
+ abi_int msg_namelen; /* Length of name */
+ abi_long msg_iov; /* Data blocks */
+ abi_long msg_iovlen; /* Number of blocks */
+ abi_long msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
+ abi_long msg_controllen; /* Length of cmsg list */
+ abi_uint msg_flags;
};
struct target_cmsghdr {
abi_long cmsg_len;
- int cmsg_level;
- int cmsg_type;
+ abi_int cmsg_level;
+ abi_int cmsg_type;
};
#define TARGET_CMSG_DATA(cmsg) ((unsigned char *) ((struct target_cmsghdr *) (cmsg) + 1))
-#define TARGET_CMSG_NXTHDR(mhdr, cmsg, cmsg_start) \
- __target_cmsg_nxthdr(mhdr, cmsg, cmsg_start)
-#define TARGET_CMSG_ALIGN(len) (((len) + sizeof (abi_long) - 1) \
- & (size_t) ~(sizeof (abi_long) - 1))
+#define TARGET_CMSG_NXTHDR(mhdr, cmsg, cmsg_start) \
+ __target_cmsg_nxthdr(mhdr, cmsg, cmsg_start)
+#define TARGET_CMSG_ALIGN(len) (((len) + sizeof (abi_long) - 1) \
+ & (size_t) ~(sizeof (abi_long) - 1))
#define TARGET_CMSG_SPACE(len) (sizeof(struct target_cmsghdr) + \
TARGET_CMSG_ALIGN(len))
#define TARGET_CMSG_LEN(len) (sizeof(struct target_cmsghdr) + (len))
struct target_cmsghdr *__cmsg,
struct target_cmsghdr *__cmsg_start)
{
- struct target_cmsghdr *__ptr;
-
- __ptr = (struct target_cmsghdr *)((unsigned char *) __cmsg
- + TARGET_CMSG_ALIGN (tswapal(__cmsg->cmsg_len)));
- if ((unsigned long)((char *)(__ptr+1) - (char *)__cmsg_start)
- > tswapal(__mhdr->msg_controllen)) {
- /* No more entries. */
- return (struct target_cmsghdr *)0;
- }
- return __ptr;
+ struct target_cmsghdr *__ptr;
+
+ __ptr = (struct target_cmsghdr *)((unsigned char *) __cmsg
+ + TARGET_CMSG_ALIGN (tswapal(__cmsg->cmsg_len)));
+ if ((unsigned long)((char *)(__ptr+1) - (char *)__cmsg_start)
+ > tswapal(__mhdr->msg_controllen)) {
+ /* No more entries. */
+ return (struct target_cmsghdr *)0;
+ }
+ return __ptr;
}
struct target_mmsghdr {
struct target_msghdr msg_hdr; /* Message header */
- unsigned int msg_len; /* Number of bytes transmitted */
+ abi_uint msg_len; /* Number of bytes transmitted */
};
struct target_rusage {
- struct target_timeval ru_utime; /* user time used */
- struct target_timeval ru_stime; /* system time used */
- abi_long ru_maxrss; /* maximum resident set size */
- abi_long ru_ixrss; /* integral shared memory size */
- abi_long ru_idrss; /* integral unshared data size */
- abi_long ru_isrss; /* integral unshared stack size */
- abi_long ru_minflt; /* page reclaims */
- abi_long ru_majflt; /* page faults */
- abi_long ru_nswap; /* swaps */
- abi_long ru_inblock; /* block input operations */
- abi_long ru_oublock; /* block output operations */
- abi_long ru_msgsnd; /* messages sent */
- abi_long ru_msgrcv; /* messages received */
- abi_long ru_nsignals; /* signals received */
- abi_long ru_nvcsw; /* voluntary context switches */
- abi_long ru_nivcsw; /* involuntary " */
+ struct target_timeval ru_utime; /* user time used */
+ struct target_timeval ru_stime; /* system time used */
+ abi_long ru_maxrss; /* maximum resident set size */
+ abi_long ru_ixrss; /* integral shared memory size */
+ abi_long ru_idrss; /* integral unshared data size */
+ abi_long ru_isrss; /* integral unshared stack size */
+ abi_long ru_minflt; /* page reclaims */
+ abi_long ru_majflt; /* page faults */
+ abi_long ru_nswap; /* swaps */
+ abi_long ru_inblock; /* block input operations */
+ abi_long ru_oublock; /* block output operations */
+ abi_long ru_msgsnd; /* messages sent */
+ abi_long ru_msgrcv; /* messages received */
+ abi_long ru_nsignals; /* signals received */
+ abi_long ru_nvcsw; /* voluntary context switches */
+ abi_long ru_nivcsw; /* involuntary " */
};
typedef struct {
- int val[2];
+ abi_int val[2];
} kernel_fsid_t;
struct target_dirent {
- abi_long d_ino;
- abi_long d_off;
- unsigned short d_reclen;
- char d_name[];
+ abi_long d_ino;
+ abi_long d_off;
+ abi_ushort d_reclen;
+ char d_name[];
};
struct target_dirent64 {
- abi_ullong d_ino;
- abi_llong d_off;
- abi_ushort d_reclen;
- unsigned char d_type;
- char d_name[];
+ abi_ullong d_ino;
+ abi_llong d_off;
+ abi_ushort d_reclen;
+ unsigned char d_type;
+ char d_name[];
};
/* mostly generic signal stuff */
-#define TARGET_SIG_DFL ((abi_long)0) /* default signal handling */
-#define TARGET_SIG_IGN ((abi_long)1) /* ignore signal */
-#define TARGET_SIG_ERR ((abi_long)-1) /* error return from signal */
+#define TARGET_SIG_DFL ((abi_long)0) /* default signal handling */
+#define TARGET_SIG_IGN ((abi_long)1) /* ignore signal */
+#define TARGET_SIG_ERR ((abi_long)-1) /* error return from signal */
#ifdef TARGET_MIPS
-#define TARGET_NSIG 128
+#define TARGET_NSIG 128
#else
-#define TARGET_NSIG 64
+#define TARGET_NSIG 64
#endif
-#define TARGET_NSIG_BPW TARGET_ABI_BITS
+#define TARGET_NSIG_BPW TARGET_ABI_BITS
#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW)
typedef struct {
#endif
#if defined(TARGET_ALPHA)
-typedef int32_t target_old_sa_flags;
+typedef abi_int target_old_sa_flags;
#else
typedef abi_ulong target_old_sa_flags;
#endif
#if defined(TARGET_MIPS)
struct target_sigaction {
- uint32_t sa_flags;
+ abi_uint sa_flags;
#if defined(TARGET_ABI_MIPSN32)
- uint32_t _sa_handler;
+ abi_uint _sa_handler;
#else
- abi_ulong _sa_handler;
+ abi_ulong _sa_handler;
#endif
- target_sigset_t sa_mask;
+ target_sigset_t sa_mask;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
- /* ??? This is always present, but ignored unless O32. */
- abi_ulong sa_restorer;
+ /* ??? This is always present, but ignored unless O32. */
+ abi_ulong sa_restorer;
#endif
};
#else
struct target_old_sigaction {
- abi_ulong _sa_handler;
- abi_ulong sa_mask;
- target_old_sa_flags sa_flags;
+ abi_ulong _sa_handler;
+ abi_ulong sa_mask;
+ target_old_sa_flags sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
- abi_ulong sa_restorer;
+ abi_ulong sa_restorer;
#endif
};
struct target_sigaction {
- abi_ulong _sa_handler;
- abi_ulong sa_flags;
+ abi_ulong _sa_handler;
+ abi_ulong sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
- abi_ulong sa_restorer;
+ abi_ulong sa_restorer;
#endif
- target_sigset_t sa_mask;
+ target_sigset_t sa_mask;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
- abi_ulong ka_restorer;
+ abi_ulong ka_restorer;
#endif
};
#endif
typedef union target_sigval {
- int sival_int;
- abi_ulong sival_ptr;
+ abi_int sival_int;
+ abi_ulong sival_ptr;
} target_sigval_t;
-#if 0
-#if defined (TARGET_SPARC)
-typedef struct {
- struct {
- abi_ulong psr;
- abi_ulong pc;
- abi_ulong npc;
- abi_ulong y;
- abi_ulong u_regs[16]; /* globals and ins */
- } si_regs;
- int si_mask;
-} __siginfo_t;
-typedef struct {
- unsigned long si_float_regs [32];
- unsigned long si_fsr;
- unsigned long si_fpqdepth;
- struct {
- unsigned long *insn_addr;
- unsigned long insn;
- } si_fpqueue [16];
-} __siginfo_fpu_t;
-#endif
-#endif
-
-#define TARGET_SI_MAX_SIZE 128
+#define TARGET_SI_MAX_SIZE 128
#if TARGET_ABI_BITS == 32
#define TARGET_SI_PREAMBLE_SIZE (3 * sizeof(int))
typedef struct target_siginfo {
#ifdef TARGET_MIPS
- int si_signo;
- int si_code;
- int si_errno;
+ abi_int si_signo;
+ abi_int si_code;
+ abi_int si_errno;
#else
- int si_signo;
- int si_errno;
- int si_code;
+ abi_int si_signo;
+ abi_int si_errno;
+ abi_int si_code;
#endif
- union {
- int _pad[TARGET_SI_PAD_SIZE];
-
- /* kill() */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- unsigned int _timer1;
- unsigned int _timer2;
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- target_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- target_clock_t _utime;
- target_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- abi_ulong _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
+ union {
+ abi_int _pad[TARGET_SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ abi_uint _timer1;
+ abi_uint _timer2;
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ target_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
+ abi_int _status; /* exit code */
+ target_clock_t _utime;
+ target_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ abi_ulong _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ abi_int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ abi_int _fd;
+ } _sigpoll;
+ } _sifields;
} target_siginfo_t;
/*
* si_code values
* Digital reserves positive values for kernel-generated signals.
*/
-#define TARGET_SI_USER 0 /* sent by kill, sigsend, raise */
-#define TARGET_SI_KERNEL 0x80 /* sent by the kernel from somewhere */
-#define TARGET_SI_QUEUE -1 /* sent by sigqueue */
+#define TARGET_SI_USER 0 /* sent by kill, sigsend, raise */
+#define TARGET_SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define TARGET_SI_QUEUE -1 /* sent by sigqueue */
#define TARGET_SI_TIMER -2 /* sent by timer expiration */
-#define TARGET_SI_MESGQ -3 /* sent by real time mesq state change */
-#define TARGET_SI_ASYNCIO -4 /* sent by AIO completion */
-#define TARGET_SI_SIGIO -5 /* sent by queued SIGIO */
+#define TARGET_SI_MESGQ -3 /* sent by real time mesq state change */
+#define TARGET_SI_ASYNCIO -4 /* sent by AIO completion */
+#define TARGET_SI_SIGIO -5 /* sent by queued SIGIO */
/*
* SIGILL si_codes
*/
-#define TARGET_ILL_ILLOPC (1) /* illegal opcode */
-#define TARGET_ILL_ILLOPN (2) /* illegal operand */
-#define TARGET_ILL_ILLADR (3) /* illegal addressing mode */
-#define TARGET_ILL_ILLTRP (4) /* illegal trap */
-#define TARGET_ILL_PRVOPC (5) /* privileged opcode */
-#define TARGET_ILL_PRVREG (6) /* privileged register */
-#define TARGET_ILL_COPROC (7) /* coprocessor error */
-#define TARGET_ILL_BADSTK (8) /* internal stack error */
+#define TARGET_ILL_ILLOPC (1) /* illegal opcode */
+#define TARGET_ILL_ILLOPN (2) /* illegal operand */
+#define TARGET_ILL_ILLADR (3) /* illegal addressing mode */
+#define TARGET_ILL_ILLTRP (4) /* illegal trap */
+#define TARGET_ILL_PRVOPC (5) /* privileged opcode */
+#define TARGET_ILL_PRVREG (6) /* privileged register */
+#define TARGET_ILL_COPROC (7) /* coprocessor error */
+#define TARGET_ILL_BADSTK (8) /* internal stack error */
/*
* SIGFPE si_codes
/*
* SIGBUS si_codes
*/
-#define TARGET_BUS_ADRALN (1) /* invalid address alignment */
-#define TARGET_BUS_ADRERR (2) /* non-existent physical address */
-#define TARGET_BUS_OBJERR (3) /* object specific hardware error */
+#define TARGET_BUS_ADRALN (1) /* invalid address alignment */
+#define TARGET_BUS_ADRERR (2) /* non-existent physical address */
+#define TARGET_BUS_OBJERR (3) /* object specific hardware error */
/* hardware memory error consumed on a machine check: action required */
#define TARGET_BUS_MCEERR_AR (4)
/* hardware memory error detected in process but not consumed: action optional*/
/*
* SIGTRAP si_codes
*/
-#define TARGET_TRAP_BRKPT (1) /* process breakpoint */
-#define TARGET_TRAP_TRACE (2) /* process trace trap */
+#define TARGET_TRAP_BRKPT (1) /* process breakpoint */
+#define TARGET_TRAP_TRACE (2) /* process trace trap */
#define TARGET_TRAP_BRANCH (3) /* process taken branch trap */
#define TARGET_TRAP_HWBKPT (4) /* hardware breakpoint/watchpoint */
#define TARGET_TRAP_UNK (5) /* undiagnosed trap */
#include "target_resource.h"
struct target_pollfd {
- int fd; /* file descriptor */
- short events; /* requested events */
- short revents; /* returned events */
+ abi_int fd; /* file descriptor */
+ abi_short events; /* requested events */
+ abi_short revents; /* returned events */
};
/* virtual terminal ioctls */
-#define TARGET_KIOCSOUND 0x4B2F /* start sound generation (0 for off) */
-#define TARGET_KDMKTONE 0x4B30 /* generate tone */
+#define TARGET_KIOCSOUND 0x4B2F /* start sound generation (0 for off) */
+#define TARGET_KDMKTONE 0x4B30 /* generate tone */
#define TARGET_KDGKBTYPE 0x4b33
#define TARGET_KDSETMODE 0x4b3a
#define TARGET_KDGKBMODE 0x4b44
#define TARGET_KDSKBMODE 0x4b45
-#define TARGET_KDGKBENT 0x4B46 /* gets one entry in translation table */
-#define TARGET_KDGKBSENT 0x4B48 /* gets one function key string entry */
-#define TARGET_KDGKBLED 0x4B64 /* get led flags (not lights) */
-#define TARGET_KDSKBLED 0x4B65 /* set led flags (not lights) */
-#define TARGET_KDGETLED 0x4B31 /* return current led state */
-#define TARGET_KDSETLED 0x4B32 /* set led state [lights, not flags] */
+#define TARGET_KDGKBENT 0x4B46 /* gets one entry in translation table */
+#define TARGET_KDGKBSENT 0x4B48 /* gets one function key string entry */
+#define TARGET_KDGKBLED 0x4B64 /* get led flags (not lights) */
+#define TARGET_KDSKBLED 0x4B65 /* set led flags (not lights) */
+#define TARGET_KDGETLED 0x4B31 /* return current led state */
+#define TARGET_KDSETLED 0x4B32 /* set led state [lights, not flags] */
#define TARGET_KDSIGACCEPT 0x4B4E
struct target_rtc_pll_info {
- int pll_ctrl;
- int pll_value;
- int pll_max;
- int pll_min;
- int pll_posmult;
- int pll_negmult;
+ abi_int pll_ctrl;
+ abi_int pll_value;
+ abi_int pll_max;
+ abi_int pll_min;
+ abi_int pll_posmult;
+ abi_int pll_negmult;
abi_long pll_clock;
};
#define TARGET_RTC_EPOCH_SET TARGET_IOW('p', 0x0e, abi_ulong)
#define TARGET_RTC_WKALM_RD TARGET_IOR('p', 0x10, struct rtc_wkalrm)
#define TARGET_RTC_WKALM_SET TARGET_IOW('p', 0x0f, struct rtc_wkalrm)
-#define TARGET_RTC_PLL_GET TARGET_IOR('p', 0x11, \
+#define TARGET_RTC_PLL_GET TARGET_IOR('p', 0x11, \
struct target_rtc_pll_info)
-#define TARGET_RTC_PLL_SET TARGET_IOW('p', 0x12, \
+#define TARGET_RTC_PLL_SET TARGET_IOW('p', 0x12, \
struct target_rtc_pll_info)
-#define TARGET_RTC_VL_READ TARGET_IOR('p', 0x13, int)
+#define TARGET_RTC_VL_READ TARGET_IOR('p', 0x13, abi_int)
#define TARGET_RTC_VL_CLR TARGET_IO('p', 0x14)
-#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SH4) || \
- defined(TARGET_XTENSA)
-#define TARGET_FIOGETOWN TARGET_IOR('f', 123, int)
-#define TARGET_FIOSETOWN TARGET_IOW('f', 124, int)
-#define TARGET_SIOCATMARK TARGET_IOR('s', 7, int)
+#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SH4) || \
+ defined(TARGET_XTENSA)
+#define TARGET_FIOGETOWN TARGET_IOR('f', 123, abi_int)
+#define TARGET_FIOSETOWN TARGET_IOW('f', 124, abi_int)
+#define TARGET_SIOCATMARK TARGET_IOR('s', 7, abi_int)
#define TARGET_SIOCSPGRP TARGET_IOW('s', 8, pid_t)
#define TARGET_SIOCGPGRP TARGET_IOR('s', 9, pid_t)
#else
/* From <linux/if_tun.h> */
-#define TARGET_TUNSETDEBUG TARGET_IOW('T', 201, int)
-#define TARGET_TUNSETIFF TARGET_IOW('T', 202, int)
-#define TARGET_TUNSETPERSIST TARGET_IOW('T', 203, int)
-#define TARGET_TUNSETOWNER TARGET_IOW('T', 204, int)
-#define TARGET_TUNSETLINK TARGET_IOW('T', 205, int)
-#define TARGET_TUNSETGROUP TARGET_IOW('T', 206, int)
-#define TARGET_TUNGETFEATURES TARGET_IOR('T', 207, unsigned int)
-#define TARGET_TUNSETOFFLOAD TARGET_IOW('T', 208, unsigned int)
-#define TARGET_TUNSETTXFILTER TARGET_IOW('T', 209, unsigned int)
-#define TARGET_TUNGETIFF TARGET_IOR('T', 210, unsigned int)
-#define TARGET_TUNGETSNDBUF TARGET_IOR('T', 211, int)
-#define TARGET_TUNSETSNDBUF TARGET_IOW('T', 212, int)
+#define TARGET_TUNSETDEBUG TARGET_IOW('T', 201, abi_int)
+#define TARGET_TUNSETIFF TARGET_IOW('T', 202, abi_int)
+#define TARGET_TUNSETPERSIST TARGET_IOW('T', 203, abi_int)
+#define TARGET_TUNSETOWNER TARGET_IOW('T', 204, abi_int)
+#define TARGET_TUNSETLINK TARGET_IOW('T', 205, abi_int)
+#define TARGET_TUNSETGROUP TARGET_IOW('T', 206, abi_int)
+#define TARGET_TUNGETFEATURES TARGET_IOR('T', 207, abi_uint)
+#define TARGET_TUNSETOFFLOAD TARGET_IOW('T', 208, abi_uint)
+#define TARGET_TUNSETTXFILTER TARGET_IOW('T', 209, abi_uint)
+#define TARGET_TUNGETIFF TARGET_IOR('T', 210, abi_uint)
+#define TARGET_TUNGETSNDBUF TARGET_IOR('T', 211, abi_int)
+#define TARGET_TUNSETSNDBUF TARGET_IOW('T', 212, abi_int)
/*
* TUNATTACHFILTER and TUNDETACHFILTER are not supported. Linux kernel keeps a
* user pointer in TUNATTACHFILTER, which we are not able to correctly handle.
*/
-#define TARGET_TUNGETVNETHDRSZ TARGET_IOR('T', 215, int)
-#define TARGET_TUNSETVNETHDRSZ TARGET_IOW('T', 216, int)
-#define TARGET_TUNSETQUEUE TARGET_IOW('T', 217, int)
-#define TARGET_TUNSETIFINDEX TARGET_IOW('T', 218, unsigned int)
+#define TARGET_TUNGETVNETHDRSZ TARGET_IOR('T', 215, abi_int)
+#define TARGET_TUNSETVNETHDRSZ TARGET_IOW('T', 216, abi_int)
+#define TARGET_TUNSETQUEUE TARGET_IOW('T', 217, abi_int)
+#define TARGET_TUNSETIFINDEX TARGET_IOW('T', 218, abi_uint)
/* TUNGETFILTER is not supported: see TUNATTACHFILTER. */
-#define TARGET_TUNSETVNETLE TARGET_IOW('T', 220, int)
-#define TARGET_TUNGETVNETLE TARGET_IOR('T', 221, int)
-#define TARGET_TUNSETVNETBE TARGET_IOW('T', 222, int)
-#define TARGET_TUNGETVNETBE TARGET_IOR('T', 223, int)
-#define TARGET_TUNSETSTEERINGEBPF TARGET_IOR('T', 224, int)
-#define TARGET_TUNSETFILTEREBPF TARGET_IOR('T', 225, int)
-#define TARGET_TUNSETCARRIER TARGET_IOW('T', 226, int)
+#define TARGET_TUNSETVNETLE TARGET_IOW('T', 220, abi_int)
+#define TARGET_TUNGETVNETLE TARGET_IOR('T', 221, abi_int)
+#define TARGET_TUNSETVNETBE TARGET_IOW('T', 222, abi_int)
+#define TARGET_TUNGETVNETBE TARGET_IOR('T', 223, abi_int)
+#define TARGET_TUNSETSTEERINGEBPF TARGET_IOR('T', 224, abi_int)
+#define TARGET_TUNSETFILTEREBPF TARGET_IOR('T', 225, abi_int)
+#define TARGET_TUNSETCARRIER TARGET_IOW('T', 226, abi_int)
#define TARGET_TUNGETDEVNETNS TARGET_IO('T', 227)
/* From <linux/random.h> */
-#define TARGET_RNDGETENTCNT TARGET_IOR('R', 0x00, int)
-#define TARGET_RNDADDTOENTCNT TARGET_IOW('R', 0x01, int)
+#define TARGET_RNDGETENTCNT TARGET_IOR('R', 0x00, abi_int)
+#define TARGET_RNDADDTOENTCNT TARGET_IOW('R', 0x01, abi_int)
#define TARGET_RNDZAPENTCNT TARGET_IO('R', 0x04)
#define TARGET_RNDCLEARPOOL TARGET_IO('R', 0x06)
#define TARGET_RNDRESEEDCRNG TARGET_IO('R', 0x07)
#define TARGET_BLKBSZGET TARGET_IOR(0x12, 112, abi_ulong)
#define TARGET_BLKBSZSET TARGET_IOW(0x12, 113, abi_ulong)
#define TARGET_BLKGETSIZE64 TARGET_IOR(0x12,114,abi_ulong)
- /* return device size in bytes
- (u64 *arg) */
+/* return device size in bytes
+ (u64 *arg) */
#define TARGET_BLKDISCARD TARGET_IO(0x12, 119)
#define TARGET_BLKIOMIN TARGET_IO(0x12, 120)
#define TARGET_FIBMAP TARGET_IO(0x00,1) /* bmap access */
#define TARGET_FIGETBSZ TARGET_IO(0x00,2) /* get the block size used for bmap */
-#define TARGET_FICLONE TARGET_IOW(0x94, 9, int)
+#define TARGET_FICLONE TARGET_IOW(0x94, 9, abi_int)
#define TARGET_FICLONERANGE TARGET_IOW(0x94, 13, struct file_clone_range)
/*
#define TARGET_FS_IOC_GETVERSION TARGET_IOR('v', 1, abi_long)
#define TARGET_FS_IOC_SETVERSION TARGET_IOW('v', 2, abi_long)
#define TARGET_FS_IOC_FIEMAP TARGET_IOWR('f',11,struct fiemap)
-#define TARGET_FS_IOC32_GETFLAGS TARGET_IOR('f', 1, int)
-#define TARGET_FS_IOC32_SETFLAGS TARGET_IOW('f', 2, int)
-#define TARGET_FS_IOC32_GETVERSION TARGET_IOR('v', 1, int)
-#define TARGET_FS_IOC32_SETVERSION TARGET_IOW('v', 2, int)
+#define TARGET_FS_IOC32_GETFLAGS TARGET_IOR('f', 1, abi_int)
+#define TARGET_FS_IOC32_SETFLAGS TARGET_IOW('f', 2, abi_int)
+#define TARGET_FS_IOC32_GETVERSION TARGET_IOR('v', 1, abi_int)
+#define TARGET_FS_IOC32_SETVERSION TARGET_IOW('v', 2, abi_int)
/* btrfs ioctls */
#ifdef HAVE_BTRFS_H
#define TARGET_BTRFS_IOC_SUBVOL_CREATE TARGET_IOWU(BTRFS_IOCTL_MAGIC, 14)
#define TARGET_BTRFS_IOC_SNAP_DESTROY TARGET_IOWU(BTRFS_IOCTL_MAGIC, 15)
#define TARGET_BTRFS_IOC_INO_LOOKUP TARGET_IOWRU(BTRFS_IOCTL_MAGIC, 18)
-#define TARGET_BTRFS_IOC_DEFAULT_SUBVOL TARGET_IOW(BTRFS_IOCTL_MAGIC, 19,\
+#define TARGET_BTRFS_IOC_DEFAULT_SUBVOL TARGET_IOW(BTRFS_IOCTL_MAGIC, 19, \
abi_ullong)
-#define TARGET_BTRFS_IOC_SUBVOL_GETFLAGS TARGET_IOR(BTRFS_IOCTL_MAGIC, 25,\
+#define TARGET_BTRFS_IOC_SUBVOL_GETFLAGS TARGET_IOR(BTRFS_IOCTL_MAGIC, 25, \
abi_ullong)
-#define TARGET_BTRFS_IOC_SUBVOL_SETFLAGS TARGET_IOW(BTRFS_IOCTL_MAGIC, 26,\
+#define TARGET_BTRFS_IOC_SUBVOL_SETFLAGS TARGET_IOW(BTRFS_IOCTL_MAGIC, 26, \
abi_ullong)
#define TARGET_BTRFS_IOC_SCRUB TARGET_IOWRU(BTRFS_IOCTL_MAGIC, 27)
#define TARGET_BTRFS_IOC_SCRUB_CANCEL TARGET_IO(BTRFS_IOCTL_MAGIC, 28)
#define TARGET_USBDEVFS_GET_SPEED TARGET_IO('U', 31)
/* cdrom commands */
-#define TARGET_CDROMPAUSE 0x5301 /* Pause Audio Operation */
-#define TARGET_CDROMRESUME 0x5302 /* Resume paused Audio Operation */
-#define TARGET_CDROMPLAYMSF 0x5303 /* Play Audio MSF (struct cdrom_msf) */
-#define TARGET_CDROMPLAYTRKIND 0x5304 /* Play Audio Track/index
- (struct cdrom_ti) */
-#define TARGET_CDROMREADTOCHDR 0x5305 /* Read TOC header
- (struct cdrom_tochdr) */
-#define TARGET_CDROMREADTOCENTRY 0x5306 /* Read TOC entry
- (struct cdrom_tocentry) */
-#define TARGET_CDROMSTOP 0x5307 /* Stop the cdrom drive */
-#define TARGET_CDROMSTART 0x5308 /* Start the cdrom drive */
-#define TARGET_CDROMEJECT 0x5309 /* Ejects the cdrom media */
-#define TARGET_CDROMVOLCTRL 0x530a /* Control output volume
- (struct cdrom_volctrl) */
-#define TARGET_CDROMSUBCHNL 0x530b /* Read subchannel data
- (struct cdrom_subchnl) */
-#define TARGET_CDROMREADMODE2 0x530c /* Read TARGET_CDROM mode 2 data (2336 Bytes)
- (struct cdrom_read) */
-#define TARGET_CDROMREADMODE1 0x530d /* Read TARGET_CDROM mode 1 data (2048 Bytes)
- (struct cdrom_read) */
-#define TARGET_CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */
-#define TARGET_CDROMEJECT_SW 0x530f /* enable(1)/disable(0) auto-ejecting */
-#define TARGET_CDROMMULTISESSION 0x5310 /* Obtain the start-of-last-session
- address of multi session disks
- (struct cdrom_multisession) */
-#define TARGET_CDROM_GET_MCN 0x5311 /* Obtain the "Universal Product Code"
- if available (struct cdrom_mcn) */
-#define TARGET_CDROM_GET_UPC TARGET_CDROM_GET_MCN /* This one is deprecated,
- but here anyway for compatibility */
-#define TARGET_CDROMRESET 0x5312 /* hard-reset the drive */
-#define TARGET_CDROMVOLREAD 0x5313 /* Get the drive's volume setting
- (struct cdrom_volctrl) */
-#define TARGET_CDROMREADRAW 0x5314 /* read data in raw mode (2352 Bytes)
- (struct cdrom_read) */
+#define TARGET_CDROMPAUSE 0x5301 /* Pause Audio Operation */
+#define TARGET_CDROMRESUME 0x5302 /* Resume paused Audio Operation */
+#define TARGET_CDROMPLAYMSF 0x5303 /* Play Audio MSF (struct cdrom_msf) */
+#define TARGET_CDROMPLAYTRKIND 0x5304 /* Play Audio Track/index
+ (struct cdrom_ti) */
+#define TARGET_CDROMREADTOCHDR 0x5305 /* Read TOC header
+ (struct cdrom_tochdr) */
+#define TARGET_CDROMREADTOCENTRY 0x5306 /* Read TOC entry
+ (struct cdrom_tocentry) */
+#define TARGET_CDROMSTOP 0x5307 /* Stop the cdrom drive */
+#define TARGET_CDROMSTART 0x5308 /* Start the cdrom drive */
+#define TARGET_CDROMEJECT 0x5309 /* Ejects the cdrom media */
+#define TARGET_CDROMVOLCTRL 0x530a /* Control output volume
+ (struct cdrom_volctrl) */
+#define TARGET_CDROMSUBCHNL 0x530b /* Read subchannel data
+ (struct cdrom_subchnl) */
+#define TARGET_CDROMREADMODE2 0x530c /* Read TARGET_CDROM mode 2 data (2336 Bytes)
+ (struct cdrom_read) */
+#define TARGET_CDROMREADMODE1 0x530d /* Read TARGET_CDROM mode 1 data (2048 Bytes)
+ (struct cdrom_read) */
+#define TARGET_CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */
+#define TARGET_CDROMEJECT_SW 0x530f /* enable(1)/disable(0) auto-ejecting */
+#define TARGET_CDROMMULTISESSION 0x5310 /* Obtain the start-of-last-session
+ address of multi session disks
+ (struct cdrom_multisession) */
+#define TARGET_CDROM_GET_MCN 0x5311 /* Obtain the "Universal Product Code"
+ if available (struct cdrom_mcn) */
+#define TARGET_CDROM_GET_UPC TARGET_CDROM_GET_MCN /* This one is deprecated,
+ but here anyway for compatibility */
+#define TARGET_CDROMRESET 0x5312 /* hard-reset the drive */
+#define TARGET_CDROMVOLREAD 0x5313 /* Get the drive's volume setting
+ (struct cdrom_volctrl) */
+#define TARGET_CDROMREADRAW 0x5314 /* read data in raw mode (2352 Bytes)
+ (struct cdrom_read) */
/*
 * These ioctls are only used in aztcd.c and optcd.c
*/
-#define TARGET_CDROMREADCOOKED 0x5315 /* read data in cooked mode */
-#define TARGET_CDROMSEEK 0x5316 /* seek msf address */
+#define TARGET_CDROMREADCOOKED 0x5315 /* read data in cooked mode */
+#define TARGET_CDROMSEEK 0x5316 /* seek msf address */
/*
* This ioctl is only used by the scsi-cd driver.
- It is for playing audio in logical block addressing mode.
- */
-#define TARGET_CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */
+ It is for playing audio in logical block addressing mode.
+*/
+#define TARGET_CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */
/*
* These ioctls are only used in optcd.c
*/
-#define TARGET_CDROMREADALL 0x5318 /* read all 2646 bytes */
+#define TARGET_CDROMREADALL 0x5318 /* read all 2646 bytes */
/*
 * These ioctls are (now) only in ide-cd.c for controlling drive spindown time.
* They _will_ be adopted by all CD-ROM drivers, when all the CD-ROM
* drivers are eventually ported to the uniform CD-ROM driver interface.
*/
-#define TARGET_CDROMCLOSETRAY 0x5319 /* pendant of CDROMEJECT */
-#define TARGET_CDROM_SET_OPTIONS 0x5320 /* Set behavior options */
-#define TARGET_CDROM_CLEAR_OPTIONS 0x5321 /* Clear behavior options */
-#define TARGET_CDROM_SELECT_SPEED 0x5322 /* Set the CD-ROM speed */
-#define TARGET_CDROM_SELECT_DISC 0x5323 /* Select disc (for juke-boxes) */
-#define TARGET_CDROM_MEDIA_CHANGED 0x5325 /* Check is media changed */
-#define TARGET_CDROM_DRIVE_STATUS 0x5326 /* Get tray position, etc. */
-#define TARGET_CDROM_DISC_STATUS 0x5327 /* Get disc type, etc. */
+#define TARGET_CDROMCLOSETRAY 0x5319 /* pendant of CDROMEJECT */
+#define TARGET_CDROM_SET_OPTIONS 0x5320 /* Set behavior options */
+#define TARGET_CDROM_CLEAR_OPTIONS 0x5321 /* Clear behavior options */
+#define TARGET_CDROM_SELECT_SPEED 0x5322 /* Set the CD-ROM speed */
+#define TARGET_CDROM_SELECT_DISC 0x5323 /* Select disc (for juke-boxes) */
+#define TARGET_CDROM_MEDIA_CHANGED      0x5325 /* Check if media changed */
+#define TARGET_CDROM_DRIVE_STATUS 0x5326 /* Get tray position, etc. */
+#define TARGET_CDROM_DISC_STATUS 0x5327 /* Get disc type, etc. */
#define TARGET_CDROM_CHANGER_NSLOTS 0x5328 /* Get number of slots */
-#define TARGET_CDROM_LOCKDOOR 0x5329 /* lock or unlock door */
-#define TARGET_CDROM_DEBUG 0x5330 /* Turn debug messages on/off */
-#define TARGET_CDROM_GET_CAPABILITY 0x5331 /* get capabilities */
+#define TARGET_CDROM_LOCKDOOR 0x5329 /* lock or unlock door */
+#define TARGET_CDROM_DEBUG 0x5330 /* Turn debug messages on/off */
+#define TARGET_CDROM_GET_CAPABILITY 0x5331 /* get capabilities */
/* Note that scsi/scsi_ioctl.h also uses 0x5382 - 0x5386.
* Future CDROM ioctls should be kept below 0x537F
*/
/* This ioctl is only used by sbpcd at the moment */
-#define TARGET_CDROMAUDIOBUFSIZ 0x5382 /* set the audio buffer size */
- /* conflict with SCSI_IOCTL_GET_IDLUN */
+#define TARGET_CDROMAUDIOBUFSIZ 0x5382 /* set the audio buffer size */
+/* conflict with SCSI_IOCTL_GET_IDLUN */
/* DVD-ROM Specific ioctls */
-#define TARGET_DVD_READ_STRUCT 0x5390 /* Read structure */
-#define TARGET_DVD_WRITE_STRUCT 0x5391 /* Write structure */
-#define TARGET_DVD_AUTH 0x5392 /* Authentication */
+#define TARGET_DVD_READ_STRUCT 0x5390 /* Read structure */
+#define TARGET_DVD_WRITE_STRUCT 0x5391 /* Write structure */
+#define TARGET_DVD_AUTH 0x5392 /* Authentication */
-#define TARGET_CDROM_SEND_PACKET 0x5393 /* send a packet to the drive */
-#define TARGET_CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */
-#define TARGET_CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */
+#define TARGET_CDROM_SEND_PACKET 0x5393 /* send a packet to the drive */
+#define TARGET_CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */
+#define TARGET_CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */
/* HD commands */
#define TARGET_NCC 8
struct target_termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[TARGET_NCC]; /* control characters */
+ abi_ushort c_iflag; /* input mode flags */
+ abi_ushort c_oflag; /* output mode flags */
+ abi_ushort c_cflag; /* control mode flags */
+ abi_ushort c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCC]; /* control characters */
};
struct target_winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
+ abi_ushort ws_row;
+ abi_ushort ws_col;
+ abi_ushort ws_xpixel;
+ abi_ushort ws_ypixel;
};
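
Structures like target_termio and target_winsize are copied to and from
guest memory wholesale, so every field must have the guest's size and
alignment; abi_ushort guarantees that even where the host's unsigned
short would differ. A hedged sketch of the copy-out direction
(host_to_target_winsize is a hypothetical helper name; tswap16() is
QEMU's 16-bit guest-endian swap):

static void host_to_target_winsize(struct target_winsize *tws,
                                   const struct winsize *ws)
{
    /* store each field in the guest's byte order */
    tws->ws_row    = tswap16(ws->ws_row);
    tws->ws_col    = tswap16(ws->ws_col);
    tws->ws_xpixel = tswap16(ws->ws_xpixel);
    tws->ws_ypixel = tswap16(ws->ws_ypixel);
}
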
#include "termbits.h"
-#if defined(TARGET_MIPS) || defined(TARGET_XTENSA)
-#define TARGET_PROT_SEM 0x10
-#else
-#define TARGET_PROT_SEM 0x08
-#endif
-
-#ifdef TARGET_AARCH64
-#define TARGET_PROT_BTI 0x10
-#define TARGET_PROT_MTE 0x20
-#endif
-
-/* Common */
-#define TARGET_MAP_SHARED 0x01 /* Share changes */
-#define TARGET_MAP_PRIVATE 0x02 /* Changes are private */
-#if defined(TARGET_HPPA)
-#define TARGET_MAP_TYPE 0x03 /* Mask for type of mapping */
-#else
-#define TARGET_MAP_TYPE 0x0f /* Mask for type of mapping */
-#endif
-
-/* Target specific */
-#if defined(TARGET_MIPS)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x0800 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x1000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x2000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x4000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x8000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0400 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#elif defined(TARGET_PPC)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x20 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x0080 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0040 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x20000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#elif defined(TARGET_ALPHA)
-#define TARGET_MAP_ANONYMOUS 0x10 /* don't use a file */
-#define TARGET_MAP_FIXED 0x100 /* Interpret addr exactly */
-#define TARGET_MAP_GROWSDOWN 0x01000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x02000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x04000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x08000 /* lock the mapping */
-#define TARGET_MAP_NORESERVE 0x10000 /* no check for reservations */
-#define TARGET_MAP_POPULATE 0x20000 /* pop (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x40000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x80000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x100000 /* create a huge page mapping */
-#elif defined(TARGET_HPPA)
-#define TARGET_MAP_ANONYMOUS 0x10 /* don't use a file */
-#define TARGET_MAP_FIXED 0x04 /* Interpret addr exactly */
-#define TARGET_MAP_GROWSDOWN 0x08000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x00800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x01000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x02000 /* lock the mapping */
-#define TARGET_MAP_NORESERVE 0x04000 /* no check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* pop (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#elif defined(TARGET_XTENSA)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x0800 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x1000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x2000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x4000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x8000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0400 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#else
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x20 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x2000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x20000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#define TARGET_MAP_UNINITIALIZED 0x4000000 /* for anonymous mmap, memory could be uninitialized */
-#endif
+#include "target_mman.h"
-#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
- || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
+#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
+ || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
|| defined(TARGET_CRIS)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- unsigned short st_dev;
- unsigned short __pad1;
- abi_ulong st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned short __pad2;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ushort st_dev;
+ abi_ushort __pad1;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_ushort st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ushort st_rdev;
+ abi_ushort __pad2;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
 * insane amounts of padding around dev_t's.
 */
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned short st_dev;
- unsigned char __pad0[10];
+ abi_ushort st_dev;
+ unsigned char __pad0[10];
-#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- abi_ulong __st_ino;
+#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
+ abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned short st_rdev;
- unsigned char __pad3[10];
+ abi_ushort st_rdev;
+ unsigned char __pad3[10];
- long long st_size;
- abi_ulong st_blksize;
+ abi_llong st_size;
+ abi_ulong st_blksize;
- abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
- abi_ulong __pad4; /* future possible st_blocks high bits */
+ abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ulong __pad4; /* future possible st_blocks high bits */
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
} QEMU_PACKED;
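
The abi_* scalars used in these layouts are fixed-width integers carrying
the guest ABI's alignment, which is what lets a struct declared here match
the guest's memory layout even when host alignment rules differ (a 64-bit
field aligns to 4 bytes for an i386 guest, for example). Roughly, per
include/exec/user/abitypes.h:

typedef int16_t  abi_short  __attribute__((aligned(ABI_SHORT_ALIGNMENT)));
typedef uint16_t abi_ushort __attribute__((aligned(ABI_SHORT_ALIGNMENT)));
typedef int32_t  abi_int    __attribute__((aligned(ABI_INT_ALIGNMENT)));
typedef uint32_t abi_uint   __attribute__((aligned(ABI_INT_ALIGNMENT)));
typedef int64_t  abi_llong  __attribute__((aligned(ABI_LLONG_ALIGNMENT)));
typedef uint64_t abi_ullong __attribute__((aligned(ABI_LLONG_ALIGNMENT)));
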
#ifdef TARGET_ARM
#define TARGET_HAS_STRUCT_STAT64
struct target_eabi_stat64 {
- unsigned long long st_dev;
- unsigned int __pad1;
- abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_ullong st_dev;
+ abi_uint __pad1;
+ abi_ulong __st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned long long st_rdev;
- unsigned int __pad2[2];
+ abi_ullong st_rdev;
+ abi_uint __pad2[2];
- long long st_size;
- abi_ulong st_blksize;
- unsigned int __pad3;
- unsigned long long st_blocks;
+ abi_llong st_size;
+ abi_ulong st_blksize;
+ abi_uint __pad3;
+ abi_ullong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
} QEMU_PACKED;
#endif
#elif defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
struct target_stat {
- unsigned int st_dev;
- abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_rdev;
- abi_long st_size;
- abi_long target_st_atime;
- abi_long target_st_mtime;
- abi_long target_st_ctime;
- abi_long st_blksize;
- abi_long st_blocks;
- abi_ulong __unused4[2];
+ abi_uint st_dev;
+ abi_ulong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint st_rdev;
+ abi_long st_size;
+ abi_long target_st_atime;
+ abi_long target_st_mtime;
+ abi_long target_st_ctime;
+ abi_long st_blksize;
+ abi_long st_blocks;
+ abi_ulong __unused4[2];
};
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned char __pad0[6];
- unsigned short st_dev;
+ unsigned char __pad0[6];
+ abi_ushort st_dev;
- uint64_t st_ino;
- uint64_t st_nlink;
+ abi_ullong st_ino;
+ abi_ullong st_nlink;
- unsigned int st_mode;
+ abi_uint st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_uid;
+ abi_uint st_gid;
- unsigned char __pad2[6];
- unsigned short st_rdev;
+ unsigned char __pad2[6];
+ abi_ushort st_rdev;
- int64_t st_size;
- int64_t st_blksize;
+ abi_llong st_size;
+ abi_llong st_blksize;
- unsigned char __pad4[4];
- unsigned int st_blocks;
+ unsigned char __pad4[4];
+ abi_uint st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4[3];
+ abi_ulong __unused4[3];
};
#elif defined(TARGET_SPARC)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- unsigned short st_dev;
- abi_ulong st_ino;
- unsigned short st_mode;
- short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- abi_long st_size;
- abi_long target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_long target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_long target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_long st_blksize;
- abi_long st_blocks;
- abi_ulong __unused1[2];
+ abi_ushort st_dev;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_short st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ushort st_rdev;
+ abi_long st_size;
+ abi_long target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_long target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_long target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_long st_blksize;
+ abi_long st_blocks;
+ abi_ulong __unused1[2];
};
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned char __pad0[6];
- unsigned short st_dev;
+ unsigned char __pad0[6];
+ abi_ushort st_dev;
- uint64_t st_ino;
+ abi_ullong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_uid;
+ abi_uint st_gid;
- unsigned char __pad2[6];
- unsigned short st_rdev;
+ unsigned char __pad2[6];
+ abi_ushort st_rdev;
- unsigned char __pad3[8];
+ unsigned char __pad3[8];
- int64_t st_size;
- unsigned int st_blksize;
+ abi_llong st_size;
+ abi_uint st_blksize;
- unsigned char __pad4[8];
- unsigned int st_blocks;
+ unsigned char __pad4[8];
+ abi_uint st_blocks;
- unsigned int target_st_atime;
- unsigned int target_st_atime_nsec;
+ abi_uint target_st_atime;
+ abi_uint target_st_atime_nsec;
- unsigned int target_st_mtime;
- unsigned int target_st_mtime_nsec;
+ abi_uint target_st_mtime;
+ abi_uint target_st_mtime_nsec;
- unsigned int target_st_ctime;
- unsigned int target_st_ctime_nsec;
+ abi_uint target_st_ctime;
+ abi_uint target_st_ctime_nsec;
- unsigned int __unused1;
- unsigned int __unused2;
+ abi_uint __unused1;
+ abi_uint __unused2;
};
#elif defined(TARGET_PPC)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
+ abi_ulong st_dev;
+ abi_ulong st_ino;
#if defined(TARGET_PPC64)
- abi_ulong st_nlink;
- unsigned int st_mode;
+ abi_ulong st_nlink;
+ abi_uint st_mode;
#else
- unsigned int st_mode;
- unsigned short st_nlink;
+ abi_uint st_mode;
+ abi_ushort st_nlink;
#endif
- unsigned int st_uid;
- unsigned int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
#if defined(TARGET_PPC64)
- abi_ulong __unused6;
+ abi_ulong __unused6;
#endif
};
#if !defined(TARGET_PPC64)
#define TARGET_HAS_STRUCT_STAT64
struct QEMU_PACKED target_stat64 {
- unsigned long long st_dev;
- unsigned long long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned long long st_rdev;
- unsigned long long __pad0;
- long long st_size;
- int st_blksize;
- unsigned int __pad1;
- long long st_blocks; /* Number 512-byte blocks allocated. */
- int target_st_atime;
- unsigned int target_st_atime_nsec;
- int target_st_mtime;
- unsigned int target_st_mtime_nsec;
- int target_st_ctime;
- unsigned int target_st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
+ abi_ullong st_dev;
+ abi_ullong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ullong st_rdev;
+ abi_ullong __pad0;
+ abi_llong st_size;
+ abi_int st_blksize;
+ abi_uint __pad1;
+ abi_llong st_blocks; /* Number 512-byte blocks allocated. */
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ abi_uint __unused4;
+ abi_uint __unused5;
};
#endif
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
- unsigned int st_mode;
- unsigned short st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_uint st_mode;
+ abi_ushort st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
/* FIXME: Microblaze no-mmu user-space has a different stat64 layout... */
#define TARGET_HAS_STRUCT_STAT64
struct QEMU_PACKED target_stat64 {
- uint64_t st_dev;
+ abi_ullong st_dev;
#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- uint32_t pad0;
- uint32_t __st_ino;
-
- uint32_t st_mode;
- uint32_t st_nlink;
- uint32_t st_uid;
- uint32_t st_gid;
- uint64_t st_rdev;
- uint64_t __pad1;
-
- int64_t st_size;
- int32_t st_blksize;
- uint32_t __pad2;
- int64_t st_blocks; /* Number 512-byte blocks allocated. */
-
- int target_st_atime;
- unsigned int target_st_atime_nsec;
- int target_st_mtime;
- unsigned int target_st_mtime_nsec;
- int target_st_ctime;
- unsigned int target_st_ctime_nsec;
- uint64_t st_ino;
+ abi_uint pad0;
+ abi_uint __st_ino;
+
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ullong st_rdev;
+ abi_ullong __pad1;
+
+ abi_llong st_size;
+ abi_int st_blksize;
+ abi_uint __pad2;
+    abi_llong st_blocks;        /* Number 512-byte blocks allocated. */
+
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ abi_ullong st_ino;
};
#elif defined(TARGET_M68K)
struct target_stat {
- unsigned short st_dev;
- unsigned short __pad1;
- abi_ulong st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned short __pad2;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong __unused1;
- abi_ulong target_st_mtime;
- abi_ulong __unused2;
- abi_ulong target_st_ctime;
- abi_ulong __unused3;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ushort st_dev;
+ abi_ushort __pad1;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_ushort st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ushort st_rdev;
+ abi_ushort __pad2;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong __unused1;
+ abi_ulong target_st_mtime;
+ abi_ulong __unused2;
+ abi_ulong target_st_ctime;
+ abi_ulong __unused3;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
 * insane amounts of padding around dev_t's.
 */
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned long long st_dev;
- unsigned char __pad1[2];
+ abi_ullong st_dev;
+ unsigned char __pad1[2];
-#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- abi_ulong __st_ino;
+#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
+ abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned long long st_rdev;
- unsigned char __pad3[2];
+ abi_ullong st_rdev;
+ unsigned char __pad3[2];
- long long st_size;
- abi_ulong st_blksize;
+ abi_llong st_size;
+ abi_ulong st_blksize;
- abi_ulong __pad4; /* future possible st_blocks high bits */
- abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ulong __pad4; /* future possible st_blocks high bits */
+ abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
} QEMU_PACKED;
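
TARGET_STAT64_HAS_BROKEN_ST_INO flags layouts whose __st_ino field can
only hold the low 32 bits of the inode number; the full value goes in the
trailing 64-bit st_ino. The copy-out code keys off the macro, roughly (a
simplified sketch of the pattern in linux-user/syscall.c):

#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
    __put_user(host_st->st_ino, &target_st->__st_ino); /* low 32 bits */
#endif
    __put_user(host_st->st_ino, &target_st->st_ino);   /* full width */
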
#elif defined(TARGET_ABI_MIPSN64)
#define TARGET_STAT_HAVE_NSEC
/* The memory layout is the same as that of struct stat64 of the 32-bit kernel. */
struct target_stat {
- unsigned int st_dev;
- unsigned int st_pad0[3]; /* Reserved for st_dev expansion */
+ abi_uint st_dev;
+ abi_uint st_pad0[3]; /* Reserved for st_dev expansion */
- abi_ulong st_ino;
+ abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- int st_uid;
- int st_gid;
+ abi_int st_uid;
+ abi_int st_gid;
- unsigned int st_rdev;
- unsigned int st_pad1[3]; /* Reserved for st_rdev expansion */
+ abi_uint st_rdev;
+ abi_uint st_pad1[3]; /* Reserved for st_rdev expansion */
- abi_ulong st_size;
+ abi_ulong st_size;
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- unsigned int target_st_atime;
- unsigned int target_st_atime_nsec;
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ abi_uint target_st_atime;
+ abi_uint target_st_atime_nsec;
- unsigned int target_st_mtime;
- unsigned int target_st_mtime_nsec;
+ abi_uint target_st_mtime;
+ abi_uint target_st_mtime_nsec;
- unsigned int target_st_ctime;
- unsigned int target_st_ctime_nsec;
+ abi_uint target_st_ctime;
+ abi_uint target_st_ctime_nsec;
- unsigned int st_blksize;
- unsigned int st_pad2;
+ abi_uint st_blksize;
+ abi_uint st_pad2;
- abi_ulong st_blocks;
+ abi_ulong st_blocks;
};
#elif defined(TARGET_ABI_MIPSN32)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
- uint64_t st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- int st_uid;
- int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
- int64_t st_size;
- abi_long target_st_atime;
- abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
- abi_long target_st_mtime;
- abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
- abi_long target_st_ctime;
- abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
- abi_ulong st_blksize;
- abi_ulong st_pad2;
- int64_t st_blocks;
+ abi_ulong st_dev;
+ abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
+ abi_ullong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_int st_uid;
+ abi_int st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
+ abi_llong st_size;
+ abi_long target_st_atime;
+ abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
+ abi_long target_st_mtime;
+ abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
+ abi_long target_st_ctime;
+ abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
+ abi_ulong st_blksize;
+ abi_ulong st_pad2;
+ abi_llong st_blocks;
};
#elif defined(TARGET_ABI_MIPSO32)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- unsigned st_dev;
- abi_long st_pad1[3]; /* Reserved for network id */
- abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- int st_uid;
- int st_gid;
- unsigned st_rdev;
- abi_long st_pad2[2];
- abi_long st_size;
- abi_long st_pad3;
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- abi_long target_st_atime;
- abi_long target_st_atime_nsec;
- abi_long target_st_mtime;
- abi_long target_st_mtime_nsec;
- abi_long target_st_ctime;
- abi_long target_st_ctime_nsec;
- abi_long st_blksize;
- abi_long st_blocks;
- abi_long st_pad4[14];
+ abi_uint st_dev;
+ abi_long st_pad1[3]; /* Reserved for network id */
+ abi_ulong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_int st_uid;
+ abi_int st_gid;
+ abi_uint st_rdev;
+ abi_long st_pad2[2];
+ abi_long st_size;
+ abi_long st_pad3;
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ abi_long target_st_atime;
+ abi_long target_st_atime_nsec;
+ abi_long target_st_mtime;
+ abi_long target_st_mtime_nsec;
+ abi_long target_st_ctime;
+ abi_long target_st_ctime_nsec;
+ abi_long st_blksize;
+ abi_long st_blocks;
+ abi_long st_pad4[14];
};
/*
 * This matches struct stat64 in glibc2.1, hence the absolutely insane
 * amounts of padding around dev_t's.
 */
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- abi_ulong st_dev;
- abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
+ abi_ulong st_dev;
+ abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
- uint64_t st_ino;
+ abi_ullong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- int st_uid;
- int st_gid;
+ abi_int st_uid;
+ abi_int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
+ abi_ulong st_rdev;
+ abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
- int64_t st_size;
+ abi_llong st_size;
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- abi_long target_st_atime;
- abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ abi_long target_st_atime;
+ abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
- abi_long target_st_mtime;
- abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
+ abi_long target_st_mtime;
+ abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
- abi_long target_st_ctime;
- abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
+ abi_long target_st_ctime;
+ abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
- abi_ulong st_blksize;
- abi_ulong st_pad2;
+ abi_ulong st_blksize;
+ abi_ulong st_pad2;
- int64_t st_blocks;
+ abi_llong st_blocks;
};
#elif defined(TARGET_ALPHA)
struct target_stat {
- unsigned int st_dev;
- unsigned int st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_rdev;
- abi_long st_size;
- abi_ulong target_st_atime;
- abi_ulong target_st_mtime;
- abi_ulong target_st_ctime;
- unsigned int st_blksize;
- unsigned int st_blocks;
- unsigned int st_flags;
- unsigned int st_gen;
+ abi_uint st_dev;
+ abi_uint st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint st_rdev;
+ abi_long st_size;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_ctime;
+ abi_uint st_blksize;
+ abi_uint st_blocks;
+ abi_uint st_flags;
+ abi_uint st_gen;
};
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- abi_ulong st_dev;
- abi_ulong st_ino;
- abi_ulong st_rdev;
- abi_long st_size;
- abi_ulong st_blocks;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_blksize;
- unsigned int st_nlink;
- unsigned int __pad0;
-
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_long __unused[3];
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ulong st_rdev;
+ abi_long st_size;
+ abi_ulong st_blocks;
+
+ abi_uint st_mode;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint st_blksize;
+ abi_uint st_nlink;
+ abi_uint __pad0;
+
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_long __unused[3];
};
#elif defined(TARGET_SH4)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- abi_ulong st_rdev;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_ushort st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
 * insane amounts of padding around dev_t's.
 */
#define TARGET_HAS_STRUCT_STAT64
struct QEMU_PACKED target_stat64 {
- unsigned long long st_dev;
- unsigned char __pad0[4];
+ abi_ullong st_dev;
+ unsigned char __pad0[4];
-#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- abi_ulong __st_ino;
+#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
+ abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned long long st_rdev;
- unsigned char __pad3[4];
+ abi_ullong st_rdev;
+ unsigned char __pad3[4];
- long long st_size;
- abi_ulong st_blksize;
+ abi_llong st_size;
+ abi_ulong st_blksize;
- unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ullong st_blocks; /* Number 512-byte blocks allocated. */
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
};
#elif defined(TARGET_I386) && !defined(TARGET_ABI32)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
- abi_ulong st_nlink;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad0;
- abi_ulong st_rdev;
- abi_long st_size;
- abi_long st_blksize;
- abi_long st_blocks; /* Number 512-byte blocks allocated. */
-
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
-
- abi_long __unused[3];
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ulong st_nlink;
+
+ abi_uint st_mode;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint __pad0;
+ abi_ulong st_rdev;
+ abi_long st_size;
+ abi_long st_blksize;
+ abi_long st_blocks; /* Number 512-byte blocks allocated. */
+
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+
+ abi_long __unused[3];
};
#elif defined(TARGET_S390X)
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
abi_ulong st_nlink;
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad1;
+ abi_uint st_mode;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint __pad1;
abi_ulong st_rdev;
abi_ulong st_size;
abi_ulong target_st_atime;
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
abi_ulong st_rdev;
abi_ulong _pad1;
abi_long st_size;
- int st_blksize;
- int __pad2;
+ abi_int st_blksize;
+ abi_int __pad2;
abi_long st_blocks;
abi_long target_st_atime;
abi_ulong target_st_atime_nsec;
abi_ulong target_st_mtime_nsec;
abi_long target_st_ctime;
abi_ulong target_st_ctime_nsec;
- unsigned int __unused[2];
+ abi_uint __unused[2];
};
#elif defined(TARGET_XTENSA)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
abi_ulong st_rdev;
abi_long st_size;
abi_ulong st_blksize;
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- uint64_t st_dev; /* Device */
- uint64_t st_ino; /* File serial number */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- uint64_t st_rdev; /* Device number, if device. */
- int64_t st_size; /* Size of file, in bytes. */
+ abi_ullong st_dev; /* Device */
+ abi_ullong st_ino; /* File serial number */
+ abi_uint st_mode; /* File mode. */
+ abi_uint st_nlink; /* Link count. */
+ abi_uint st_uid; /* User ID of the file's owner. */
+ abi_uint st_gid; /* Group ID of the file's group. */
+ abi_ullong st_rdev; /* Device number, if device. */
+ abi_llong st_size; /* Size of file, in bytes. */
abi_ulong st_blksize; /* Optimal block size for I/O. */
abi_ulong __unused2;
- uint64_t st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ullong st_blocks; /* Number 512-byte blocks allocated. */
abi_ulong target_st_atime; /* Time of last access. */
abi_ulong target_st_atime_nsec;
abi_ulong target_st_mtime; /* Time of last modification. */
};
#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) \
- || defined(TARGET_RISCV) || defined(TARGET_HEXAGON)
+ || defined(TARGET_RISCV) || defined(TARGET_HEXAGON)
/* These are the asm-generic versions of the stat and stat64 structures */
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
abi_ulong st_rdev;
abi_ulong __pad1;
abi_long st_size;
- int st_blksize;
- int __pad2;
+ abi_int st_blksize;
+ abi_int __pad2;
abi_long st_blocks;
abi_long target_st_atime;
abi_ulong target_st_atime_nsec;
abi_ulong target_st_mtime_nsec;
abi_long target_st_ctime;
abi_ulong target_st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
+ abi_uint __unused4;
+ abi_uint __unused5;
};
#if !defined(TARGET_RISCV64)
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- uint64_t st_dev;
- uint64_t st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- uint64_t st_rdev;
- uint64_t __pad1;
- int64_t st_size;
- int st_blksize;
- int __pad2;
- int64_t st_blocks;
- int target_st_atime;
- unsigned int target_st_atime_nsec;
- int target_st_mtime;
- unsigned int target_st_mtime_nsec;
- int target_st_ctime;
- unsigned int target_st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
+ abi_ullong st_dev;
+ abi_ullong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ullong st_rdev;
+ abi_ullong __pad1;
+ abi_llong st_size;
+ abi_int st_blksize;
+ abi_int __pad2;
+ abi_llong st_blocks;
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ abi_uint __unused4;
+ abi_uint __unused5;
};
#endif
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- uint64_t st_dev;
+ abi_ullong st_dev;
abi_uint _pad1;
abi_uint _res1;
abi_uint st_mode;
abi_uint st_nlink;
abi_uint st_uid;
abi_uint st_gid;
- uint64_t st_rdev;
+ abi_ullong st_rdev;
abi_uint _pad2;
- int64_t st_size;
+ abi_llong st_size;
abi_int st_blksize;
- int64_t st_blocks;
+ abi_llong st_blocks;
abi_int target_st_atime;
abi_uint target_st_atime_nsec;
abi_int target_st_mtime;
abi_uint target_st_mtime_nsec;
abi_int target_st_ctime;
abi_uint target_st_ctime_nsec;
- uint64_t st_ino;
+ abi_ullong st_ino;
};
#elif defined(TARGET_LOONGARCH64)
#endif
typedef struct {
- int val[2];
+ abi_int val[2];
} target_fsid_t;
#ifdef TARGET_MIPS
#ifdef TARGET_ABI_MIPSN32
struct target_statfs {
- int32_t f_type;
- int32_t f_bsize;
- int32_t f_frsize; /* Fragment size - unsupported */
- int32_t f_blocks;
- int32_t f_bfree;
- int32_t f_files;
- int32_t f_ffree;
- int32_t f_bavail;
-
- /* Linux specials */
- target_fsid_t f_fsid;
- int32_t f_namelen;
- int32_t f_flags;
- int32_t f_spare[5];
+ abi_int f_type;
+ abi_int f_bsize;
+ abi_int f_frsize; /* Fragment size - unsupported */
+ abi_int f_blocks;
+ abi_int f_bfree;
+ abi_int f_files;
+ abi_int f_ffree;
+ abi_int f_bavail;
+
+ /* Linux specials */
+ target_fsid_t f_fsid;
+ abi_int f_namelen;
+ abi_int f_flags;
+ abi_int f_spare[5];
};
#else
struct target_statfs {
- abi_long f_type;
- abi_long f_bsize;
- abi_long f_frsize; /* Fragment size - unsupported */
- abi_long f_blocks;
- abi_long f_bfree;
- abi_long f_files;
- abi_long f_ffree;
- abi_long f_bavail;
-
- /* Linux specials */
- target_fsid_t f_fsid;
- abi_long f_namelen;
- abi_long f_flags;
- abi_long f_spare[5];
+ abi_long f_type;
+ abi_long f_bsize;
+ abi_long f_frsize; /* Fragment size - unsupported */
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_files;
+ abi_long f_ffree;
+ abi_long f_bavail;
+
+ /* Linux specials */
+ target_fsid_t f_fsid;
+ abi_long f_namelen;
+ abi_long f_flags;
+ abi_long f_spare[5];
};
#endif
struct target_statfs64 {
- uint32_t f_type;
- uint32_t f_bsize;
- uint32_t f_frsize; /* Fragment size - unsupported */
- uint32_t __pad;
- uint64_t f_blocks;
- uint64_t f_bfree;
- uint64_t f_files;
- uint64_t f_ffree;
- uint64_t f_bavail;
- target_fsid_t f_fsid;
- uint32_t f_namelen;
- uint32_t f_flags;
- uint32_t f_spare[5];
-};
-#elif (defined(TARGET_PPC64) || defined(TARGET_X86_64) || \
- defined(TARGET_SPARC64) || defined(TARGET_AARCH64) || \
+ abi_uint f_type;
+ abi_uint f_bsize;
+ abi_uint f_frsize; /* Fragment size - unsupported */
+ abi_uint __pad;
+ abi_ullong f_blocks;
+ abi_ullong f_bfree;
+ abi_ullong f_files;
+ abi_ullong f_ffree;
+ abi_ullong f_bavail;
+ target_fsid_t f_fsid;
+ abi_uint f_namelen;
+ abi_uint f_flags;
+ abi_uint f_spare[5];
+};
+#elif (defined(TARGET_PPC64) || defined(TARGET_X86_64) || \
+ defined(TARGET_SPARC64) || defined(TARGET_AARCH64) || \
defined(TARGET_RISCV) || defined(TARGET_LOONGARCH64)) && \
- !defined(TARGET_ABI32)
+ !defined(TARGET_ABI32)
struct target_statfs {
- abi_long f_type;
- abi_long f_bsize;
- abi_long f_blocks;
- abi_long f_bfree;
- abi_long f_bavail;
- abi_long f_files;
- abi_long f_ffree;
- target_fsid_t f_fsid;
- abi_long f_namelen;
- abi_long f_frsize;
- abi_long f_flags;
- abi_long f_spare[4];
+ abi_long f_type;
+ abi_long f_bsize;
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_bavail;
+ abi_long f_files;
+ abi_long f_ffree;
+ target_fsid_t f_fsid;
+ abi_long f_namelen;
+ abi_long f_frsize;
+ abi_long f_flags;
+ abi_long f_spare[4];
};
struct target_statfs64 {
- abi_long f_type;
- abi_long f_bsize;
- abi_long f_blocks;
- abi_long f_bfree;
- abi_long f_bavail;
- abi_long f_files;
- abi_long f_ffree;
- target_fsid_t f_fsid;
- abi_long f_namelen;
- abi_long f_frsize;
- abi_long f_flags;
- abi_long f_spare[4];
+ abi_long f_type;
+ abi_long f_bsize;
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_bavail;
+ abi_long f_files;
+ abi_long f_ffree;
+ target_fsid_t f_fsid;
+ abi_long f_namelen;
+ abi_long f_frsize;
+ abi_long f_flags;
+ abi_long f_spare[4];
};
#elif defined(TARGET_S390X)
struct target_statfs {
- int32_t f_type;
- int32_t f_bsize;
+ abi_int f_type;
+ abi_int f_bsize;
abi_long f_blocks;
abi_long f_bfree;
abi_long f_bavail;
abi_long f_files;
abi_long f_ffree;
kernel_fsid_t f_fsid;
- int32_t f_namelen;
- int32_t f_frsize;
- int32_t f_flags;
- int32_t f_spare[4];
+ abi_int f_namelen;
+ abi_int f_frsize;
+ abi_int f_flags;
+ abi_int f_spare[4];
};
struct target_statfs64 {
- int32_t f_type;
- int32_t f_bsize;
+ abi_int f_type;
+ abi_int f_bsize;
abi_long f_blocks;
abi_long f_bfree;
abi_long f_bavail;
abi_long f_files;
abi_long f_ffree;
kernel_fsid_t f_fsid;
- int32_t f_namelen;
- int32_t f_frsize;
- int32_t f_flags;
- int32_t f_spare[4];
+ abi_int f_namelen;
+ abi_int f_frsize;
+ abi_int f_flags;
+ abi_int f_spare[4];
};
#else
struct target_statfs {
- uint32_t f_type;
- uint32_t f_bsize;
- uint32_t f_blocks;
- uint32_t f_bfree;
- uint32_t f_bavail;
- uint32_t f_files;
- uint32_t f_ffree;
- target_fsid_t f_fsid;
- uint32_t f_namelen;
- uint32_t f_frsize;
- uint32_t f_flags;
- uint32_t f_spare[4];
+ abi_uint f_type;
+ abi_uint f_bsize;
+ abi_uint f_blocks;
+ abi_uint f_bfree;
+ abi_uint f_bavail;
+ abi_uint f_files;
+ abi_uint f_ffree;
+ target_fsid_t f_fsid;
+ abi_uint f_namelen;
+ abi_uint f_frsize;
+ abi_uint f_flags;
+ abi_uint f_spare[4];
};
struct target_statfs64 {
- uint32_t f_type;
- uint32_t f_bsize;
- uint64_t f_blocks;
- uint64_t f_bfree;
- uint64_t f_bavail;
- uint64_t f_files;
- uint64_t f_ffree;
- target_fsid_t f_fsid;
- uint32_t f_namelen;
- uint32_t f_frsize;
- uint32_t f_flags;
- uint32_t f_spare[4];
+ abi_uint f_type;
+ abi_uint f_bsize;
+ abi_ullong f_blocks;
+ abi_ullong f_bfree;
+ abi_ullong f_bavail;
+ abi_ullong f_files;
+ abi_ullong f_ffree;
+ target_fsid_t f_fsid;
+ abi_uint f_namelen;
+ abi_uint f_frsize;
+ abi_uint f_flags;
+ abi_uint f_spare[4];
};
#endif
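
Each target_statfs variant above is filled in field by field by the
statfs emulation; __put_user derives the store width from the target
field's abi_* type and applies guest byte order. A trimmed sketch of that
pattern (host_to_target_statfs is a hypothetical helper; the real code
sits in the statfs paths of linux-user/syscall.c):

static inline void host_to_target_statfs(struct target_statfs *tgt,
                                         const struct statfs *host)
{
    __put_user(host->f_type,    &tgt->f_type);
    __put_user(host->f_bsize,   &tgt->f_bsize);
    __put_user(host->f_blocks,  &tgt->f_blocks);
    __put_user(host->f_bfree,   &tgt->f_bfree);
    __put_user(host->f_bavail,  &tgt->f_bavail);
    __put_user(host->f_files,   &tgt->f_files);
    __put_user(host->f_ffree,   &tgt->f_ffree);
    __put_user(host->f_namelen, &tgt->f_namelen);
}
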
/* soundcard defines */
/* XXX: convert them all to arch independent entries */
-#define TARGET_SNDCTL_COPR_HALT TARGET_IOWR('C', 7, int);
+#define TARGET_SNDCTL_COPR_HALT           TARGET_IOWR('C', 7, abi_int)
#define TARGET_SNDCTL_COPR_LOAD 0xcfb04301
#define TARGET_SNDCTL_COPR_RCODE 0xc0144303
#define TARGET_SNDCTL_COPR_RCVMSG 0x8fa44309
#define TARGET_SNDCTL_COPR_WDATA 0x40144304
#define TARGET_SNDCTL_DSP_RESET TARGET_IO('P', 0)
#define TARGET_SNDCTL_DSP_SYNC TARGET_IO('P', 1)
-#define TARGET_SNDCTL_DSP_SPEED TARGET_IOWR('P', 2, int)
-#define TARGET_SNDCTL_DSP_STEREO TARGET_IOWR('P', 3, int)
-#define TARGET_SNDCTL_DSP_GETBLKSIZE TARGET_IOWR('P', 4, int)
-#define TARGET_SNDCTL_DSP_SETFMT TARGET_IOWR('P', 5, int)
-#define TARGET_SNDCTL_DSP_CHANNELS TARGET_IOWR('P', 6, int)
-#define TARGET_SOUND_PCM_WRITE_FILTER TARGET_IOWR('P', 7, int)
+#define TARGET_SNDCTL_DSP_SPEED TARGET_IOWR('P', 2, abi_int)
+#define TARGET_SNDCTL_DSP_STEREO TARGET_IOWR('P', 3, abi_int)
+#define TARGET_SNDCTL_DSP_GETBLKSIZE TARGET_IOWR('P', 4, abi_int)
+#define TARGET_SNDCTL_DSP_SETFMT TARGET_IOWR('P', 5, abi_int)
+#define TARGET_SNDCTL_DSP_CHANNELS TARGET_IOWR('P', 6, abi_int)
+#define TARGET_SOUND_PCM_WRITE_FILTER TARGET_IOWR('P', 7, abi_int)
#define TARGET_SNDCTL_DSP_POST TARGET_IO('P', 8)
-#define TARGET_SNDCTL_DSP_SUBDIVIDE TARGET_IOWR('P', 9, int)
-#define TARGET_SNDCTL_DSP_SETFRAGMENT TARGET_IOWR('P',10, int)
-#define TARGET_SNDCTL_DSP_GETFMTS TARGET_IOR('P', 11, int)
+#define TARGET_SNDCTL_DSP_SUBDIVIDE TARGET_IOWR('P', 9, abi_int)
+#define TARGET_SNDCTL_DSP_SETFRAGMENT     TARGET_IOWR('P', 10, abi_int)
+#define TARGET_SNDCTL_DSP_GETFMTS TARGET_IOR('P', 11, abi_int)
#define TARGET_SNDCTL_DSP_GETOSPACE TARGET_IORU('P',12)
#define TARGET_SNDCTL_DSP_GETISPACE TARGET_IORU('P',13)
-#define TARGET_SNDCTL_DSP_GETCAPS TARGET_IOR('P', 15, int)
-#define TARGET_SNDCTL_DSP_GETTRIGGER TARGET_IOR('P',16, int)
+#define TARGET_SNDCTL_DSP_GETCAPS TARGET_IOR('P', 15, abi_int)
+#define TARGET_SNDCTL_DSP_GETTRIGGER      TARGET_IOR('P', 16, abi_int)
#define TARGET_SNDCTL_DSP_GETIPTR TARGET_IORU('P',17)
#define TARGET_SNDCTL_DSP_GETOPTR TARGET_IORU('P',18)
#define TARGET_SNDCTL_DSP_MAPINBUF TARGET_IORU('P', 19)
#define TARGET_SOUND_PCM_READ_FILTER 0x80045007
#define TARGET_SOUND_MIXER_INFO TARGET_IOR ('M', 101, mixer_info)
#define TARGET_SOUND_MIXER_ACCESS 0xc0804d66
-#define TARGET_SOUND_MIXER_PRIVATE1 TARGET_IOWR('M', 111, int)
-#define TARGET_SOUND_MIXER_PRIVATE2 TARGET_IOWR('M', 112, int)
-#define TARGET_SOUND_MIXER_PRIVATE3 TARGET_IOWR('M', 113, int)
-#define TARGET_SOUND_MIXER_PRIVATE4 TARGET_IOWR('M', 114, int)
-#define TARGET_SOUND_MIXER_PRIVATE5 TARGET_IOWR('M', 115, int)
-
-#define TARGET_MIXER_READ(dev) TARGET_IOR('M', dev, int)
-
-#define TARGET_SOUND_MIXER_READ_VOLUME TARGET_MIXER_READ(SOUND_MIXER_VOLUME)
-#define TARGET_SOUND_MIXER_READ_BASS TARGET_MIXER_READ(SOUND_MIXER_BASS)
-#define TARGET_SOUND_MIXER_READ_TREBLE TARGET_MIXER_READ(SOUND_MIXER_TREBLE)
-#define TARGET_SOUND_MIXER_READ_SYNTH TARGET_MIXER_READ(SOUND_MIXER_SYNTH)
-#define TARGET_SOUND_MIXER_READ_PCM TARGET_MIXER_READ(SOUND_MIXER_PCM)
-#define TARGET_SOUND_MIXER_READ_SPEAKER TARGET_MIXER_READ(SOUND_MIXER_SPEAKER)
-#define TARGET_SOUND_MIXER_READ_LINE TARGET_MIXER_READ(SOUND_MIXER_LINE)
-#define TARGET_SOUND_MIXER_READ_MIC TARGET_MIXER_READ(SOUND_MIXER_MIC)
-#define TARGET_SOUND_MIXER_READ_CD TARGET_MIXER_READ(SOUND_MIXER_CD)
-#define TARGET_SOUND_MIXER_READ_IMIX TARGET_MIXER_READ(SOUND_MIXER_IMIX)
-#define TARGET_SOUND_MIXER_READ_ALTPCM TARGET_MIXER_READ(SOUND_MIXER_ALTPCM)
-#define TARGET_SOUND_MIXER_READ_RECLEV TARGET_MIXER_READ(SOUND_MIXER_RECLEV)
-#define TARGET_SOUND_MIXER_READ_IGAIN TARGET_MIXER_READ(SOUND_MIXER_IGAIN)
-#define TARGET_SOUND_MIXER_READ_OGAIN TARGET_MIXER_READ(SOUND_MIXER_OGAIN)
-#define TARGET_SOUND_MIXER_READ_LINE1 TARGET_MIXER_READ(SOUND_MIXER_LINE1)
-#define TARGET_SOUND_MIXER_READ_LINE2 TARGET_MIXER_READ(SOUND_MIXER_LINE2)
-#define TARGET_SOUND_MIXER_READ_LINE3 TARGET_MIXER_READ(SOUND_MIXER_LINE3)
+#define TARGET_SOUND_MIXER_PRIVATE1 TARGET_IOWR('M', 111, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE2 TARGET_IOWR('M', 112, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE3 TARGET_IOWR('M', 113, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE4 TARGET_IOWR('M', 114, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE5 TARGET_IOWR('M', 115, abi_int)
+
+#define TARGET_MIXER_READ(dev) TARGET_IOR('M', dev, abi_int)
+
+#define TARGET_SOUND_MIXER_READ_VOLUME TARGET_MIXER_READ(SOUND_MIXER_VOLUME)
+#define TARGET_SOUND_MIXER_READ_BASS TARGET_MIXER_READ(SOUND_MIXER_BASS)
+#define TARGET_SOUND_MIXER_READ_TREBLE TARGET_MIXER_READ(SOUND_MIXER_TREBLE)
+#define TARGET_SOUND_MIXER_READ_SYNTH TARGET_MIXER_READ(SOUND_MIXER_SYNTH)
+#define TARGET_SOUND_MIXER_READ_PCM TARGET_MIXER_READ(SOUND_MIXER_PCM)
+#define TARGET_SOUND_MIXER_READ_SPEAKER TARGET_MIXER_READ(SOUND_MIXER_SPEAKER)
+#define TARGET_SOUND_MIXER_READ_LINE TARGET_MIXER_READ(SOUND_MIXER_LINE)
+#define TARGET_SOUND_MIXER_READ_MIC TARGET_MIXER_READ(SOUND_MIXER_MIC)
+#define TARGET_SOUND_MIXER_READ_CD TARGET_MIXER_READ(SOUND_MIXER_CD)
+#define TARGET_SOUND_MIXER_READ_IMIX TARGET_MIXER_READ(SOUND_MIXER_IMIX)
+#define TARGET_SOUND_MIXER_READ_ALTPCM TARGET_MIXER_READ(SOUND_MIXER_ALTPCM)
+#define TARGET_SOUND_MIXER_READ_RECLEV TARGET_MIXER_READ(SOUND_MIXER_RECLEV)
+#define TARGET_SOUND_MIXER_READ_IGAIN TARGET_MIXER_READ(SOUND_MIXER_IGAIN)
+#define TARGET_SOUND_MIXER_READ_OGAIN TARGET_MIXER_READ(SOUND_MIXER_OGAIN)
+#define TARGET_SOUND_MIXER_READ_LINE1 TARGET_MIXER_READ(SOUND_MIXER_LINE1)
+#define TARGET_SOUND_MIXER_READ_LINE2 TARGET_MIXER_READ(SOUND_MIXER_LINE2)
+#define TARGET_SOUND_MIXER_READ_LINE3 TARGET_MIXER_READ(SOUND_MIXER_LINE3)
/* Obsolete macros */
-#define TARGET_SOUND_MIXER_READ_MUTE TARGET_MIXER_READ(SOUND_MIXER_MUTE)
-#define TARGET_SOUND_MIXER_READ_ENHANCE TARGET_MIXER_READ(SOUND_MIXER_ENHANCE)
-#define TARGET_SOUND_MIXER_READ_LOUD TARGET_MIXER_READ(SOUND_MIXER_LOUD)
-
-#define TARGET_SOUND_MIXER_READ_RECSRC TARGET_MIXER_READ(SOUND_MIXER_RECSRC)
-#define TARGET_SOUND_MIXER_READ_DEVMASK TARGET_MIXER_READ(SOUND_MIXER_DEVMASK)
-#define TARGET_SOUND_MIXER_READ_RECMASK TARGET_MIXER_READ(SOUND_MIXER_RECMASK)
-#define TARGET_SOUND_MIXER_READ_STEREODEVS TARGET_MIXER_READ(SOUND_MIXER_STEREODEVS)
-#define TARGET_SOUND_MIXER_READ_CAPS TARGET_MIXER_READ(SOUND_MIXER_CAPS)
-
-#define TARGET_MIXER_WRITE(dev) TARGET_IOWR('M', dev, int)
-
-#define TARGET_SOUND_MIXER_WRITE_VOLUME TARGET_MIXER_WRITE(SOUND_MIXER_VOLUME)
-#define TARGET_SOUND_MIXER_WRITE_BASS TARGET_MIXER_WRITE(SOUND_MIXER_BASS)
-#define TARGET_SOUND_MIXER_WRITE_TREBLE TARGET_MIXER_WRITE(SOUND_MIXER_TREBLE)
-#define TARGET_SOUND_MIXER_WRITE_SYNTH TARGET_MIXER_WRITE(SOUND_MIXER_SYNTH)
-#define TARGET_SOUND_MIXER_WRITE_PCM TARGET_MIXER_WRITE(SOUND_MIXER_PCM)
-#define TARGET_SOUND_MIXER_WRITE_SPEAKER TARGET_MIXER_WRITE(SOUND_MIXER_SPEAKER)
-#define TARGET_SOUND_MIXER_WRITE_LINE TARGET_MIXER_WRITE(SOUND_MIXER_LINE)
-#define TARGET_SOUND_MIXER_WRITE_MIC TARGET_MIXER_WRITE(SOUND_MIXER_MIC)
-#define TARGET_SOUND_MIXER_WRITE_CD TARGET_MIXER_WRITE(SOUND_MIXER_CD)
-#define TARGET_SOUND_MIXER_WRITE_IMIX TARGET_MIXER_WRITE(SOUND_MIXER_IMIX)
-#define TARGET_SOUND_MIXER_WRITE_ALTPCM TARGET_MIXER_WRITE(SOUND_MIXER_ALTPCM)
-#define TARGET_SOUND_MIXER_WRITE_RECLEV TARGET_MIXER_WRITE(SOUND_MIXER_RECLEV)
-#define TARGET_SOUND_MIXER_WRITE_IGAIN TARGET_MIXER_WRITE(SOUND_MIXER_IGAIN)
-#define TARGET_SOUND_MIXER_WRITE_OGAIN TARGET_MIXER_WRITE(SOUND_MIXER_OGAIN)
-#define TARGET_SOUND_MIXER_WRITE_LINE1 TARGET_MIXER_WRITE(SOUND_MIXER_LINE1)
-#define TARGET_SOUND_MIXER_WRITE_LINE2 TARGET_MIXER_WRITE(SOUND_MIXER_LINE2)
-#define TARGET_SOUND_MIXER_WRITE_LINE3 TARGET_MIXER_WRITE(SOUND_MIXER_LINE3)
+#define TARGET_SOUND_MIXER_READ_MUTE TARGET_MIXER_READ(SOUND_MIXER_MUTE)
+#define TARGET_SOUND_MIXER_READ_ENHANCE TARGET_MIXER_READ(SOUND_MIXER_ENHANCE)
+#define TARGET_SOUND_MIXER_READ_LOUD TARGET_MIXER_READ(SOUND_MIXER_LOUD)
+
+#define TARGET_SOUND_MIXER_READ_RECSRC TARGET_MIXER_READ(SOUND_MIXER_RECSRC)
+#define TARGET_SOUND_MIXER_READ_DEVMASK TARGET_MIXER_READ(SOUND_MIXER_DEVMASK)
+#define TARGET_SOUND_MIXER_READ_RECMASK TARGET_MIXER_READ(SOUND_MIXER_RECMASK)
+#define TARGET_SOUND_MIXER_READ_STEREODEVS TARGET_MIXER_READ(SOUND_MIXER_STEREODEVS)
+#define TARGET_SOUND_MIXER_READ_CAPS TARGET_MIXER_READ(SOUND_MIXER_CAPS)
+
+#define TARGET_MIXER_WRITE(dev) TARGET_IOWR('M', dev, abi_int)
+
+#define TARGET_SOUND_MIXER_WRITE_VOLUME TARGET_MIXER_WRITE(SOUND_MIXER_VOLUME)
+#define TARGET_SOUND_MIXER_WRITE_BASS TARGET_MIXER_WRITE(SOUND_MIXER_BASS)
+#define TARGET_SOUND_MIXER_WRITE_TREBLE TARGET_MIXER_WRITE(SOUND_MIXER_TREBLE)
+#define TARGET_SOUND_MIXER_WRITE_SYNTH TARGET_MIXER_WRITE(SOUND_MIXER_SYNTH)
+#define TARGET_SOUND_MIXER_WRITE_PCM TARGET_MIXER_WRITE(SOUND_MIXER_PCM)
+#define TARGET_SOUND_MIXER_WRITE_SPEAKER TARGET_MIXER_WRITE(SOUND_MIXER_SPEAKER)
+#define TARGET_SOUND_MIXER_WRITE_LINE TARGET_MIXER_WRITE(SOUND_MIXER_LINE)
+#define TARGET_SOUND_MIXER_WRITE_MIC TARGET_MIXER_WRITE(SOUND_MIXER_MIC)
+#define TARGET_SOUND_MIXER_WRITE_CD TARGET_MIXER_WRITE(SOUND_MIXER_CD)
+#define TARGET_SOUND_MIXER_WRITE_IMIX TARGET_MIXER_WRITE(SOUND_MIXER_IMIX)
+#define TARGET_SOUND_MIXER_WRITE_ALTPCM TARGET_MIXER_WRITE(SOUND_MIXER_ALTPCM)
+#define TARGET_SOUND_MIXER_WRITE_RECLEV TARGET_MIXER_WRITE(SOUND_MIXER_RECLEV)
+#define TARGET_SOUND_MIXER_WRITE_IGAIN TARGET_MIXER_WRITE(SOUND_MIXER_IGAIN)
+#define TARGET_SOUND_MIXER_WRITE_OGAIN TARGET_MIXER_WRITE(SOUND_MIXER_OGAIN)
+#define TARGET_SOUND_MIXER_WRITE_LINE1 TARGET_MIXER_WRITE(SOUND_MIXER_LINE1)
+#define TARGET_SOUND_MIXER_WRITE_LINE2 TARGET_MIXER_WRITE(SOUND_MIXER_LINE2)
+#define TARGET_SOUND_MIXER_WRITE_LINE3 TARGET_MIXER_WRITE(SOUND_MIXER_LINE3)
/* Obsolete macros */
-#define TARGET_SOUND_MIXER_WRITE_MUTE TARGET_MIXER_WRITE(SOUND_MIXER_MUTE)
-#define TARGET_SOUND_MIXER_WRITE_ENHANCE TARGET_MIXER_WRITE(SOUND_MIXER_ENHANCE)
-#define TARGET_SOUND_MIXER_WRITE_LOUD TARGET_MIXER_WRITE(SOUND_MIXER_LOUD)
+#define TARGET_SOUND_MIXER_WRITE_MUTE TARGET_MIXER_WRITE(SOUND_MIXER_MUTE)
+#define TARGET_SOUND_MIXER_WRITE_ENHANCE TARGET_MIXER_WRITE(SOUND_MIXER_ENHANCE)
+#define TARGET_SOUND_MIXER_WRITE_LOUD TARGET_MIXER_WRITE(SOUND_MIXER_LOUD)
-#define TARGET_SOUND_MIXER_WRITE_RECSRC TARGET_MIXER_WRITE(SOUND_MIXER_RECSRC)
+#define TARGET_SOUND_MIXER_WRITE_RECSRC TARGET_MIXER_WRITE(SOUND_MIXER_RECSRC)
struct target_snd_timer_id {
- int dev_class;
- int dev_sclass;
- int card;
- int device;
- int subdevice;
+ abi_int dev_class;
+ abi_int dev_sclass;
+ abi_int card;
+ abi_int device;
+ abi_int subdevice;
};
struct target_snd_timer_ginfo {
struct target_snd_timer_id tid;
- unsigned int flags;
- int card;
+ abi_uint flags;
+ abi_int card;
unsigned char id[64];
unsigned char name[80];
abi_ulong reserved0;
abi_ulong resolution;
abi_ulong resolution_min;
abi_ulong resolution_max;
- unsigned int clients;
+ abi_uint clients;
unsigned char reserved[32];
};
};
struct target_snd_timer_info {
- unsigned int flags;
- int card;
+ abi_uint flags;
+ abi_int card;
unsigned char id[64];
unsigned char name[80];
abi_ulong reserved0;
struct target_snd_timer_status {
struct target_timespec tstamp;
- unsigned int resolution;
- unsigned int lost;
- unsigned int overrun;
- unsigned int queue;
+ abi_uint resolution;
+ abi_uint lost;
+ abi_uint overrun;
+ abi_uint queue;
unsigned char reserved[64];
};
/* alsa timer ioctls */
-#define TARGET_SNDRV_TIMER_IOCTL_PVERSION TARGET_IOR('T', 0x00, int)
-#define TARGET_SNDRV_TIMER_IOCTL_NEXT_DEVICE TARGET_IOWR('T', 0x01, \
- struct snd_timer_id)
-#define TARGET_SNDRV_TIMER_IOCTL_GINFO TARGET_IOWR('T', 0x03, \
- struct target_snd_timer_ginfo)
-#define TARGET_SNDRV_TIMER_IOCTL_GPARAMS TARGET_IOW('T', 0x04, \
- struct target_snd_timer_gparams)
-#define TARGET_SNDRV_TIMER_IOCTL_GSTATUS TARGET_IOWR('T', 0x05, \
- struct target_snd_timer_gstatus)
-#define TARGET_SNDRV_TIMER_IOCTL_SELECT TARGET_IOW('T', 0x10, \
- struct target_snd_timer_select)
-#define TARGET_SNDRV_TIMER_IOCTL_INFO TARGET_IOR('T', 0x11, \
- struct target_snd_timer_info)
-#define TARGET_SNDRV_TIMER_IOCTL_PARAMS TARGET_IOW('T', 0x12, \
- struct snd_timer_params)
-#define TARGET_SNDRV_TIMER_IOCTL_STATUS TARGET_IOR('T', 0x14, \
- struct target_snd_timer_status)
+#define TARGET_SNDRV_TIMER_IOCTL_PVERSION TARGET_IOR('T', 0x00, abi_int)
+#define TARGET_SNDRV_TIMER_IOCTL_NEXT_DEVICE TARGET_IOWR('T', 0x01, \
+ struct snd_timer_id)
+#define TARGET_SNDRV_TIMER_IOCTL_GINFO TARGET_IOWR('T', 0x03, \
+ struct target_snd_timer_ginfo)
+#define TARGET_SNDRV_TIMER_IOCTL_GPARAMS TARGET_IOW('T', 0x04, \
+ struct target_snd_timer_gparams)
+#define TARGET_SNDRV_TIMER_IOCTL_GSTATUS TARGET_IOWR('T', 0x05, \
+ struct target_snd_timer_gstatus)
+#define TARGET_SNDRV_TIMER_IOCTL_SELECT TARGET_IOW('T', 0x10, \
+ struct target_snd_timer_select)
+#define TARGET_SNDRV_TIMER_IOCTL_INFO TARGET_IOR('T', 0x11, \
+ struct target_snd_timer_info)
+#define TARGET_SNDRV_TIMER_IOCTL_PARAMS TARGET_IOW('T', 0x12, \
+ struct snd_timer_params)
+#define TARGET_SNDRV_TIMER_IOCTL_STATUS TARGET_IOR('T', 0x14, \
+ struct target_snd_timer_status)
#define TARGET_SNDRV_TIMER_IOCTL_START TARGET_IO('T', 0xa0)
#define TARGET_SNDRV_TIMER_IOCTL_STOP TARGET_IO('T', 0xa1)
#define TARGET_SNDRV_TIMER_IOCTL_CONTINUE TARGET_IO('T', 0xa2)
abi_ulong bufferram; /* Memory used by buffers */
abi_ulong totalswap; /* Total swap space size */
abi_ulong freeswap; /* swap space still available */
- unsigned short procs; /* Number of current processes */
- unsigned short pad; /* explicit padding for m68k */
+ abi_ushort procs; /* Number of current processes */
+ abi_ushort pad; /* explicit padding for m68k */
abi_ulong totalhigh; /* Total high memory size */
abi_ulong freehigh; /* Available high memory size */
- unsigned int mem_unit; /* Memory unit size in bytes */
+ abi_uint mem_unit; /* Memory unit size in bytes */
char _f[20-2*sizeof(abi_long)-sizeof(int)]; /* Padding: libc5 uses this.. */
};
};
struct target_drm_version {
- int version_major;
- int version_minor;
- int version_patchlevel;
+ abi_int version_major;
+ abi_int version_minor;
+ abi_int version_patchlevel;
abi_ulong name_len;
abi_ulong name;
abi_ulong date_len;
};
struct target_drm_i915_getparam {
- int param;
+ abi_int param;
abi_ulong value;
};
#endif
struct target_ucred {
- uint32_t pid;
- uint32_t uid;
- uint32_t gid;
+ abi_uint pid;
+ abi_uint uid;
+ abi_uint gid;
};
-typedef int32_t target_timer_t;
+typedef abi_int target_timer_t;
#define TARGET_SIGEV_MAX_SIZE 64
/* This is architecture-specific but most architectures use the default */
#ifdef TARGET_MIPS
-#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(int32_t) * 2 + sizeof(abi_long))
+#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(abi_int) * 2 + sizeof(abi_long))
#else
-#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(int32_t) * 2 \
+#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(abi_int) * 2 \
+ sizeof(target_sigval_t))
#endif
-#define TARGET_SIGEV_PAD_SIZE ((TARGET_SIGEV_MAX_SIZE \
- - TARGET_SIGEV_PREAMBLE_SIZE) \
- / sizeof(int32_t))
+#define TARGET_SIGEV_PAD_SIZE ((TARGET_SIGEV_MAX_SIZE \
+ - TARGET_SIGEV_PREAMBLE_SIZE) \
+ / sizeof(abi_int))
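+/*
+ * For example, on a typical 64-bit target sizeof(abi_int) == 4 and
+ * sizeof(target_sigval_t) == 8, so the preamble is 16 bytes and the
+ * pad works out to (64 - 16) / 4 == 12 abi_ints.
+ */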
struct target_sigevent {
target_sigval_t sigev_value;
};
struct target_user_cap_header {
- uint32_t version;
- int pid;
+ abi_uint version;
+ abi_int pid;
};
struct target_user_cap_data {
- uint32_t effective;
- uint32_t permitted;
- uint32_t inheritable;
+ abi_uint effective;
+ abi_uint permitted;
+ abi_uint inheritable;
};
/* from kernel's include/linux/syslog.h */
#define TARGET_SYSLOG_ACTION_SIZE_BUFFER 10
struct target_statx_timestamp {
- int64_t tv_sec;
- uint32_t tv_nsec;
- int32_t __reserved;
+ abi_llong tv_sec;
+ abi_uint tv_nsec;
+ abi_int __reserved;
};
struct target_statx {
- /* 0x00 */
- uint32_t stx_mask; /* What results were written [uncond] */
- uint32_t stx_blksize; /* Preferred general I/O size [uncond] */
- uint64_t stx_attributes; /* Flags conveying information about the file */
- /* 0x10 */
- uint32_t stx_nlink; /* Number of hard links */
- uint32_t stx_uid; /* User ID of owner */
- uint32_t stx_gid; /* Group ID of owner */
- uint16_t stx_mode; /* File mode */
- uint16_t __spare0[1];
- /* 0x20 */
- uint64_t stx_ino; /* Inode number */
- uint64_t stx_size; /* File size */
- uint64_t stx_blocks; /* Number of 512-byte blocks allocated */
- uint64_t stx_attributes_mask; /* Mask to show what is supported */
- /* 0x40 */
- struct target_statx_timestamp stx_atime; /* Last access time */
- struct target_statx_timestamp stx_btime; /* File creation time */
- struct target_statx_timestamp stx_ctime; /* Last attribute change time */
- struct target_statx_timestamp stx_mtime; /* Last data modification time */
- /* 0x80 */
- uint32_t stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
- uint32_t stx_rdev_minor;
- uint32_t stx_dev_major; /* ID of device containing file [uncond] */
- uint32_t stx_dev_minor;
- /* 0x90 */
- uint64_t __spare2[14]; /* Spare space for future expansion */
- /* 0x100 */
+ /* 0x00 */
+ abi_uint stx_mask; /* What results were written [uncond] */
+ abi_uint stx_blksize; /* Preferred general I/O size [uncond] */
+ abi_ullong stx_attributes; /* Flags conveying information about the file */
+ /* 0x10 */
+ abi_uint stx_nlink; /* Number of hard links */
+ abi_uint stx_uid; /* User ID of owner */
+ abi_uint stx_gid; /* Group ID of owner */
+ uint16_t stx_mode; /* File mode */
+ uint16_t __spare0[1];
+ /* 0x20 */
+ abi_ullong stx_ino; /* Inode number */
+ abi_ullong stx_size; /* File size */
+ abi_ullong stx_blocks; /* Number of 512-byte blocks allocated */
+ abi_ullong stx_attributes_mask; /* Mask to show what is supported */
+ /* 0x40 */
+ struct target_statx_timestamp stx_atime; /* Last access time */
+ struct target_statx_timestamp stx_btime; /* File creation time */
+ struct target_statx_timestamp stx_ctime; /* Last attribute change time */
+ struct target_statx_timestamp stx_mtime; /* Last data modification time */
+ /* 0x80 */
+ abi_uint stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
+ abi_uint stx_rdev_minor;
+ abi_uint stx_dev_major; /* ID of device containing file [uncond] */
+ abi_uint stx_dev_minor;
+ /* 0x90 */
+ abi_ullong __spare2[14]; /* Spare space for future expansion */
+ /* 0x100 */
};
/* from kernel's include/linux/sched/types.h */
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
- int flags, int fd, abi_ulong offset);
+ int flags, int fd, off_t offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
abi_ulong new_size, unsigned long flags,
+#ifndef XTENSA_TARGET_MMAN_H
+#define XTENSA_TARGET_MMAN_H
+
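+/*
+ * These values differ from the generic defaults and must therefore be
+ * defined before including generic/target_mman.h, which only provides
+ * #ifndef-guarded fallbacks.
+ */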
+#define TARGET_PROT_SEM 0x10
+
+#define TARGET_MAP_NORESERVE 0x0400
+#define TARGET_MAP_ANONYMOUS 0x0800
+#define TARGET_MAP_GROWSDOWN 0x1000
+#define TARGET_MAP_DENYWRITE 0x2000
+#define TARGET_MAP_EXECUTABLE 0x4000
+#define TARGET_MAP_LOCKED 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+
#include "../generic/target_mman.h"
+
+#endif
cc = meson.get_compiler('c')
all_languages = ['c']
-if add_languages('cpp', required: false, native: false)
+if targetos == 'windows' and add_languages('cpp', required: false, native: false)
all_languages += ['cpp']
cxx = meson.get_compiler('cpp')
endif
bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin']
supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux']
-supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv', 'x86', 'x86_64',
+supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64',
'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64']
cpu = host_machine.cpu_family()
-# Unify riscv* to a single family.
-if cpu in ['riscv32', 'riscv64']
- cpu = 'riscv'
-endif
-
target_dirs = config_host['TARGET_DIRS'].split()
have_linux_user = false
have_bsd_user = false
host_arch = 'i386'
elif cpu == 'mips64'
host_arch = 'mips'
+elif cpu in ['riscv32', 'riscv64']
+ host_arch = 'riscv'
else
host_arch = cpu
endif
kvm_targets = ['ppc-softmmu', 'ppc64-softmmu']
elif cpu in ['mips', 'mips64']
kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu']
-elif cpu in ['riscv']
- kvm_targets = ['riscv32-softmmu', 'riscv64-softmmu']
+elif cpu in ['riscv32']
+ kvm_targets = ['riscv32-softmmu']
+elif cpu in ['riscv64']
+ kvm_targets = ['riscv64-softmmu']
else
kvm_targets = []
endif
summary_info += {'ALSA support': alsa}
summary_info += {'PulseAudio support': pulse}
endif
-summary_info += {'Pipewire support': pipewire}
+summary_info += {'PipeWire support': pipewire}
summary_info += {'JACK support': jack}
summary(summary_info, bool_yn: true, section: 'Audio backends')
option('pa', type: 'feature', value: 'auto',
description: 'PulseAudio sound support')
option('pipewire', type: 'feature', value: 'auto',
- description: 'Pipewire sound support')
+ description: 'PipeWire sound support')
option('sndio', type: 'feature', value: 'auto',
description: 'sndio sound support')
/*
* We don't support postcopy with some type of ramblocks.
*
- * NOTE: we explicitly ignored ramblock_is_ignored() instead we checked
+ * NOTE: we explicitly ignore migrate_ram_is_ignored() and instead check
* all possible ramblocks. This is because this function can be called
* when creating the migration object, during the phase RAM_MIGRATABLE
* is not even properly set for all the ramblocks.
return migrate_postcopy_preempt() && migration_in_postcopy();
}
-bool ramblock_is_ignored(RAMBlock *block)
+bool migrate_ram_is_ignored(RAMBlock *block)
{
return !qemu_ram_is_migratable(block) ||
(migrate_ignore_shared() && qemu_ram_is_shared(block)
unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
unsigned long *bitmap = rb->bmap;
- if (ramblock_is_ignored(rb)) {
+ if (migrate_ram_is_ignored(rb)) {
/* Points directly to the end, so we know no dirty page */
pss->page = size;
return;
*num = 0;
- if (ramblock_is_ignored(rb)) {
+ if (migrate_ram_is_ignored(rb)) {
return size;
}
unsigned long start_page = pss->page;
int res;
- if (ramblock_is_ignored(pss->block)) {
+ if (migrate_ram_is_ignored(pss->block)) {
error_report("block %s should not be migrated !", pss->block->idstr);
return 0;
}
return NULL;
}
- if (ramblock_is_ignored(block)) {
+ if (migrate_ram_is_ignored(block)) {
error_report("block %s should not be migrated !", id);
return NULL;
}
}
if (migrate_ignore_shared()) {
hwaddr addr = qemu_get_be64(f);
- if (ramblock_is_ignored(block) &&
+ if (migrate_ram_is_ignored(block) &&
block->mr->addr != addr) {
error_report("Mismatched GPAs for block %s "
"%" PRId64 "!= %" PRId64,
RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
Error *err = NULL;
- if (ramblock_is_ignored(rb)) {
+ if (migrate_ram_is_ignored(rb)) {
return;
}
extern XBZRLECacheStats xbzrle_counters;
extern CompressionStats compression_counters;
-bool ramblock_is_ignored(RAMBlock *block);
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \
INTERNAL_RAMBLOCK_FOREACH(block) \
- if (ramblock_is_ignored(block)) {} else
+ if (migrate_ram_is_ignored(block)) {} else
#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
INTERNAL_RAMBLOCK_FOREACH(block) \
/*
- * Copyright (C) 2016-2019 Red Hat, Inc.
+ * Copyright Red Hat
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
*
* Network Block Device Client Side
Error **errp)
{
int ret;
- uint32_t export_len = strlen(export);
+ uint32_t export_len;
uint32_t queries = !!query;
uint32_t query_len = 0;
uint32_t data_len;
char *data;
char *p;
+ assert(strnlen(export, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
+ export_len = strlen(export);
data_len = sizeof(export_len) + export_len + sizeof(queries);
- assert(export_len <= NBD_MAX_STRING_SIZE);
if (query) {
+ assert(strnlen(query, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
query_len = strlen(query);
data_len += sizeof(query_len) + query_len;
- assert(query_len <= NBD_MAX_STRING_SIZE);
} else {
assert(opt == NBD_OPT_LIST_META_CONTEXT);
}
* Start the handshake to the server. After a positive return, the server
* is ready to accept additional NBD_OPT requests.
* Returns: negative errno: failure talking to server
- * 0: server is oldstyle, must call nbd_negotiate_finish_oldstyle
- * 1: server is newstyle, but can only accept EXPORT_NAME
- * 2: server is newstyle, but lacks structured replies
- * 3: server is newstyle and set up for structured replies
+ * non-negative: enum NBDMode describing server abilities
*/
static int nbd_start_negotiate(AioContext *aio_context, QIOChannel *ioc,
QCryptoTLSCreds *tlscreds,
return -EINVAL;
}
}
- return 2 + result;
+ return result ? NBD_MODE_STRUCTURED : NBD_MODE_SIMPLE;
} else {
- return 1;
+ return NBD_MODE_EXPORT_NAME;
}
} else if (magic == NBD_CLIENT_MAGIC) {
if (tlscreds) {
error_setg(errp, "Server does not support STARTTLS");
return -EINVAL;
}
- return 0;
+ return NBD_MODE_OLDSTYLE;
} else {
error_setg(errp, "Bad server magic received: 0x%" PRIx64, magic);
return -EINVAL;
result = nbd_start_negotiate(aio_context, ioc, tlscreds, hostname, outioc,
info->structured_reply, &zeroes, errp);
+ if (result < 0) {
+ return result;
+ }
info->structured_reply = false;
info->base_allocation = false;
ioc = *outioc;
}
- switch (result) {
- case 3: /* newstyle, with structured replies */
+ switch ((NBDMode)result) {
+ case NBD_MODE_STRUCTURED:
info->structured_reply = true;
if (base_allocation) {
result = nbd_negotiate_simple_meta_context(ioc, info, errp);
info->base_allocation = result == 1;
}
/* fall through */
- case 2: /* newstyle, try OPT_GO */
+ case NBD_MODE_SIMPLE:
/* Try NBD_OPT_GO first - if it works, we are done (it
* also gives us a good message if the server requires
* TLS). If it is not available, fall back to
return -EINVAL;
}
/* fall through */
- case 1: /* newstyle, but limited to EXPORT_NAME */
+ case NBD_MODE_EXPORT_NAME:
/* write the export name request */
if (nbd_send_option_request(ioc, NBD_OPT_EXPORT_NAME, -1, info->name,
errp) < 0) {
return -EINVAL;
}
break;
- case 0: /* oldstyle, parse length and flags */
+ case NBD_MODE_OLDSTYLE:
if (*info->name) {
error_setg(errp, "Server does not support non-empty export names");
return -EINVAL;
}
break;
default:
- return result;
+ g_assert_not_reached();
}
trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
if (tlscreds && sioc) {
ioc = sioc;
}
+ if (result < 0) {
+ goto out;
+ }
- switch (result) {
- case 2:
- case 3:
+ switch ((NBDMode)result) {
+ case NBD_MODE_SIMPLE:
+ case NBD_MODE_STRUCTURED:
/* newstyle - use NBD_OPT_LIST to populate array, then try
* NBD_OPT_INFO on each array member. If structured replies
* are enabled, also try NBD_OPT_LIST_META_CONTEXT. */
memset(&array[count - 1], 0, sizeof(*array));
array[count - 1].name = name;
array[count - 1].description = desc;
- array[count - 1].structured_reply = result == 3;
+ array[count - 1].structured_reply = result == NBD_MODE_STRUCTURED;
}
for (i = 0; i < count; i++) {
break;
}
- if (result == 3 &&
+ if (result == NBD_MODE_STRUCTURED &&
nbd_list_meta_contexts(ioc, &array[i], errp) < 0) {
goto out;
}
/* Send NBD_OPT_ABORT as a courtesy before hanging up */
nbd_send_opt_abort(ioc);
break;
- case 1: /* newstyle, but limited to EXPORT_NAME */
+ case NBD_MODE_EXPORT_NAME:
error_setg(errp, "Server does not support export lists");
/* We can't even send NBD_OPT_ABORT, so merely hang up */
goto out;
- case 0: /* oldstyle, parse length and flags */
+ case NBD_MODE_OLDSTYLE:
+ /* Lone export name is implied, but we can parse length and flags */
array = g_new0(NBDExportInfo, 1);
array->name = g_strdup("");
count = 1;
}
break;
default:
- goto out;
+ g_assert_not_reached();
}
*info = array;
{
uint8_t buf[NBD_REQUEST_SIZE];
- trace_nbd_send_request(request->from, request->len, request->handle,
+ trace_nbd_send_request(request->from, request->len, request->cookie,
request->flags, request->type,
nbd_cmd_lookup(request->type));
stl_be_p(buf, NBD_REQUEST_MAGIC);
stw_be_p(buf + 4, request->flags);
stw_be_p(buf + 6, request->type);
- stq_be_p(buf + 8, request->handle);
+ stq_be_p(buf + 8, request->cookie);
stq_be_p(buf + 16, request->from);
stl_be_p(buf + 24, request->len);
}
reply->error = be32_to_cpu(reply->error);
- reply->handle = be64_to_cpu(reply->handle);
+ reply->cookie = be64_to_cpu(reply->cookie);
return 0;
}
chunk->flags = be16_to_cpu(chunk->flags);
chunk->type = be16_to_cpu(chunk->type);
- chunk->handle = be64_to_cpu(chunk->handle);
+ chunk->cookie = be64_to_cpu(chunk->cookie);
chunk->length = be32_to_cpu(chunk->length);
+ /*
+ * Because we use BLOCK_STATUS with REQ_ONE, and cap READ requests
+ * at 32M, no valid server should send us payload larger than
+ * this. Even if we stopped using REQ_ONE, sane servers will cap
+ * the number of extents they return for block status.
+ */
+ if (chunk->length > NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)) {
+ error_setg(errp, "server chunk %" PRIu32 " (%s) payload is too long",
+                   chunk->type, nbd_reply_type_lookup(chunk->type));
+ return -EINVAL;
+ }
+
return 0;
}
}
trace_nbd_receive_simple_reply(reply->simple.error,
nbd_err_lookup(reply->simple.error),
- reply->handle);
+ reply->cookie);
break;
case NBD_STRUCTURED_REPLY_MAGIC:
ret = nbd_receive_structured_reply_chunk(ioc, &reply->structured, errp);
type = nbd_reply_type_lookup(reply->structured.type);
trace_nbd_receive_structured_reply_chunk(reply->structured.flags,
reply->structured.type, type,
- reply->structured.handle,
+ reply->structured.cookie,
reply->structured.length);
break;
default:
}
return ret;
}
+
+
+const char *nbd_mode_lookup(NBDMode mode)
+{
+ switch (mode) {
+ case NBD_MODE_OLDSTYLE:
+ return "oldstyle";
+ case NBD_MODE_EXPORT_NAME:
+ return "export name only";
+ case NBD_MODE_SIMPLE:
+ return "simple headers";
+ case NBD_MODE_STRUCTURED:
+ return "structured replies";
+ default:
+ return "<unknown>";
+ }
+}
/*
- * Copyright (C) 2016-2022 Red Hat, Inc.
+ * Copyright Red Hat
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
*
* Network Block Device Server Side
[ 0 .. 3] magic (NBD_REQUEST_MAGIC)
[ 4 .. 5] flags (NBD_CMD_FLAG_FUA, ...)
[ 6 .. 7] type (NBD_CMD_READ, ...)
- [ 8 .. 15] handle
+ [ 8 .. 15] cookie
[16 .. 23] from
[24 .. 27] len
*/
magic = ldl_be_p(buf);
request->flags = lduw_be_p(buf + 4);
request->type = lduw_be_p(buf + 6);
- request->handle = ldq_be_p(buf + 8);
+ request->cookie = ldq_be_p(buf + 8);
request->from = ldq_be_p(buf + 16);
request->len = ldl_be_p(buf + 24);
}
static inline void set_be_simple_reply(NBDSimpleReply *reply, uint64_t error,
- uint64_t handle)
+ uint64_t cookie)
{
stl_be_p(&reply->magic, NBD_SIMPLE_REPLY_MAGIC);
stl_be_p(&reply->error, error);
- stq_be_p(&reply->handle, handle);
+ stq_be_p(&reply->cookie, cookie);
}
static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
- uint64_t handle,
+ NBDRequest *request,
uint32_t error,
void *data,
size_t len,
{.iov_base = data, .iov_len = len}
};
- trace_nbd_co_send_simple_reply(handle, nbd_err, nbd_err_lookup(nbd_err),
- len);
- set_be_simple_reply(&reply, nbd_err, handle);
+ assert(!len || !nbd_err);
+ assert(!client->structured_reply || request->type != NBD_CMD_READ);
+ trace_nbd_co_send_simple_reply(request->cookie, nbd_err,
+ nbd_err_lookup(nbd_err), len);
+ set_be_simple_reply(&reply, nbd_err, request->cookie);
- return nbd_co_send_iov(client, iov, len ? 2 : 1, errp);
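+    /* Send both iovs unconditionally; a zero-length data iov is harmless */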
+ return nbd_co_send_iov(client, iov, 2, errp);
}
-static inline void set_be_chunk(NBDStructuredReplyChunk *chunk, uint16_t flags,
- uint16_t type, uint64_t handle, uint32_t length)
+/*
+ * Prepare the header of a reply chunk for network transmission.
+ *
+ * On input, @iov is partially initialized: iov[0].iov_base must point
+ * to an uninitialized NBDReply, while the remaining @niov elements
+ * (if any) must be ready for transmission. This function then
+ * populates iov[0] for transmission.
+ */
+static inline void set_be_chunk(NBDClient *client, struct iovec *iov,
+ size_t niov, uint16_t flags, uint16_t type,
+ NBDRequest *request)
{
+ /* TODO - handle structured vs. extended replies */
+ NBDStructuredReplyChunk *chunk = iov->iov_base;
+ size_t i, length = 0;
+
+ for (i = 1; i < niov; i++) {
+ length += iov[i].iov_len;
+ }
+ assert(length <= NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData));
+
+ iov[0].iov_len = sizeof(*chunk);
stl_be_p(&chunk->magic, NBD_STRUCTURED_REPLY_MAGIC);
stw_be_p(&chunk->flags, flags);
stw_be_p(&chunk->type, type);
- stq_be_p(&chunk->handle, handle);
+ stq_be_p(&chunk->cookie, request->cookie);
stl_be_p(&chunk->length, length);
}
-static int coroutine_fn nbd_co_send_structured_done(NBDClient *client,
- uint64_t handle,
- Error **errp)
+static int coroutine_fn nbd_co_send_chunk_done(NBDClient *client,
+ NBDRequest *request,
+ Error **errp)
{
- NBDStructuredReplyChunk chunk;
+ NBDReply hdr;
struct iovec iov[] = {
- {.iov_base = &chunk, .iov_len = sizeof(chunk)},
+ {.iov_base = &hdr},
};
- trace_nbd_co_send_structured_done(handle);
- set_be_chunk(&chunk, NBD_REPLY_FLAG_DONE, NBD_REPLY_TYPE_NONE, handle, 0);
-
+ trace_nbd_co_send_chunk_done(request->cookie);
+ set_be_chunk(client, iov, 1, NBD_REPLY_FLAG_DONE,
+ NBD_REPLY_TYPE_NONE, request);
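+    /* set_be_chunk() fills in iov[0].iov_len along with the header */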
return nbd_co_send_iov(client, iov, 1, errp);
}
-static int coroutine_fn nbd_co_send_structured_read(NBDClient *client,
- uint64_t handle,
- uint64_t offset,
- void *data,
- size_t size,
- bool final,
- Error **errp)
+static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
+ NBDRequest *request,
+ uint64_t offset,
+ void *data,
+ size_t size,
+ bool final,
+ Error **errp)
{
+ NBDReply hdr;
NBDStructuredReadData chunk;
struct iovec iov[] = {
+ {.iov_base = &hdr},
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
{.iov_base = data, .iov_len = size}
};
assert(size);
- trace_nbd_co_send_structured_read(handle, offset, data, size);
- set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
- NBD_REPLY_TYPE_OFFSET_DATA, handle,
- sizeof(chunk) - sizeof(chunk.h) + size);
+ trace_nbd_co_send_chunk_read(request->cookie, offset, data, size);
+ set_be_chunk(client, iov, 3, final ? NBD_REPLY_FLAG_DONE : 0,
+ NBD_REPLY_TYPE_OFFSET_DATA, request);
stq_be_p(&chunk.offset, offset);
- return nbd_co_send_iov(client, iov, 2, errp);
+ return nbd_co_send_iov(client, iov, 3, errp);
}
-
-static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
- uint64_t handle,
- uint32_t error,
- const char *msg,
- Error **errp)
+
+static int coroutine_fn nbd_co_send_chunk_error(NBDClient *client,
+ NBDRequest *request,
+ uint32_t error,
+ const char *msg,
+ Error **errp)
{
+ NBDReply hdr;
NBDStructuredError chunk;
int nbd_err = system_errno_to_nbd_errno(error);
struct iovec iov[] = {
+ {.iov_base = &hdr},
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
{.iov_base = (char *)msg, .iov_len = msg ? strlen(msg) : 0},
};
assert(nbd_err);
- trace_nbd_co_send_structured_error(handle, nbd_err,
- nbd_err_lookup(nbd_err), msg ? msg : "");
- set_be_chunk(&chunk.h, NBD_REPLY_FLAG_DONE, NBD_REPLY_TYPE_ERROR, handle,
- sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len);
+ trace_nbd_co_send_chunk_error(request->cookie, nbd_err,
+ nbd_err_lookup(nbd_err), msg ? msg : "");
+ set_be_chunk(client, iov, 3, NBD_REPLY_FLAG_DONE,
+ NBD_REPLY_TYPE_ERROR, request);
stl_be_p(&chunk.error, nbd_err);
- stw_be_p(&chunk.message_length, iov[1].iov_len);
+ stw_be_p(&chunk.message_length, iov[2].iov_len);
- return nbd_co_send_iov(client, iov, 1 + !!iov[1].iov_len, errp);
+ return nbd_co_send_iov(client, iov, 3, errp);
}
/* Do a sparse read and send the structured reply to the client.
* reported to the client, at which point this function succeeds.
*/
static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
- uint64_t handle,
+ NBDRequest *request,
uint64_t offset,
uint8_t *data,
size_t size,
char *msg = g_strdup_printf("unable to check for holes: %s",
strerror(-status));
- ret = nbd_co_send_structured_error(client, handle, -status, msg,
- errp);
+ ret = nbd_co_send_chunk_error(client, request, -status, msg, errp);
g_free(msg);
return ret;
}
assert(pnum && pnum <= size - progress);
final = progress + pnum == size;
if (status & BDRV_BLOCK_ZERO) {
+ NBDReply hdr;
NBDStructuredReadHole chunk;
struct iovec iov[] = {
+ {.iov_base = &hdr},
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
};
- trace_nbd_co_send_structured_read_hole(handle, offset + progress,
- pnum);
- set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
- NBD_REPLY_TYPE_OFFSET_HOLE,
- handle, sizeof(chunk) - sizeof(chunk.h));
+ trace_nbd_co_send_chunk_read_hole(request->cookie,
+ offset + progress, pnum);
+ set_be_chunk(client, iov, 2,
+ final ? NBD_REPLY_FLAG_DONE : 0,
+ NBD_REPLY_TYPE_OFFSET_HOLE, request);
stq_be_p(&chunk.offset, offset + progress);
stl_be_p(&chunk.length, pnum);
- ret = nbd_co_send_iov(client, iov, 1, errp);
+ ret = nbd_co_send_iov(client, iov, 2, errp);
} else {
ret = blk_co_pread(exp->common.blk, offset + progress, pnum,
data + progress, 0);
error_setg_errno(errp, -ret, "reading from file failed");
break;
}
- ret = nbd_co_send_structured_read(client, handle, offset + progress,
- data + progress, pnum, final,
- errp);
+ ret = nbd_co_send_chunk_read(client, request, offset + progress,
+ data + progress, pnum, final, errp);
}
if (ret < 0) {
* @last controls whether NBD_REPLY_FLAG_DONE is sent.
*/
static int coroutine_fn
-nbd_co_send_extents(NBDClient *client, uint64_t handle, NBDExtentArray *ea,
+nbd_co_send_extents(NBDClient *client, NBDRequest *request, NBDExtentArray *ea,
bool last, uint32_t context_id, Error **errp)
{
+ NBDReply hdr;
NBDStructuredMeta chunk;
struct iovec iov[] = {
+ {.iov_base = &hdr},
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
{.iov_base = ea->extents, .iov_len = ea->count * sizeof(ea->extents[0])}
};
nbd_extent_array_convert_to_be(ea);
- trace_nbd_co_send_extents(handle, ea->count, context_id, ea->total_length,
- last);
- set_be_chunk(&chunk.h, last ? NBD_REPLY_FLAG_DONE : 0,
- NBD_REPLY_TYPE_BLOCK_STATUS,
- handle, sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len);
+ trace_nbd_co_send_extents(request->cookie, ea->count, context_id,
+ ea->total_length, last);
+ set_be_chunk(client, iov, 3, last ? NBD_REPLY_FLAG_DONE : 0,
+ NBD_REPLY_TYPE_BLOCK_STATUS, request);
stl_be_p(&chunk.context_id, context_id);
- return nbd_co_send_iov(client, iov, 2, errp);
+ return nbd_co_send_iov(client, iov, 3, errp);
}
/* Get block status from the exported device and send it to the client */
static int
-coroutine_fn nbd_co_send_block_status(NBDClient *client, uint64_t handle,
+coroutine_fn nbd_co_send_block_status(NBDClient *client, NBDRequest *request,
BlockBackend *blk, uint64_t offset,
uint32_t length, bool dont_fragment,
bool last, uint32_t context_id,
ret = blockalloc_to_extents(blk, offset, length, ea);
}
if (ret < 0) {
- return nbd_co_send_structured_error(
- client, handle, -ret, "can't get block status", errp);
+ return nbd_co_send_chunk_error(client, request, -ret,
+ "can't get block status", errp);
}
- return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
+ return nbd_co_send_extents(client, request, ea, last, context_id, errp);
}
/* Populate @ea from a dirty bitmap. */
bdrv_dirty_bitmap_unlock(bitmap);
}
-static int coroutine_fn nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
- BdrvDirtyBitmap *bitmap, uint64_t offset,
- uint32_t length, bool dont_fragment, bool last,
- uint32_t context_id, Error **errp)
+static int coroutine_fn nbd_co_send_bitmap(NBDClient *client,
+ NBDRequest *request,
+ BdrvDirtyBitmap *bitmap,
+ uint64_t offset,
+ uint32_t length, bool dont_fragment,
+ bool last, uint32_t context_id,
+ Error **errp)
{
unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents);
bitmap_to_extents(bitmap, offset, length, ea);
- return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
+ return nbd_co_send_extents(client, request, ea, last, context_id, errp);
}
/* nbd_co_receive_request
return ret;
}
- trace_nbd_co_receive_request_decode_type(request->handle, request->type,
+ trace_nbd_co_receive_request_decode_type(request->cookie, request->type,
nbd_cmd_lookup(request->type));
if (request->type != NBD_CMD_WRITE) {
}
req->complete = true;
- trace_nbd_co_receive_request_payload_received(request->handle,
+ trace_nbd_co_receive_request_payload_received(request->cookie,
request->len);
}
* Returns 0 if connection is still live, -errno on failure to talk to client
*/
static coroutine_fn int nbd_send_generic_reply(NBDClient *client,
- uint64_t handle,
+ NBDRequest *request,
int ret,
const char *error_msg,
Error **errp)
{
if (client->structured_reply && ret < 0) {
- return nbd_co_send_structured_error(client, handle, -ret, error_msg,
- errp);
+ return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp);
} else {
- return nbd_co_send_simple_reply(client, handle, ret < 0 ? -ret : 0,
+ return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
NULL, 0, errp);
}
}
if (request->flags & NBD_CMD_FLAG_FUA) {
ret = blk_co_flush(exp->common.blk);
if (ret < 0) {
- return nbd_send_generic_reply(client, request->handle, ret,
+ return nbd_send_generic_reply(client, request, ret,
"flush failed", errp);
}
}
if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
request->len)
{
- return nbd_co_send_sparse_read(client, request->handle, request->from,
+ return nbd_co_send_sparse_read(client, request, request->from,
data, request->len, errp);
}
ret = blk_co_pread(exp->common.blk, request->from, request->len, data, 0);
if (ret < 0) {
- return nbd_send_generic_reply(client, request->handle, ret,
+ return nbd_send_generic_reply(client, request, ret,
"reading from file failed", errp);
}
if (client->structured_reply) {
if (request->len) {
- return nbd_co_send_structured_read(client, request->handle,
- request->from, data,
- request->len, true, errp);
+ return nbd_co_send_chunk_read(client, request, request->from, data,
+ request->len, true, errp);
} else {
- return nbd_co_send_structured_done(client, request->handle, errp);
+ return nbd_co_send_chunk_done(client, request, errp);
}
} else {
- return nbd_co_send_simple_reply(client, request->handle, 0,
+ return nbd_co_send_simple_reply(client, request, 0,
data, request->len, errp);
}
}
ret = blk_co_preadv(exp->common.blk, request->from, request->len,
NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
- return nbd_send_generic_reply(client, request->handle, ret,
+ return nbd_send_generic_reply(client, request, ret,
"caching data failed", errp);
}
}
ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
flags);
- return nbd_send_generic_reply(client, request->handle, ret,
+ return nbd_send_generic_reply(client, request, ret,
"writing to file failed", errp);
case NBD_CMD_WRITE_ZEROES:
}
ret = blk_co_pwrite_zeroes(exp->common.blk, request->from, request->len,
flags);
- return nbd_send_generic_reply(client, request->handle, ret,
+ return nbd_send_generic_reply(client, request, ret,
"writing to file failed", errp);
case NBD_CMD_DISC:
case NBD_CMD_FLUSH:
ret = blk_co_flush(exp->common.blk);
- return nbd_send_generic_reply(client, request->handle, ret,
+ return nbd_send_generic_reply(client, request, ret,
"flush failed", errp);
case NBD_CMD_TRIM:
if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) {
ret = blk_co_flush(exp->common.blk);
}
- return nbd_send_generic_reply(client, request->handle, ret,
+ return nbd_send_generic_reply(client, request, ret,
"discard failed", errp);
case NBD_CMD_BLOCK_STATUS:
if (!request->len) {
- return nbd_send_generic_reply(client, request->handle, -EINVAL,
+ return nbd_send_generic_reply(client, request, -EINVAL,
"need non-zero length", errp);
}
if (client->export_meta.count) {
int contexts_remaining = client->export_meta.count;
if (client->export_meta.base_allocation) {
- ret = nbd_co_send_block_status(client, request->handle,
+ ret = nbd_co_send_block_status(client, request,
exp->common.blk,
request->from,
request->len, dont_fragment,
}
if (client->export_meta.allocation_depth) {
- ret = nbd_co_send_block_status(client, request->handle,
+ ret = nbd_co_send_block_status(client, request,
exp->common.blk,
request->from, request->len,
dont_fragment,
if (!client->export_meta.bitmaps[i]) {
continue;
}
- ret = nbd_co_send_bitmap(client, request->handle,
+ ret = nbd_co_send_bitmap(client, request,
client->exp->export_bitmaps[i],
request->from, request->len,
dont_fragment, !--contexts_remaining,
return 0;
} else {
- return nbd_send_generic_reply(client, request->handle, -EINVAL,
+ return nbd_send_generic_reply(client, request, -EINVAL,
"CMD_BLOCK_STATUS not negotiated",
errp);
}
default:
msg = g_strdup_printf("invalid request type (%" PRIu32 ") received",
request->type);
- ret = nbd_send_generic_reply(client, request->handle, -EINVAL, msg,
+ ret = nbd_send_generic_reply(client, request, -EINVAL, msg,
errp);
g_free(msg);
return ret;
Error *export_err = local_err;
local_err = NULL;
- ret = nbd_send_generic_reply(client, request.handle, -EINVAL,
+ ret = nbd_send_generic_reply(client, &request, -EINVAL,
error_get_pretty(export_err), &local_err);
error_free(export_err);
} else {
nbd_client_loop_ret(int ret, const char *error) "NBD loop returned %d: %s"
nbd_client_clear_queue(void) "Clearing NBD queue"
nbd_client_clear_socket(void) "Clearing NBD socket"
-nbd_send_request(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu32 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
-nbd_receive_simple_reply(int32_t error, const char *errname, uint64_t handle) "Got simple reply: { .error = %" PRId32 " (%s), handle = %" PRIu64" }"
-nbd_receive_structured_reply_chunk(uint16_t flags, uint16_t type, const char *name, uint64_t handle, uint32_t length) "Got structured reply chunk: { flags = 0x%" PRIx16 ", type = %d (%s), handle = %" PRIu64 ", length = %" PRIu32 " }"
+nbd_send_request(uint64_t from, uint32_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu32 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
+nbd_receive_simple_reply(int32_t error, const char *errname, uint64_t cookie) "Got simple reply: { .error = %" PRId32 " (%s), cookie = %" PRIu64" }"
+nbd_receive_structured_reply_chunk(uint16_t flags, uint16_t type, const char *name, uint64_t cookie, uint32_t length) "Got structured reply chunk: { flags = 0x%" PRIx16 ", type = %d (%s), cookie = %" PRIu64 ", length = %" PRIu32 " }"
# common.c
nbd_unknown_error(int err) "Squashing unexpected error %d to EINVAL"
nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint32_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu32 " }"
nbd_blk_aio_attached(const char *name, void *ctx) "Export %s: Attaching clients to AIO context %p"
nbd_blk_aio_detach(const char *name, void *ctx) "Export %s: Detaching clients from AIO context %p"
-nbd_co_send_simple_reply(uint64_t handle, uint32_t error, const char *errname, int len) "Send simple reply: handle = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
-nbd_co_send_structured_done(uint64_t handle) "Send structured reply done: handle = %" PRIu64
-nbd_co_send_structured_read(uint64_t handle, uint64_t offset, void *data, size_t size) "Send structured read data reply: handle = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
-nbd_co_send_structured_read_hole(uint64_t handle, uint64_t offset, size_t size) "Send structured read hole reply: handle = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
-nbd_co_send_extents(uint64_t handle, unsigned int extents, uint32_t id, uint64_t length, int last) "Send block status reply: handle = %" PRIu64 ", extents = %u, context = %d (extents cover %" PRIu64 " bytes, last chunk = %d)"
-nbd_co_send_structured_error(uint64_t handle, int err, const char *errname, const char *msg) "Send structured error reply: handle = %" PRIu64 ", error = %d (%s), msg = '%s'"
-nbd_co_receive_request_decode_type(uint64_t handle, uint16_t type, const char *name) "Decoding type: handle = %" PRIu64 ", type = %" PRIu16 " (%s)"
-nbd_co_receive_request_payload_received(uint64_t handle, uint32_t len) "Payload received: handle = %" PRIu64 ", len = %" PRIu32
+nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, int len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
+nbd_co_send_chunk_done(uint64_t cookie) "Send structured reply done: cookie = %" PRIu64
+nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, size_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
+nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, size_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
+nbd_co_send_extents(uint64_t cookie, unsigned int extents, uint32_t id, uint64_t length, int last) "Send block status reply: cookie = %" PRIu64 ", extents = %u, context = %d (extents cover %" PRIu64 " bytes, last chunk = %d)"
+nbd_co_send_chunk_error(uint64_t cookie, int err, const char *errname, const char *msg) "Send structured error reply: cookie = %" PRIu64 ", error = %d (%s), msg = '%s'"
+nbd_co_receive_request_decode_type(uint64_t cookie, uint16_t type, const char *name) "Decoding type: cookie = %" PRIu64 ", type = %" PRIu16 " (%s)"
+nbd_co_receive_request_payload_received(uint64_t cookie, uint32_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu32
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32
nbd_trip(void) "Reading request"
BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
BIT_ULL(VIRTIO_NET_F_STATUS) |
BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
BIT_ULL(VIRTIO_NET_F_MQ) |
BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
- uint8_t cmd, const void *data,
- size_t data_size)
+ uint8_t cmd, const struct iovec *data_sg,
+ size_t data_num)
{
const struct virtio_net_ctrl_hdr ctrl = {
.class = class,
.cmd = cmd,
};
+ size_t data_size = iov_size(data_sg, data_num);
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+ /* pack the CVQ command header */
memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
- memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
- return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
+    /* pack the command-specific data of the CVQ command */
+ iov_to_buf(data_sg, data_num, 0,
+ s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
+
+ return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
sizeof(virtio_net_ctrl_ack));
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ const struct iovec data = {
+ .iov_base = (void *)n->mac,
+ .iov_len = sizeof(n->mac),
+ };
ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
- n->mac, sizeof(n->mac));
+ &data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
- return *s->status != VIRTIO_NET_OK;
+ /*
+ * According to VirtIO standard, "The device MUST have an
+ * empty MAC filtering table on reset.".
+ *
+ * Therefore, there is no need to send this CVQ command if the
+ * driver also sets an empty MAC filter table, which aligns with
+ * the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
+ n->mac_table.in_use == 0) {
+ return 0;
+ }
+
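+    /*
+     * n->mac_table.macs stores the unicast entries first, followed by
+     * the multicast entries; first_multi is the index of the first
+     * multicast entry.
+     */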
+ uint32_t uni_entries = n->mac_table.first_multi,
+ uni_macs_size = uni_entries * ETH_ALEN,
+ mul_entries = n->mac_table.in_use - uni_entries,
+ mul_macs_size = mul_entries * ETH_ALEN;
+ struct virtio_net_ctrl_mac uni = {
+ .entries = cpu_to_le32(uni_entries),
+ };
+ struct virtio_net_ctrl_mac mul = {
+ .entries = cpu_to_le32(mul_entries),
+ };
+ const struct iovec data[] = {
+ {
+ .iov_base = &uni,
+ .iov_len = sizeof(uni),
+ }, {
+ .iov_base = n->mac_table.macs,
+ .iov_len = uni_macs_size,
+ }, {
+ .iov_base = &mul,
+ .iov_len = sizeof(mul),
+ }, {
+ .iov_base = &n->mac_table.macs[uni_macs_size],
+ .iov_len = mul_macs_size,
+ },
+ };
+ ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
+ VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_TABLE_SET,
+ data, ARRAY_SIZE(data));
+ if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
}
return 0;
}
mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
+ const struct iovec data = {
+ .iov_base = &mq,
+ .iov_len = sizeof(mq),
+ };
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
- sizeof(mq));
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
+ &data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
- return *s->status != VIRTIO_NET_OK;
+ return 0;
}
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
}
offloads = cpu_to_le64(n->curr_guest_offloads);
+ const struct iovec data = {
+ .iov_base = &offloads,
+ .iov_len = sizeof(offloads),
+ };
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
- &offloads, sizeof(offloads));
+ &data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
+ uint8_t cmd,
+ uint8_t on)
+{
+ const struct iovec data = {
+ .iov_base = &on,
+ .iov_len = sizeof(on),
+ };
+ return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
+ cmd, &data, 1);
+}
+
+static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
+ const VirtIONet *n)
+{
+ ssize_t dev_written;
+
+ if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
+ return 0;
+ }
+
+ /*
+ * According to virtio_net_reset(), device turns promiscuous mode
+ * on by default.
+ *
+     * Additionally, according to VirtIO standard, "Since there are
+ * no guarantees, it can use a hash filter or silently switch to
+ * allmulti or promiscuous mode if it is given too many addresses.".
+     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
+ * non-multicast MAC addresses, indicating that promiscuous mode
+ * should be enabled.
+ *
+ * Therefore, QEMU should only send this CVQ command if the
+ * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
+     * which sets promiscuous mode off, different from the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ if (!n->mac_table.uni_overflow && !n->promisc) {
+ dev_written = vhost_vdpa_net_load_rx_mode(s,
+ VIRTIO_NET_CTRL_RX_PROMISC, 0);
+ if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+
+ /*
+ * According to virtio_net_reset(), device turns all-multicast mode
+ * off by default.
+ *
+ * According to VirtIO standard, "Since there are no guarantees,
+ * it can use a hash filter or silently switch to allmulti or
+ * promiscuous mode if it is given too many addresses.". QEMU marks
+     * `n->mac_table.multi_overflow` if the guest sets too many
+     * multicast MAC addresses.
+ *
+ * Therefore, QEMU should only send this CVQ command if the
+ * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
+ * which sets all-multicast mode on, different from the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ if (n->mac_table.multi_overflow || n->allmulti) {
+ dev_written = vhost_vdpa_net_load_rx_mode(s,
+ VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
+ if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+
+ if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
+ return 0;
+ }
+
+ /*
+ * According to virtio_net_reset(), device turns all-unicast mode
+ * off by default.
+ *
+ * Therefore, QEMU should only send this CVQ command if the driver
+ * sets all-unicast mode on, different from the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ if (n->alluni) {
+ dev_written = vhost_vdpa_net_load_rx_mode(s,
+ VIRTIO_NET_CTRL_RX_ALLUNI, 1);
+        if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+
+ /*
+ * According to virtio_net_reset(), device turns non-multicast mode
+ * off by default.
+ *
+ * Therefore, QEMU should only send this CVQ command if the driver
+ * sets non-multicast mode on, different from the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ if (n->nomulti) {
+ dev_written = vhost_vdpa_net_load_rx_mode(s,
+ VIRTIO_NET_CTRL_RX_NOMULTI, 1);
+        if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
- return *s->status != VIRTIO_NET_OK;
+ /*
+ * According to virtio_net_reset(), device turns non-unicast mode
+ * off by default.
+ *
+ * Therefore, QEMU should only send this CVQ command if the driver
+ * sets non-unicast mode on, different from the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ if (n->nouni) {
+ dev_written = vhost_vdpa_net_load_rx_mode(s,
+ VIRTIO_NET_CTRL_RX_NOUNI, 1);
+        if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+
+ /*
+ * According to virtio_net_reset(), device turns non-broadcast mode
+ * off by default.
+ *
+ * Therefore, QEMU should only send this CVQ command if the driver
+ * sets non-broadcast mode on, different from the device's defaults.
+ *
+ * Note that the device's defaults can mismatch the driver's
+ * configuration only at live migration.
+ */
+ if (n->nobcast) {
+ dev_written = vhost_vdpa_net_load_rx_mode(s,
+ VIRTIO_NET_CTRL_RX_NOBCAST, 1);
+        if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+
+ return 0;
}
static int vhost_vdpa_net_load(NetClientState *nc)
if (unlikely(r)) {
return r;
}
+ r = vhost_vdpa_net_load_rx(s, n);
+ if (unlikely(r)) {
+ return r;
+ }
return 0;
}
.check_peer_type = vhost_vdpa_check_peer_type,
};
+/*
+ * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
+ * the vdpa device.
+ *
+ * Considering that QEMU cannot send the entire filter table to the
+ * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
+ * command to enable promiscuous mode to receive all packets,
+ * according to VirtIO standard, "Since there are no guarantees,
+ * it can use a hash filter or silently switch to allmulti or
+ * promiscuous mode if it is given too many addresses.".
+ *
+ * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
+ * marks `n->mac_table.x_overflow` accordingly, it should have
+ * the same effect on the device model to receive
+ * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
+ * The same applies to multicast MAC addresses.
+ *
+ * Therefore, QEMU can provide the device model with a fake
+ * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
+ * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
+ * MAC addresses. This ensures that the device model marks
+ * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
+ * allowing all packets to be received, which aligns with the
+ * state of the vdpa device.
+ */
+static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
+ VirtQueueElement *elem,
+ struct iovec *out)
+{
+ struct virtio_net_ctrl_mac mac_data, *mac_ptr;
+ struct virtio_net_ctrl_hdr *hdr_ptr;
+ uint32_t cursor;
+ ssize_t r;
+
+ /* parse the non-multicast MAC address entries from CVQ command */
+ cursor = sizeof(*hdr_ptr);
+ r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
+ &mac_data, sizeof(mac_data));
+ if (unlikely(r != sizeof(mac_data))) {
+ /*
+         * If the CVQ command is invalid, simulate the vdpa device
+         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
+ */
+ *s->status = VIRTIO_NET_ERR;
+ return sizeof(*s->status);
+ }
+ cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
+
+ /* parse the multicast MAC address entries from CVQ command */
+ r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
+ &mac_data, sizeof(mac_data));
+    if (unlikely(r != sizeof(mac_data))) {
+ /*
+         * If the CVQ command is invalid, simulate the vdpa device
+         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
+ */
+ *s->status = VIRTIO_NET_ERR;
+ return sizeof(*s->status);
+ }
+ cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
+
+ /* validate the CVQ command */
+ if (iov_size(elem->out_sg, elem->out_num) != cursor) {
+ /*
+         * If the CVQ command is invalid, simulate the vdpa device
+         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
+ */
+ *s->status = VIRTIO_NET_ERR;
+ return sizeof(*s->status);
+ }
+
+ /*
+ * According to VirtIO standard, "Since there are no guarantees,
+ * it can use a hash filter or silently switch to allmulti or
+ * promiscuous mode if it is given too many addresses.".
+ *
+ * Therefore, considering that QEMU is unable to send the entire
+ * filter table to the vdpa device, it should send the
+ * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
+ */
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ if (*s->status != VIRTIO_NET_OK) {
+ return sizeof(*s->status);
+ }
+
+ /*
+ * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
+ * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
+ * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
+ * multicast MAC addresses.
+ *
+ * By doing so, the device model can mark `n->mac_table.uni_overflow`
+ * and `n->mac_table.multi_overflow`, enabling all packets to be
+ * received, which aligns with the state of the vdpa device.
+ */
+ cursor = 0;
+ uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
+ fake_mul_entries = MAC_TABLE_ENTRIES + 1,
+ fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
+ sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
+ sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
+
+ assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
+ out->iov_len = fake_cvq_size;
+
+ /* pack the header for fake CVQ command */
+ hdr_ptr = out->iov_base + cursor;
+ hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
+ hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
+ cursor += sizeof(*hdr_ptr);
+
+ /*
+ * Pack the non-multicast MAC addresses part for fake CVQ command.
+ *
+ * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
+     * addresses provided in the CVQ command. Therefore, only the
+     * entries field needs to be prepared in the CVQ command.
+ */
+ mac_ptr = out->iov_base + cursor;
+ mac_ptr->entries = cpu_to_le32(fake_uni_entries);
+ cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
+
+ /*
+ * Pack the multicast MAC addresses part for fake CVQ command.
+ *
+ * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
+     * addresses provided in the CVQ command. Therefore, only the
+     * entries field needs to be prepared in the CVQ command.
+ */
+ mac_ptr = out->iov_base + cursor;
+ mac_ptr->entries = cpu_to_le32(fake_mul_entries);
+
+ /*
+     * Simulate QEMU polling a used buffer from the vdpa device for
+     * the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
+ */
+ return sizeof(*s->status);
+}
+
/**
* Validate and copy control virtqueue commands.
*
{
VhostVDPAState *s = opaque;
size_t in_len;
+ const struct virtio_net_ctrl_hdr *ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
/* Out buffer sent to both the vdpa device and the device model */
struct iovec out = {
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
s->cvq_cmd_out_buffer,
- vhost_vdpa_net_cvq_cmd_len());
- if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
+ vhost_vdpa_net_cvq_cmd_page_len());
+
+ ctrl = s->cvq_cmd_out_buffer;
+ if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
/*
* Guest announce capability is emulated by qemu, so don't forward to
* the device.
*/
dev_written = sizeof(status);
*s->status = VIRTIO_NET_OK;
+ } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
+ ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
+ iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
+ /*
+ * Due to the size limitation of the out buffer sent to the vdpa device,
+ * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
+ * MAC addresses set by the driver for the filter table can cause
+ * truncation of the CVQ command in QEMU. As a result, the vdpa device
+ * rejects the flawed CVQ command.
+ *
+ * Therefore, QEMU must handle this situation instead of sending
+ * the CVQ command directly.
+ */
+ dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
+ &out);
+ if (unlikely(dev_written < 0)) {
+ goto out;
+ }
} else {
dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
if (unlikely(dev_written < 0)) {
error_report("Bad device CVQ written length");
}
vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
- g_free(elem);
+ /*
+ * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
+ * the function successfully forwards the CVQ command, indicated
+ * by a non-negative value of `dev_written`. Otherwise, it still
+ * belongs to SVQ.
+ * This function should free the `elem` only when it owns it.
+ */
+ if (dev_written >= 0) {
+ g_free(elem);
+ }
return dev_written < 0 ? dev_written : 0;
}
#include "qemu/cutils.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
+#include "qemu/module.h"
#ifdef CONFIG_LINUX
#include <sys/prctl.h>
}
break;
case QEMU_OPTION_chroot:
+ warn_report("option is deprecated, use '-run-with chroot=...' instead");
chroot_dir = optarg;
break;
case QEMU_OPTION_daemonize:
case QEMU_OPTION_asyncteardown:
init_async_teardown();
break;
+#endif
case QEMU_OPTION_run_with: {
+ const char *str;
QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("run-with"),
optarg, false);
if (!opts) {
exit(1);
}
+#if defined(CONFIG_LINUX)
if (qemu_opt_get_bool(opts, "async-teardown", false)) {
init_async_teardown();
}
+#endif
+ str = qemu_opt_get(opts, "chroot");
+ if (str) {
+ chroot_dir = str;
+ }
break;
}
-#endif
default:
return -1;
}
return -ENOSYS;
#endif
}
+
+static QemuOptsList qemu_run_with_opts = {
+ .name = "run-with",
+ .head = QTAILQ_HEAD_INITIALIZER(qemu_run_with_opts.head),
+ .desc = {
+#if defined(CONFIG_LINUX)
+ {
+ .name = "async-teardown",
+ .type = QEMU_OPT_BOOL,
+ },
+#endif
+ {
+ .name = "chroot",
+ .type = QEMU_OPT_STRING,
+ },
+ { /* end of list */ }
+ },
+};
+
+static void register_runwith(void)
+{
+ qemu_add_opts(&qemu_run_with_opts);
+}
+opts_init(register_runwith);
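+
+/*
+ * Usage sketch (option names as registered above; the chroot path is
+ * made up for illustration):
+ *
+ *   qemu-system-x86_64 -run-with async-teardown=on,chroot=/srv/vm-jail ...
+ */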
##
# @AudiodevPipewirePerDirectionOptions:
#
-# Options of the Pipewire backend that are used for both playback and
+# Options of the PipeWire backend that are used for both playback and
# recording.
#
# @name: name of the sink/source to use
#
-# @stream-name: name of the Pipewire stream created by qemu. Can be
-# used to identify the stream in Pipewire when you create multiple
-# Pipewire devices or run multiple qemu instances (default:
+# @stream-name: name of the PipeWire stream created by qemu. Can be
+# used to identify the stream in PipeWire when you create multiple
+# PipeWire devices or run multiple qemu instances (default:
# audiodev's id)
#
-# @latency: latency you want Pipewire to achieve in microseconds
+# @latency: latency you want PipeWire to achieve in microseconds
# (default 46000)
#
# Since: 8.1
##
# @AudiodevPipewireOptions:
#
-# Options of the Pipewire audio backend.
+# Options of the PipeWire audio backend.
#
# @in: options of the capture stream
#
#define MBR_SIZE 512
-static int verbose;
static char *srcpath;
static SocketAddress *saddr;
static int persistent = 0;
return NULL;
}
+struct NbdClientOpts {
+ char *device;
+ bool fork_process;
+ bool verbose;
+};
+
static void *nbd_client_thread(void *arg)
{
- char *device = arg;
+ struct NbdClientOpts *opts = arg;
NBDExportInfo info = { .request_sizes = false, .name = g_strdup("") };
QIOChannelSocket *sioc;
int fd = -1;
goto out;
}
- fd = open(device, O_RDWR);
+ fd = open(opts->device, O_RDWR);
if (fd < 0) {
/* Linux-only, we can use %m in printf. */
- error_report("Failed to open %s: %m", device);
+ error_report("Failed to open %s: %m", opts->device);
goto out;
}
}
/* update partition table */
- pthread_create(&show_parts_thread, NULL, show_parts, device);
+ pthread_create(&show_parts_thread, NULL, show_parts, opts->device);
- if (verbose) {
+ if (opts->verbose && !opts->fork_process) {
fprintf(stderr, "NBD device %s is now connected to %s\n",
- device, srcpath);
+ opts->device, srcpath);
} else {
/* Close stderr so that the qemu-nbd process exits. */
- dup2(STDOUT_FILENO, STDERR_FILENO);
+ if (dup2(STDOUT_FILENO, STDERR_FILENO) < 0) {
+ error_report("Could not set stderr to /dev/null: %s",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
}
if (nbd_client(fd) < 0) {
const char *tlshostname = NULL;
bool imageOpts = false;
bool writethrough = false; /* Client will flush as needed. */
+ bool verbose = false;
bool fork_process = false;
bool list = false;
- int old_stderr = -1;
unsigned socket_activation;
const char *pid_file_name = NULL;
const char *selinux_label = NULL;
}
break;
case 'v':
- verbose = 1;
+ verbose = true;
break;
case 'V':
version(argv[0]);
error_report("Failed to fork: %s", strerror(errno));
exit(EXIT_FAILURE);
} else if (pid == 0) {
- close(stderr_fd[0]);
+ int saved_errno;
- /* Remember parent's stderr if we will be restoring it. */
- if (fork_process) {
- old_stderr = dup(STDERR_FILENO);
- }
+ close(stderr_fd[0]);
ret = qemu_daemon(1, 0);
+ saved_errno = errno; /* dup2 will overwrite error below */
/* Temporarily redirect stderr to the parent's pipe... */
- dup2(stderr_fd[1], STDERR_FILENO);
+ if (dup2(stderr_fd[1], STDERR_FILENO) < 0) {
+ char str[256];
+ snprintf(str, sizeof(str),
+ "%s: Failed to link stderr to the pipe: %s\n",
+ g_get_prgname(), strerror(errno));
+ /*
+ * We are unable to use error_report() here as we need to get
+ * stderr pointed to the parent's pipe. Write to that pipe
+ * manually.
+ */
+ ret = write(stderr_fd[1], str, strlen(str));
+ exit(EXIT_FAILURE);
+ }
+
if (ret < 0) {
- error_report("Failed to daemonize: %s", strerror(errno));
+ error_report("Failed to daemonize: %s", strerror(saved_errno));
exit(EXIT_FAILURE);
}
if (device) {
#if HAVE_NBD_DEVICE
int ret;
+ struct NbdClientOpts opts = {
+ .device = device,
+ .fork_process = fork_process,
+ .verbose = verbose,
+ };
- ret = pthread_create(&client_thread, NULL, nbd_client_thread, device);
+ ret = pthread_create(&client_thread, NULL, nbd_client_thread, &opts);
if (ret != 0) {
error_report("Failed to create client thread: %s", strerror(ret));
exit(EXIT_FAILURE);
}
if (fork_process) {
- dup2(old_stderr, STDERR_FILENO);
- close(old_stderr);
+ if (dup2(STDOUT_FILENO, STDERR_FILENO) < 0) {
+ error_report("Could not set stderr to /dev/null: %s",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
}
state = RUNNING;
to honor this value but actual latencies may be lower or higher.
``-audiodev pipewire,id=id[,prop[=value][,...]]``
- Creates a backend using Pipewire. This backend is available on
+ Creates a backend using PipeWire. This backend is available on
most systems.
- Pipewire specific options are:
+ PipeWire specific options are:
``in|out.latency=usecs``
Desired latency in microseconds.
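+
+ For example, a minimal invocation might look like this (illustrative
+ only; the backend treats ``out.latency`` as a target, not a guarantee)::
+
+ -audiodev pipewire,id=pw0,out.latency=46000 -device AC97,audiodev=pw0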
#ifndef _WIN32
DEF("chroot", HAS_ARG, QEMU_OPTION_chroot, \
- "-chroot dir chroot to dir just before starting the VM\n",
+ "-chroot dir chroot to dir just before starting the VM (deprecated)\n",
QEMU_ARCH_ALL)
#endif
SRST
``-chroot dir``
+ Deprecated, use '-run-with chroot=...' instead.
Immediately before starting guest execution, chroot to the specified
directory. Especially useful in combination with -runas.
ERST
This option is deprecated and should no longer be used. The new option
``-run-with async-teardown=on`` is a replacement.
ERST
+#endif
+#ifdef CONFIG_POSIX
DEF("run-with", HAS_ARG, QEMU_OPTION_run_with,
- "-run-with async-teardown[=on|off]\n"
- " misc QEMU process lifecycle options\n"
- " async-teardown=on enables asynchronous teardown\n",
+ "-run-with [async-teardown=on|off][,chroot=dir]\n"
+ " Set miscellaneous QEMU process lifecycle options:\n"
+ " async-teardown=on enables asynchronous teardown (Linux only)\n"
+ " chroot=dir chroot to dir just before starting the VM\n",
QEMU_ARCH_ALL)
SRST
-``-run-with``
+``-run-with [async-teardown=on|off][,chroot=dir]``
Set QEMU process lifecycle options.
``async-teardown=on`` enables asynchronous teardown. A new process called
performed correctly. This only works if the cleanup process is not
forcefully killed with SIGKILL before the main QEMU process has
terminated completely.
+
+ ``chroot=dir`` chroots to the specified directory immediately before
+ starting guest execution. This is especially useful in combination
+ with -runas.
ERST
#endif
bool delimit_response;
bool frozen;
GList *blockedrpcs;
+ GList *allowedrpcs;
char *state_filepath_isfrozen;
struct {
const char *log_filepath;
#endif
" -b, --block-rpcs comma-separated list of RPCs to disable (no spaces,\n"
" use \"help\" to list available RPCs)\n"
+" -a, --allow-rpcs comma-separated list of RPCs to enable (no spaces,\n"
+" use \"help\" to list available RPCs)\n"
" -D, --dump-conf dump a qemu-ga config file based on current config\n"
" options / command-line parameters to stdout\n"
" -r, --retry-path attempt re-opening path if it's unavailable or closed\n"
}
/* disable commands that aren't safe for fsfreeze */
-static void ga_disable_not_allowed(const QmpCommand *cmd, void *opaque)
+static void ga_disable_not_allowed_freeze(const QmpCommand *cmd, void *opaque)
{
bool allowed = false;
int i = 0;
/* [re-]enable all commands, except those explicitly blocked by user */
static void ga_enable_non_blocked(const QmpCommand *cmd, void *opaque)
{
- GList *blockedrpcs = opaque;
+ GAState *s = opaque;
+ GList *blockedrpcs = s->blockedrpcs;
+ GList *allowedrpcs = s->allowedrpcs;
const char *name = qmp_command_name(cmd);
- if (g_list_find_custom(blockedrpcs, name, ga_strcmp) == NULL &&
- !qmp_command_is_enabled(cmd)) {
+ if (g_list_find_custom(blockedrpcs, name, ga_strcmp) == NULL) {
+ if (qmp_command_is_enabled(cmd)) {
+ return;
+ }
+
+ if (allowedrpcs &&
+ g_list_find_custom(allowedrpcs, name, ga_strcmp) == NULL) {
+ return;
+ }
+
g_debug("enabling command: %s", name);
qmp_enable_command(&ga_commands, name);
}
}
+/* disable commands that aren't allowed */
+static void ga_disable_not_allowed(const QmpCommand *cmd, void *opaque)
+{
+ GList *allowedrpcs = opaque;
+ const char *name = qmp_command_name(cmd);
+
+ if (g_list_find_custom(allowedrpcs, name, ga_strcmp) == NULL) {
+ g_debug("disabling command: %s", name);
+ qmp_disable_command(&ga_commands, name, "the command is not allowed");
+ }
+}
+
static bool ga_create_file(const char *path)
{
int fd = open(path, O_CREAT | O_WRONLY, S_IWUSR | S_IRUSR);
return;
}
/* disable all forbidden (for frozen state) commands */
- qmp_for_each_command(&ga_commands, ga_disable_not_allowed, NULL);
+ qmp_for_each_command(&ga_commands, ga_disable_not_allowed_freeze, NULL);
g_warning("disabling logging due to filesystem freeze");
ga_disable_logging(s);
s->frozen = true;
s->deferred_options.pid_filepath = NULL;
}
- /* enable all disabled, non-blocked commands */
- qmp_for_each_command(&ga_commands, ga_enable_non_blocked, s->blockedrpcs);
+ /* enable all disabled, non-blocked and allowed commands */
+ qmp_for_each_command(&ga_commands, ga_enable_non_blocked, s);
s->frozen = false;
if (!ga_delete_file(s->state_filepath_isfrozen)) {
g_warning("unable to delete %s, fsfreeze may not function properly",
const char *service;
#endif
gchar *bliststr; /* blockedrpcs may point to this string */
+ gchar *aliststr; /* allowedrpcs may point to this string */
GList *blockedrpcs;
+ GList *allowedrpcs;
int daemonize;
GLogLevelFlags log_level;
int dumpconf;
config->blockedrpcs = g_list_concat(config->blockedrpcs,
split_list(config->bliststr, ","));
}
+ if (g_key_file_has_key(keyfile, "general", "allow-rpcs", NULL)) {
+ config->aliststr =
+ g_key_file_get_string(keyfile, "general", "allow-rpcs", &gerr);
+ config->allowedrpcs = g_list_concat(config->allowedrpcs,
+ split_list(config->aliststr, ","));
+ }
+
+ if (g_key_file_has_key(keyfile, "general", blockrpcs_key, NULL) &&
+ g_key_file_has_key(keyfile, "general", "allow-rpcs", NULL)) {
+ g_critical("wrong config, using 'block-rpcs' and 'allow-rpcs' keys at"
+ " the same time is not allowed");
+ exit(EXIT_FAILURE);
+ }
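+
+ /*
+ * A hypothetical qemu-ga.conf using the new key (the two keys are
+ * mutually exclusive, as checked above):
+ *
+ * [general]
+ * allow-rpcs=guest-ping,guest-info,guest-get-time
+ */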
end:
g_key_file_free(keyfile);
tmp = list_join(config->blockedrpcs, ',');
g_key_file_set_string(keyfile, "general", "block-rpcs", tmp);
g_free(tmp);
+ tmp = list_join(config->allowedrpcs, ',');
+ g_key_file_set_string(keyfile, "general", "allow-rpcs", tmp);
+ g_free(tmp);
tmp = g_key_file_to_data(keyfile, NULL, &error);
if (error) {
static void config_parse(GAConfig *config, int argc, char **argv)
{
- const char *sopt = "hVvdm:p:l:f:F::b:s:t:Dr";
+ const char *sopt = "hVvdm:p:l:f:F::b:a:s:t:Dr";
int opt_ind = 0, ch;
+ bool block_rpcs = false, allow_rpcs = false;
const struct option lopt[] = {
{ "help", 0, NULL, 'h' },
{ "version", 0, NULL, 'V' },
{ "daemonize", 0, NULL, 'd' },
{ "block-rpcs", 1, NULL, 'b' },
{ "blacklist", 1, NULL, 'b' }, /* deprecated alias for 'block-rpcs' */
+ { "allow-rpcs", 1, NULL, 'a' },
#ifdef _WIN32
{ "service", 1, NULL, 's' },
#endif
}
config->blockedrpcs = g_list_concat(config->blockedrpcs,
split_list(optarg, ","));
+ block_rpcs = true;
+ break;
+ }
+ case 'a': {
+ if (is_help_option(optarg)) {
+ qmp_for_each_command(&ga_commands, ga_print_cmd, NULL);
+ exit(EXIT_SUCCESS);
+ }
+ config->allowedrpcs = g_list_concat(config->allowedrpcs,
+ split_list(optarg, ","));
+ allow_rpcs = true;
break;
}
#ifdef _WIN32
exit(EXIT_FAILURE);
}
}
+
+ if (block_rpcs && allow_rpcs) {
+ g_critical("wrong commandline, using --block-rpcs and --allow-rpcs at the"
+ " same time is not allowed");
+ exit(EXIT_FAILURE);
+ }
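+
+ /*
+ * Example invocation (illustrative):
+ * qemu-ga --allow-rpcs=guest-ping,guest-info
+ * enables only the listed RPCs; combining it with --block-rpcs
+ * aborts here.
+ */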
}
static void config_free(GAConfig *config)
g_free(config->state_dir);
g_free(config->channel_path);
g_free(config->bliststr);
+ g_free(config->aliststr);
#ifdef CONFIG_FSFREEZE
g_free(config->fsfreeze_hook);
#endif
g_list_free_full(config->blockedrpcs, g_free);
+ g_list_free_full(config->allowedrpcs, g_free);
g_free(config);
}
s->deferred_options.log_filepath = config->log_filepath;
}
ga_disable_logging(s);
- qmp_for_each_command(&ga_commands, ga_disable_not_allowed, NULL);
+ qmp_for_each_command(&ga_commands, ga_disable_not_allowed_freeze, NULL);
} else {
if (config->daemonize) {
become_daemon(config->pid_filepath);
return NULL;
}
+ if (config->allowedrpcs) {
+ qmp_for_each_command(&ga_commands, ga_disable_not_allowed, config->allowedrpcs);
+ s->allowedrpcs = config->allowedrpcs;
+ }
+
+ /*
+ * Some commands can be blocked due to system limitations.
+ * Initialize the blockedrpcs list even if allowedrpcs is specified.
+ */
config->blockedrpcs = ga_command_init_blockedrpcs(config->blockedrpcs);
if (config->blockedrpcs) {
GList *l = config->blockedrpcs;
#include "qemu/osdep.h"
#include "vss-common.h"
+#include "vss-debug.h"
#ifdef HAVE_VSS_SDK
#include <vscoordint.h>
#else
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(char *)&msg, 0, NULL);
- fprintf(stderr, "%.*s. (Error: %lx) %s\n", len, text, err, msg);
+ qga_debug("%.*s. (Error: %lx) %s", len, text, err, msg);
LocalFree(msg);
}
/* Lookup Administrators group name from winmgmt */
static HRESULT GetAdminName(_bstr_t *name)
{
+ qga_debug_begin;
+
HRESULT hr;
COMPointer<IWbemLocator> pLoc;
COMPointer<IWbemServices> pSvc;
}
out:
+ qga_debug_end;
return hr;
}
static HRESULT getNameByStringSID(
const wchar_t *sid, LPWSTR buffer, LPDWORD bufferLen)
{
+ qga_debug_begin;
+
HRESULT hr = S_OK;
PSID psid = NULL;
SID_NAME_USE groupType;
LocalFree(psid);
out:
+ qga_debug_end;
return hr;
}
static HRESULT QGAProviderFind(
HRESULT (*found)(ICatalogCollection *, int, void *), void *arg)
{
+ qga_debug_begin;
+
HRESULT hr;
COMInitializer initializer;
COMPointer<IUnknown> pUnknown;
chk(pColl->SaveChanges(&n));
out:
+ qga_debug_end;
return hr;
}
/* Count QGA VSS provider in COM+ Application Catalog */
static HRESULT QGAProviderCount(ICatalogCollection *coll, int i, void *arg)
{
+ qga_debug_begin;
+
(*(int *)arg)++;
+
+ qga_debug_end;
return S_OK;
}
/* Remove QGA VSS provider from COM+ Application Catalog Collection */
static HRESULT QGAProviderRemove(ICatalogCollection *coll, int i, void *arg)
{
+ qga_debug_begin;
HRESULT hr;
- fprintf(stderr, "Removing COM+ Application: %s\n", QGA_PROVIDER_NAME);
+ qga_debug("Removing COM+ Application: %s", QGA_PROVIDER_NAME);
chk(coll->Remove(i));
out:
+ qga_debug_end;
return hr;
}
/* Unregister this module from COM+ Applications Catalog */
STDAPI COMUnregister(void)
{
+ qga_debug_begin;
+
HRESULT hr;
DllUnregisterServer();
chk(QGAProviderFind(QGAProviderRemove, NULL));
out:
+ qga_debug_end;
return hr;
}
/* Register this module to COM+ Applications Catalog */
STDAPI COMRegister(void)
{
+ qga_debug_begin;
+
HRESULT hr;
COMInitializer initializer;
COMPointer<IUnknown> pUnknown;
if (!g_hinstDll) {
errmsg(E_FAIL, "Failed to initialize DLL");
+ qga_debug_end;
return E_FAIL;
}
chk(QGAProviderFind(QGAProviderCount, (void *)&count));
if (count) {
errmsg(E_ABORT, "QGA VSS Provider is already installed");
+ qga_debug_end;
return E_ABORT;
}
}
strcpy(tlbPath, dllPath);
strcpy(tlbPath+n-3, "tlb");
- fprintf(stderr, "Registering " QGA_PROVIDER_NAME ":\n");
- fprintf(stderr, " %s\n", dllPath);
- fprintf(stderr, " %s\n", tlbPath);
+ qga_debug("Registering " QGA_PROVIDER_NAME ": %s %s",
+ dllPath, tlbPath);
if (!PathFileExists(tlbPath)) {
hr = HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
errmsg(hr, "Failed to lookup tlb");
COMUnregister();
}
+ qga_debug_end;
return hr;
}
static BOOL CreateRegistryKey(LPCTSTR key, LPCTSTR value, LPCTSTR data)
{
+ qga_debug_begin;
+
HKEY hKey;
LONG ret;
DWORD size;
RegCloseKey(hKey);
out:
+ qga_debug_end;
if (ret != ERROR_SUCCESS) {
/* As we cannot printf within DllRegisterServer(), show a dialog. */
errmsg_dialog(ret, "Cannot add registry", key);
/* Register this dll as a VSS provider */
STDAPI DllRegisterServer(void)
{
+ qga_debug_begin;
+
COMInitializer initializer;
COMPointer<IVssAdmin> pVssAdmin;
HRESULT hr = E_FAIL;
DllUnregisterServer();
}
+ qga_debug_end;
return hr;
}
/* Unregister this VSS hardware provider from the system */
STDAPI DllUnregisterServer(void)
{
+ qga_debug_begin;
+
TCHAR key[256];
COMInitializer initializer;
COMPointer<IVssAdmin> pVssAdmin;
SHDeleteKey(HKEY_CLASSES_ROOT, key);
SHDeleteKey(HKEY_CLASSES_ROOT, g_szProgid);
+ qga_debug_end;
return S_OK; /* Uninstall should never fail */
}
}
if (mbstowcs(bstr, ascii, len) == (size_t)-1) {
- fprintf(stderr, "Failed to convert string '%s' into BSTR", ascii);
+ qga_debug("Failed to convert string '%s' into BSTR", ascii);
bstr[0] = 0;
}
return bstr;
/* Stop QGA VSS provider service using Winsvc API */
STDAPI StopService(void)
{
+ qga_debug_begin;
+
HRESULT hr = S_OK;
SC_HANDLE manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS);
SC_HANDLE service = NULL;
out:
CloseServiceHandle(service);
CloseServiceHandle(manager);
+ qga_debug_end;
return hr;
}
qga_vss = shared_module(
'qga-vss',
- ['requester.cpp', 'provider.cpp', 'install.cpp', genh],
+ ['requester.cpp', 'provider.cpp', 'install.cpp', 'vss-debug.cpp', genh],
name_prefix: '',
cpp_args: ['-Wno-unknown-pragmas', '-Wno-delete-non-virtual-dtor', '-Wno-non-virtual-dtor'],
link_args: link_args,
#include "qemu/osdep.h"
#include "vss-common.h"
+#include "vss-debug.h"
#ifdef HAVE_VSS_SDK
#include <vscoordint.h>
#else
EXTERN_C
BOOL WINAPI DllMain(HINSTANCE hinstDll, DWORD dwReason, LPVOID lpReserved)
{
+ qga_debug("begin, reason = %lu", dwReason);
if (dwReason == DLL_PROCESS_ATTACH) {
g_hinstDll = hinstDll;
DisableThreadLibraryCalls(hinstDll);
}
+ qga_debug_end;
return TRUE;
}
#include "qemu/osdep.h"
#include "vss-common.h"
+#include "vss-debug.h"
#include "requester.h"
#include "install.h"
#include <vswriter.h>
#define DEFAULT_VSS_BACKUP_TYPE VSS_BT_FULL
-#define err_set(e, err, fmt, ...) \
- ((e)->error_setg_win32_wrapper((e)->errp, __FILE__, __LINE__, __func__, \
- err, fmt, ## __VA_ARGS__))
+/* Expand to a single statement so err_set() is safe in unbraced if/else */
+#define err_set(e, err, fmt, ...) do { \
+ (e)->error_setg_win32_wrapper((e)->errp, __FILE__, __LINE__, __func__, \
+ err, fmt, ## __VA_ARGS__); \
+ qga_debug(fmt, ## __VA_ARGS__); \
+} while (0)
/* Bad idea, works only when (e)->errp != NULL: */
#define err_is_set(e) ((e)->errp && *(e)->errp)
/* To lift this restriction, error_propagate(), like we do in QEMU code */
STDAPI requester_init(void)
{
+ qga_debug_begin;
+
COMInitializer initializer; /* to call CoInitializeSecurity */
HRESULT hr = CoInitializeSecurity(
NULL, -1, NULL, NULL, RPC_C_AUTHN_LEVEL_PKT_PRIVACY,
RPC_C_IMP_LEVEL_IDENTIFY, NULL, EOAC_NONE, NULL);
if (FAILED(hr)) {
- fprintf(stderr, "failed to CoInitializeSecurity (error %lx)\n", hr);
+ qga_debug("failed to CoInitializeSecurity (error %lx)", hr);
return hr;
}
hLib = LoadLibraryA("VSSAPI.DLL");
if (!hLib) {
- fprintf(stderr, "failed to load VSSAPI.DLL\n");
+ qga_debug("failed to load VSSAPI.DLL");
return HRESULT_FROM_WIN32(GetLastError());
}
#endif
);
if (!pCreateVssBackupComponents) {
- fprintf(stderr, "failed to get proc address from VSSAPI.DLL\n");
+ qga_debug("failed to get proc address from VSSAPI.DLL");
return HRESULT_FROM_WIN32(GetLastError());
}
pVssFreeSnapshotProperties = (t_VssFreeSnapshotProperties)
GetProcAddress(hLib, "VssFreeSnapshotProperties");
if (!pVssFreeSnapshotProperties) {
- fprintf(stderr, "failed to get proc address from VSSAPI.DLL\n");
+ qga_debug("failed to get proc address from VSSAPI.DLL");
return HRESULT_FROM_WIN32(GetLastError());
}
+ qga_debug_end;
return S_OK;
}
static void requester_cleanup(void)
{
+ qga_debug_begin;
+
if (vss_ctx.hEventFrozen) {
CloseHandle(vss_ctx.hEventFrozen);
vss_ctx.hEventFrozen = NULL;
vss_ctx.pVssbc = NULL;
}
vss_ctx.cFrozenVols = 0;
+ qga_debug_end;
}
STDAPI requester_deinit(void)
{
+ qga_debug_begin;
+
requester_cleanup();
pCreateVssBackupComponents = NULL;
hLib = NULL;
}
+ qga_debug_end;
return S_OK;
}
static HRESULT WaitForAsync(IVssAsync *pAsync)
{
+ qga_debug_begin;
+
HRESULT ret, hr;
do {
}
} while (ret == VSS_S_ASYNC_PENDING);
+ qga_debug_end;
return ret;
}
static void AddComponents(ErrorSet *errset)
{
+ qga_debug_begin;
+
unsigned int cWriters, i;
VSS_ID id, idInstance, idWriter;
BSTR bstrWriterName = NULL;
if (pComponent && info) {
pComponent->FreeComponentInfo(info);
}
+ qga_debug_end;
}
DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName,
DWORD defaultData)
{
+ qga_debug_begin;
+
DWORD regGetValueError;
DWORD dwordData;
DWORD dataSize = sizeof(DWORD);
regGetValueError = RegGetValue(baseKey, subKey, valueName, RRF_RT_DWORD,
NULL, &dwordData, &dataSize);
+ qga_debug_end;
if (regGetValueError != ERROR_SUCCESS) {
return defaultData;
}
VSS_BACKUP_TYPE get_vss_backup_type(
VSS_BACKUP_TYPE defaultVssBT = DEFAULT_VSS_BACKUP_TYPE)
{
+ qga_debug_begin;
+
VSS_BACKUP_TYPE vssBackupType;
vssBackupType = static_cast<VSS_BACKUP_TYPE>(
QGA_PROVIDER_REGISTRY_ADDRESS,
"VssOption",
defaultVssBT));
+ qga_debug_end;
if (!is_valid_vss_backup_type(vssBackupType)) {
return defaultVssBT;
}
void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset)
{
+ qga_debug_begin;
+
COMPointer<IVssAsync> pAsync;
HANDLE volume;
HRESULT hr;
if (vss_ctx.pVssbc) { /* already frozen */
*num_vols = 0;
+ qga_debug("finished, already frozen");
return;
}
}
}
+ qga_debug("preparing for backup");
hr = vss_ctx.pVssbc->PrepareForBackup(pAsync.replace());
if (SUCCEEDED(hr)) {
hr = WaitForAsync(pAsync);
* CQGAVssProvider::CommitSnapshots will kick vss_ctx.hEventFrozen
* after the applications and filesystems are frozen.
*/
+ qga_debug("do snapshot set");
hr = vss_ctx.pVssbc->DoSnapshotSet(&vss_ctx.pAsyncSnapshot);
if (FAILED(hr)) {
err_set(errset, hr, "failed to do snapshot set");
*num_vols = vss_ctx.cFrozenVols = num_fixed_drives;
}
+ qga_debug("end successful");
return;
out:
out1:
requester_cleanup();
CoUninitialize();
+
+ qga_debug_end;
}
void requester_thaw(int *num_vols, void *mountpints, ErrorSet *errset)
{
+ qga_debug_begin;
COMPointer<IVssAsync> pAsync;
if (!vss_ctx.hEventThaw) {
* and no volumes must be frozen. We return without an error.
*/
*num_vols = 0;
+ qga_debug("finished, no volumes were frozen");
+
return;
}
CoUninitialize();
StopService();
+
+ qga_debug_end;
}
--- /dev/null
+/*
+ * QEMU Guest Agent VSS debug declarations
+ *
+ * Copyright (C) 2023 Red Hat Inc
+ *
+ * Authors:
+ * Konstantin Kostiuk <kkostiuk@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "vss-debug.h"
+#include "vss-common.h"
+
+void qga_debug_internal(const char *funcname, const char *fmt, ...)
+{
+ char user_string[512] = {0};
+ char full_string[640] = {0};
+
+ va_list args;
+ va_start(args, fmt);
+ if (vsnprintf(user_string, _countof(user_string), fmt, args) <= 0) {
+ va_end(args);
+ return;
+ }
+
+ va_end(args);
+
+ if (snprintf(full_string, _countof(full_string),
+ QGA_PROVIDER_NAME "[%lu]: %s %s\n",
+ GetCurrentThreadId(), funcname, user_string) <= 0) {
+ return;
+ }
+
+ OutputDebugString(full_string);
+ fputs(full_string, stderr);
+}
--- /dev/null
+/*
+ * QEMU Guest Agent VSS debug declarations
+ *
+ * Copyright (C) 2023 Red Hat Inc
+ *
+ * Authors:
+ * Konstantin Kostiuk <kkostiuk@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include <vss-handles.h>
+
+#ifndef VSS_DEBUG_H
+#define VSS_DEBUG_H
+
+void qga_debug_internal(const char *funcname, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
+
+#define qga_debug(fmt, ...) qga_debug_internal(__func__, fmt, ## __VA_ARGS__)
+#define qga_debug_begin qga_debug("begin")
+#define qga_debug_end qga_debug("end")
+
+#endif
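+
+/*
+ * Minimal usage sketch (hypothetical function, not part of this patch):
+ *
+ * static HRESULT DoSomething(void)
+ * {
+ *     qga_debug_begin;   logs "<func> begin" with the thread id
+ *     HRESULT hr = S_OK;
+ *     qga_debug("step done, hr = %lx", hr);
+ *     qga_debug_end;     logs "<func> end"
+ *     return hr;
+ * }
+ */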
-Subproject commit 6b5188ca14e59ce7bf71afe4e7d3d557c3d31bf8
+Subproject commit 057eb10b6d523540012e6947d5c9f63e95244e94
printf "%s\n" ' oss OSS sound support'
printf "%s\n" ' pa PulseAudio sound support'
printf "%s\n" ' parallels parallels image format support'
- printf "%s\n" ' pipewire Pipewire sound support'
+ printf "%s\n" ' pipewire PipeWire sound support'
printf "%s\n" ' png PNG support with libpng'
printf "%s\n" ' pvrdma Enable PVRDMA support'
printf "%s\n" ' qcow1 qcow1 image format support'
* so a userfault will trigger.
*/
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
+ /*
+ * We'll discard data from the actual file, even though we only
+ * have a MAP_PRIVATE mapping, possibly messing with other
+ * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to
+ * change that behavior without violating the promised
+ * semantics of ram_block_discard_range().
+ *
+ * Only warn, because it works as long as nobody else uses that
+ * file.
+ */
+ if (!qemu_ram_is_shared(rb)) {
+ warn_report_once("ram_block_discard_range: Discarding RAM"
+ " in private file mappings is possibly"
+ " dangerous, because it will modify the"
+ " underlying file and will affect other"
+ " users of the file");
+ }
+
ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, length);
if (ret) {
stub_ss.add(files('semihost.c'))
stub_ss.add(files('usb-dev-stub.c'))
stub_ss.add(files('xen-hw-stub.c'))
+ stub_ss.add(files('virtio-md-pci.c'))
else
stub_ss.add(files('qdev.c'))
endif
--- /dev/null
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/virtio/virtio-md-pci.h"
+
+void virtio_md_pci_pre_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
+
+void virtio_md_pci_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
+
+void virtio_md_pci_unplug_request(VirtIOMDPCI *vmd, MachineState *ms,
+ Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
+
+void virtio_md_pci_unplug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
#endif
typedef struct S1Translate {
+ /*
+ * in_mmu_idx: specifies which TTBR, TCR, etc. to use for the walk.
+ * Together with in_space, specifies the architectural translation regime.
+ */
ARMMMUIdx in_mmu_idx;
+ /*
+ * in_ptw_idx: specifies which mmuidx to use for the actual
+ * page table descriptor load operations. This will be one of the
+ * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+ * this field is updated accordingly.
+ */
ARMMMUIdx in_ptw_idx;
+ /*
+ * in_space: the security space for this walk. This plus
+ * the in_mmu_idx specify the architectural translation regime.
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+ * this field is updated accordingly.
+ *
+ * Note that the security space for the in_ptw_idx may be different
+ * from that for the in_mmu_idx. We do not need to explicitly track
+ * the in_ptw_idx security space because:
+ * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+ * itself specifies the security space
+ * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+ * space used for ptw reads is the same as that of the security
+ * space of the stage 1 translation for all cases except where
+ * stage 1 is Secure; in that case the only possibilities for
+ * the ptw read are Secure and NonSecure, and the in_ptw_idx
+ * value being Stage2 vs Stage2_S distinguishes those.
+ */
ARMSecuritySpace in_space;
+ /*
+ * in_secure: whether the translation regime is a Secure one.
+ * This is always equal to arm_space_is_secure(in_space).
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+ * this field is updated accordingly.
+ */
bool in_secure;
+ /*
+ * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+ * accesses will not update the guest page table access flags
+ * and will not change the state of the softmmu TLBs.
+ */
bool in_debug;
/*
* If this is stage 2 of a stage 1+2 page table walk, then this must
}
}
+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+ ARMMMUIdx s2_mmu_idx)
+{
+ /*
+ * Return the security space to use for stage 2 when doing
+ * the S1 page table descriptor load.
+ */
+ if (regime_is_stage2(s2_mmu_idx)) {
+ /*
+ * The security space for ptw reads is almost always the same
+ * as that of the security space of the stage 1 translation.
+ * The only exception is when stage 1 is Secure; in that case
+ * the ptw read might be to the Secure or the NonSecure space
+ * (but never Realm or Root), and the s2_mmu_idx tells us which.
+ * Root translations are always single-stage.
+ */
+ if (s1_space == ARMSS_Secure) {
+ return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+ } else {
+ assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+ assert(s1_space != ARMSS_Root);
+ return s1_space;
+ }
+ } else {
+ /* ptw loads are from phys: the mmu idx itself says which space */
+ return arm_phys_to_space(s2_mmu_idx);
+ }
+}
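+
+/*
+ * Summary of the mapping implemented above:
+ *   Secure stage 1 + Stage2_S          -> Secure
+ *   Secure stage 1 + Stage2            -> NonSecure
+ *   NonSecure/Realm stage 1 + Stage2*  -> unchanged
+ *   ptw via a Phys_* mmu idx           -> the space named by the mmu idx
+ */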
+
/* Translate a S1 pagetable walk through S2 if needed. */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
hwaddr addr, ARMMMUFaultInfo *fi)
{
- ARMSecuritySpace space = ptw->in_space;
bool is_secure = ptw->in_secure;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
* From gdbstub, do not use softmmu so that we don't modify the
* state of the cpu at all, including softmmu tlb contents.
*/
+ ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
S1Translate s2ptw = {
.in_mmu_idx = s2_mmu_idx,
.in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
- .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
- .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
- : space == ARMSS_Realm ? ARMSS_Realm
- : ARMSS_NonSecure),
+ .in_secure = arm_space_is_secure(s2_space),
+ .in_space = s2_space,
.in_debug = true,
};
GetPhysAddrResult s2 = { };
hwaddr ipa;
int s1_prot, s1_lgpgsz;
bool is_secure = ptw->in_secure;
+ ARMSecuritySpace in_space = ptw->in_space;
bool ret, ipa_secure;
ARMCacheAttrs cacheattrs1;
ARMSecuritySpace ipa_space;
* Check if IPA translates to secure or non-secure PA space.
* Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
*/
- result->f.attrs.secure =
- (is_secure
- && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
- && (ipa_secure
- || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+ if (in_space == ARMSS_Secure) {
+ result->f.attrs.secure =
+ !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+ && (ipa_secure
+ || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+ result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
+ }
return false;
}
uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
- return (page_check_range(addr, 1, want) == 0) ? 1 : 0;
+ return page_check_range(addr, 1, want);
#else
int prot, excp;
hwaddr phys;
.insn_flags = CPU_MIPS32R1,
.mmu_type = MMU_TYPE_R4000,
},
+ {
+ .name = "XBurstR1",
+ .CP0_PRid = 0x1ed0024f,
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (0 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R1 | ASE_MXU,
+ .mmu_type = MMU_TYPE_R4000,
+ },
{
.name = "4KEmR1",
.CP0_PRid = 0x00018500,
.insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSP_R2,
.mmu_type = MMU_TYPE_R4000,
},
+ {
+ .name = "XBurstR2",
+ .CP0_PRid = 0x2ed1024f,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) |
+ (1 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3778FF1F,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MXU,
+ .mmu_type = MMU_TYPE_R4000,
+ },
{
.name = "M14K",
.CP0_PRid = 0x00019b00,
.CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) |
(1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist),
.CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) |
- (1 << CP0C5_LLB) | (1 << CP0C5_MRP),
+ (1 << CP0C5_LLB) | (1 << CP0C5_MRP) | (3 << CP0C5_GI),
.CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) |
(1 << CP0C5_FRE) | (1 << CP0C5_UFE),
.CP0_LLAddr_rw_bitmask = 0,
.CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) |
(1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist),
.CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) |
- (1 << CP0C5_LLB) | (1 << CP0C5_MRP),
+ (1 << CP0C5_LLB) | (1 << CP0C5_MRP) | (3 << CP0C5_GI),
.CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) |
(1 << CP0C5_FRE) | (1 << CP0C5_UFE),
.CP0_LLAddr_rw_bitmask = 0,
.CP1_fcr31 = 0,
.CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.MSAIR = (0x01 << MSAIR_ProcID) | (0x40 << MSAIR_Rev),
+ .lcsr_cpucfg1 = (1 << CPUCFG1_FP) | (2 << CPUCFG1_FPREV) |
+ (1 << CPUCFG1_MSA1) | (1 << CPUCFG1_LSLDR0) |
+ (1 << CPUCFG1_LSPERF) | (1 << CPUCFG1_LSPERFX) |
+ (1 << CPUCFG1_LSSYNCI) | (1 << CPUCFG1_LLEXC) |
+ (1 << CPUCFG1_SCRAND) | (1 << CPUCFG1_MUALP) |
+ (1 << CPUCFG1_KMUALEN) | (1 << CPUCFG1_ITLBT) |
+ (1 << CPUCFG1_SFBP) | (1 << CPUCFG1_CDMAP),
+ .lcsr_cpucfg2 = (1 << CPUCFG2_LEXT1) | (1 << CPUCFG2_LCSRP) |
+ (1 << CPUCFG2_LDISBLIKELY),
.SEGBITS = 48,
.PABITS = 48,
.insn_flags = CPU_MIPS64R2 | INSN_LOONGSON3A |
env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask;
env->CP0_PageGrain = env->cpu_model->CP0_PageGrain;
env->CP0_EBaseWG_rw_bitmask = env->cpu_model->CP0_EBaseWG_rw_bitmask;
+ env->lcsr_cpucfg1 = env->cpu_model->lcsr_cpucfg1;
+ env->lcsr_cpucfg2 = env->cpu_model->lcsr_cpucfg2;
env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask;
env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31;
{
CPUMIPSState *env = &cpu->env;
- env->cp0_count_ns = clock_ticks_to_ns(MIPS_CPU(cpu)->clock,
- env->cpu_model->CCRes);
- assert(env->cp0_count_ns);
+ clock_set_mul_div(cpu->count_div, env->cpu_model->CCRes, 1);
+ clock_set_source(cpu->count_div, cpu->clock);
+ clock_set_source(env->count_clock, cpu->count_div);
}
static void mips_cpu_realizefn(DeviceState *dev, Error **errp)
cpu_set_cpustate_pointers(cpu);
cpu->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, cpu, 0);
+ cpu->count_div = clock_new(OBJECT(obj), "clk-div-count");
+ env->count_clock = clock_new(OBJECT(obj), "clk-count");
env->cpu_model = mcc->cpu_def;
+#ifndef CONFIG_USER_ONLY
+ if (mcc->cpu_def->lcsr_cpucfg2 & (1 << CPUCFG2_LCSRP)) {
+ memory_region_init_io(&env->iocsr.mr, OBJECT(cpu), NULL,
+ env, "iocsr", UINT64_MAX);
+ address_space_init(&env->iocsr.as,
+ &env->iocsr.mr, "IOCSR");
+ }
+#endif
}
static char *mips_cpu_type_name(const char *cpu_model)
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/memory.h"
+#endif
#include "fpu/softfloat-types.h"
#include "hw/clock.h"
#include "mips-defs.h"
*/
int32_t CP0_DESAVE;
target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
+/*
+ * Loongson CSR CPUCFG registers
+ */
+ uint32_t lcsr_cpucfg1;
+#define CPUCFG1_FP 0
+#define CPUCFG1_FPREV 1
+#define CPUCFG1_MMI 4
+#define CPUCFG1_MSA1 5
+#define CPUCFG1_MSA2 6
+#define CPUCFG1_LSLDR0 16
+#define CPUCFG1_LSPERF 17
+#define CPUCFG1_LSPERFX 18
+#define CPUCFG1_LSSYNCI 19
+#define CPUCFG1_LLEXC 20
+#define CPUCFG1_SCRAND 21
+#define CPUCFG1_MUALP 25
+#define CPUCFG1_KMUALEN 26
+#define CPUCFG1_ITLBT 27
+#define CPUCFG1_SFBP 29
+#define CPUCFG1_CDMAP 30
+ uint32_t lcsr_cpucfg2;
+#define CPUCFG2_LEXT1 0
+#define CPUCFG2_LEXT2 1
+#define CPUCFG2_LEXT3 2
+#define CPUCFG2_LSPW 3
+#define CPUCFG2_LCSRP 27
+#define CPUCFG2_LDISBLIKELY 28
/* We waste some space so we can handle shadow registers like TCs. */
TCState tcs[MIPS_SHADOW_SET_MAX];
void *irq[8];
struct MIPSITUState *itu;
MemoryRegion *itc_tag; /* ITC Configuration Tags */
+
+ /* Loongson IOCSR memory */
+ struct {
+ AddressSpace as;
+ MemoryRegion mr;
+ } iocsr;
#endif
const mips_def_t *cpu_model;
QEMUTimer *timer; /* Internal timer */
+ Clock *count_clock; /* CP0_Count clock */
target_ulong exception_base; /* ExceptionBase input to the core */
- uint64_t cp0_count_ns; /* CP0_Count clock period (in nanoseconds) */
} CPUMIPSState;
/**
/*< public >*/
Clock *clock;
+ Clock *count_div; /* Divider for CP0_Count clock */
CPUNegativeOffsetState neg;
CPUMIPSState env;
};
return env->CP0_Config3 & (1 << CP0C3_MSAP);
}
+/* Check presence of Loongson CSR instructions */
+static inline bool ase_lcsr_available(CPUMIPSState *env)
+{
+ return env->lcsr_cpucfg2 & (1 << CPUCFG2_LCSRP);
+}
+
/* Check presence of multi-threading ASE implementation */
static inline bool ase_mt_available(CPUMIPSState *env)
{
DEF_HELPER_2(pmon, void, env, int)
DEF_HELPER_1(wait, void, env)
+#ifdef TARGET_MIPS64
+DEF_HELPER_FLAGS_2(lcsr_cpucfg, TCG_CALL_NO_RWG_SE, tl, env, tl)
+#endif
+
/* Loongson multimedia functions. */
DEF_HELPER_FLAGS_2(paddsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(paddush, TCG_CALL_NO_RWG_SE, i64, i64, i64)
int32_t CP0_PageGrain_rw_bitmask;
int32_t CP0_PageGrain;
target_ulong CP0_EBaseWG_rw_bitmask;
+ uint32_t lcsr_cpucfg1;
+ uint32_t lcsr_cpucfg2;
uint64_t insn_flags;
enum mips_mmu_types mmu_type;
int32_t SAARP;
#include "internal.h"
/* MIPS R4K timer */
+static uint32_t cpu_mips_get_count_val(CPUMIPSState *env)
+{
+ int64_t now_ns;
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ return env->CP0_Count +
+ (uint32_t)clock_ns_to_ticks(env->count_clock, now_ns);
+}
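+
+/*
+ * Worked example (assumed numbers, for illustration only): with the CPU
+ * input clock at 200 MHz and CCRes == 2, count_clock runs at 100 MHz,
+ * so each CP0_Count tick spans 10 ns and
+ * clock_ns_to_ticks(env->count_clock, 50) yields 5 ticks.
+ */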
+
static void cpu_mips_timer_update(CPUMIPSState *env)
{
uint64_t now_ns, next_ns;
uint32_t wait;
now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- wait = env->CP0_Compare - env->CP0_Count -
- (uint32_t)(now_ns / env->cp0_count_ns);
- next_ns = now_ns + (uint64_t)wait * env->cp0_count_ns;
+ wait = env->CP0_Compare - cpu_mips_get_count_val(env);
+ /* Clamp the interval to a full counter wrap if virtual time has not progressed */
+ if (!wait) {
+ wait = UINT32_MAX;
+ }
+ next_ns = now_ns + clock_ticks_to_ns(env->count_clock, wait);
timer_mod(env->timer, next_ns);
}
cpu_mips_timer_expire(env);
}
- return env->CP0_Count + (uint32_t)(now_ns / env->cp0_count_ns);
+ return cpu_mips_get_count_val(env);
}
}
env->CP0_Count = count;
} else {
/* Store new count register */
- env->CP0_Count = count -
- (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
- env->cp0_count_ns);
+ env->CP0_Count = count - (uint32_t)clock_ns_to_ticks(env->count_clock,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
/* Update timer timer */
cpu_mips_timer_update(env);
}
void cpu_mips_stop_count(CPUMIPSState *env)
{
/* Store the current value */
- env->CP0_Count += (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
- env->cp0_count_ns);
+ env->CP0_Count += (uint32_t)clock_ns_to_ticks(env->count_clock,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
static void mips_timer_cb(void *opaque)
return;
}
- /*
- * ??? This callback should occur when the counter is exactly equal to
- * the comparator value. Offset the count by one to avoid immediately
- * retriggering the callback before any virtual time has passed.
- */
- env->CP0_Count++;
cpu_mips_timer_expire(env);
- env->CP0_Count--;
}
void cpu_mips_clock_init(MIPSCPU *cpu)
--- /dev/null
+# Loongson CSR instructions
+#
+# Copyright (C) 2023 Jiaxun Yang <jiaxun.yang@flygoat.com>
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+
+&r rs rt rd sa
+
+@rs_rd ...... rs:5 ..... rd:5 ..... ...... &r rt=0 sa=0
+
+CPUCFG 110010 ..... 01000 ..... 00100 011000 @rs_rd
+
+RDCSR 110010 ..... 00000 ..... 00100 011000 @rs_rd
+WRCSR 110010 ..... 00001 ..... 00100 011000 @rs_rd
+DRDCSR 110010 ..... 00010 ..... 00100 011000 @rs_rd
+DWRCSR 110010 ..... 00011 ..... 00100 011000 @rs_rd
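+
+# Illustrative decode (encoding computed from the pattern above, not taken
+# from vendor documentation): the word 0xc8681118 matches CPUCFG with
+# rs = 3 and rd = 2:
+#
+#   110010 00011 01000 00010 00100 011000
+#   opcode rs          rd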
--- /dev/null
+/*
+ * Loongson CSR instructions translation routines
+ *
+ * Copyright (c) 2023 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "exec/helper-gen.h"
+#include "translate.h"
+
+/* Include the auto-generated decoder. */
+#include "decode-lcsr.c.inc"
+
+static bool trans_CPUCFG(DisasContext *ctx, arg_CPUCFG *a)
+{
+ TCGv dest = tcg_temp_new();
+ TCGv src1 = tcg_temp_new();
+
+ gen_load_gpr(src1, a->rs);
+ gen_helper_lcsr_cpucfg(dest, cpu_env, src1);
+ gen_store_gpr(dest, a->rd);
+
+ return true;
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool gen_rdcsr(DisasContext *ctx, arg_r *a,
+ void (*func)(TCGv, TCGv_ptr, TCGv))
+{
+ TCGv dest = tcg_temp_new();
+ TCGv src1 = tcg_temp_new();
+
+ check_cp0_enabled(ctx);
+ gen_load_gpr(src1, a->rs);
+ func(dest, cpu_env, src1);
+ gen_store_gpr(dest, a->rd);
+
+ return true;
+}
+
+static bool gen_wrcsr(DisasContext *ctx, arg_r *a,
+ void (*func)(TCGv_ptr, TCGv, TCGv))
+{
+ TCGv val = tcg_temp_new();
+ TCGv addr = tcg_temp_new();
+
+ check_cp0_enabled(ctx);
+ gen_load_gpr(addr, a->rs);
+ gen_load_gpr(val, a->rd);
+ func(cpu_env, addr, val);
+
+ return true;
+}
+
+TRANS(RDCSR, gen_rdcsr, gen_helper_lcsr_rdcsr)
+TRANS(DRDCSR, gen_rdcsr, gen_helper_lcsr_drdcsr)
+TRANS(WRCSR, gen_wrcsr, gen_helper_lcsr_wrcsr)
+TRANS(DWRCSR, gen_wrcsr, gen_helper_lcsr_dwrcsr)
+#else
+#define GEN_FALSE_TRANS(name) \
+static bool trans_##name(DisasContext *ctx, arg_##name * a) \
+{ \
+ return false; \
+}
+
+GEN_FALSE_TRANS(RDCSR)
+GEN_FALSE_TRANS(DRDCSR)
+GEN_FALSE_TRANS(WRCSR)
+GEN_FALSE_TRANS(DWRCSR)
+#endif
decodetree.process('tx79.decode', extra_args: '--static-decode=decode_tx79'),
decodetree.process('vr54xx.decode', extra_args: '--decode=decode_ext_vr54xx'),
decodetree.process('octeon.decode', extra_args: '--decode=decode_ext_octeon'),
+ decodetree.process('lcsr.decode', extra_args: '--decode=decode_ase_lcsr'),
]
mips_ss.add(gen)
mips_ss.add(when: 'TARGET_MIPS64', if_true: files(
'tx79_translate.c',
'octeon_translate.c',
+ 'lcsr_translate.c',
), if_false: files(
'mxu_translate.c',
))
* ├─ 001100 ─ OPC_MXU_D16MADL
* ├─ 001101 ─ OPC_MXU_S16MAD
* ├─ 001110 ─ OPC_MXU_Q16ADD
- * ├─ 001111 ─ OPC_MXU_D16MACE 23
+ * ├─ 001111 ─ OPC_MXU_D16MACE 20 (13..10 don't care)
* │ ┌─ 0 ─ OPC_MXU_S32LDD
* ├─ 010000 ─ OPC_MXU__POOL04 ─┴─ 1 ─ OPC_MXU_S32LDDR
* │
- * │ 23
+ * │ 20 (13..10 don't care)
* ├─ 010001 ─ OPC_MXU__POOL05 ─┬─ 0 ─ OPC_MXU_S32STD
* │ └─ 1 ─ OPC_MXU_S32STDR
* │
* ├─ 010011 ─ OPC_MXU__POOL07 ─┬─ 0000 ─ OPC_MXU_S32STDV
* │ └─ 0001 ─ OPC_MXU_S32STDVR
* │
- * │ 23
+ * │ 20 (13..10 don't care)
* ├─ 010100 ─ OPC_MXU__POOL08 ─┬─ 0 ─ OPC_MXU_S32LDI
* │ └─ 1 ─ OPC_MXU_S32LDIR
* │
- * │ 23
+ * │ 20 (13..10 don't care)
* ├─ 010101 ─ OPC_MXU__POOL09 ─┬─ 0 ─ OPC_MXU_S32SDI
* │ └─ 1 ─ OPC_MXU_S32SDIR
* │
* │ 13..10
* ├─ 010111 ─ OPC_MXU__POOL11 ─┬─ 0000 ─ OPC_MXU_S32SDIV
* │ └─ 0001 ─ OPC_MXU_S32SDIVR
- * ├─ 011000 ─ OPC_MXU_D32ADD
+ * ├─ 011000 ─ OPC_MXU_D32ADD (catches D32ADDC too)
* │ 23..22
* MXU ├─ 011001 ─ OPC_MXU__POOL12 ─┬─ 00 ─ OPC_MXU_D32ACC
* opcodes ─┤ ├─ 01 ─ OPC_MXU_D32ACCM
* │ 23..22
* ├─ 011011 ─ OPC_MXU__POOL13 ─┬─ 00 ─ OPC_MXU_Q16ACC
* │ ├─ 01 ─ OPC_MXU_Q16ACCM
- * │ └─ 10 ─ OPC_MXU_Q16ASUM
+ * │ └─ 10 ─ OPC_MXU_D16ASUM
* │
* │ 23..22
* ├─ 011100 ─ OPC_MXU__POOL14 ─┬─ 00 ─ OPC_MXU_Q8ADDE
* ├─ 100010 ─ OPC_MXU_S8LDD
* ├─ 100011 ─ OPC_MXU_S8STD 15..14
* ├─ 100100 ─ OPC_MXU_S8LDI ┌─ 00 ─ OPC_MXU_S32MUL
- * ├─ 100101 ─ OPC_MXU_S8SDI ├─ 00 ─ OPC_MXU_S32MULU
- * │ ├─ 00 ─ OPC_MXU_S32EXTR
- * ├─ 100110 ─ OPC_MXU__POOL15 ─┴─ 00 ─ OPC_MXU_S32EXTRV
+ * ├─ 100101 ─ OPC_MXU_S8SDI ├─ 01 ─ OPC_MXU_S32MULU
+ * │ ├─ 10 ─ OPC_MXU_S32EXTR
+ * ├─ 100110 ─ OPC_MXU__POOL15 ─┴─ 11 ─ OPC_MXU_S32EXTRV
* │
* │ 20..18
* ├─ 100111 ─ OPC_MXU__POOL16 ─┬─ 000 ─ OPC_MXU_D32SARW
* │ ├─ 110 ─ OPC_MXU_S32OR
* │ └─ 111 ─ OPC_MXU_S32XOR
* │
- * │ 7..5
+ * │ 8..6
* ├─ 101000 ─ OPC_MXU__POOL17 ─┬─ 000 ─ OPC_MXU_LXB
* │ ├─ 001 ─ OPC_MXU_LXH
* ├─ 101001 ─ <not assigned> ├─ 011 ─ OPC_MXU_LXW
* ├─ 110001 ─ OPC_MXU_D32SLR 20..18
* ├─ 110010 ─ OPC_MXU_D32SARL ┌─ 000 ─ OPC_MXU_D32SLLV
* ├─ 110011 ─ OPC_MXU_D32SAR ├─ 001 ─ OPC_MXU_D32SLRV
- * ├─ 110100 ─ OPC_MXU_Q16SLL ├─ 010 ─ OPC_MXU_D32SARV
- * ├─ 110101 ─ OPC_MXU_Q16SLR ├─ 011 ─ OPC_MXU_Q16SLLV
- * │ ├─ 100 ─ OPC_MXU_Q16SLRV
- * ├─ 110110 ─ OPC_MXU__POOL18 ─┴─ 101 ─ OPC_MXU_Q16SARV
+ * ├─ 110100 ─ OPC_MXU_Q16SLL ├─ 011 ─ OPC_MXU_D32SARV
+ * ├─ 110101 ─ OPC_MXU_Q16SLR ├─ 100 ─ OPC_MXU_Q16SLLV
+ * │ ├─ 101 ─ OPC_MXU_Q16SLRV
+ * ├─ 110110 ─ OPC_MXU__POOL18 ─┴─ 111 ─ OPC_MXU_Q16SARV
* │
* ├─ 110111 ─ OPC_MXU_Q16SAR
* │ 23..22
* ├─ 111000 ─ OPC_MXU__POOL19 ─┬─ 00 ─ OPC_MXU_Q8MUL
- * │ └─ 01 ─ OPC_MXU_Q8MULSU
+ * │ └─ 10 ─ OPC_MXU_Q8MULSU
* │
* │ 20..18
* ├─ 111001 ─ OPC_MXU__POOL20 ─┬─ 000 ─ OPC_MXU_Q8MOVZ
*/
enum {
+ OPC_MXU_S32MADD = 0x00,
+ OPC_MXU_S32MADDU = 0x01,
OPC_MXU__POOL00 = 0x03,
+ OPC_MXU_S32MSUB = 0x04,
+ OPC_MXU_S32MSUBU = 0x05,
+ OPC_MXU__POOL01 = 0x06,
+ OPC_MXU__POOL02 = 0x07,
OPC_MXU_D16MUL = 0x08,
+ OPC_MXU__POOL03 = 0x09,
OPC_MXU_D16MAC = 0x0A,
+ OPC_MXU_D16MACF = 0x0B,
+ OPC_MXU_D16MADL = 0x0C,
+ OPC_MXU_S16MAD = 0x0D,
+ OPC_MXU_Q16ADD = 0x0E,
+ OPC_MXU_D16MACE = 0x0F,
OPC_MXU__POOL04 = 0x10,
+ OPC_MXU__POOL05 = 0x11,
+ OPC_MXU__POOL06 = 0x12,
+ OPC_MXU__POOL07 = 0x13,
+ OPC_MXU__POOL08 = 0x14,
+ OPC_MXU__POOL09 = 0x15,
+ OPC_MXU__POOL10 = 0x16,
+ OPC_MXU__POOL11 = 0x17,
+ OPC_MXU_D32ADD = 0x18,
+ OPC_MXU__POOL12 = 0x19,
+ OPC_MXU__POOL13 = 0x1B,
+ OPC_MXU__POOL14 = 0x1C,
+ OPC_MXU_Q8ACCE = 0x1D,
OPC_MXU_S8LDD = 0x22,
+ OPC_MXU_S8STD = 0x23,
+ OPC_MXU_S8LDI = 0x24,
+ OPC_MXU_S8SDI = 0x25,
+ OPC_MXU__POOL15 = 0x26,
OPC_MXU__POOL16 = 0x27,
+ OPC_MXU__POOL17 = 0x28,
+ OPC_MXU_S16LDD = 0x2A,
+ OPC_MXU_S16STD = 0x2B,
+ OPC_MXU_S16LDI = 0x2C,
+ OPC_MXU_S16SDI = 0x2D,
OPC_MXU_S32M2I = 0x2E,
OPC_MXU_S32I2M = 0x2F,
+ OPC_MXU_D32SLL = 0x30,
+ OPC_MXU_D32SLR = 0x31,
+ OPC_MXU_D32SARL = 0x32,
+ OPC_MXU_D32SAR = 0x33,
+ OPC_MXU_Q16SLL = 0x34,
+ OPC_MXU_Q16SLR = 0x35,
+ OPC_MXU__POOL18 = 0x36,
+ OPC_MXU_Q16SAR = 0x37,
OPC_MXU__POOL19 = 0x38,
+ OPC_MXU__POOL20 = 0x39,
+ OPC_MXU__POOL21 = 0x3A,
+ OPC_MXU_Q16SCOP = 0x3B,
+ OPC_MXU_Q8MADL = 0x3C,
+ OPC_MXU_S32SFL = 0x3D,
+ OPC_MXU_Q8SAD = 0x3E,
};
OPC_MXU_D16MIN = 0x03,
OPC_MXU_Q8MAX = 0x04,
OPC_MXU_Q8MIN = 0x05,
+ OPC_MXU_Q8SLT = 0x06,
+ OPC_MXU_Q8SLTU = 0x07,
};
/*
- * MXU pool 04
+ * MXU pool 01
*/
enum {
- OPC_MXU_S32LDD = 0x00,
- OPC_MXU_S32LDDR = 0x01,
+ OPC_MXU_S32SLT = 0x00,
+ OPC_MXU_D16SLT = 0x01,
+ OPC_MXU_D16AVG = 0x02,
+ OPC_MXU_D16AVGR = 0x03,
+ OPC_MXU_Q8AVG = 0x04,
+ OPC_MXU_Q8AVGR = 0x05,
+ OPC_MXU_Q8ADD = 0x07,
+};
+
+/*
+ * MXU pool 02
+ */
+enum {
+ OPC_MXU_S32CPS = 0x00,
+ OPC_MXU_D16CPS = 0x02,
+ OPC_MXU_Q8ABD = 0x04,
+ OPC_MXU_Q16SAT = 0x06,
+};
+
+/*
+ * MXU pool 03
+ */
+enum {
+ OPC_MXU_D16MULF = 0x00,
+ OPC_MXU_D16MULE = 0x01,
+};
+
+/*
+ * MXU pool 04 05 06 07 08 09 10 11
+ */
+enum {
+ OPC_MXU_S32LDST = 0x00,
+ OPC_MXU_S32LDSTR = 0x01,
+};
+
+/*
+ * MXU pool 12
+ */
+enum {
+ OPC_MXU_D32ACC = 0x00,
+ OPC_MXU_D32ACCM = 0x01,
+ OPC_MXU_D32ASUM = 0x02,
+};
+
+/*
+ * MXU pool 13
+ */
+enum {
+ OPC_MXU_Q16ACC = 0x00,
+ OPC_MXU_Q16ACCM = 0x01,
+ OPC_MXU_D16ASUM = 0x02,
+};
+
+/*
+ * MXU pool 14
+ */
+enum {
+ OPC_MXU_Q8ADDE = 0x00,
+ OPC_MXU_D8SUM = 0x01,
+ OPC_MXU_D8SUMC = 0x02,
+};
+
+/*
+ * MXU pool 15
+ */
+enum {
+ OPC_MXU_S32MUL = 0x00,
+ OPC_MXU_S32MULU = 0x01,
+ OPC_MXU_S32EXTR = 0x02,
+ OPC_MXU_S32EXTRV = 0x03,
};
/*
* MXU pool 16
*/
enum {
+ OPC_MXU_D32SARW = 0x00,
+ OPC_MXU_S32ALN = 0x01,
OPC_MXU_S32ALNI = 0x02,
+ OPC_MXU_S32LUI = 0x03,
OPC_MXU_S32NOR = 0x04,
OPC_MXU_S32AND = 0x05,
OPC_MXU_S32OR = 0x06,
OPC_MXU_S32XOR = 0x07,
};
+/*
+ * MXU pool 17
+ */
+enum {
+ OPC_MXU_LXB = 0x00,
+ OPC_MXU_LXH = 0x01,
+ OPC_MXU_LXW = 0x03,
+ OPC_MXU_LXBU = 0x04,
+ OPC_MXU_LXHU = 0x05,
+};
+
+/*
+ * MXU pool 18
+ */
+enum {
+ OPC_MXU_D32SLLV = 0x00,
+ OPC_MXU_D32SLRV = 0x01,
+ OPC_MXU_D32SARV = 0x03,
+ OPC_MXU_Q16SLLV = 0x04,
+ OPC_MXU_Q16SLRV = 0x05,
+ OPC_MXU_Q16SARV = 0x07,
+};
+
/*
* MXU pool 19
*/
enum {
OPC_MXU_Q8MUL = 0x00,
- OPC_MXU_Q8MULSU = 0x01,
+ OPC_MXU_Q8MULSU = 0x02,
+};
+
+/*
+ * MXU pool 20
+ */
+enum {
+ OPC_MXU_Q8MOVZ = 0x00,
+ OPC_MXU_Q8MOVN = 0x01,
+ OPC_MXU_D16MOVZ = 0x02,
+ OPC_MXU_D16MOVN = 0x03,
+ OPC_MXU_S32MOVZ = 0x04,
+ OPC_MXU_S32MOVN = 0x05,
+};
+
+/*
+ * MXU pool 21
+ */
+enum {
+ OPC_MXU_Q8MAC = 0x00,
+ OPC_MXU_Q8MACSU = 0x02,
};
+
/* MXU accumulate add/subtract 1-bit pattern 'aptn1' */
#define MXU_APTN1_A 0
#define MXU_APTN1_S 1
/*
* S8LDD XRa, Rb, s8, optn3 - Load a byte from memory to XRF
+ *
+ * S8LDI XRa, Rb, s8, optn3 - Load a byte from memory to XRF,
+ * post modify address register
*/
-static void gen_mxu_s8ldd(DisasContext *ctx)
+static void gen_mxu_s8ldd(DisasContext *ctx, bool postmodify)
{
TCGv t0, t1;
uint32_t XRa, Rb, s8, optn3;
gen_load_gpr(t0, Rb);
tcg_gen_addi_tl(t0, t0, (int8_t)s8);
+ if (postmodify) {
+ gen_store_gpr(t0, Rb);
+ }
switch (optn3) {
/* XRa[7:0] = tmp8 */
}
/*
- * D16MUL XRa, XRb, XRc, XRd, optn2 - Signed 16 bit pattern multiplication
+ * S8STD XRa, Rb, s8, optn3 - Store a byte from XRF to memory
+ *
+ * S8SDI XRa, Rb, s8, optn3 - Store a byte from XRF to memory,
+ * post modify address register
+ */
+static void gen_mxu_s8std(DisasContext *ctx, bool postmodify)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb, s8, optn3;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s8 = extract32(ctx->opcode, 10, 8);
+ optn3 = extract32(ctx->opcode, 18, 3);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ if (optn3 > 3) {
+ /* reserved, do nothing */
+ return;
+ }
+
+ gen_load_gpr(t0, Rb);
+ tcg_gen_addi_tl(t0, t0, (int8_t)s8);
+ if (postmodify) {
+ gen_store_gpr(t0, Rb);
+ }
+ gen_load_mxu_gpr(t1, XRa);
+
+ switch (optn3) {
+ /* XRa[7:0] => tmp8 */
+ case MXU_OPTN3_PTN0:
+ tcg_gen_extract_tl(t1, t1, 0, 8);
+ break;
+ /* XRa[15:8] => tmp8 */
+ case MXU_OPTN3_PTN1:
+ tcg_gen_extract_tl(t1, t1, 8, 8);
+ break;
+ /* XRa[23:16] => tmp8 */
+ case MXU_OPTN3_PTN2:
+ tcg_gen_extract_tl(t1, t1, 16, 8);
+ break;
+ /* XRa[31:24] => tmp8 */
+ case MXU_OPTN3_PTN3:
+ tcg_gen_extract_tl(t1, t1, 24, 8);
+ break;
+ }
+
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_UB);
+}
+
+/*
+ * S16LDD XRa, Rb, s10, optn2 - Load a halfword from memory to XRF
+ *
+ * S16LDI XRa, Rb, s10, optn2 - Load a halfword from memory to XRF,
+ * post modify address register
+ */
+static void gen_mxu_s16ldd(DisasContext *ctx, bool postmodify)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb, optn2;
+ int32_t s10;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s10 = sextract32(ctx->opcode, 10, 9) * 2;
+ optn2 = extract32(ctx->opcode, 19, 2);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+ tcg_gen_addi_tl(t0, t0, s10);
+ if (postmodify) {
+ gen_store_gpr(t0, Rb);
+ }
+
+ switch (optn2) {
+ /* XRa[15:0] = tmp16 */
+ case MXU_OPTN2_PTN0:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UW);
+ gen_load_mxu_gpr(t0, XRa);
+ tcg_gen_deposit_tl(t0, t0, t1, 0, 16);
+ break;
+ /* XRa[31:16] = tmp16 */
+ case MXU_OPTN2_PTN1:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UW);
+ gen_load_mxu_gpr(t0, XRa);
+ tcg_gen_deposit_tl(t0, t0, t1, 16, 16);
+ break;
+ /* XRa = sign_extend(tmp16) */
+ case MXU_OPTN2_PTN2:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_SW);
+ break;
+ /* XRa = {tmp16, tmp16} */
+ case MXU_OPTN2_PTN3:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UW);
+ tcg_gen_deposit_tl(t0, t1, t1, 16, 16);
+ break;
+ }
+
+ gen_store_mxu_gpr(t0, XRa);
+}
+
+/*
+ * S16STD XRa, Rb, s10, optn2 - Store a halfword from XRF to memory
+ *
+ * S16SDI XRa, Rb, s10, optn2 - Store a halfword from XRF to memory,
+ * post modify address register
+ */
+static void gen_mxu_s16std(DisasContext *ctx, bool postmodify)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb, optn2;
+ int32_t s10;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s10 = sextract32(ctx->opcode, 10, 9) * 2;
+ optn2 = extract32(ctx->opcode, 19, 2);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ if (optn2 > 1) {
+ /* reserved, do nothing */
+ return;
+ }
+
+ gen_load_gpr(t0, Rb);
+ tcg_gen_addi_tl(t0, t0, s10);
+ if (postmodify) {
+ gen_store_gpr(t0, Rb);
+ }
+ gen_load_mxu_gpr(t1, XRa);
+
+ switch (optn2) {
+ /* XRa[15:0] => tmp16 */
+ case MXU_OPTN2_PTN0:
+ tcg_gen_extract_tl(t1, t1, 0, 16);
+ break;
+ /* XRa[31:16] => tmp16 */
+ case MXU_OPTN2_PTN1:
+ tcg_gen_extract_tl(t1, t1, 16, 16);
+ break;
+ }
+
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_UW);
+}
+
+/*
+ * S32MUL XRa, XRd, rs, rt - Signed 32x32=>64 bit multiplication
+ * of GPRs, storing the result into a pair of MXU registers.
+ * It clobbers the HI and LO registers.
+ *
+ * S32MULU XRa, XRd, rs, rt - Unsigned 32x32=>64 bit multiplication
+ * of GPRs, storing the result into a pair of MXU registers.
+ * It clobbers the HI and LO registers.
+ */
+static void gen_mxu_s32mul(DisasContext *ctx, bool mulu)
+{
+ TCGv t0, t1;
+ uint32_t XRa, XRd, rs, rt;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRd = extract32(ctx->opcode, 10, 4);
+ rs = extract32(ctx->opcode, 16, 5);
+ rt = extract32(ctx->opcode, 21, 5);
+
+ if (unlikely(rs == 0 || rt == 0)) {
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_movi_tl(t1, 0);
+ } else {
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if (mulu) {
+ tcg_gen_mulu2_tl(t0, t1, t0, t1);
+ } else {
+ tcg_gen_muls2_tl(t0, t1, t0, t1);
+ }
+ }
+ tcg_gen_mov_tl(cpu_HI[0], t1);
+ tcg_gen_mov_tl(cpu_LO[0], t0);
+ gen_store_mxu_gpr(t1, XRa);
+ gen_store_mxu_gpr(t0, XRd);
+}
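+
+/*
+ * Worked example (illustrative values): S32MUL XRa, XRd, rs, rt with
+ * rs = 3 and rt = -2 computes the 64-bit product -6, so XRa and HI
+ * receive 0xffffffff (high word) while XRd and LO receive 0xfffffffa
+ * (low word).
+ */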
+
+/*
+ * D16MUL XRa, XRb, XRc, XRd, optn2 - Signed 16 bit pattern multiplication
+ * D16MULF XRa, XRb, XRc, optn2 - Signed Q15 fraction pattern multiplication
+ * with rounding and packing result
+ * D16MULE XRa, XRb, XRc, XRd, optn2 - Signed Q15 fraction pattern
+ * multiplication with rounding
*/
-static void gen_mxu_d16mul(DisasContext *ctx)
+static void gen_mxu_d16mul(DisasContext *ctx, bool fractional,
+ bool packed_result)
{
TCGv t0, t1, t2, t3;
uint32_t XRa, XRb, XRc, XRd, optn2;
XRd = extract32(ctx->opcode, 18, 4);
optn2 = extract32(ctx->opcode, 22, 2);
+ /*
+ * TODO: the XRd field isn't used for D16MULF.
+ * It is unknown how this field affects
+ * instruction decoding/behavior.
+ */
+
gen_load_mxu_gpr(t1, XRb);
tcg_gen_sextract_tl(t0, t1, 0, 16);
tcg_gen_sextract_tl(t1, t1, 16, 16);
tcg_gen_mul_tl(t2, t1, t2);
break;
}
- gen_store_mxu_gpr(t3, XRa);
- gen_store_mxu_gpr(t2, XRd);
+ if (fractional) {
+ TCGLabel *l_done = gen_new_label();
+ TCGv rounding = tcg_temp_new();
+
+ tcg_gen_shli_tl(t3, t3, 1);
+ tcg_gen_shli_tl(t2, t2, 1);
+ tcg_gen_andi_tl(rounding, mxu_CR, 0x2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, rounding, 0, l_done);
+ if (packed_result) {
+ TCGLabel *l_apply_bias_l = gen_new_label();
+ TCGLabel *l_apply_bias_r = gen_new_label();
+ TCGLabel *l_half_done = gen_new_label();
+ TCGv bias = tcg_temp_new();
+
+ /*
+ * D16MULF supports unbiased rounding, a.k.a. "banker's rounding",
+ * "round to even", "convergent rounding"
+ */
+ tcg_gen_andi_tl(bias, mxu_CR, 0x4);
+ tcg_gen_brcondi_tl(TCG_COND_NE, bias, 0, l_apply_bias_l);
+ tcg_gen_andi_tl(t0, t3, 0x1ffff);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0x8000, l_half_done);
+ gen_set_label(l_apply_bias_l);
+ tcg_gen_addi_tl(t3, t3, 0x8000);
+ gen_set_label(l_half_done);
+ tcg_gen_brcondi_tl(TCG_COND_NE, bias, 0, l_apply_bias_r);
+ tcg_gen_andi_tl(t0, t2, 0x1ffff);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0x8000, l_done);
+ gen_set_label(l_apply_bias_r);
+ tcg_gen_addi_tl(t2, t2, 0x8000);
+ } else {
+ /* D16MULE doesn't support unbiased rounding */
+ tcg_gen_addi_tl(t3, t3, 0x8000);
+ tcg_gen_addi_tl(t2, t2, 0x8000);
+ }
+ gen_set_label(l_done);
+ }
+ if (!packed_result) {
+ gen_store_mxu_gpr(t3, XRa);
+ gen_store_mxu_gpr(t2, XRd);
+ } else {
+ tcg_gen_andi_tl(t3, t3, 0xffff0000);
+ tcg_gen_shri_tl(t2, t2, 16);
+ tcg_gen_or_tl(t3, t3, t2);
+ gen_store_mxu_gpr(t3, XRa);
+ }
}
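
The Q15 handling above is easier to follow as plain arithmetic: each 16x16
product is doubled to Q31 and then optionally rounded. A minimal sketch,
assuming CR bit 1 enables rounding and CR bit 2 selects biased rounding
(q15_mul_round is a hypothetical helper):

#include <stdint.h>

static int32_t q15_mul_round(int16_t a, int16_t b, int round, int biased)
{
    int32_t p = (int32_t)a * (int32_t)b;    /* Q30 product */

    p <<= 1;                                /* Q31, mirrors the shli by 1 */
    if (round) {
        /* unbiased mode skips the increment on the exact-tie pattern */
        if (biased || (p & 0x1ffff) != 0x8000) {
            p += 0x8000;
        }
    }
    return p;    /* D16MULF then packs bits 31..16 of each product */
}
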
/*
- * D16MAC XRa, XRb, XRc, XRd, aptn2, optn2 - Signed 16 bit pattern multiply
- * and accumulate
+ * D16MAC XRa, XRb, XRc, XRd, aptn2, optn2
+ * Signed 16 bit pattern multiply and accumulate
+ * D16MACF XRa, XRb, XRc, aptn2, optn2
+ * Signed Q15 fraction pattern multiply accumulate and pack
+ * D16MACE XRa, XRb, XRc, XRd, aptn2, optn2
+ * Signed Q15 fraction pattern multiply and accumulate
*/
-static void gen_mxu_d16mac(DisasContext *ctx)
+static void gen_mxu_d16mac(DisasContext *ctx, bool fractional,
+ bool packed_result)
{
TCGv t0, t1, t2, t3;
uint32_t XRa, XRb, XRc, XRd, optn2, aptn2;
tcg_gen_mul_tl(t2, t1, t2);
break;
}
+
+ if (fractional) {
+ tcg_gen_shli_tl(t3, t3, 1);
+ tcg_gen_shli_tl(t2, t2, 1);
+ }
gen_load_mxu_gpr(t0, XRa);
gen_load_mxu_gpr(t1, XRd);
tcg_gen_sub_tl(t2, t1, t2);
break;
}
- gen_store_mxu_gpr(t3, XRa);
- gen_store_mxu_gpr(t2, XRd);
+
+ if (fractional) {
+ TCGLabel *l_done = gen_new_label();
+ TCGv rounding = tcg_temp_new();
+
+ tcg_gen_andi_tl(rounding, mxu_CR, 0x2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, rounding, 0, l_done);
+ if (packed_result) {
+ TCGLabel *l_apply_bias_l = gen_new_label();
+ TCGLabel *l_apply_bias_r = gen_new_label();
+ TCGLabel *l_half_done = gen_new_label();
+ TCGv bias = tcg_temp_new();
+
+ /*
+ * D16MACF supports unbiased rounding, a.k.a. "banker's rounding",
+ * "round to even", "convergent rounding"
+ */
+ tcg_gen_andi_tl(bias, mxu_CR, 0x4);
+ tcg_gen_brcondi_tl(TCG_COND_NE, bias, 0, l_apply_bias_l);
+ tcg_gen_andi_tl(t0, t3, 0x1ffff);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0x8000, l_half_done);
+ gen_set_label(l_apply_bias_l);
+ tcg_gen_addi_tl(t3, t3, 0x8000);
+ gen_set_label(l_half_done);
+ tcg_gen_brcondi_tl(TCG_COND_NE, bias, 0, l_apply_bias_r);
+ tcg_gen_andi_tl(t0, t2, 0x1ffff);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0x8000, l_done);
+ gen_set_label(l_apply_bias_r);
+ tcg_gen_addi_tl(t2, t2, 0x8000);
+ } else {
+ /* D16MACE doesn't support unbiased rounding */
+ tcg_gen_addi_tl(t3, t3, 0x8000);
+ tcg_gen_addi_tl(t2, t2, 0x8000);
+ }
+ gen_set_label(l_done);
+ }
+
+ if (!packed_result) {
+ gen_store_mxu_gpr(t3, XRa);
+ gen_store_mxu_gpr(t2, XRd);
+ } else {
+ tcg_gen_andi_tl(t3, t3, 0xffff0000);
+ tcg_gen_shri_tl(t2, t2, 16);
+ tcg_gen_or_tl(t3, t3, t2);
+ gen_store_mxu_gpr(t3, XRa);
+ }
}
/*
- * Q8MUL XRa, XRb, XRc, XRd - Parallel unsigned 8 bit pattern multiply
- * Q8MULSU XRa, XRb, XRc, XRd - Parallel signed 8 bit pattern multiply
+ * D16MADL XRa, XRb, XRc, XRd, aptn2, optn2 - Double packed
+ * unsigned 16 bit pattern multiply and add/subtract.
*/
-static void gen_mxu_q8mul_q8mulsu(DisasContext *ctx)
+static void gen_mxu_d16madl(DisasContext *ctx)
{
- TCGv t0, t1, t2, t3, t4, t5, t6, t7;
- uint32_t XRa, XRb, XRc, XRd, sel;
+ TCGv t0, t1, t2, t3;
+ uint32_t XRa, XRb, XRc, XRd, optn2, aptn2;
t0 = tcg_temp_new();
t1 = tcg_temp_new();
t2 = tcg_temp_new();
t3 = tcg_temp_new();
- t4 = tcg_temp_new();
- t5 = tcg_temp_new();
- t6 = tcg_temp_new();
- t7 = tcg_temp_new();
XRa = extract32(ctx->opcode, 6, 4);
XRb = extract32(ctx->opcode, 10, 4);
XRc = extract32(ctx->opcode, 14, 4);
XRd = extract32(ctx->opcode, 18, 4);
- sel = extract32(ctx->opcode, 22, 2);
-
- gen_load_mxu_gpr(t3, XRb);
- gen_load_mxu_gpr(t7, XRc);
+ optn2 = extract32(ctx->opcode, 22, 2);
+ aptn2 = extract32(ctx->opcode, 24, 2);
- if (sel == 0x2) {
- /* Q8MULSU */
- tcg_gen_ext8s_tl(t0, t3);
- tcg_gen_shri_tl(t3, t3, 8);
- tcg_gen_ext8s_tl(t1, t3);
- tcg_gen_shri_tl(t3, t3, 8);
- tcg_gen_ext8s_tl(t2, t3);
- tcg_gen_shri_tl(t3, t3, 8);
- tcg_gen_ext8s_tl(t3, t3);
- } else {
- /* Q8MUL */
- tcg_gen_ext8u_tl(t0, t3);
- tcg_gen_shri_tl(t3, t3, 8);
- tcg_gen_ext8u_tl(t1, t3);
- tcg_gen_shri_tl(t3, t3, 8);
- tcg_gen_ext8u_tl(t2, t3);
- tcg_gen_shri_tl(t3, t3, 8);
- tcg_gen_ext8u_tl(t3, t3);
- }
-
- tcg_gen_ext8u_tl(t4, t7);
- tcg_gen_shri_tl(t7, t7, 8);
- tcg_gen_ext8u_tl(t5, t7);
- tcg_gen_shri_tl(t7, t7, 8);
- tcg_gen_ext8u_tl(t6, t7);
- tcg_gen_shri_tl(t7, t7, 8);
- tcg_gen_ext8u_tl(t7, t7);
+ gen_load_mxu_gpr(t1, XRb);
+ tcg_gen_sextract_tl(t0, t1, 0, 16);
+ tcg_gen_sextract_tl(t1, t1, 16, 16);
- tcg_gen_mul_tl(t0, t0, t4);
- tcg_gen_mul_tl(t1, t1, t5);
- tcg_gen_mul_tl(t2, t2, t6);
- tcg_gen_mul_tl(t3, t3, t7);
+ gen_load_mxu_gpr(t3, XRc);
+ tcg_gen_sextract_tl(t2, t3, 0, 16);
+ tcg_gen_sextract_tl(t3, t3, 16, 16);
- tcg_gen_andi_tl(t0, t0, 0xFFFF);
- tcg_gen_andi_tl(t1, t1, 0xFFFF);
- tcg_gen_andi_tl(t2, t2, 0xFFFF);
- tcg_gen_andi_tl(t3, t3, 0xFFFF);
+ switch (optn2) {
+ case MXU_OPTN2_WW: /* XRB.H*XRC.H == lop, XRB.L*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t1, t3);
+ tcg_gen_mul_tl(t2, t0, t2);
+ break;
+ case MXU_OPTN2_LW: /* XRB.L*XRC.H == lop, XRB.L*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t0, t3);
+ tcg_gen_mul_tl(t2, t0, t2);
+ break;
+ case MXU_OPTN2_HW: /* XRB.H*XRC.H == lop, XRB.H*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t1, t3);
+ tcg_gen_mul_tl(t2, t1, t2);
+ break;
+ case MXU_OPTN2_XW: /* XRB.L*XRC.H == lop, XRB.H*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t0, t3);
+ tcg_gen_mul_tl(t2, t1, t2);
+ break;
+ }
+ tcg_gen_extract_tl(t2, t2, 0, 16);
+ tcg_gen_extract_tl(t3, t3, 0, 16);
- tcg_gen_shli_tl(t1, t1, 16);
- tcg_gen_shli_tl(t3, t3, 16);
+ gen_load_mxu_gpr(t1, XRa);
+ tcg_gen_extract_tl(t0, t1, 0, 16);
+ tcg_gen_extract_tl(t1, t1, 16, 16);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_gen_or_tl(t1, t2, t3);
+ switch (aptn2) {
+ case MXU_APTN2_AA:
+ tcg_gen_add_tl(t3, t1, t3);
+ tcg_gen_add_tl(t2, t0, t2);
+ break;
+ case MXU_APTN2_AS:
+ tcg_gen_add_tl(t3, t1, t3);
+ tcg_gen_sub_tl(t2, t0, t2);
+ break;
+ case MXU_APTN2_SA:
+ tcg_gen_sub_tl(t3, t1, t3);
+ tcg_gen_add_tl(t2, t0, t2);
+ break;
+ case MXU_APTN2_SS:
+ tcg_gen_sub_tl(t3, t1, t3);
+ tcg_gen_sub_tl(t2, t0, t2);
+ break;
+ }
- gen_store_mxu_gpr(t0, XRd);
- gen_store_mxu_gpr(t1, XRa);
+ tcg_gen_andi_tl(t2, t2, 0xffff);
+ tcg_gen_shli_tl(t3, t3, 16);
+ tcg_gen_or_tl(t3, t3, t2);
+ gen_store_mxu_gpr(t3, XRd);
}
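
The optn2/aptn2 plumbing above reduces to the following per-lane model
(a sketch with invented names, not the patch's code):

#include <stdint.h>

static uint32_t d16madl_ref(uint32_t xra, uint32_t xrb, uint32_t xrc,
                            unsigned optn2, unsigned aptn2)
{
    int32_t bl = (int16_t)xrb, bh = (int16_t)(xrb >> 16);
    int32_t cl = (int16_t)xrc, ch = (int16_t)(xrc >> 16);
    uint32_t lop, rop, ah, al;

    switch (optn2 & 3) {
    case 0:  lop = bh * ch; rop = bl * cl; break;   /* WW */
    case 1:  lop = bl * ch; rop = bl * cl; break;   /* LW */
    case 2:  lop = bh * ch; rop = bh * cl; break;   /* HW */
    default: lop = bl * ch; rop = bh * cl; break;   /* XW */
    }
    lop &= 0xffff;    /* only the low 16 bits of each product survive */
    rop &= 0xffff;

    ah = xra >> 16;
    al = xra & 0xffff;
    ah = (aptn2 & 2) ? ah - lop : ah + lop;
    al = (aptn2 & 1) ? al - rop : al + rop;
    return (ah << 16) | (al & 0xffff);
}
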
/*
- * S32LDD XRa, Rb, S12 - Load a word from memory to XRF
- * S32LDDR XRa, Rb, S12 - Load a word from memory to XRF, reversed byte seq.
+ * S16MAD XRa, XRb, XRc, XRd, aptn1, optn2 - Single packed
+ * signed 16 bit pattern multiply and 32-bit add/subtract.
*/
-static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx)
+static void gen_mxu_s16mad(DisasContext *ctx)
{
TCGv t0, t1;
- uint32_t XRa, Rb, s12, sel;
+ uint32_t XRa, XRb, XRc, XRd, optn2, aptn1, pad;
t0 = tcg_temp_new();
t1 = tcg_temp_new();
XRa = extract32(ctx->opcode, 6, 4);
- s12 = extract32(ctx->opcode, 10, 10);
- sel = extract32(ctx->opcode, 20, 1);
- Rb = extract32(ctx->opcode, 21, 5);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ optn2 = extract32(ctx->opcode, 22, 2);
+ aptn1 = extract32(ctx->opcode, 24, 1);
+ pad = extract32(ctx->opcode, 25, 1);
- gen_load_gpr(t0, Rb);
+ if (pad) {
+ /* FIXME: check whether this bit influences the result */
+ }
+
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+
+ switch (optn2) {
+ case MXU_OPTN2_WW: /* XRB.H*XRC.H */
+ tcg_gen_sextract_tl(t0, t0, 16, 16);
+ tcg_gen_sextract_tl(t1, t1, 16, 16);
+ break;
+ case MXU_OPTN2_LW: /* XRB.L*XRC.L */
+ tcg_gen_sextract_tl(t0, t0, 0, 16);
+ tcg_gen_sextract_tl(t1, t1, 0, 16);
+ break;
+ case MXU_OPTN2_HW: /* XRB.H*XRC.L */
+ tcg_gen_sextract_tl(t0, t0, 16, 16);
+ tcg_gen_sextract_tl(t1, t1, 0, 16);
+ break;
+ case MXU_OPTN2_XW: /* XRB.L*XRC.H */
+ tcg_gen_sextract_tl(t0, t0, 0, 16);
+ tcg_gen_sextract_tl(t1, t1, 16, 16);
+ break;
+ }
+ tcg_gen_mul_tl(t0, t0, t1);
+
+ gen_load_mxu_gpr(t1, XRa);
- tcg_gen_movi_tl(t1, s12);
- tcg_gen_shli_tl(t1, t1, 2);
- if (s12 & 0x200) {
- tcg_gen_ori_tl(t1, t1, 0xFFFFF000);
+ switch (aptn1) {
+ case MXU_APTN1_A:
+ tcg_gen_add_tl(t1, t1, t0);
+ break;
+ case MXU_APTN1_S:
+ tcg_gen_sub_tl(t1, t1, t0);
+ break;
}
- tcg_gen_add_tl(t1, t0, t1);
- tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, (MO_TESL ^ (sel * MO_BSWAP)) |
- ctx->default_tcg_memop_mask);
- gen_store_mxu_gpr(t1, XRa);
+ gen_store_mxu_gpr(t1, XRd);
+}
+
+/*
+ * Q8MUL XRa, XRb, XRc, XRd - Parallel quad unsigned 8 bit multiply
+ * Q8MULSU XRa, XRb, XRc, XRd - Parallel quad signed x unsigned 8 bit multiply
+ * Q8MAC XRa, XRb, XRc, XRd - Parallel quad unsigned 8 bit multiply
+ * and accumulate
+ * Q8MACSU XRa, XRb, XRc, XRd - Parallel quad signed x unsigned 8 bit multiply
+ * and accumulate
+ */
+static void gen_mxu_q8mul_mac(DisasContext *ctx, bool su, bool mac)
+{
+ TCGv t0, t1, t2, t3, t4, t5, t6, t7;
+ uint32_t XRa, XRb, XRc, XRd, aptn2;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ t3 = tcg_temp_new();
+ t4 = tcg_temp_new();
+ t5 = tcg_temp_new();
+ t6 = tcg_temp_new();
+ t7 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ aptn2 = extract32(ctx->opcode, 24, 2);
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t7, XRc);
+
+ if (su) {
+ /* Q8MULSU / Q8MACSU */
+ tcg_gen_sextract_tl(t0, t3, 0, 8);
+ tcg_gen_sextract_tl(t1, t3, 8, 8);
+ tcg_gen_sextract_tl(t2, t3, 16, 8);
+ tcg_gen_sextract_tl(t3, t3, 24, 8);
+ } else {
+ /* Q8MUL / Q8MAC */
+ tcg_gen_extract_tl(t0, t3, 0, 8);
+ tcg_gen_extract_tl(t1, t3, 8, 8);
+ tcg_gen_extract_tl(t2, t3, 16, 8);
+ tcg_gen_extract_tl(t3, t3, 24, 8);
+ }
+
+ tcg_gen_extract_tl(t4, t7, 0, 8);
+ tcg_gen_extract_tl(t5, t7, 8, 8);
+ tcg_gen_extract_tl(t6, t7, 16, 8);
+ tcg_gen_extract_tl(t7, t7, 24, 8);
+
+ tcg_gen_mul_tl(t0, t0, t4);
+ tcg_gen_mul_tl(t1, t1, t5);
+ tcg_gen_mul_tl(t2, t2, t6);
+ tcg_gen_mul_tl(t3, t3, t7);
+
+ if (mac) {
+ gen_load_mxu_gpr(t4, XRd);
+ gen_load_mxu_gpr(t5, XRa);
+ tcg_gen_extract_tl(t6, t4, 0, 16);
+ tcg_gen_extract_tl(t7, t4, 16, 16);
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(t0, t6, t0);
+ tcg_gen_sub_tl(t1, t7, t1);
+ } else {
+ tcg_gen_add_tl(t0, t6, t0);
+ tcg_gen_add_tl(t1, t7, t1);
+ }
+ tcg_gen_extract_tl(t6, t5, 0, 16);
+ tcg_gen_extract_tl(t7, t5, 16, 16);
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(t2, t6, t2);
+ tcg_gen_sub_tl(t3, t7, t3);
+ } else {
+ tcg_gen_add_tl(t2, t6, t2);
+ tcg_gen_add_tl(t3, t7, t3);
+ }
+ }
+
+ tcg_gen_deposit_tl(t0, t0, t1, 16, 16);
+ tcg_gen_deposit_tl(t1, t2, t3, 16, 16);
+
+ gen_store_mxu_gpr(t0, XRd);
+ gen_store_mxu_gpr(t1, XRa);
+}
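
A per-byte reference model of the four variants (illustrative only; the
products and, for the MAC forms, the accumulator lanes are truncated to
16 bits per lane, as the deposits above do):

#include <stdint.h>

static void q8mul_ref(uint32_t xrb, uint32_t xrc, int su, int mac,
                      unsigned aptn2, uint32_t *xra, uint32_t *xrd)
{
    uint16_t lane[4];
    uint16_t acc[4] = { *xrd, *xrd >> 16, *xra, *xra >> 16 };

    for (int i = 0; i < 4; i++) {
        int32_t b = su ? (int8_t)(xrb >> (8 * i)) : (uint8_t)(xrb >> (8 * i));
        uint8_t c = xrc >> (8 * i);

        lane[i] = (uint16_t)(b * c);
        if (mac) {
            int sub = (i < 2) ? (aptn2 & 1) : (aptn2 & 2);
            lane[i] = sub ? acc[i] - lane[i] : acc[i] + lane[i];
        }
    }
    *xrd = lane[0] | ((uint32_t)lane[1] << 16);
    *xra = lane[2] | ((uint32_t)lane[3] << 16);
}
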
+
+/*
+ * Q8MADL XRd, XRa, XRb, XRc
+ * Parallel quad unsigned 8 bit multiply and accumulate.
+ * e.g. XRd[0..3] = XRa[0..3] + XRb[0..3] * XRc[0..3]
+ */
+static void gen_mxu_q8madl(DisasContext *ctx)
+{
+ TCGv t0, t1, t2, t3, t4, t5, t6, t7;
+ uint32_t XRa, XRb, XRc, XRd, aptn2;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ t3 = tcg_temp_new();
+ t4 = tcg_temp_new();
+ t5 = tcg_temp_new();
+ t6 = tcg_temp_new();
+ t7 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ aptn2 = extract32(ctx->opcode, 24, 2);
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t7, XRc);
+
+ tcg_gen_extract_tl(t0, t3, 0, 8);
+ tcg_gen_extract_tl(t1, t3, 8, 8);
+ tcg_gen_extract_tl(t2, t3, 16, 8);
+ tcg_gen_extract_tl(t3, t3, 24, 8);
+
+ tcg_gen_extract_tl(t4, t7, 0, 8);
+ tcg_gen_extract_tl(t5, t7, 8, 8);
+ tcg_gen_extract_tl(t6, t7, 16, 8);
+ tcg_gen_extract_tl(t7, t7, 24, 8);
+
+ tcg_gen_mul_tl(t0, t0, t4);
+ tcg_gen_mul_tl(t1, t1, t5);
+ tcg_gen_mul_tl(t2, t2, t6);
+ tcg_gen_mul_tl(t3, t3, t7);
+
+ gen_load_mxu_gpr(t4, XRa);
+ tcg_gen_extract_tl(t6, t4, 0, 8);
+ tcg_gen_extract_tl(t7, t4, 8, 8);
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(t0, t6, t0);
+ tcg_gen_sub_tl(t1, t7, t1);
+ } else {
+ tcg_gen_add_tl(t0, t6, t0);
+ tcg_gen_add_tl(t1, t7, t1);
+ }
+ tcg_gen_extract_tl(t6, t4, 16, 8);
+ tcg_gen_extract_tl(t7, t4, 24, 8);
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(t2, t6, t2);
+ tcg_gen_sub_tl(t3, t7, t3);
+ } else {
+ tcg_gen_add_tl(t2, t6, t2);
+ tcg_gen_add_tl(t3, t7, t3);
+ }
+
+ tcg_gen_andi_tl(t5, t0, 0xff);
+ tcg_gen_deposit_tl(t5, t5, t1, 8, 8);
+ tcg_gen_deposit_tl(t5, t5, t2, 16, 8);
+ tcg_gen_deposit_tl(t5, t5, t3, 24, 8);
+
+ gen_store_mxu_gpr(t5, XRd);
+}
+
+/*
+ * S32LDD XRa, Rb, S12 - Load a word from memory to XRF
+ * S32LDDR XRa, Rb, S12 - Load a word from memory to XRF
+ * in reversed byte seq.
+ * S32LDI XRa, Rb, S12 - Load a word from memory to XRF,
+ * post modify base address GPR.
+ * S32LDIR XRa, Rb, S12 - Load a word from memory to XRF,
+ * post modify base address GPR and load in reversed byte seq.
+ */
+static void gen_mxu_s32ldxx(DisasContext *ctx, bool reversed, bool postinc)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb;
+ int32_t s12;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s12 = sextract32(ctx->opcode, 10, 10);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+ tcg_gen_movi_tl(t1, s12 * 4);
+ tcg_gen_add_tl(t0, t0, t1);
+
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
+ (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ ctx->default_tcg_memop_mask);
+ gen_store_mxu_gpr(t1, XRa);
+
+ if (postinc) {
+ gen_store_gpr(t0, Rb);
+ }
+}
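
Address generation and byte order for these four forms, as a host-side
sketch (assumes the word is read in guest order; names are invented):

#include <stdint.h>
#include <string.h>

static uint32_t s32ldd_ref(const uint8_t *mem, uint32_t *rb, int32_t s12,
                           int reversed, int postinc)
{
    uint32_t ea = *rb + (uint32_t)(s12 * 4);   /* offset scaled by 4 */
    uint32_t v;

    memcpy(&v, mem + ea, sizeof(v));
    if (reversed) {                            /* S32LDDR/S32LDIR */
        v = (v >> 24) | ((v >> 8) & 0xff00) |
            ((v & 0xff00) << 8) | (v << 24);
    }
    if (postinc) {                             /* S32LDI/S32LDIR */
        *rb = ea;
    }
    return v;
}
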
+
+/*
+ * S32STD XRa, Rb, S12 - Store a word from XRF to memory
+ * S32STDR XRa, Rb, S12 - Store a word from XRF to memory
+ * in reversed byte seq.
+ * S32SDI XRa, Rb, S12 - Store a word from XRF to memory,
+ * post modify base address GPR.
+ * S32SDIR XRa, Rb, S12 - Store a word from XRF to memory,
+ * post modify base address GPR and store in reversed byte seq.
+ */
+static void gen_mxu_s32stxx(DisasContext *ctx, bool reversed, bool postinc)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb;
+ int32_t s12;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s12 = sextract32(ctx->opcode, 10, 10);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+ tcg_gen_movi_tl(t1, s12 * 4);
+ tcg_gen_add_tl(t0, t0, t1);
+
+ gen_load_mxu_gpr(t1, XRa);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
+ (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ ctx->default_tcg_memop_mask);
+
+ if (postinc) {
+ gen_store_gpr(t0, Rb);
+ }
+}
+
+/*
+ * S32LDDV XRa, Rb, Rc, STRD2 - Load a word from memory to XRF
+ * S32LDDVR XRa, Rb, Rc, STRD2 - Load a word from memory to XRF
+ * in reversed byte seq.
+ * S32LDIV XRa, Rb, Rc, STRD2 - Load a word from memory to XRF,
+ * post modify base address GPR.
+ * S32LDIVR XRa, Rb, Rc, STRD2 - Load a word from memory to XRF,
+ * post modify base address GPR and load in reversed byte seq.
+ */
+static void gen_mxu_s32ldxvx(DisasContext *ctx, bool reversed,
+ bool postinc, uint32_t strd2)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb, Rc;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ Rc = extract32(ctx->opcode, 16, 5);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+ gen_load_gpr(t1, Rc);
+ tcg_gen_shli_tl(t1, t1, strd2);
+ tcg_gen_add_tl(t0, t0, t1);
+
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
+ (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ ctx->default_tcg_memop_mask);
+ gen_store_mxu_gpr(t1, XRa);
+
+ if (postinc) {
+ gen_store_gpr(t0, Rb);
+ }
+}
+
+/*
+ * LXW Ra, Rb, Rc, STRD2 - Load a word from memory to GPR
+ * LXB Ra, Rb, Rc, STRD2 - Load a byte from memory to GPR,
+ * sign extending to GPR size.
+ * LXH Ra, Rb, Rc, STRD2 - Load a halfword from memory to GPR,
+ * sign extending to GPR size.
+ * LXBU Ra, Rb, Rc, STRD2 - Load a byte from memory to GPR,
+ * zero extending to GPR size.
+ * LXHU Ra, Rb, Rc, STRD2 - Load a halfword from memory to GPR,
+ * zero extending to GPR size.
+ */
+static void gen_mxu_lxx(DisasContext *ctx, uint32_t strd2, MemOp mop)
+{
+ TCGv t0, t1;
+ uint32_t Ra, Rb, Rc;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ Ra = extract32(ctx->opcode, 11, 5);
+ Rc = extract32(ctx->opcode, 16, 5);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+ gen_load_gpr(t1, Rc);
+ tcg_gen_shli_tl(t1, t1, strd2);
+ tcg_gen_add_tl(t0, t0, t1);
+
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mop | ctx->default_tcg_memop_mask);
+ gen_store_gpr(t1, Ra);
+}
+
+/*
+ * S32STDV XRa, Rb, Rc, STRD2 - Store a word from XRF to memory
+ * S32STDVR XRa, Rb, Rc, STRD2 - Store a word from XRF to memory
+ * in reversed byte seq.
+ * S32SDIV XRa, Rb, Rc, STRD2 - Store a word from XRF to memory,
+ * post modify base address GPR.
+ * S32SDIVR XRa, Rb, Rc, STRD2 - Store a word from XRF to memory,
+ * post modify base address GPR and store in reversed byte seq.
+ */
+static void gen_mxu_s32stxvx(DisasContext *ctx, bool reversed,
+ bool postinc, uint32_t strd2)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb, Rc;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ Rc = extract32(ctx->opcode, 16, 5);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+ gen_load_gpr(t1, Rc);
+ tcg_gen_shli_tl(t1, t1, strd2);
+ tcg_gen_add_tl(t0, t0, t1);
+
+ gen_load_mxu_gpr(t1, XRa);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
+ (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ ctx->default_tcg_memop_mask);
+
+ if (postinc) {
+ gen_store_gpr(t0, Rb);
+ }
}
-
/*
* MXU instruction category: logic
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
}
+/*
+ * MXU instruction category: shift
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * D32SLL D32SLR D32SAR D32SARL
+ * D32SLLV D32SLRV D32SARV D32SARW
+ * Q16SLL Q16SLR Q16SAR
+ * Q16SLLV Q16SLRV Q16SARV
+ */
+
+/*
+ * D32SLL XRa, XRd, XRb, XRc, SFT4
+ * Dual 32-bit shift left of XRb and XRc by SFT4
+ * bits (0..15). Store to XRa and XRd respectively.
+ * D32SLR XRa, XRd, XRb, XRc, SFT4
+ * Dual 32-bit logical shift right of XRb and XRc
+ * by SFT4 bits (0..15). Store to XRa and XRd respectively.
+ * D32SAR XRa, XRd, XRb, XRc, SFT4
+ * Dual 32-bit arithmetic shift right of XRb and XRc
+ * by SFT4 bits (0..15). Store to XRa and XRd respectively.
+ */
+static void gen_mxu_d32sxx(DisasContext *ctx, bool right, bool arithmetic)
+{
+ uint32_t XRa, XRb, XRc, XRd, sft4;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ sft4 = extract32(ctx->opcode, 22, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+
+ if (right) {
+ if (arithmetic) {
+ tcg_gen_sari_tl(t0, t0, sft4);
+ tcg_gen_sari_tl(t1, t1, sft4);
+ } else {
+ tcg_gen_shri_tl(t0, t0, sft4);
+ tcg_gen_shri_tl(t1, t1, sft4);
+ }
+ } else {
+ tcg_gen_shli_tl(t0, t0, sft4);
+ tcg_gen_shli_tl(t1, t1, sft4);
+ }
+ gen_store_mxu_gpr(t0, XRa);
+ gen_store_mxu_gpr(t1, XRd);
+}
+
+/*
+ * D32SLLV XRa, XRd, rs
+ * Dual 32-bit shift left of XRa and XRd by rs[3:0]
+ * bits. Store back to XRa and XRd respectively.
+ * D32SLRV XRa, XRd, rs
+ * Dual 32-bit logical shift right of XRa and XRd by rs[3:0]
+ * bits. Store back to XRa and XRd respectively.
+ * D32SARV XRa, XRd, rs
+ * Dual 32-bit arithmetic shift right of XRa and XRd by rs[3:0]
+ * bits. Store back to XRa and XRd respectively.
+ */
+static void gen_mxu_d32sxxv(DisasContext *ctx, bool right, bool arithmetic)
+{
+ uint32_t XRa, XRd, rs;
+
+ XRa = extract32(ctx->opcode, 10, 4);
+ XRd = extract32(ctx->opcode, 14, 4);
+ rs = extract32(ctx->opcode, 21, 5);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t0, XRa);
+ gen_load_mxu_gpr(t1, XRd);
+ gen_load_gpr(t2, rs);
+ tcg_gen_andi_tl(t2, t2, 0x0f);
+
+ if (right) {
+ if (arithmetic) {
+ tcg_gen_sar_tl(t0, t0, t2);
+ tcg_gen_sar_tl(t1, t1, t2);
+ } else {
+ tcg_gen_shr_tl(t0, t0, t2);
+ tcg_gen_shr_tl(t1, t1, t2);
+ }
+ } else {
+ tcg_gen_shl_tl(t0, t0, t2);
+ tcg_gen_shl_tl(t1, t1, t2);
+ }
+ gen_store_mxu_gpr(t0, XRa);
+ gen_store_mxu_gpr(t1, XRd);
+}
+
+/*
+ * D32SARL XRa, XRb, XRc, SFT4
+ * Dual arithmetic shift right of the 32-bit integers in XRb and XRc
+ * by SFT4 bits (0..15). Pack the 16 LSBs of each into XRa.
+ *
+ * D32SARW XRa, XRb, XRc, rb
+ * Dual arithmetic shift right of the 32-bit integers in XRb and XRc
+ * by rb[3:0] bits. Pack the 16 LSBs of each into XRa.
+ */
+static void gen_mxu_d32sarl(DisasContext *ctx, bool sarw)
+{
+ uint32_t XRa, XRb, XRc, rb;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ rb = extract32(ctx->opcode, 21, 5);
+
+ if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ if (!sarw) {
+ /* Make SFT4 from rb field */
+ tcg_gen_movi_tl(t2, rb >> 1);
+ } else {
+ gen_load_gpr(t2, rb);
+ tcg_gen_andi_tl(t2, t2, 0x0f);
+ }
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+ tcg_gen_sar_tl(t0, t0, t2);
+ tcg_gen_sar_tl(t1, t1, t2);
+ tcg_gen_extract_tl(t2, t1, 0, 16);
+ tcg_gen_deposit_tl(t2, t2, t0, 16, 16);
+ gen_store_mxu_gpr(t2, XRa);
+ }
+}
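
In scalar terms the shift-and-pack step is (a sketch, not the patch's code):

#include <stdint.h>

/* Shift both words arithmetically, then pack the low halves as
 * {XRb_result, XRc_result}. */
static uint32_t d32sarl_ref(uint32_t xrb, uint32_t xrc, unsigned sft)
{
    uint32_t hi = ((uint32_t)((int32_t)xrb >> sft)) & 0xffff;
    uint32_t lo = ((uint32_t)((int32_t)xrc >> sft)) & 0xffff;

    return (hi << 16) | lo;
}
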
+
+/*
+ * Q16SLL XRa, XRd, XRb, XRc, SFT4
+ * Quad 16-bit shift left of XRb and XRc by SFT4
+ * bits (0..15). Store to XRa and XRd respectively.
+ * Q16SLR XRa, XRd, XRb, XRc, SFT4
+ * Quad 16-bit logical shift right of XRb and XRc
+ * by SFT4 bits (0..15). Store to XRa and XRd respectively.
+ * Q16SAR XRa, XRd, XRb, XRc, SFT4
+ * Quad 16-bit arithmetic shift right of XRb and XRc
+ * by SFT4 bits (0..15). Store to XRa and XRd respectively.
+ */
+static void gen_mxu_q16sxx(DisasContext *ctx, bool right, bool arithmetic)
+{
+ uint32_t XRa, XRb, XRc, XRd, sft4;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ sft4 = extract32(ctx->opcode, 22, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t2, XRc);
+
+ if (arithmetic) {
+ tcg_gen_sextract_tl(t1, t0, 16, 16);
+ tcg_gen_sextract_tl(t0, t0, 0, 16);
+ tcg_gen_sextract_tl(t3, t2, 16, 16);
+ tcg_gen_sextract_tl(t2, t2, 0, 16);
+ } else {
+ tcg_gen_extract_tl(t1, t0, 16, 16);
+ tcg_gen_extract_tl(t0, t0, 0, 16);
+ tcg_gen_extract_tl(t3, t2, 16, 16);
+ tcg_gen_extract_tl(t2, t2, 0, 16);
+ }
+
+ if (right) {
+ if (arithmetic) {
+ tcg_gen_sari_tl(t0, t0, sft4);
+ tcg_gen_sari_tl(t1, t1, sft4);
+ tcg_gen_sari_tl(t2, t2, sft4);
+ tcg_gen_sari_tl(t3, t3, sft4);
+ } else {
+ tcg_gen_shri_tl(t0, t0, sft4);
+ tcg_gen_shri_tl(t1, t1, sft4);
+ tcg_gen_shri_tl(t2, t2, sft4);
+ tcg_gen_shri_tl(t3, t3, sft4);
+ }
+ } else {
+ tcg_gen_shli_tl(t0, t0, sft4);
+ tcg_gen_shli_tl(t1, t1, sft4);
+ tcg_gen_shli_tl(t2, t2, sft4);
+ tcg_gen_shli_tl(t3, t3, sft4);
+ }
+ tcg_gen_deposit_tl(t0, t0, t1, 16, 16);
+ tcg_gen_deposit_tl(t2, t2, t3, 16, 16);
+
+ gen_store_mxu_gpr(t0, XRa);
+ gen_store_mxu_gpr(t2, XRd);
+}
+
+/*
+ * Q16SLLV XRa, XRd, rs
+ * Quad 16-bit shift left of XRa and XRd by rs[3:0]
+ * bits. Store to XRa and XRd respectively.
+ * Q16SLRV XRa, XRd, rs
+ * Quad 16-bit logical shift right of XRa and XRd by rs[3:0]
+ * bits. Store to XRa and XRd respectively.
+ * Q16SARV XRa, XRd, rs
+ * Quad 16-bit arithmetic shift right of XRa and XRd by rs[3:0]
+ * bits. Store to XRa and XRd respectively.
+ */
+static void gen_mxu_q16sxxv(DisasContext *ctx, bool right, bool arithmetic)
+{
+ uint32_t XRa, XRd, rs;
+
+ XRa = extract32(ctx->opcode, 10, 4);
+ XRd = extract32(ctx->opcode, 14, 4);
+ rs = extract32(ctx->opcode, 21, 5);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t5 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t0, XRa);
+ gen_load_mxu_gpr(t2, XRd);
+ gen_load_gpr(t5, rs);
+ tcg_gen_andi_tl(t5, t5, 0x0f);
+
+ if (arithmetic) {
+ tcg_gen_sextract_tl(t1, t0, 16, 16);
+ tcg_gen_sextract_tl(t0, t0, 0, 16);
+ tcg_gen_sextract_tl(t3, t2, 16, 16);
+ tcg_gen_sextract_tl(t2, t2, 0, 16);
+ } else {
+ tcg_gen_extract_tl(t1, t0, 16, 16);
+ tcg_gen_extract_tl(t0, t0, 0, 16);
+ tcg_gen_extract_tl(t3, t2, 16, 16);
+ tcg_gen_extract_tl(t2, t2, 0, 16);
+ }
+
+ if (right) {
+ if (arithmetic) {
+ tcg_gen_sar_tl(t0, t0, t5);
+ tcg_gen_sar_tl(t1, t1, t5);
+ tcg_gen_sar_tl(t2, t2, t5);
+ tcg_gen_sar_tl(t3, t3, t5);
+ } else {
+ tcg_gen_shr_tl(t0, t0, t5);
+ tcg_gen_shr_tl(t1, t1, t5);
+ tcg_gen_shr_tl(t2, t2, t5);
+ tcg_gen_shr_tl(t3, t3, t5);
+ }
+ } else {
+ tcg_gen_shl_tl(t0, t0, t5);
+ tcg_gen_shl_tl(t1, t1, t5);
+ tcg_gen_shl_tl(t2, t2, t5);
+ tcg_gen_shl_tl(t3, t3, t5);
+ }
+ tcg_gen_deposit_tl(t0, t0, t1, 16, 16);
+ tcg_gen_deposit_tl(t2, t2, t3, 16, 16);
+
+ gen_store_mxu_gpr(t0, XRa);
+ gen_store_mxu_gpr(t2, XRd);
+}
/*
- * MXU instruction category max/min
+ * MXU instruction category max/min/avg
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* S32MAX D16MAX Q8MAX
* S32MIN D16MIN Q8MIN
+ * S32SLT D16SLT Q8SLT
+ * Q8SLTU
+ * D16AVG Q8AVG
+ * D16AVGR Q8AVGR
+ * S32MOVZ D16MOVZ Q8MOVZ
+ * S32MOVN D16MOVN Q8MOVN
*/
/*
/* ...and do half-word-wise max/min with one operand 0 */
TCGv_i32 t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_constant_i32(0);
+ TCGv_i32 t2 = tcg_temp_new();
/* the left half-word first */
tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFFFF0000);
if (opc == OPC_MXU_D16MAX) {
- tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smax_i32(t2, t0, t1);
} else {
- tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smin_i32(t2, t0, t1);
}
/* the right half-word */
/* return resulting half-words to its original position */
tcg_gen_shri_i32(t0, t0, 16);
/* finally update the destination */
- tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t2, t0);
} else if (unlikely(XRb == XRc)) {
/* both operands same -> just set destination to one of them */
tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
/* the most general case */
TCGv_i32 t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_temp_new();
+ TCGv_i32 t2 = tcg_temp_new();
/* the left half-word first */
tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0xFFFF0000);
tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFF0000);
if (opc == OPC_MXU_D16MAX) {
- tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smax_i32(t2, t0, t1);
} else {
- tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smin_i32(t2, t0, t1);
}
/* the right half-word */
/* return resulting half-words to its original position */
tcg_gen_shri_i32(t0, t0, 16);
/* finally update the destination */
- tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t2, t0);
}
}
/* ...and do byte-wise max/min with one operand 0 */
TCGv_i32 t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_constant_i32(0);
+ TCGv_i32 t2 = tcg_temp_new();
int32_t i;
/* the leftmost byte (byte 3) first */
tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFF000000);
if (opc == OPC_MXU_Q8MAX) {
- tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smax_i32(t2, t0, t1);
} else {
- tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smin_i32(t2, t0, t1);
}
/* bytes 2, 1, 0 */
/* return resulting byte to its original position */
tcg_gen_shri_i32(t0, t0, 8 * (3 - i));
/* finally update the destination */
- tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ tcg_gen_or_i32(t2, t2, t0);
}
+ gen_store_mxu_gpr(t2, XRa);
} else if (unlikely(XRb == XRc)) {
/* both operands same -> just set destination to one of them */
tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
/* the most general case */
TCGv_i32 t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_temp_new();
+ TCGv_i32 t2 = tcg_temp_new();
int32_t i;
/* the leftmost bytes (bytes 3) first */
tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0xFF000000);
tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFF000000);
if (opc == OPC_MXU_Q8MAX) {
- tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smax_i32(t2, t0, t1);
} else {
- tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_smin_i32(t2, t0, t1);
}
/* bytes 2, 1, 0 */
/* return resulting byte to its original position */
tcg_gen_shri_i32(t0, t0, 8 * (3 - i));
/* finally update the destination */
- tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ tcg_gen_or_i32(t2, t2, t0);
}
+ gen_store_mxu_gpr(t2, XRa);
}
}
-
/*
- * MXU instruction category: align
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Q8SLT
+ * Update XRa with the signed "set less than" comparison of XRb and XRc
+ * on per-byte basis.
+ * a.k.a. XRa[0..3] = XRb[0..3] < XRc[0..3] ? 1 : 0;
+ *
+ * Q8SLTU
+ * Update XRa with the unsigned "set less than" comparison of XRb and XRc
+ * on per-byte basis.
+ * a.k.a. XRa[0..3] = XRb[0..3] < XRc[0..3] ? 1 : 0;
+ */
+static void gen_mxu_q8slt(DisasContext *ctx, bool sltu)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t4, XRc);
+ tcg_gen_movi_tl(t2, 0);
+
+ for (int i = 0; i < 4; i++) {
+ if (sltu) {
+ tcg_gen_extract_tl(t0, t3, 8 * i, 8);
+ tcg_gen_extract_tl(t1, t4, 8 * i, 8);
+ } else {
+ tcg_gen_sextract_tl(t0, t3, 8 * i, 8);
+ tcg_gen_sextract_tl(t1, t4, 8 * i, 8);
+ }
+ tcg_gen_setcond_tl(TCG_COND_LT, t0, t0, t1);
+ tcg_gen_deposit_tl(t2, t2, t0, 8 * i, 8);
+ }
+ gen_store_mxu_gpr(t2, XRa);
+ }
+}
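
The generated loop corresponds to this per-byte model (illustrative;
q8slt_ref is an invented name):

#include <stdint.h>

static uint32_t q8slt_ref(uint32_t xrb, uint32_t xrc, int unsigned_cmp)
{
    uint32_t r = 0;

    for (int i = 0; i < 4; i++) {
        int32_t b, c;

        if (unsigned_cmp) {                  /* Q8SLTU */
            b = (uint8_t)(xrb >> (8 * i));
            c = (uint8_t)(xrc >> (8 * i));
        } else {                             /* Q8SLT */
            b = (int8_t)(xrb >> (8 * i));
            c = (int8_t)(xrc >> (8 * i));
        }
        r |= (uint32_t)(b < c) << (8 * i);   /* each result byte is 0 or 1 */
    }
    return r;
}
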
+
+/*
+ * S32SLT
+ * Update XRa with the signed "set less than" comparison of XRb and XRc.
+ * a.k.a. XRa = XRb < XRc ? 1 : 0;
+ */
+static void gen_mxu_S32SLT(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ tcg_gen_setcond_tl(TCG_COND_LT, mxu_gpr[XRa - 1],
+ mxu_gpr[XRb - 1], mxu_gpr[XRc - 1]);
+ }
+}
+
+/*
+ * D16SLT
+ * Update XRa with the signed "set less than" comparison of XRb and XRc
+ * on per-word basis.
+ * a.k.a. XRa[0..1] = XRb[0..1] < XRc[0..1] ? 1 : 0;
+ */
+static void gen_mxu_D16SLT(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t4, XRc);
+ tcg_gen_sextract_tl(t0, t3, 16, 16);
+ tcg_gen_sextract_tl(t1, t4, 16, 16);
+ tcg_gen_setcond_tl(TCG_COND_LT, t0, t0, t1);
+ tcg_gen_shli_tl(t2, t0, 16);
+ tcg_gen_sextract_tl(t0, t3, 0, 16);
+ tcg_gen_sextract_tl(t1, t4, 0, 16);
+ tcg_gen_setcond_tl(TCG_COND_LT, t0, t0, t1);
+ tcg_gen_or_tl(mxu_gpr[XRa - 1], t2, t0);
+ }
+}
+
+/*
+ * D16AVG
+ * Update XRa with the signed average of XRb and XRc
+ * on per-word basis, rounding down.
+ * a.k.a. XRa[0..1] = (XRb[0..1] + XRc[0..1]) >> 1;
+ *
+ * D16AVGR
+ * Update XRa with the signed average of XRb and XRc
+ * on per-word basis, rounding half up.
+ * a.k.a. XRa[0..1] = (XRb[0..1] + XRc[0..1] + 1) >> 1;
+ */
+static void gen_mxu_d16avg(DisasContext *ctx, bool round45)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same registers -> just set destination to same */
+ tcg_gen_mov_tl(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t4, XRc);
+ tcg_gen_sextract_tl(t0, t3, 16, 16);
+ tcg_gen_sextract_tl(t1, t4, 16, 16);
+ tcg_gen_add_tl(t0, t0, t1);
+ if (round45) {
+ tcg_gen_addi_tl(t0, t0, 1);
+ }
+ tcg_gen_shli_tl(t2, t0, 15);
+ tcg_gen_andi_tl(t2, t2, 0xffff0000);
+ tcg_gen_sextract_tl(t0, t3, 0, 16);
+ tcg_gen_sextract_tl(t1, t4, 0, 16);
+ tcg_gen_add_tl(t0, t0, t1);
+ if (round45) {
+ tcg_gen_addi_tl(t0, t0, 1);
+ }
+ tcg_gen_shri_tl(t0, t0, 1);
+ tcg_gen_deposit_tl(t2, t2, t0, 0, 16);
+ gen_store_mxu_gpr(t2, XRa);
+ }
+}
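
The halfword averaging above, written out as arithmetic (a sketch;
round45 adds the rounding bias before the shift, matching the addi above):

#include <stdint.h>

static uint32_t d16avg_ref(uint32_t xrb, uint32_t xrc, int round45)
{
    int32_t hi = (int16_t)(xrb >> 16) + (int16_t)(xrc >> 16) + round45;
    int32_t lo = (int16_t)xrb + (int16_t)xrc + round45;

    /* keep bits 16..1 of each 17-bit sum, i.e. sum >> 1 per lane */
    return (((uint32_t)(hi >> 1) & 0xffff) << 16) |
           ((uint32_t)(lo >> 1) & 0xffff);
}
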
+
+/*
+ * Q8AVG
+ * Update XRa with the signed average of XRb and XRc
+ * on per-byte basis, rounding down.
+ * a.k.a. XRa[0..3] = (XRb[0..3] + XRc[0..3]) >> 1;
+ *
+ * Q8AVGR
+ * Update XRa with the signed average of XRb and XRc
+ * on per-byte basis, rounding half up.
+ * a.k.a. XRa[0..3] = (XRb[0..3] + XRc[0..3] + 1) >> 1;
+ */
+static void gen_mxu_q8avg(DisasContext *ctx, bool round45)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same registers -> just set destination to same */
+ tcg_gen_mov_tl(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t4, XRc);
+ tcg_gen_movi_tl(t2, 0);
+
+ for (int i = 0; i < 4; i++) {
+ tcg_gen_extract_tl(t0, t3, 8 * i, 8);
+ tcg_gen_extract_tl(t1, t4, 8 * i, 8);
+ tcg_gen_add_tl(t0, t0, t1);
+ if (round45) {
+ tcg_gen_addi_tl(t0, t0, 1);
+ }
+ tcg_gen_shri_tl(t0, t0, 1);
+ tcg_gen_deposit_tl(t2, t2, t0, 8 * i, 8);
+ }
+ gen_store_mxu_gpr(t2, XRa);
+ }
+}
+
+/*
+ * Q8MOVZ
+ * Quadruple 8-bit packed conditional move where
+ * XRb contains conditions, XRc what to move and
+ * XRa is the destination.
+ * a.k.a. if (XRb[0..3] == 0) { XRa[0..3] = XRc[0..3] }
+ *
+ * Q8MOVN
+ * Quadruple 8-bit packed conditional move where
+ * XRb contains conditions, XRc what to move and
+ * XRa is the destination.
+ * a.k.a. if (XRb[0..3] != 0) { XRa[0..3] = XRc[0..3] }
+ */
+static void gen_mxu_q8movzn(DisasContext *ctx, TCGCond cond)
+{
+ uint32_t XRc, XRb, XRa;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGLabel *l_quarterdone = gen_new_label();
+ TCGLabel *l_halfdone = gen_new_label();
+ TCGLabel *l_quarterrest = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+
+ gen_load_mxu_gpr(t0, XRc);
+ gen_load_mxu_gpr(t1, XRb);
+ gen_load_mxu_gpr(t2, XRa);
+
+ tcg_gen_extract_tl(t3, t1, 24, 8);
+ tcg_gen_brcondi_tl(cond, t3, 0, l_quarterdone);
+ tcg_gen_extract_tl(t3, t0, 24, 8);
+ tcg_gen_deposit_tl(t2, t2, t3, 24, 8);
+
+ gen_set_label(l_quarterdone);
+ tcg_gen_extract_tl(t3, t1, 16, 8);
+ tcg_gen_brcondi_tl(cond, t3, 0, l_halfdone);
+ tcg_gen_extract_tl(t3, t0, 16, 8);
+ tcg_gen_deposit_tl(t2, t2, t3, 16, 8);
+
+ gen_set_label(l_halfdone);
+ tcg_gen_extract_tl(t3, t1, 8, 8);
+ tcg_gen_brcondi_tl(cond, t3, 0, l_quarterrest);
+ tcg_gen_extract_tl(t3, t0, 8, 8);
+ tcg_gen_deposit_tl(t2, t2, t3, 8, 8);
+
+ gen_set_label(l_quarterrest);
+ tcg_gen_extract_tl(t3, t1, 0, 8);
+ tcg_gen_brcondi_tl(cond, t3, 0, l_done);
+ tcg_gen_extract_tl(t3, t0, 0, 8);
+ tcg_gen_deposit_tl(t2, t2, t3, 0, 8);
+
+ gen_set_label(l_done);
+ gen_store_mxu_gpr(t2, XRa);
+}
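
Byte-wise, the two instructions behave like this (a reference sketch;
movn selects the Q8MOVN sense of the condition):

#include <stdint.h>

static uint32_t q8movzn_ref(uint32_t xra, uint32_t xrb, uint32_t xrc,
                            int movn)
{
    uint32_t r = xra;

    for (int i = 0; i < 4; i++) {
        uint8_t cond = xrb >> (8 * i);

        if (movn ? cond != 0 : cond == 0) {
            r &= ~(0xffu << (8 * i));
            r |= ((xrc >> (8 * i)) & 0xffu) << (8 * i);
        }
    }
    return r;
}
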
+
+/*
+ * D16MOVZ
+ * Double 16-bit packed conditional move where
+ * XRb contains conditions, XRc what to move and
+ * XRa is the destination.
+ * a.k.a. if (XRb[0..1] == 0) { XRa[0..1] = XRc[0..1] }
+ *
+ * D16MOVN
+ * Double 16-bit packed conditional move where
+ * XRb contains conditions, XRc what to move and
+ * XRa is the destination.
+ * a.k.a. if (XRb[0..1] != 0) { XRa[0..1] = XRc[0..1] }
+ */
+static void gen_mxu_d16movzn(DisasContext *ctx, TCGCond cond)
+{
+ uint32_t XRc, XRb, XRa;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGLabel *l_halfdone = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+
+ gen_load_mxu_gpr(t0, XRc);
+ gen_load_mxu_gpr(t1, XRb);
+ gen_load_mxu_gpr(t2, XRa);
+
+ tcg_gen_extract_tl(t3, t1, 16, 16);
+ tcg_gen_brcondi_tl(cond, t3, 0, l_halfdone);
+ tcg_gen_extract_tl(t3, t0, 16, 16);
+ tcg_gen_deposit_tl(t2, t2, t3, 16, 16);
+
+ gen_set_label(l_halfdone);
+ tcg_gen_extract_tl(t3, t1, 0, 16);
+ tcg_gen_brcondi_tl(cond, t3, 0, l_done);
+ tcg_gen_extract_tl(t3, t0, 0, 16);
+ tcg_gen_deposit_tl(t2, t2, t3, 0, 16);
+
+ gen_set_label(l_done);
+ gen_store_mxu_gpr(t2, XRa);
+}
+
+/*
+ * S32MOVZ
+ * Single 32-bit conditional move where
+ * XRb contains conditions, XRc what to move and
+ * XRa is the destination.
+ * a.k.a. if (XRb == 0) { XRa = XRc }
+ *
+ * S32MOVN
+ * Single 32-bit conditional move where
+ * XRb contains conditions, XRc what to move and
+ * XRa is the destination.
+ * a.k.a. if (XRb != 0) { XRa = XRc }
+ */
+static void gen_mxu_s32movzn(DisasContext *ctx, TCGCond cond)
+{
+ uint32_t XRc, XRb, XRa;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGLabel *l_done = gen_new_label();
+
+ gen_load_mxu_gpr(t0, XRc);
+ gen_load_mxu_gpr(t1, XRb);
+
+ tcg_gen_brcondi_tl(cond, t1, 0, l_done);
+ gen_store_mxu_gpr(t0, XRa);
+ gen_set_label(l_done);
+}
+
+/*
+ * MXU instruction category: Addition and subtraction
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * S32CPS D16CPS
+ * Q8ADD
+ */
+
+/*
+ * S32CPS
+ * Set XRa to 0 - XRb if XRc < 0,
+ * else XRa = XRb
+ */
+static void gen_mxu_S32CPS(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely(XRb == 0)) {
+ /* XRb is zero: 0 - 0 = 0 -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRc == 0)) {
+ /* condition always false -> just move XRb to XRa */
+ tcg_gen_mov_tl(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGLabel *l_not_less = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_GE, mxu_gpr[XRc - 1], 0, l_not_less);
+ tcg_gen_neg_tl(t0, mxu_gpr[XRb - 1]);
+ tcg_gen_br(l_done);
+ gen_set_label(l_not_less);
+ gen_load_mxu_gpr(t0, XRb);
+ gen_set_label(l_done);
+ gen_store_mxu_gpr(t0, XRa);
+ }
+}
+
+/*
+ * D16CPS
+ * Set XRa[0..1] to 0 - XRb[0..1] if XRc[0..1] < 0,
+ * else XRa[0..1] = XRb[0..1]
+ */
+static void gen_mxu_D16CPS(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely(XRb == 0)) {
+ /* XRb is zero: 0 - 0 = 0 -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRc == 0)) {
+ /* condition always false -> just move XRb to XRa */
+ tcg_gen_mov_tl(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGLabel *l_done_hi = gen_new_label();
+ TCGLabel *l_not_less_lo = gen_new_label();
+ TCGLabel *l_done_lo = gen_new_label();
+
+ tcg_gen_sextract_tl(t0, mxu_gpr[XRc - 1], 16, 16);
+ tcg_gen_sextract_tl(t1, mxu_gpr[XRb - 1], 16, 16);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l_done_hi);
+ tcg_gen_subfi_tl(t1, 0, t1);
+
+ gen_set_label(l_done_hi);
+ tcg_gen_shli_i32(t1, t1, 16);
+
+ tcg_gen_sextract_tl(t0, mxu_gpr[XRc - 1], 0, 16);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l_not_less_lo);
+ tcg_gen_sextract_tl(t0, mxu_gpr[XRb - 1], 0, 16);
+ tcg_gen_subfi_tl(t0, 0, t0);
+ tcg_gen_br(l_done_lo);
+
+ gen_set_label(l_not_less_lo);
+ tcg_gen_extract_tl(t0, mxu_gpr[XRb - 1], 0, 16);
+
+ gen_set_label(l_done_lo);
+ tcg_gen_deposit_tl(mxu_gpr[XRa - 1], t1, t0, 0, 16);
+ }
+}
+
+/*
+ * Q8ABD XRa, XRb, XRc
+ * Gets the absolute difference of the quadruple of 8-bit values
+ * packed in XRb and the quadruple in XRc,
+ * and puts the result in XRa.
+ * a.k.a. XRa[0..3] = abs(XRb[0..3] - XRc[0..3]);
+ */
+static void gen_mxu_Q8ABD(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 3);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t4, XRc);
+ tcg_gen_movi_tl(t2, 0);
+
+ for (int i = 0; i < 4; i++) {
+ tcg_gen_extract_tl(t0, t3, 8 * i, 8);
+ tcg_gen_extract_tl(t1, t4, 8 * i, 8);
+
+ tcg_gen_sub_tl(t0, t0, t1);
+ tcg_gen_abs_tl(t0, t0);
+
+ tcg_gen_deposit_tl(t2, t2, t0, 8 * i, 8);
+ }
+ gen_store_mxu_gpr(t2, XRa);
+ }
+}
+
+/*
+ * Q8ADD XRa, XRb, XRc, ptn2
+ * Add/subtract quadruple of 8-bit packed in XRb
+ * to another one in XRc, put the result in XRa.
+ */
+static void gen_mxu_Q8ADD(DisasContext *ctx)
+{
+ uint32_t aptn2, pad, XRc, XRb, XRa;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ pad = extract32(ctx->opcode, 21, 3);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t4, XRc);
+
+ for (int i = 0; i < 4; i++) {
+ tcg_gen_andi_tl(t0, t3, 0xff);
+ tcg_gen_andi_tl(t1, t4, 0xff);
+
+ if (i < 2) {
+ if (aptn2 & 0x01) {
+ tcg_gen_sub_tl(t0, t0, t1);
+ } else {
+ tcg_gen_add_tl(t0, t0, t1);
+ }
+ } else {
+ if (aptn2 & 0x02) {
+ tcg_gen_sub_tl(t0, t0, t1);
+ } else {
+ tcg_gen_add_tl(t0, t0, t1);
+ }
+ }
+ if (i < 3) {
+ tcg_gen_shri_tl(t3, t3, 8);
+ tcg_gen_shri_tl(t4, t4, 8);
+ }
+ if (i > 0) {
+ tcg_gen_deposit_tl(t2, t2, t0, 8 * i, 8);
+ } else {
+ tcg_gen_andi_tl(t0, t0, 0xff);
+ tcg_gen_mov_tl(t2, t0);
+ }
+ }
+ gen_store_mxu_gpr(t2, XRa);
+ }
+}
+
+/*
+ * Q8ADDE XRa, XRb, XRc, XRd, aptn2
+ * Add/subtract quadruple of 8-bit packed in XRb
+ * to another one in XRc, with zero extending
+ * to 16-bit and put results as packed 16-bit data
+ * into XRa and XRd.
+ * aptn2 selects add or subtract for each pair of data.
+ *
+ * Q8ACCE XRa, XRb, XRc, XRd, aptn2
+ * Add/subtract quadruple of 8-bit packed in XRb
+ * to another one in XRc, with zero extending
+ * to 16-bit and accumulate results as packed 16-bit data
+ * into XRa and XRd.
+ * aptn2 selects add or subtract for each pair of data.
+ */
+static void gen_mxu_q8adde(DisasContext *ctx, bool accumulate)
+{
+ uint32_t aptn2, XRd, XRc, XRb, XRa;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ if (XRa != 0) {
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ }
+ if (XRd != 0) {
+ tcg_gen_movi_tl(mxu_gpr[XRd - 1], 0);
+ }
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+ TCGv t5 = tcg_temp_new();
+
+ if (XRa != 0) {
+ tcg_gen_extract_tl(t0, mxu_gpr[XRb - 1], 16, 8);
+ tcg_gen_extract_tl(t1, mxu_gpr[XRc - 1], 16, 8);
+ tcg_gen_extract_tl(t2, mxu_gpr[XRb - 1], 24, 8);
+ tcg_gen_extract_tl(t3, mxu_gpr[XRc - 1], 24, 8);
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(t0, t0, t1);
+ tcg_gen_sub_tl(t2, t2, t3);
+ } else {
+ tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_add_tl(t2, t2, t3);
+ }
+ if (accumulate) {
+ gen_load_mxu_gpr(t5, XRa);
+ tcg_gen_extract_tl(t1, t5, 0, 16);
+ tcg_gen_extract_tl(t3, t5, 16, 16);
+ tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_add_tl(t2, t2, t3);
+ }
+ tcg_gen_shli_tl(t2, t2, 16);
+ tcg_gen_extract_tl(t0, t0, 0, 16);
+ tcg_gen_or_tl(t4, t2, t0);
+ }
+ if (XRd != 0) {
+ tcg_gen_extract_tl(t0, mxu_gpr[XRb - 1], 0, 8);
+ tcg_gen_extract_tl(t1, mxu_gpr[XRc - 1], 0, 8);
+ tcg_gen_extract_tl(t2, mxu_gpr[XRb - 1], 8, 8);
+ tcg_gen_extract_tl(t3, mxu_gpr[XRc - 1], 8, 8);
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(t0, t0, t1);
+ tcg_gen_sub_tl(t2, t2, t3);
+ } else {
+ tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_add_tl(t2, t2, t3);
+ }
+ if (accumulate) {
+ gen_load_mxu_gpr(t5, XRd);
+ tcg_gen_extract_tl(t1, t5, 0, 16);
+ tcg_gen_extract_tl(t3, t5, 16, 16);
+ tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_add_tl(t2, t2, t3);
+ }
+ tcg_gen_shli_tl(t2, t2, 16);
+ tcg_gen_extract_tl(t0, t0, 0, 16);
+ tcg_gen_or_tl(t5, t2, t0);
+ }
+
+ gen_store_mxu_gpr(t4, XRa);
+ gen_store_mxu_gpr(t5, XRd);
+ }
+}
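
As a cross-check, the widening add/accumulate reduces to the following
(sketch with invented names; lanes 0..1 land in XRd, lanes 2..3 in XRa):

#include <stdint.h>

static void q8adde_ref(uint32_t xrb, uint32_t xrc, unsigned aptn2,
                       int accumulate, uint32_t *xra, uint32_t *xrd)
{
    uint16_t lane[4];

    for (int i = 0; i < 4; i++) {
        uint16_t b = (xrb >> (8 * i)) & 0xff;
        uint16_t c = (xrc >> (8 * i)) & 0xff;
        int sub = (i < 2) ? (aptn2 & 1) : (aptn2 & 2);

        lane[i] = sub ? b - c : b + c;
        if (accumulate) {   /* Q8ACCE adds the old destination lane */
            uint32_t old = (i < 2) ? *xrd : *xra;
            lane[i] += (old >> (16 * (i & 1))) & 0xffff;
        }
    }
    *xrd = lane[0] | ((uint32_t)lane[1] << 16);
    *xra = lane[2] | ((uint32_t)lane[3] << 16);
}
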
+
+/*
+ * D8SUM XRa, XRb, XRc
+ * Double parallel add of the quadruple of unsigned 8-bit values,
+ * zero extended to 16-bit data.
+ * D8SUMC XRa, XRb, XRc
+ * Double parallel add of the quadruple of unsigned 8-bit values,
+ * zero extended to 16-bit data, with 2 added to each
+ * parallel result.
+ */
+static void gen_mxu_d8sum(DisasContext *ctx, bool sumc)
+{
+ uint32_t pad, pad2, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 24, 2);
+ pad2 = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0 || pad2 != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+ TCGv t5 = tcg_temp_new();
+
+ if (XRb != 0) {
+ tcg_gen_extract_tl(t0, mxu_gpr[XRb - 1], 0, 8);
+ tcg_gen_extract_tl(t1, mxu_gpr[XRb - 1], 8, 8);
+ tcg_gen_extract_tl(t2, mxu_gpr[XRb - 1], 16, 8);
+ tcg_gen_extract_tl(t3, mxu_gpr[XRb - 1], 24, 8);
+ tcg_gen_add_tl(t4, t0, t1);
+ tcg_gen_add_tl(t4, t4, t2);
+ tcg_gen_add_tl(t4, t4, t3);
+ } else {
+ tcg_gen_movi_tl(t4, 0);
+ }
+ if (XRc != 0) {
+ tcg_gen_extract_tl(t0, mxu_gpr[XRc - 1], 0, 8);
+ tcg_gen_extract_tl(t1, mxu_gpr[XRc - 1], 8, 8);
+ tcg_gen_extract_tl(t2, mxu_gpr[XRc - 1], 16, 8);
+ tcg_gen_extract_tl(t3, mxu_gpr[XRc - 1], 24, 8);
+ tcg_gen_add_tl(t5, t0, t1);
+ tcg_gen_add_tl(t5, t5, t2);
+ tcg_gen_add_tl(t5, t5, t3);
+ } else {
+ tcg_gen_movi_tl(t5, 0);
+ }
+
+ if (sumc) {
+ tcg_gen_addi_tl(t4, t4, 2);
+ tcg_gen_addi_tl(t5, t5, 2);
+ }
+ tcg_gen_shli_tl(t4, t4, 16);
+
+ tcg_gen_or_tl(mxu_gpr[XRa - 1], t4, t5);
+ }
+}
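
The net result is simply two byte sums packed side by side (sketch):

#include <stdint.h>

static uint32_t d8sum_ref(uint32_t xrb, uint32_t xrc, int sumc)
{
    uint32_t sb = 0, sc = 0;

    for (int i = 0; i < 4; i++) {
        sb += (xrb >> (8 * i)) & 0xff;
        sc += (xrc >> (8 * i)) & 0xff;
    }
    if (sumc) {               /* D8SUMC adds 2 to each parallel result */
        sb += 2;
        sc += 2;
    }
    return (sb << 16) | sc;   /* each sum fits in 16 bits (max 4*255+2) */
}
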
+
+/*
+ * Q16ADD XRa, XRb, XRc, XRd, aptn2, optn2 - Quad packed
+ * 16-bit pattern addition.
+ */
+static void gen_mxu_q16add(DisasContext *ctx)
+{
+ uint32_t aptn2, optn2, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ optn2 = extract32(ctx->opcode, 22, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+ TCGv t5 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t1, XRb);
+ tcg_gen_extract_tl(t0, t1, 0, 16);
+ tcg_gen_extract_tl(t1, t1, 16, 16);
+
+ gen_load_mxu_gpr(t3, XRc);
+ tcg_gen_extract_tl(t2, t3, 0, 16);
+ tcg_gen_extract_tl(t3, t3, 16, 16);
+
+ switch (optn2) {
+ case MXU_OPTN2_WW: /* XRB.H+XRC.H == lop, XRB.L+XRC.L == rop */
+ tcg_gen_mov_tl(t4, t1);
+ tcg_gen_mov_tl(t5, t0);
+ break;
+ case MXU_OPTN2_LW: /* XRB.L+XRC.H == lop, XRB.L+XRC.L == rop */
+ tcg_gen_mov_tl(t4, t0);
+ tcg_gen_mov_tl(t5, t0);
+ break;
+ case MXU_OPTN2_HW: /* XRB.H+XRC.H == lop, XRB.H+XRC.L == rop */
+ tcg_gen_mov_tl(t4, t1);
+ tcg_gen_mov_tl(t5, t1);
+ break;
+ case MXU_OPTN2_XW: /* XRB.L+XRC.H == lop, XRB.H+XRC.L == rop */
+ tcg_gen_mov_tl(t4, t0);
+ tcg_gen_mov_tl(t5, t1);
+ break;
+ }
+
+ switch (aptn2) {
+ case MXU_APTN2_AA: /* lop +, rop + */
+ tcg_gen_add_tl(t0, t4, t3);
+ tcg_gen_add_tl(t1, t5, t2);
+ tcg_gen_add_tl(t4, t4, t3);
+ tcg_gen_add_tl(t5, t5, t2);
+ break;
+ case MXU_APTN2_AS: /* lop +, rop - */
+ tcg_gen_sub_tl(t0, t4, t3);
+ tcg_gen_sub_tl(t1, t5, t2);
+ tcg_gen_add_tl(t4, t4, t3);
+ tcg_gen_add_tl(t5, t5, t2);
+ break;
+ case MXU_APTN2_SA: /* lop -, rop + */
+ tcg_gen_add_tl(t0, t4, t3);
+ tcg_gen_add_tl(t1, t5, t2);
+ tcg_gen_sub_tl(t4, t4, t3);
+ tcg_gen_sub_tl(t5, t5, t2);
+ break;
+ case MXU_APTN2_SS: /* lop -, rop - */
+ tcg_gen_sub_tl(t0, t4, t3);
+ tcg_gen_sub_tl(t1, t5, t2);
+ tcg_gen_sub_tl(t4, t4, t3);
+ tcg_gen_sub_tl(t5, t5, t2);
+ break;
+ }
+
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_extract_tl(t1, t1, 0, 16);
+ tcg_gen_shli_tl(t4, t4, 16);
+ tcg_gen_extract_tl(t5, t5, 0, 16);
+
+ tcg_gen_or_tl(t4, t4, t5);
+ tcg_gen_or_tl(t0, t0, t1);
+ gen_store_mxu_gpr(t4, XRa);
+ gen_store_mxu_gpr(t0, XRd);
+}
+
+/*
+ * Q16ACC XRa, XRb, XRc, XRd, aptn2 - Quad packed
+ * 16-bit addition/subtraction with accumulate.
+ */
+static void gen_mxu_q16acc(DisasContext *ctx)
+{
+ uint32_t aptn2, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv s3 = tcg_temp_new();
+ TCGv s2 = tcg_temp_new();
+ TCGv s1 = tcg_temp_new();
+ TCGv s0 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t1, XRb);
+ tcg_gen_extract_tl(t0, t1, 0, 16);
+ tcg_gen_extract_tl(t1, t1, 16, 16);
+
+ gen_load_mxu_gpr(t3, XRc);
+ tcg_gen_extract_tl(t2, t3, 0, 16);
+ tcg_gen_extract_tl(t3, t3, 16, 16);
+
+ switch (aptn2) {
+ case MXU_APTN2_AA: /* lop +, rop + */
+ tcg_gen_add_tl(s3, t1, t3);
+ tcg_gen_add_tl(s2, t0, t2);
+ tcg_gen_add_tl(s1, t1, t3);
+ tcg_gen_add_tl(s0, t0, t2);
+ break;
+ case MXU_APTN2_AS: /* lop +, rop - */
+ tcg_gen_sub_tl(s3, t1, t3);
+ tcg_gen_sub_tl(s2, t0, t2);
+ tcg_gen_add_tl(s1, t1, t3);
+ tcg_gen_add_tl(s0, t0, t2);
+ break;
+ case MXU_APTN2_SA: /* lop -, rop + */
+ tcg_gen_add_tl(s3, t1, t3);
+ tcg_gen_add_tl(s2, t0, t2);
+ tcg_gen_sub_tl(s1, t1, t3);
+ tcg_gen_sub_tl(s0, t0, t2);
+ break;
+ case MXU_APTN2_SS: /* lop -, rop - */
+ tcg_gen_sub_tl(s3, t1, t3);
+ tcg_gen_sub_tl(s2, t0, t2);
+ tcg_gen_sub_tl(s1, t1, t3);
+ tcg_gen_sub_tl(s0, t0, t2);
+ break;
+ }
+
+ if (XRa != 0) {
+ tcg_gen_add_tl(t0, mxu_gpr[XRa - 1], s0);
+ tcg_gen_extract_tl(t0, t0, 0, 16);
+ tcg_gen_extract_tl(t1, mxu_gpr[XRa - 1], 16, 16);
+ tcg_gen_add_tl(t1, t1, s1);
+ tcg_gen_shli_tl(t1, t1, 16);
+ tcg_gen_or_tl(mxu_gpr[XRa - 1], t1, t0);
+ }
+
+ if (XRd != 0) {
+ tcg_gen_add_tl(t0, mxu_gpr[XRd - 1], s2);
+ tcg_gen_extract_tl(t0, t0, 0, 16);
+ tcg_gen_extract_tl(t1, mxu_gpr[XRd - 1], 16, 16);
+ tcg_gen_add_tl(t1, t1, s3);
+ tcg_gen_shli_tl(t1, t1, 16);
+ tcg_gen_or_tl(mxu_gpr[XRd - 1], t1, t0);
+ }
+}
+
+/*
+ * Q16ACCM XRa, XRb, XRc, XRd, aptn2 - Quad packed
+ * 16-bit accumulate.
+ */
+static void gen_mxu_q16accm(DisasContext *ctx)
+{
+ uint32_t aptn2, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t2, XRb);
+ gen_load_mxu_gpr(t3, XRc);
+
+ if (XRa != 0) {
+ TCGv a0 = tcg_temp_new();
+ TCGv a1 = tcg_temp_new();
+
+ tcg_gen_extract_tl(t0, t2, 0, 16);
+ tcg_gen_extract_tl(t1, t2, 16, 16);
+
+ gen_load_mxu_gpr(a1, XRa);
+ tcg_gen_extract_tl(a0, a1, 0, 16);
+ tcg_gen_extract_tl(a1, a1, 16, 16);
+
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(a0, a0, t0);
+ tcg_gen_sub_tl(a1, a1, t1);
+ } else {
+ tcg_gen_add_tl(a0, a0, t0);
+ tcg_gen_add_tl(a1, a1, t1);
+ }
+ tcg_gen_extract_tl(a0, a0, 0, 16);
+ tcg_gen_shli_tl(a1, a1, 16);
+ tcg_gen_or_tl(mxu_gpr[XRa - 1], a1, a0);
+ }
+
+ if (XRd != 0) {
+ TCGv a0 = tcg_temp_new();
+ TCGv a1 = tcg_temp_new();
+
+ tcg_gen_extract_tl(t0, t3, 0, 16);
+ tcg_gen_extract_tl(t1, t3, 16, 16);
+
+ gen_load_mxu_gpr(a1, XRd);
+ tcg_gen_extract_tl(a0, a1, 0, 16);
+ tcg_gen_extract_tl(a1, a1, 16, 16);
+
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(a0, a0, t0);
+ tcg_gen_sub_tl(a1, a1, t1);
+ } else {
+ tcg_gen_add_tl(a0, a0, t0);
+ tcg_gen_add_tl(a1, a1, t1);
+ }
+ tcg_gen_extract_tl(a0, a0, 0, 16);
+ tcg_gen_shli_tl(a1, a1, 16);
+ tcg_gen_or_tl(mxu_gpr[XRd - 1], a1, a0);
+ }
+}
+
+
+/*
+ * D16ASUM XRa, XRb, XRc, XRd, aptn2 - Double packed
+ *   16-bit sign-extended addition and accumulation.
+ */
+static void gen_mxu_d16asum(DisasContext *ctx)
+{
+ uint32_t aptn2, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t2, XRb);
+ gen_load_mxu_gpr(t3, XRc);
+
+ if (XRa != 0) {
+ tcg_gen_sextract_tl(t0, t2, 0, 16);
+ tcg_gen_sextract_tl(t1, t2, 16, 16);
+ tcg_gen_add_tl(t0, t0, t1);
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ } else {
+ tcg_gen_add_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ }
+ }
+
+ if (XRd != 0) {
+ tcg_gen_sextract_tl(t0, t3, 0, 16);
+ tcg_gen_sextract_tl(t1, t3, 16, 16);
+ tcg_gen_add_tl(t0, t0, t1);
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t0);
+ } else {
+ tcg_gen_add_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t0);
+ }
+ }
+}
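+
+/*
+ * Worked example for the helper above (illustrative values): with
+ * XRb = 0xffff0005 the two sign-extended halfwords are -1 and 5, so
+ * t0 = 4; with the aptn2 add pattern XRa is incremented by 4, with
+ * the subtract pattern it is decremented by 4.
+ */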
+
+/*
+ * D32ADD XRa, XRb, XRc, XRd, aptn2 - Double
+ * 32 bit pattern addition/subtraction, set carry.
+ *
+ * D32ADDC XRa, XRb, XRc, XRd, aptn2 - Double
+ * 32 bit pattern addition/subtraction with carry.
+ */
+static void gen_mxu_d32add(DisasContext *ctx)
+{
+ uint32_t aptn2, addc, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ addc = extract32(ctx->opcode, 22, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv cr = tcg_temp_new();
+
+ if (unlikely(addc > 1)) {
+ /* opcode incorrect -> do nothing */
+ } else if (addc == 1) {
+ if (unlikely(XRa == 0 && XRd == 0)) {
+ /* destinations are zero register -> do nothing */
+ } else {
+ /* FIXME ??? What if XRa == XRd ??? */
+ /* aptn2 is unused here */
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+ gen_load_mxu_cr(cr);
+ if (XRa != 0) {
+ tcg_gen_extract_tl(t2, cr, 31, 1);
+ tcg_gen_add_tl(t0, t0, t2);
+ tcg_gen_add_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ }
+ if (XRd != 0) {
+ tcg_gen_extract_tl(t2, cr, 30, 1);
+ tcg_gen_add_tl(t1, t1, t2);
+ tcg_gen_add_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t1);
+ }
+ }
+ } else if (unlikely(XRa == 0 && XRd == 0)) {
+ /* destinations are zero register -> do nothing */
+ } else {
+ /* common case */
+ /* FIXME ??? What if XRa == XRd ??? */
+ TCGv carry = tcg_temp_new();
+
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+ gen_load_mxu_cr(cr);
+ if (XRa != 0) {
+ if (aptn2 & 2) {
+ tcg_gen_sub_i32(t2, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_GTU, carry, t0, t1);
+ } else {
+ tcg_gen_add_i32(t2, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_GTU, carry, t0, t2);
+ }
+ tcg_gen_andi_tl(cr, cr, 0x7fffffff);
+ tcg_gen_shli_tl(carry, carry, 31);
+ tcg_gen_or_tl(cr, cr, carry);
+ gen_store_mxu_gpr(t2, XRa);
+ }
+ if (XRd != 0) {
+ if (aptn2 & 1) {
+ tcg_gen_sub_i32(t2, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_GTU, carry, t0, t1);
+ } else {
+ tcg_gen_add_i32(t2, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_GTU, carry, t0, t2);
+ }
+ tcg_gen_andi_tl(cr, cr, 0xbfffffff);
+ tcg_gen_shli_tl(carry, carry, 30);
+ tcg_gen_or_tl(cr, cr, carry);
+ gen_store_mxu_gpr(t2, XRd);
+ }
+ gen_store_mxu_cr(cr);
+ }
+}
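+
+/*
+ * Note on the carry computation above: after t2 = t0 + t1, an
+ * unsigned carry-out occurred iff the result wrapped below an
+ * operand, which is exactly setcond(GTU, t0, t2). For the subtract
+ * patterns the flag is set when t0 is strictly greater than t1
+ * (unsigned), as implemented above.
+ */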
+
+/*
+ * D32ACC XRa, XRb, XRc, XRd, aptn2 - Double
+ * 32 bit pattern addition/subtraction and accumulate.
+ */
+static void gen_mxu_d32acc(DisasContext *ctx)
+{
+ uint32_t aptn2, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ if (unlikely(XRa == 0 && XRd == 0)) {
+ /* destinations are zero register -> do nothing */
+ } else {
+ /* common case */
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+ if (XRa != 0) {
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(t2, t0, t1);
+ } else {
+ tcg_gen_add_tl(t2, t0, t1);
+ }
+ tcg_gen_add_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t2);
+ }
+ if (XRd != 0) {
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(t2, t0, t1);
+ } else {
+ tcg_gen_add_tl(t2, t0, t1);
+ }
+ tcg_gen_add_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t2);
+ }
+ }
+}
+
+/*
+ * D32ACCM XRa, XRb, XRc, XRd, aptn2 - Double
+ * 32 bit pattern addition/subtraction and accumulate.
+ */
+static void gen_mxu_d32accm(DisasContext *ctx)
+{
+ uint32_t aptn2, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ if (unlikely(XRa == 0 && XRd == 0)) {
+ /* destinations are zero register -> do nothing */
+ } else {
+ /* common case */
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+ if (XRa != 0) {
+ tcg_gen_add_tl(t2, t0, t1);
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t2);
+ } else {
+ tcg_gen_add_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t2);
+ }
+ }
+ if (XRd != 0) {
+ tcg_gen_sub_tl(t2, t0, t1);
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t2);
+ } else {
+ tcg_gen_add_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t2);
+ }
+ }
+ }
+}
+
+/*
+ * D32ASUM XRa, XRb, XRc, XRd, aptn2 - Double
+ * 32 bit pattern addition/subtraction.
+ */
+static void gen_mxu_d32asum(DisasContext *ctx)
+{
+ uint32_t aptn2, XRc, XRb, XRa, XRd;
+
+ aptn2 = extract32(ctx->opcode, 24, 2);
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ if (unlikely(XRa == 0 && XRd == 0)) {
+ /* destinations are zero register -> do nothing */
+ } else {
+ /* common case */
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+ if (XRa != 0) {
+ if (aptn2 & 2) {
+ tcg_gen_sub_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ } else {
+ tcg_gen_add_tl(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ }
+ }
+ if (XRd != 0) {
+ if (aptn2 & 1) {
+ tcg_gen_sub_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t1);
+ } else {
+ tcg_gen_add_tl(mxu_gpr[XRd - 1], mxu_gpr[XRd - 1], t1);
+ }
+ }
+ }
+}
+
+/*
+ * MXU instruction category: Miscellaneous
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * S32EXTR S32LUI
+ * S32EXTRV
+ * Q16SAT
+ * Q16SCOP
+ */
+
+/*
+ * S32EXTR XRa, XRd, rs, bits5
+ *   Extract bits5 bits from the 64-bit pair {XRa:XRd},
+ *   starting at offset rs[4:0], and place the result in XRa.
+ */
+static void gen_mxu_s32extr(DisasContext *ctx)
+{
+ TCGv t0, t1, t2, t3;
+ uint32_t XRa, XRd, rs, bits5;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ t3 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRd = extract32(ctx->opcode, 10, 4);
+ bits5 = extract32(ctx->opcode, 16, 5);
+ rs = extract32(ctx->opcode, 21, 5);
+
+    /* {tmp} = {XRa:XRd} >> (64 - rs - bits5); */
+ /* {XRa} = extract({tmp}, 0, bits5); */
+ if (bits5 > 0) {
+ TCGLabel *l_xra_only = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+
+ gen_load_mxu_gpr(t0, XRd);
+ gen_load_mxu_gpr(t1, XRa);
+ gen_load_gpr(t2, rs);
+ tcg_gen_andi_tl(t2, t2, 0x1f);
+ tcg_gen_subfi_tl(t2, 32, t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t2, bits5, l_xra_only);
+ tcg_gen_subfi_tl(t2, bits5, t2);
+ tcg_gen_subfi_tl(t3, 32, t2);
+ tcg_gen_shr_tl(t0, t0, t3);
+ tcg_gen_shl_tl(t1, t1, t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_br(l_done);
+ gen_set_label(l_xra_only);
+ tcg_gen_subi_tl(t2, t2, bits5);
+ tcg_gen_shr_tl(t0, t1, t2);
+ gen_set_label(l_done);
+ tcg_gen_extract_tl(t0, t0, 0, bits5);
+ } else {
+        /* unspecified behavior, but matches tests on real hardware */
+ tcg_gen_movi_tl(t0, 0);
+ }
+ gen_store_mxu_gpr(t0, XRa);
+}
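+
+/*
+ * Worked example (illustrative): for rs = 28 and bits5 = 8 the code
+ * above takes {XRa:XRd} >> (64 - 28 - 8) = >> 28 and keeps the low
+ * 8 bits, i.e. the low nibble of XRa concatenated with the top
+ * nibble of XRd; the branch to l_xra_only handles the case where the
+ * whole field fits inside XRa.
+ */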
+
+/*
+ * S32EXTRV XRa, XRd, rs, rt
+ *   Extract rt[4:0] bits from the 64-bit pair {XRa:XRd},
+ *   starting at offset rs[4:0], and place the result in XRa.
+ */
+static void gen_mxu_s32extrv(DisasContext *ctx)
+{
+ TCGv t0, t1, t2, t3, t4;
+ uint32_t XRa, XRd, rs, rt;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ t3 = tcg_temp_new();
+ t4 = tcg_temp_new();
+ TCGLabel *l_xra_only = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+ TCGLabel *l_zero = gen_new_label();
+ TCGLabel *l_extract = gen_new_label();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRd = extract32(ctx->opcode, 10, 4);
+ rt = extract32(ctx->opcode, 16, 5);
+ rs = extract32(ctx->opcode, 21, 5);
+
+ /* {tmp} = {XRa:XRd} >> (64 - rs - rt) */
+ gen_load_mxu_gpr(t0, XRd);
+ gen_load_mxu_gpr(t1, XRa);
+ gen_load_gpr(t2, rs);
+ gen_load_gpr(t4, rt);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t4, 0, l_zero);
+ tcg_gen_andi_tl(t2, t2, 0x1f);
+ tcg_gen_subfi_tl(t2, 32, t2);
+ tcg_gen_brcond_tl(TCG_COND_GE, t2, t4, l_xra_only);
+ tcg_gen_sub_tl(t2, t4, t2);
+ tcg_gen_subfi_tl(t3, 32, t2);
+ tcg_gen_shr_tl(t0, t0, t3);
+ tcg_gen_shl_tl(t1, t1, t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_br(l_extract);
+
+ gen_set_label(l_xra_only);
+ tcg_gen_sub_tl(t2, t2, t4);
+ tcg_gen_shr_tl(t0, t1, t2);
+ tcg_gen_br(l_extract);
+
+    /* unspecified behavior, but matches tests on real hardware */
+ gen_set_label(l_zero);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l_done);
+
+ /* {XRa} = extract({tmp}, 0, rt) */
+ gen_set_label(l_extract);
+ tcg_gen_subfi_tl(t4, 32, t4);
+ tcg_gen_shl_tl(t0, t0, t4);
+ tcg_gen_shr_tl(t0, t0, t4);
+
+ gen_set_label(l_done);
+ gen_store_mxu_gpr(t0, XRa);
+}
+
+/*
+ * S32LUI XRa, S8, optn3
+ *   Permute the immediate S8 value to form a word
+ *   that is written to XRa.
+ */
+static void gen_mxu_s32lui(DisasContext *ctx)
+{
+ uint32_t XRa, s8, optn3, pad;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s8 = extract32(ctx->opcode, 10, 8);
+ pad = extract32(ctx->opcode, 21, 2);
+ optn3 = extract32(ctx->opcode, 23, 3);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else {
+ uint32_t s16;
+ TCGv t0 = tcg_temp_new();
+
+ switch (optn3) {
+ case 0:
+ tcg_gen_movi_tl(t0, s8);
+ break;
+ case 1:
+ tcg_gen_movi_tl(t0, s8 << 8);
+ break;
+ case 2:
+ tcg_gen_movi_tl(t0, s8 << 16);
+ break;
+ case 3:
+ tcg_gen_movi_tl(t0, s8 << 24);
+ break;
+ case 4:
+ tcg_gen_movi_tl(t0, (s8 << 16) | s8);
+ break;
+ case 5:
+ tcg_gen_movi_tl(t0, (s8 << 24) | (s8 << 8));
+ break;
+ case 6:
+ s16 = (uint16_t)(int16_t)(int8_t)s8;
+ tcg_gen_movi_tl(t0, (s16 << 16) | s16);
+ break;
+ case 7:
+ tcg_gen_movi_tl(t0, (s8 << 24) | (s8 << 16) | (s8 << 8) | s8);
+ break;
+ }
+ gen_store_mxu_gpr(t0, XRa);
+ }
+}
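+
+/*
+ * Illustrative optn3 patterns for the code above: with s8 = 0xa5,
+ * optn3 = 7 broadcasts the byte to all four lanes (0xa5a5a5a5),
+ * while optn3 = 6 first sign-extends to 16 bits and then replicates,
+ * so s8 = 0x80 yields 0xff80ff80.
+ */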
+
+/*
+ * Q16SAT XRa, XRb, XRc
+ *   Pack four 16-bit signed integers from XRb and XRc into
+ *   four saturated unsigned 8-bit values in XRa.
+ */
+static void gen_mxu_Q16SAT(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 3);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t2, 0);
+ if (XRb != 0) {
+ TCGLabel *l_less_hi = gen_new_label();
+ TCGLabel *l_less_lo = gen_new_label();
+ TCGLabel *l_lo = gen_new_label();
+ TCGLabel *l_greater_hi = gen_new_label();
+ TCGLabel *l_greater_lo = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+
+ tcg_gen_sari_tl(t0, mxu_gpr[XRb - 1], 16);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l_less_hi);
+ tcg_gen_brcondi_tl(TCG_COND_GT, t0, 255, l_greater_hi);
+ tcg_gen_br(l_lo);
+ gen_set_label(l_less_hi);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l_lo);
+ gen_set_label(l_greater_hi);
+ tcg_gen_movi_tl(t0, 255);
+
+ gen_set_label(l_lo);
+ tcg_gen_shli_tl(t1, mxu_gpr[XRb - 1], 16);
+ tcg_gen_sari_tl(t1, t1, 16);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l_less_lo);
+ tcg_gen_brcondi_tl(TCG_COND_GT, t1, 255, l_greater_lo);
+ tcg_gen_br(l_done);
+ gen_set_label(l_less_lo);
+ tcg_gen_movi_tl(t1, 0);
+ tcg_gen_br(l_done);
+ gen_set_label(l_greater_lo);
+ tcg_gen_movi_tl(t1, 255);
+
+ gen_set_label(l_done);
+ tcg_gen_shli_tl(t2, t0, 24);
+ tcg_gen_shli_tl(t1, t1, 16);
+ tcg_gen_or_tl(t2, t2, t1);
+ }
+
+ if (XRc != 0) {
+ TCGLabel *l_less_hi = gen_new_label();
+ TCGLabel *l_less_lo = gen_new_label();
+ TCGLabel *l_lo = gen_new_label();
+ TCGLabel *l_greater_hi = gen_new_label();
+ TCGLabel *l_greater_lo = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+
+ tcg_gen_sari_tl(t0, mxu_gpr[XRc - 1], 16);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l_less_hi);
+ tcg_gen_brcondi_tl(TCG_COND_GT, t0, 255, l_greater_hi);
+ tcg_gen_br(l_lo);
+ gen_set_label(l_less_hi);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l_lo);
+ gen_set_label(l_greater_hi);
+ tcg_gen_movi_tl(t0, 255);
+
+ gen_set_label(l_lo);
+ tcg_gen_shli_tl(t1, mxu_gpr[XRc - 1], 16);
+ tcg_gen_sari_tl(t1, t1, 16);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l_less_lo);
+ tcg_gen_brcondi_tl(TCG_COND_GT, t1, 255, l_greater_lo);
+ tcg_gen_br(l_done);
+ gen_set_label(l_less_lo);
+ tcg_gen_movi_tl(t1, 0);
+ tcg_gen_br(l_done);
+ gen_set_label(l_greater_lo);
+ tcg_gen_movi_tl(t1, 255);
+
+ gen_set_label(l_done);
+ tcg_gen_shli_tl(t0, t0, 8);
+ tcg_gen_or_tl(t2, t2, t0);
+ tcg_gen_or_tl(t2, t2, t1);
+ }
+ gen_store_mxu_gpr(t2, XRa);
+ }
+}
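+
+/*
+ * Saturation example for the code above (illustrative values): each
+ * signed 16-bit lane is clamped to [0, 255], so 0x1234 -> 0xff,
+ * 0xffff (-1) -> 0x00 and 0x007f -> 0x7f; the clamped bytes are
+ * packed as {XRb.hi, XRb.lo, XRc.hi, XRc.lo} into XRa.
+ */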
+
+/*
+ * Q16SCOP XRa, XRd, XRb, XRc
+ *   Determine the sign of each quad packed 16-bit signed value
+ *   in XRb and XRc, putting the results in XRa and XRd respectively.
+ */
+static void gen_mxu_q16scop(DisasContext *ctx)
+{
+ uint32_t XRd, XRc, XRb, XRa;
+
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ TCGLabel *l_b_hi_lt = gen_new_label();
+ TCGLabel *l_b_hi_gt = gen_new_label();
+ TCGLabel *l_b_lo = gen_new_label();
+ TCGLabel *l_b_lo_lt = gen_new_label();
+ TCGLabel *l_c_hi = gen_new_label();
+ TCGLabel *l_c_hi_lt = gen_new_label();
+ TCGLabel *l_c_hi_gt = gen_new_label();
+ TCGLabel *l_c_lo = gen_new_label();
+ TCGLabel *l_c_lo_lt = gen_new_label();
+ TCGLabel *l_done = gen_new_label();
+
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+
+ tcg_gen_sextract_tl(t2, t0, 16, 16);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t2, 0, l_b_hi_lt);
+ tcg_gen_brcondi_tl(TCG_COND_GT, t2, 0, l_b_hi_gt);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_br(l_b_lo);
+ gen_set_label(l_b_hi_lt);
+ tcg_gen_movi_tl(t3, 0xffff0000);
+ tcg_gen_br(l_b_lo);
+ gen_set_label(l_b_hi_gt);
+ tcg_gen_movi_tl(t3, 0x00010000);
+
+ gen_set_label(l_b_lo);
+ tcg_gen_sextract_tl(t2, t0, 0, 16);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l_c_hi);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t2, 0, l_b_lo_lt);
+ tcg_gen_ori_tl(t3, t3, 0x00000001);
+ tcg_gen_br(l_c_hi);
+ gen_set_label(l_b_lo_lt);
+ tcg_gen_ori_tl(t3, t3, 0x0000ffff);
+ tcg_gen_br(l_c_hi);
+
+ gen_set_label(l_c_hi);
+ tcg_gen_sextract_tl(t2, t1, 16, 16);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t2, 0, l_c_hi_lt);
+ tcg_gen_brcondi_tl(TCG_COND_GT, t2, 0, l_c_hi_gt);
+ tcg_gen_movi_tl(t4, 0);
+ tcg_gen_br(l_c_lo);
+ gen_set_label(l_c_hi_lt);
+ tcg_gen_movi_tl(t4, 0xffff0000);
+ tcg_gen_br(l_c_lo);
+ gen_set_label(l_c_hi_gt);
+ tcg_gen_movi_tl(t4, 0x00010000);
+
+ gen_set_label(l_c_lo);
+ tcg_gen_sextract_tl(t2, t1, 0, 16);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l_done);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t2, 0, l_c_lo_lt);
+ tcg_gen_ori_tl(t4, t4, 0x00000001);
+ tcg_gen_br(l_done);
+ gen_set_label(l_c_lo_lt);
+ tcg_gen_ori_tl(t4, t4, 0x0000ffff);
+
+ gen_set_label(l_done);
+ gen_store_mxu_gpr(t3, XRa);
+ gen_store_mxu_gpr(t4, XRd);
+}
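+
+/*
+ * Worked example (illustrative): each 16-bit lane is classified as
+ * 0x0001 (positive), 0x0000 (zero) or 0xffff (negative), so
+ * XRb = 0x80000001 produces XRa = 0xffff0001; XRc is classified the
+ * same way into XRd.
+ */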
+
+/*
+ * S32SFL XRa, XRd, XRb, XRc
+ * Shuffle bytes according to one of four patterns.
+ */
+static void gen_mxu_s32sfl(DisasContext *ctx)
+{
+ uint32_t XRd, XRc, XRb, XRa, ptn2;
+
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+ ptn2 = extract32(ctx->opcode, 24, 2);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+
+ switch (ptn2) {
+ case 0:
+ tcg_gen_andi_tl(t2, t0, 0xff000000);
+ tcg_gen_andi_tl(t3, t1, 0x000000ff);
+ tcg_gen_deposit_tl(t3, t3, t0, 8, 8);
+ tcg_gen_shri_tl(t0, t0, 8);
+ tcg_gen_shri_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t3, t3, t0, 24, 8);
+ tcg_gen_deposit_tl(t3, t3, t1, 16, 8);
+ tcg_gen_shri_tl(t0, t0, 8);
+ tcg_gen_shri_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t2, t2, t0, 8, 8);
+ tcg_gen_deposit_tl(t2, t2, t1, 0, 8);
+ tcg_gen_shri_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t2, t2, t1, 16, 8);
+ break;
+ case 1:
+ tcg_gen_andi_tl(t2, t0, 0xff000000);
+ tcg_gen_andi_tl(t3, t1, 0x000000ff);
+ tcg_gen_deposit_tl(t3, t3, t0, 16, 8);
+ tcg_gen_shri_tl(t0, t0, 8);
+ tcg_gen_shri_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t2, t2, t0, 16, 8);
+ tcg_gen_deposit_tl(t2, t2, t1, 0, 8);
+ tcg_gen_shri_tl(t0, t0, 8);
+ tcg_gen_shri_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t3, t3, t0, 24, 8);
+ tcg_gen_deposit_tl(t3, t3, t1, 8, 8);
+ tcg_gen_shri_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t2, t2, t1, 8, 8);
+ break;
+ case 2:
+ tcg_gen_andi_tl(t2, t0, 0xff00ff00);
+ tcg_gen_andi_tl(t3, t1, 0x00ff00ff);
+ tcg_gen_deposit_tl(t3, t3, t0, 8, 8);
+ tcg_gen_shri_tl(t0, t0, 16);
+ tcg_gen_shri_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t2, t2, t1, 0, 8);
+ tcg_gen_deposit_tl(t3, t3, t0, 24, 8);
+ tcg_gen_shri_tl(t1, t1, 16);
+ tcg_gen_deposit_tl(t2, t2, t1, 16, 8);
+ break;
+ case 3:
+ tcg_gen_andi_tl(t2, t0, 0xffff0000);
+ tcg_gen_andi_tl(t3, t1, 0x0000ffff);
+ tcg_gen_shri_tl(t1, t1, 16);
+ tcg_gen_deposit_tl(t2, t2, t1, 0, 16);
+ tcg_gen_deposit_tl(t3, t3, t0, 16, 16);
+ break;
+ }
+
+ gen_store_mxu_gpr(t2, XRa);
+ gen_store_mxu_gpr(t3, XRd);
+}
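+
+/*
+ * Worked example for ptn2 = 3 above (illustrative values): with
+ * XRb = 0xaabbccdd and XRc = 0x11223344 the high halfwords are
+ * paired into XRa = 0xaabb1122 and the low halfwords into
+ * XRd = 0xccdd3344.
+ */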
+
+/*
+ * Q8SAD XRa, XRd, XRb, XRc
+ *   Typical SAD operation for motion estimation.
+ */
+static void gen_mxu_q8sad(DisasContext *ctx)
+{
+ uint32_t XRd, XRc, XRb, XRa;
+
+ XRd = extract32(ctx->opcode, 18, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+ TCGv t5 = tcg_temp_new();
+
+ gen_load_mxu_gpr(t2, XRb);
+ gen_load_mxu_gpr(t3, XRc);
+ gen_load_mxu_gpr(t5, XRd);
+ tcg_gen_movi_tl(t4, 0);
+
+ for (int i = 0; i < 4; i++) {
+ tcg_gen_andi_tl(t0, t2, 0xff);
+ tcg_gen_andi_tl(t1, t3, 0xff);
+ tcg_gen_sub_tl(t0, t0, t1);
+ tcg_gen_abs_tl(t0, t0);
+ tcg_gen_add_tl(t4, t4, t0);
+ if (i < 3) {
+ tcg_gen_shri_tl(t2, t2, 8);
+ tcg_gen_shri_tl(t3, t3, 8);
+ }
+ }
+ tcg_gen_add_tl(t5, t5, t4);
+ gen_store_mxu_gpr(t4, XRa);
+ gen_store_mxu_gpr(t5, XRd);
+}
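+
+/*
+ * Worked example (illustrative): for XRb = 0x01050a00 and
+ * XRc = 0x03010a04 the byte-wise absolute differences are
+ * 2 + 4 + 0 + 4, so XRa is set to 10 and XRd is incremented by 10.
+ */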
+
+/*
+ * MXU instruction category: align
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* S32ALN S32ALNI
*/
tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x00FFFFFF);
tcg_gen_shli_i32(t0, t0, 8);
- tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFF000000);
- tcg_gen_shri_i32(t1, t1, 24);
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFF000000);
+ tcg_gen_shri_i32(t1, t1, 24);
+
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
+ }
+ break;
+ case MXU_OPTN3_PTN2:
+ {
+ /* */
+ /* XRb XRc */
+ /* +-------------------+ */
+ /* A B | C D E F | G H */
+ /* +---------+---------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x0000FFFF);
+ tcg_gen_shli_i32(t0, t0, 16);
+
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFF0000);
+ tcg_gen_shri_i32(t1, t1, 16);
+
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
+ }
+ break;
+ case MXU_OPTN3_PTN3:
+ {
+ /* */
+ /* XRb XRc */
+ /* +-------------------+ */
+ /* A B C | D E F G | H */
+ /* +---------+---------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x000000FF);
+ tcg_gen_shli_i32(t0, t0, 24);
+
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFFFF00);
+ tcg_gen_shri_i32(t1, t1, 8);
+
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
+ }
+ break;
+ case MXU_OPTN3_PTN4:
+ {
+ /* */
+ /* XRb XRc */
+ /* +---------------+ */
+ /* A B C D | E F G H | */
+ /* +-------+-------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1]);
+ }
+ break;
+ }
+ }
+}
+
+/*
+ * S32ALN XRc, XRb, XRa, rs
+ * Arrange bytes from XRb and XRc according to one of five sets of
+ * rules determined by rs[2:0], and place the result in XRa.
+ */
+static void gen_mxu_S32ALN(DisasContext *ctx)
+{
+ uint32_t rs, XRc, XRb, XRa;
+
+ rs = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to all 0s */
+ tcg_gen_movi_tl(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGLabel *l_exit = gen_new_label();
+ TCGLabel *l_b_only = gen_new_label();
+ TCGLabel *l_c_only = gen_new_label();
+
+ gen_load_mxu_gpr(t0, XRb);
+ gen_load_mxu_gpr(t1, XRc);
+ gen_load_gpr(t2, rs);
+ tcg_gen_andi_tl(t2, t2, 0x07);
+
+ /* do nothing for undefined cases */
+ tcg_gen_brcondi_tl(TCG_COND_GE, t2, 5, l_exit);
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l_b_only);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 4, l_c_only);
+
+ tcg_gen_shli_tl(t2, t2, 3);
+ tcg_gen_subfi_tl(t3, 32, t2);
+
+ tcg_gen_shl_tl(t0, t0, t2);
+ tcg_gen_shr_tl(t1, t1, t3);
+ tcg_gen_or_tl(mxu_gpr[XRa - 1], t0, t1);
+ tcg_gen_br(l_exit);
+
+ gen_set_label(l_b_only);
+ gen_store_mxu_gpr(t0, XRa);
+ tcg_gen_br(l_exit);
+
+ gen_set_label(l_c_only);
+ gen_store_mxu_gpr(t1, XRa);
+
+ gen_set_label(l_exit);
+ }
+}
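+
+/*
+ * Alignment example for the code above (illustrative): with
+ * rs & 7 = 1 the result is (XRb << 8) | (XRc >> 24), i.e. bytes BCDE
+ * of the concatenated {XRb:XRc}; rs = 0 selects XRb, rs = 4 selects
+ * XRc, and rs in 5..7 leaves XRa unchanged.
+ */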
+
+/*
+ * S32MADD XRa, XRd, rb, rc
+ *   32 to 64 bit signed multiply with subsequent add;
+ *   result stored in the {XRa, XRd} pair, HI/LO updated as well.
+ * S32MADDU XRa, XRd, rb, rc
+ *   32 to 64 bit unsigned multiply with subsequent add;
+ *   result stored in the {XRa, XRd} pair, HI/LO updated as well.
+ * S32MSUB XRa, XRd, rb, rc
+ *   32 to 64 bit signed multiply with subsequent subtract;
+ *   result stored in the {XRa, XRd} pair, HI/LO updated as well.
+ * S32MSUBU XRa, XRd, rb, rc
+ *   32 to 64 bit unsigned multiply with subsequent subtract;
+ *   result stored in the {XRa, XRd} pair, HI/LO updated as well.
+ */
+static void gen_mxu_s32madd_sub(DisasContext *ctx, bool sub, bool uns)
+{
+ uint32_t XRa, XRd, Rb, Rc;
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRd = extract32(ctx->opcode, 10, 4);
+ Rb = extract32(ctx->opcode, 16, 5);
+ Rc = extract32(ctx->opcode, 21, 5);
+
+ if (unlikely(Rb == 0 || Rc == 0)) {
+ /* do nothing because x + 0 * y => x */
+ } else if (unlikely(XRa == 0 && XRd == 0)) {
+        /* do nothing because the result is simply dropped */
+ } else {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ gen_load_gpr(t0, Rb);
+ gen_load_gpr(t1, Rc);
+
+ if (uns) {
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ } else {
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ }
+ tcg_gen_mul_i64(t2, t2, t3);
+
+ gen_load_mxu_gpr(t0, XRa);
+ gen_load_mxu_gpr(t1, XRd);
+
+ tcg_gen_concat_tl_i64(t3, t1, t0);
+ if (sub) {
+ tcg_gen_sub_i64(t3, t3, t2);
+ } else {
+ tcg_gen_add_i64(t3, t3, t2);
+ }
+ gen_move_low32(t1, t3);
+ gen_move_high32(t0, t3);
+
+ tcg_gen_mov_tl(cpu_HI[0], t0);
+ tcg_gen_mov_tl(cpu_LO[0], t1);
+
+ gen_store_mxu_gpr(t1, XRd);
+ gen_store_mxu_gpr(t0, XRa);
+ }
+}
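+
+/*
+ * Sketch of the data flow above: {XRa:XRd} (XRa high) is read as a
+ * 64-bit accumulator, Rb * Rc is added to or subtracted from it, and
+ * the result is written back to XRa/XRd and mirrored into HI/LO;
+ * e.g. S32MADD with {XRa:XRd} = 0x100000000 and Rb = Rc = 2 yields
+ * 0x100000004.
+ */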
+
+/*
+ * Decoding engine for MXU
+ * =======================
+ */
+
+static void decode_opc_mxu__pool00(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 18, 3);
+
+ switch (opcode) {
+ case OPC_MXU_S32MAX:
+ case OPC_MXU_S32MIN:
+ gen_mxu_S32MAX_S32MIN(ctx);
+ break;
+ case OPC_MXU_D16MAX:
+ case OPC_MXU_D16MIN:
+ gen_mxu_D16MAX_D16MIN(ctx);
+ break;
+ case OPC_MXU_Q8MAX:
+ case OPC_MXU_Q8MIN:
+ gen_mxu_Q8MAX_Q8MIN(ctx);
+ break;
+ case OPC_MXU_Q8SLT:
+ gen_mxu_q8slt(ctx, false);
+ break;
+ case OPC_MXU_Q8SLTU:
+ gen_mxu_q8slt(ctx, true);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static bool decode_opc_mxu_s32madd_sub(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 0, 6);
+ uint32_t pad = extract32(ctx->opcode, 14, 2);
+
+ if (pad != 2) {
+ /* MIPS32R1 MADD/MADDU/MSUB/MSUBU are on pad == 0 */
+ return false;
+ }
+
+ switch (opcode) {
+ case OPC_MXU_S32MADD:
+ gen_mxu_s32madd_sub(ctx, false, false);
+ break;
+ case OPC_MXU_S32MADDU:
+ gen_mxu_s32madd_sub(ctx, false, true);
+ break;
+ case OPC_MXU_S32MSUB:
+ gen_mxu_s32madd_sub(ctx, true, false);
+ break;
+ case OPC_MXU_S32MSUBU:
+ gen_mxu_s32madd_sub(ctx, true, true);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static void decode_opc_mxu__pool01(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 18, 3);
+
+ switch (opcode) {
+ case OPC_MXU_S32SLT:
+ gen_mxu_S32SLT(ctx);
+ break;
+ case OPC_MXU_D16SLT:
+ gen_mxu_D16SLT(ctx);
+ break;
+ case OPC_MXU_D16AVG:
+ gen_mxu_d16avg(ctx, false);
+ break;
+ case OPC_MXU_D16AVGR:
+ gen_mxu_d16avg(ctx, true);
+ break;
+ case OPC_MXU_Q8AVG:
+ gen_mxu_q8avg(ctx, false);
+ break;
+ case OPC_MXU_Q8AVGR:
+ gen_mxu_q8avg(ctx, true);
+ break;
+ case OPC_MXU_Q8ADD:
+ gen_mxu_Q8ADD(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool02(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 18, 3);
+
+ switch (opcode) {
+ case OPC_MXU_S32CPS:
+ gen_mxu_S32CPS(ctx);
+ break;
+ case OPC_MXU_D16CPS:
+ gen_mxu_D16CPS(ctx);
+ break;
+ case OPC_MXU_Q8ABD:
+ gen_mxu_Q8ABD(ctx);
+ break;
+ case OPC_MXU_Q16SAT:
+ gen_mxu_Q16SAT(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool03(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 24, 2);
+
+ switch (opcode) {
+ case OPC_MXU_D16MULF:
+ gen_mxu_d16mul(ctx, true, true);
+ break;
+ case OPC_MXU_D16MULE:
+ gen_mxu_d16mul(ctx, true, false);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool04(DisasContext *ctx)
+{
+ uint32_t reversed = extract32(ctx->opcode, 20, 1);
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
+
+    /* Don't care about the opcode bits, as their meaning is not yet known */
+ switch (opcode) {
+ default:
+ gen_mxu_s32ldxx(ctx, reversed, false);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool05(DisasContext *ctx)
+{
+ uint32_t reversed = extract32(ctx->opcode, 20, 1);
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
+
+    /* Don't care about the opcode bits, as their meaning is not yet known */
+ switch (opcode) {
+ default:
+ gen_mxu_s32stxx(ctx, reversed, false);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool06(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
+ uint32_t strd2 = extract32(ctx->opcode, 14, 2);
- tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
- }
+ switch (opcode) {
+ case OPC_MXU_S32LDST:
+ case OPC_MXU_S32LDSTR:
+ if (strd2 <= 2) {
+ gen_mxu_s32ldxvx(ctx, opcode, false, strd2);
break;
- case MXU_OPTN3_PTN2:
- {
- /* */
- /* XRb XRc */
- /* +-------------------+ */
- /* A B | C D E F | G H */
- /* +---------+---------+ */
- /* | */
- /* XRa */
- /* */
+ }
+ /* fallthrough */
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
- TCGv_i32 t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_temp_new();
+static void decode_opc_mxu__pool07(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
+ uint32_t strd2 = extract32(ctx->opcode, 14, 2);
- tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x0000FFFF);
- tcg_gen_shli_i32(t0, t0, 16);
+ switch (opcode) {
+ case OPC_MXU_S32LDST:
+ case OPC_MXU_S32LDSTR:
+ if (strd2 <= 2) {
+ gen_mxu_s32stxvx(ctx, opcode, false, strd2);
+ break;
+ }
+ /* fallthrough */
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
- tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFF0000);
- tcg_gen_shri_i32(t1, t1, 16);
+static void decode_opc_mxu__pool08(DisasContext *ctx)
+{
+ uint32_t reversed = extract32(ctx->opcode, 20, 1);
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
- tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
- }
- break;
- case MXU_OPTN3_PTN3:
- {
- /* */
- /* XRb XRc */
- /* +-------------------+ */
- /* A B C | D E F G | H */
- /* +---------+---------+ */
- /* | */
- /* XRa */
- /* */
+    /* Don't care about the opcode bits, as their meaning is not yet known */
+ switch (opcode) {
+ default:
+ gen_mxu_s32ldxx(ctx, reversed, true);
+ break;
+ }
+}
- TCGv_i32 t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_temp_new();
+static void decode_opc_mxu__pool09(DisasContext *ctx)
+{
+ uint32_t reversed = extract32(ctx->opcode, 20, 1);
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
- tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x000000FF);
- tcg_gen_shli_i32(t0, t0, 24);
+    /* Don't care about the opcode bits, as their meaning is not yet known */
+ switch (opcode) {
+ default:
+ gen_mxu_s32stxx(ctx, reversed, true);
+ break;
+ }
+}
- tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFFFF00);
- tcg_gen_shri_i32(t1, t1, 8);
+static void decode_opc_mxu__pool10(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
+ uint32_t strd2 = extract32(ctx->opcode, 14, 2);
- tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
- }
+ switch (opcode) {
+ case OPC_MXU_S32LDST:
+ case OPC_MXU_S32LDSTR:
+ if (strd2 <= 2) {
+ gen_mxu_s32ldxvx(ctx, opcode, true, strd2);
break;
- case MXU_OPTN3_PTN4:
- {
- /* */
- /* XRb XRc */
- /* +---------------+ */
- /* A B C D | E F G H | */
- /* +-------+-------+ */
- /* | */
- /* XRa */
- /* */
+ }
+ /* fallthrough */
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
- tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1]);
- }
+static void decode_opc_mxu__pool11(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 10, 4);
+ uint32_t strd2 = extract32(ctx->opcode, 14, 2);
+
+ switch (opcode) {
+ case OPC_MXU_S32LDST:
+ case OPC_MXU_S32LDSTR:
+ if (strd2 <= 2) {
+ gen_mxu_s32stxvx(ctx, opcode, true, strd2);
break;
}
+ /* fallthrough */
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
}
}
+static void decode_opc_mxu__pool12(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 22, 2);
-/*
- * Decoding engine for MXU
- * =======================
- */
+ switch (opcode) {
+ case OPC_MXU_D32ACC:
+ gen_mxu_d32acc(ctx);
+ break;
+ case OPC_MXU_D32ACCM:
+ gen_mxu_d32accm(ctx);
+ break;
+ case OPC_MXU_D32ASUM:
+ gen_mxu_d32asum(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
-static void decode_opc_mxu__pool00(DisasContext *ctx)
+static void decode_opc_mxu__pool13(DisasContext *ctx)
{
- uint32_t opcode = extract32(ctx->opcode, 18, 3);
+ uint32_t opcode = extract32(ctx->opcode, 22, 2);
switch (opcode) {
- case OPC_MXU_S32MAX:
- case OPC_MXU_S32MIN:
- gen_mxu_S32MAX_S32MIN(ctx);
+ case OPC_MXU_Q16ACC:
+ gen_mxu_q16acc(ctx);
break;
- case OPC_MXU_D16MAX:
- case OPC_MXU_D16MIN:
- gen_mxu_D16MAX_D16MIN(ctx);
+ case OPC_MXU_Q16ACCM:
+ gen_mxu_q16accm(ctx);
break;
- case OPC_MXU_Q8MAX:
- case OPC_MXU_Q8MIN:
- gen_mxu_Q8MAX_Q8MIN(ctx);
+ case OPC_MXU_D16ASUM:
+ gen_mxu_d16asum(ctx);
break;
default:
MIPS_INVAL("decode_opc_mxu");
}
}
-static void decode_opc_mxu__pool04(DisasContext *ctx)
+static void decode_opc_mxu__pool14(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 22, 2);
+
+ switch (opcode) {
+ case OPC_MXU_Q8ADDE:
+ gen_mxu_q8adde(ctx, false);
+ break;
+ case OPC_MXU_D8SUM:
+ gen_mxu_d8sum(ctx, false);
+ break;
+ case OPC_MXU_D8SUMC:
+ gen_mxu_d8sum(ctx, true);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool15(DisasContext *ctx)
{
- uint32_t opcode = extract32(ctx->opcode, 20, 1);
+ uint32_t opcode = extract32(ctx->opcode, 14, 2);
switch (opcode) {
- case OPC_MXU_S32LDD:
- case OPC_MXU_S32LDDR:
- gen_mxu_s32ldd_s32lddr(ctx);
+ case OPC_MXU_S32MUL:
+ gen_mxu_s32mul(ctx, false);
+ break;
+ case OPC_MXU_S32MULU:
+ gen_mxu_s32mul(ctx, true);
+ break;
+ case OPC_MXU_S32EXTR:
+ gen_mxu_s32extr(ctx);
+ break;
+ case OPC_MXU_S32EXTRV:
+ gen_mxu_s32extrv(ctx);
break;
default:
MIPS_INVAL("decode_opc_mxu");
uint32_t opcode = extract32(ctx->opcode, 18, 3);
switch (opcode) {
+ case OPC_MXU_D32SARW:
+ gen_mxu_d32sarl(ctx, true);
+ break;
+ case OPC_MXU_S32ALN:
+ gen_mxu_S32ALN(ctx);
+ break;
case OPC_MXU_S32ALNI:
gen_mxu_S32ALNI(ctx);
break;
+ case OPC_MXU_S32LUI:
+ gen_mxu_s32lui(ctx);
+ break;
case OPC_MXU_S32NOR:
gen_mxu_S32NOR(ctx);
break;
}
}
+static void decode_opc_mxu__pool17(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 6, 3);
+ uint32_t strd2 = extract32(ctx->opcode, 9, 2);
+
+ if (strd2 > 2) {
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ switch (opcode) {
+ case OPC_MXU_LXW:
+ gen_mxu_lxx(ctx, strd2, MO_TE | MO_UL);
+ break;
+ case OPC_MXU_LXB:
+ gen_mxu_lxx(ctx, strd2, MO_TE | MO_SB);
+ break;
+ case OPC_MXU_LXH:
+ gen_mxu_lxx(ctx, strd2, MO_TE | MO_SW);
+ break;
+ case OPC_MXU_LXBU:
+ gen_mxu_lxx(ctx, strd2, MO_TE | MO_UB);
+ break;
+ case OPC_MXU_LXHU:
+ gen_mxu_lxx(ctx, strd2, MO_TE | MO_UW);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool18(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 18, 3);
+
+ switch (opcode) {
+ case OPC_MXU_D32SLLV:
+ gen_mxu_d32sxxv(ctx, false, false);
+ break;
+ case OPC_MXU_D32SLRV:
+ gen_mxu_d32sxxv(ctx, true, false);
+ break;
+ case OPC_MXU_D32SARV:
+ gen_mxu_d32sxxv(ctx, true, true);
+ break;
+ case OPC_MXU_Q16SLLV:
+ gen_mxu_q16sxxv(ctx, false, false);
+ break;
+ case OPC_MXU_Q16SLRV:
+ gen_mxu_q16sxxv(ctx, true, false);
+ break;
+ case OPC_MXU_Q16SARV:
+ gen_mxu_q16sxxv(ctx, true, true);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
static void decode_opc_mxu__pool19(DisasContext *ctx)
{
- uint32_t opcode = extract32(ctx->opcode, 22, 2);
+ uint32_t opcode = extract32(ctx->opcode, 22, 4);
switch (opcode) {
case OPC_MXU_Q8MUL:
+ gen_mxu_q8mul_mac(ctx, false, false);
+ break;
case OPC_MXU_Q8MULSU:
- gen_mxu_q8mul_q8mulsu(ctx);
+ gen_mxu_q8mul_mac(ctx, true, false);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool20(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 18, 3);
+
+ switch (opcode) {
+ case OPC_MXU_Q8MOVZ:
+ gen_mxu_q8movzn(ctx, TCG_COND_NE);
+ break;
+ case OPC_MXU_Q8MOVN:
+ gen_mxu_q8movzn(ctx, TCG_COND_EQ);
+ break;
+ case OPC_MXU_D16MOVZ:
+ gen_mxu_d16movzn(ctx, TCG_COND_NE);
+ break;
+ case OPC_MXU_D16MOVN:
+ gen_mxu_d16movzn(ctx, TCG_COND_EQ);
+ break;
+ case OPC_MXU_S32MOVZ:
+ gen_mxu_s32movzn(ctx, TCG_COND_NE);
+ break;
+ case OPC_MXU_S32MOVN:
+ gen_mxu_s32movzn(ctx, TCG_COND_EQ);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool21(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 22, 2);
+
+ switch (opcode) {
+ case OPC_MXU_Q8MAC:
+ gen_mxu_q8mul_mac(ctx, false, true);
+ break;
+ case OPC_MXU_Q8MACSU:
+ gen_mxu_q8mul_mac(ctx, true, true);
break;
default:
MIPS_INVAL("decode_opc_mxu");
}
}
+
bool decode_ase_mxu(DisasContext *ctx, uint32_t insn)
{
uint32_t opcode = extract32(insn, 0, 6);
tcg_gen_brcondi_tl(TCG_COND_NE, t_mxu_cr, MXU_CR_MXU_EN, l_exit);
switch (opcode) {
+ case OPC_MXU_S32MADD:
+ case OPC_MXU_S32MADDU:
+ case OPC_MXU_S32MSUB:
+ case OPC_MXU_S32MSUBU:
+ return decode_opc_mxu_s32madd_sub(ctx);
case OPC_MXU__POOL00:
decode_opc_mxu__pool00(ctx);
break;
case OPC_MXU_D16MUL:
- gen_mxu_d16mul(ctx);
+ gen_mxu_d16mul(ctx, false, false);
break;
case OPC_MXU_D16MAC:
- gen_mxu_d16mac(ctx);
+ gen_mxu_d16mac(ctx, false, false);
+ break;
+ case OPC_MXU_D16MACF:
+ gen_mxu_d16mac(ctx, true, true);
+ break;
+ case OPC_MXU_D16MADL:
+ gen_mxu_d16madl(ctx);
+ break;
+ case OPC_MXU_S16MAD:
+ gen_mxu_s16mad(ctx);
+ break;
+ case OPC_MXU_Q16ADD:
+ gen_mxu_q16add(ctx);
+ break;
+ case OPC_MXU_D16MACE:
+ gen_mxu_d16mac(ctx, true, false);
+ break;
+ case OPC_MXU__POOL01:
+ decode_opc_mxu__pool01(ctx);
+ break;
+ case OPC_MXU__POOL02:
+ decode_opc_mxu__pool02(ctx);
+ break;
+ case OPC_MXU__POOL03:
+ decode_opc_mxu__pool03(ctx);
break;
case OPC_MXU__POOL04:
decode_opc_mxu__pool04(ctx);
break;
+ case OPC_MXU__POOL05:
+ decode_opc_mxu__pool05(ctx);
+ break;
+ case OPC_MXU__POOL06:
+ decode_opc_mxu__pool06(ctx);
+ break;
+ case OPC_MXU__POOL07:
+ decode_opc_mxu__pool07(ctx);
+ break;
+ case OPC_MXU__POOL08:
+ decode_opc_mxu__pool08(ctx);
+ break;
+ case OPC_MXU__POOL09:
+ decode_opc_mxu__pool09(ctx);
+ break;
+ case OPC_MXU__POOL10:
+ decode_opc_mxu__pool10(ctx);
+ break;
+ case OPC_MXU__POOL11:
+ decode_opc_mxu__pool11(ctx);
+ break;
+ case OPC_MXU_D32ADD:
+ gen_mxu_d32add(ctx);
+ break;
+ case OPC_MXU__POOL12:
+ decode_opc_mxu__pool12(ctx);
+ break;
+ case OPC_MXU__POOL13:
+ decode_opc_mxu__pool13(ctx);
+ break;
+ case OPC_MXU__POOL14:
+ decode_opc_mxu__pool14(ctx);
+ break;
+ case OPC_MXU_Q8ACCE:
+ gen_mxu_q8adde(ctx, true);
+ break;
case OPC_MXU_S8LDD:
- gen_mxu_s8ldd(ctx);
+ gen_mxu_s8ldd(ctx, false);
+ break;
+ case OPC_MXU_S8STD:
+ gen_mxu_s8std(ctx, false);
+ break;
+ case OPC_MXU_S8LDI:
+ gen_mxu_s8ldd(ctx, true);
+ break;
+ case OPC_MXU_S8SDI:
+ gen_mxu_s8std(ctx, true);
+ break;
+ case OPC_MXU__POOL15:
+ decode_opc_mxu__pool15(ctx);
break;
case OPC_MXU__POOL16:
decode_opc_mxu__pool16(ctx);
break;
+ case OPC_MXU__POOL17:
+ decode_opc_mxu__pool17(ctx);
+ break;
+ case OPC_MXU_S16LDD:
+ gen_mxu_s16ldd(ctx, false);
+ break;
+ case OPC_MXU_S16STD:
+ gen_mxu_s16std(ctx, false);
+ break;
+ case OPC_MXU_S16LDI:
+ gen_mxu_s16ldd(ctx, true);
+ break;
+ case OPC_MXU_S16SDI:
+ gen_mxu_s16std(ctx, true);
+ break;
+ case OPC_MXU_D32SLL:
+ gen_mxu_d32sxx(ctx, false, false);
+ break;
+ case OPC_MXU_D32SLR:
+ gen_mxu_d32sxx(ctx, true, false);
+ break;
+ case OPC_MXU_D32SARL:
+ gen_mxu_d32sarl(ctx, false);
+ break;
+ case OPC_MXU_D32SAR:
+ gen_mxu_d32sxx(ctx, true, true);
+ break;
+ case OPC_MXU_Q16SLL:
+ gen_mxu_q16sxx(ctx, false, false);
+ break;
+ case OPC_MXU__POOL18:
+ decode_opc_mxu__pool18(ctx);
+ break;
+ case OPC_MXU_Q16SLR:
+ gen_mxu_q16sxx(ctx, true, false);
+ break;
+ case OPC_MXU_Q16SAR:
+ gen_mxu_q16sxx(ctx, true, true);
+ break;
case OPC_MXU__POOL19:
decode_opc_mxu__pool19(ctx);
break;
+ case OPC_MXU__POOL20:
+ decode_opc_mxu__pool20(ctx);
+ break;
+ case OPC_MXU__POOL21:
+ decode_opc_mxu__pool21(ctx);
+ break;
+ case OPC_MXU_Q16SCOP:
+ gen_mxu_q16scop(ctx);
+ break;
+ case OPC_MXU_Q8MADL:
+ gen_mxu_q8madl(ctx);
+ break;
+ case OPC_MXU_S32SFL:
+ gen_mxu_s32sfl(ctx);
+ break;
+ case OPC_MXU_Q8SAD:
+ gen_mxu_q8sad(ctx);
+ break;
default:
- MIPS_INVAL("decode_opc_mxu");
- gen_reserved_instruction(ctx);
+ return false;
}
gen_set_label(l_exit);
}
}
+#ifdef TARGET_MIPS64
+target_ulong helper_lcsr_cpucfg(CPUMIPSState *env, target_ulong rs)
+{
+ switch (rs) {
+ case 0:
+ return env->CP0_PRid;
+ case 1:
+ return env->lcsr_cpucfg1;
+ case 2:
+ return env->lcsr_cpucfg2;
+ default:
+ return 0;
+ }
+}
+#endif
+
#if !defined(CONFIG_USER_ONLY)
void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
--- /dev/null
+/*
+ * Loongson CSR instructions translation routines
+ *
+ * Copyright (c) 2023 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define GET_MEMTXATTRS(cas) \
+ ((MemTxAttrs){.requester_id = env_cpu(cas)->cpu_index})
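+
+/*
+ * Each IOCSR access is tagged with the issuing vCPU's index as the
+ * requester_id, so devices mapped into env->iocsr.as can tell which
+ * CPU performed the access.
+ */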
+
+uint64_t helper_lcsr_rdcsr(CPUMIPSState *env, target_ulong r_addr)
+{
+ return address_space_ldl(&env->iocsr.as, r_addr,
+ GET_MEMTXATTRS(env), NULL);
+}
+
+uint64_t helper_lcsr_drdcsr(CPUMIPSState *env, target_ulong r_addr)
+{
+ return address_space_ldq(&env->iocsr.as, r_addr,
+ GET_MEMTXATTRS(env), NULL);
+}
+
+void helper_lcsr_wrcsr(CPUMIPSState *env, target_ulong w_addr,
+ target_ulong val)
+{
+ address_space_stl(&env->iocsr.as, w_addr,
+ val, GET_MEMTXATTRS(env), NULL);
+}
+
+void helper_lcsr_dwrcsr(CPUMIPSState *env, target_ulong w_addr,
+ target_ulong val)
+{
+ address_space_stq(&env->iocsr.as, w_addr,
+ val, GET_MEMTXATTRS(env), NULL);
+}
'special_helper.c',
'tlb_helper.c',
))
+
+mips_system_ss.add(when: 'TARGET_MIPS64', if_true: files(
+ 'lcsr_helper.c',
+))
DEF_HELPER_1(eretnc, void, env)
DEF_HELPER_1(deret, void, env)
DEF_HELPER_3(cache, void, env, tl, i32)
+
+#ifdef TARGET_MIPS64
+/* Loongson CSR */
+DEF_HELPER_2(lcsr_rdcsr, i64, env, tl)
+DEF_HELPER_2(lcsr_drdcsr, i64, env, tl)
+DEF_HELPER_3(lcsr_wrcsr, void, env, tl, tl)
+DEF_HELPER_3(lcsr_dwrcsr, void, env, tl, tl)
+#endif
}
#endif
if (TARGET_LONG_BITS == 32 && (ctx->insn_flags & ASE_MXU)) {
- if (MASK_SPECIAL2(ctx->opcode) == OPC_MUL) {
- gen_arith(ctx, OPC_MUL, rd, rs, rt);
- } else {
- decode_ase_mxu(ctx, ctx->opcode);
+ if (decode_ase_mxu(ctx, ctx->opcode)) {
+ break;
}
- break;
}
decode_opc_special2_legacy(env, ctx);
break;
return;
}
#if defined(TARGET_MIPS64)
+ if (ase_lcsr_available(env) && decode_ase_lcsr(ctx, ctx->opcode)) {
+ return;
+ }
if (cpu_supports_isa(env, INSN_OCTEON) && decode_ext_octeon(ctx, ctx->opcode)) {
return;
}
bool decode_ase_msa(DisasContext *ctx, uint32_t insn);
bool decode_ext_txx9(DisasContext *ctx, uint32_t insn);
#if defined(TARGET_MIPS64)
+bool decode_ase_lcsr(DisasContext *ctx, uint32_t insn);
bool decode_ext_tx79(DisasContext *ctx, uint32_t insn);
bool decode_ext_octeon(DisasContext *ctx, uint32_t insn);
#endif
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"
/* RISC-V CPU definitions */
-
-#define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
- (QEMU_VERSION_MINOR << 8) | \
- (QEMU_VERSION_MICRO))
-#define RISCV_CPU_MIMPID RISCV_CPU_MARCHID
-
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
struct isa_ext_data {
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
{#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
+/*
+ * From vector_helper.c
+ * Note that vector data is stored in host-endian 64-bit chunks,
+ * so addressing bytes needs a host-endian fixup.
+ */
+#if HOST_BIG_ENDIAN
+#define BYTE(x) ((x) ^ 7)
+#else
+#define BYTE(x) (x)
+#endif
+
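+/*
+ * For example, on a big-endian host BYTE(0) == 7 and BYTE(8) == 15:
+ * element byte 0 lives in the last byte of its host 64-bit chunk and
+ * byte 8 in the last byte of the following chunk.
+ */
+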
/*
* Here are the ordering rules of extension naming defined by RISC-V
* specification :
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
+ ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
+ ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
+ ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
+ ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
"f30/ft10", "f31/ft11"
};
+const char * const riscv_rvv_regnames[] = {
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6",
+ "v7", "v8", "v9", "v10", "v11", "v12", "v13",
+ "v14", "v15", "v16", "v17", "v18", "v19", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27",
+ "v28", "v29", "v30", "v31"
+};
+
static const char * const riscv_excp_names[] = {
"misaligned_fetch",
"fault_fetch",
set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
env->priv_ver = PRIV_VERSION_1_11_0;
+ cpu->cfg.ext_zfa = true;
cpu->cfg.ext_zfh = true;
cpu->cfg.mmu = true;
cpu->cfg.ext_xtheadba = true;
/* Enable ISA extensions */
cpu->cfg.mmu = true;
+ cpu->cfg.ext_ifencei = true;
+ cpu->cfg.ext_icsr = true;
+ cpu->cfg.pmp = true;
cpu->cfg.ext_icbom = true;
cpu->cfg.cbom_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- int i;
+ int i, j;
+ uint8_t *p;
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVH)) {
}
}
}
+ if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
+ static const int dump_rvv_csrs[] = {
+ CSR_VSTART,
+ CSR_VXSAT,
+ CSR_VXRM,
+ CSR_VCSR,
+ CSR_VL,
+ CSR_VTYPE,
+ CSR_VLENB,
+ };
+ for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
+ int csrno = dump_rvv_csrs[i];
+ target_ulong val = 0;
+ RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
+
+ /*
+ * Rely on the smode, hmode, etc, predicates within csr.c
+ * to do the filtering of the registers that are present.
+ */
+ if (res == RISCV_EXCP_NONE) {
+ qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
+ csr_ops[csrno].name, val);
+ }
+ }
+ uint16_t vlenb = cpu->cfg.vlen >> 3;
+
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
+ p = (uint8_t *)env->vreg;
+ for (j = vlenb - 1 ; j >= 0; j--) {
+ qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
+ }
+ qemu_fprintf(f, "\n");
+ }
+ }
}
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
RISCVCPU *cpu = RISCV_CPU(s);
+ CPURISCVState *env = &cpu->env;
info->target_info = &cpu->cfg;
- switch (riscv_cpu_mxl(&cpu->env)) {
+ switch (env->xl) {
case MXL_RV32:
info->print_insn = print_insn_riscv32;
break;
return;
}
+ if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
+ error_setg(errp, "Zfa extension requires F extension");
+ return;
+ }
+
if (cpu->cfg.ext_zfh) {
cpu->cfg.ext_zfhmin = true;
}
return;
}
+ if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
+ error_setg(errp, "Zfbfmin extension depends on F extension");
+ return;
+ }
+
if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
error_setg(errp, "D extension requires F extension");
return;
return;
}
+ if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
+ error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
+ return;
+ }
+
+ if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
+ error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
+ return;
+ }
+
+ if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
+ error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
+ return;
+ }
+
/* Set the ISA extensions, checks should have happened above */
if (cpu->cfg.ext_zhinx) {
cpu->cfg.ext_zhinxmin = true;
}
}
- if (riscv_has_ext(env, RVC)) {
+    /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
+ if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
cpu->cfg.ext_zca = true;
if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
cpu->cfg.ext_zcf = true;
}
}
-static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
{
- CPUState *cs = CPU(dev);
RISCVCPU *cpu = RISCV_CPU(dev);
CPURISCVState *env = &cpu->env;
- RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
Error *local_err = NULL;
- cpu_exec_realizefn(cs, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return;
- }
-
riscv_cpu_validate_misa_mxl(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
}
#ifndef CONFIG_USER_ONLY
- cs->tcg_cflags |= CF_PCREL;
+ CPU(dev)->tcg_cflags |= CF_PCREL;
if (cpu->cfg.ext_sstc) {
riscv_timer_init(cpu);
}
}
#endif
+}
+
+static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ RISCVCPU *cpu = RISCV_CPU(dev);
+ RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (tcg_enabled()) {
+ riscv_cpu_realize_tcg(dev, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ }
riscv_cpu_finalize_features(cpu, &local_err);
if (local_err != NULL) {
visit_type_bool(v, name, &value, errp);
}
-static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
- {.name = "a", .description = "Atomic instructions",
- .misa_bit = RVA, .enabled = true},
- {.name = "c", .description = "Compressed instructions",
- .misa_bit = RVC, .enabled = true},
- {.name = "d", .description = "Double-precision float point",
- .misa_bit = RVD, .enabled = true},
- {.name = "f", .description = "Single-precision float point",
- .misa_bit = RVF, .enabled = true},
- {.name = "i", .description = "Base integer instruction set",
- .misa_bit = RVI, .enabled = true},
- {.name = "e", .description = "Base integer instruction set (embedded)",
- .misa_bit = RVE, .enabled = false},
- {.name = "m", .description = "Integer multiplication and division",
- .misa_bit = RVM, .enabled = true},
- {.name = "s", .description = "Supervisor-level instructions",
- .misa_bit = RVS, .enabled = true},
- {.name = "u", .description = "User-level instructions",
- .misa_bit = RVU, .enabled = true},
- {.name = "h", .description = "Hypervisor",
- .misa_bit = RVH, .enabled = true},
- {.name = "x-j", .description = "Dynamic translated languages",
- .misa_bit = RVJ, .enabled = false},
- {.name = "v", .description = "Vector operations",
- .misa_bit = RVV, .enabled = false},
- {.name = "g", .description = "General purpose (IMAFD_Zicsr_Zifencei)",
- .misa_bit = RVG, .enabled = false},
+typedef struct misa_ext_info {
+ const char *name;
+ const char *description;
+} MISAExtInfo;
+
+#define MISA_INFO_IDX(_bit) \
+ __builtin_ctz(_bit)
+
+#define MISA_EXT_INFO(_bit, _propname, _descr) \
+ [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
+
+static const MISAExtInfo misa_ext_info_arr[] = {
+ MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
+ MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
+ MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
+ MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
+ MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
+ MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
+ MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
+ MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
+ MISA_EXT_INFO(RVU, "u", "User-level instructions"),
+ MISA_EXT_INFO(RVH, "h", "Hypervisor"),
+ MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
+ MISA_EXT_INFO(RVV, "v", "Vector operations"),
+ MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
+};
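+
+/*
+ * Illustrative mapping, following the macros above: RVA == 1 << 0 is
+ * stored at index 0 and RVC == 1 << 2 at index 2, so a misa bit's
+ * name and description are found in O(1) via __builtin_ctz().
+ */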
+
+static int riscv_validate_misa_info_idx(uint32_t bit)
+{
+ int idx;
+
+ /*
+ * Our lowest valid input (RVA) is 1 and
+ * __builtin_ctz() is UB with zero.
+ */
+ g_assert(bit != 0);
+ idx = MISA_INFO_IDX(bit);
+
+ g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
+ return idx;
+}
+
+const char *riscv_get_misa_ext_name(uint32_t bit)
+{
+ int idx = riscv_validate_misa_info_idx(bit);
+ const char *val = misa_ext_info_arr[idx].name;
+
+ g_assert(val != NULL);
+ return val;
+}
+
+const char *riscv_get_misa_ext_description(uint32_t bit)
+{
+ int idx = riscv_validate_misa_info_idx(bit);
+ const char *val = misa_ext_info_arr[idx].description;
+
+ g_assert(val != NULL);
+ return val;
+}
+
+#define MISA_CFG(_bit, _enabled) \
+ {.misa_bit = _bit, .enabled = _enabled}
+
+static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
+ MISA_CFG(RVA, true),
+ MISA_CFG(RVC, true),
+ MISA_CFG(RVD, true),
+ MISA_CFG(RVF, true),
+ MISA_CFG(RVI, true),
+ MISA_CFG(RVE, false),
+ MISA_CFG(RVM, true),
+ MISA_CFG(RVS, true),
+ MISA_CFG(RVU, true),
+ MISA_CFG(RVH, true),
+ MISA_CFG(RVJ, false),
+ MISA_CFG(RVV, false),
+ MISA_CFG(RVG, false),
};
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
int i;
for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
- const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
+ RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
+ int bit = misa_cfg->misa_bit;
+
+ misa_cfg->name = riscv_get_misa_ext_name(bit);
+ misa_cfg->description = riscv_get_misa_ext_description(bit);
+
+ /* Check if KVM already created the property */
+ if (object_property_find(cpu_obj, misa_cfg->name)) {
+ continue;
+ }
object_property_add(cpu_obj, misa_cfg->name, "bool",
cpu_get_misa_ext_cfg,
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
+ DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
+ DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
+ DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
+ DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
+
DEFINE_PROP_END_OF_LIST(),
};
+
+#ifndef CONFIG_USER_ONLY
+static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ const char *propname = opaque;
+ bool value;
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value) {
+ error_setg(errp, "extension %s is not available with KVM",
+ propname);
+ }
+}
+#endif
+
/*
* Add CPU properties with user-facing flags.
*
Property *prop;
DeviceState *dev = DEVICE(obj);
- riscv_cpu_add_misa_properties(obj);
+#ifndef CONFIG_USER_ONLY
+ riscv_add_satp_mode_properties(obj);
- for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
- qdev_property_add_static(dev, prop);
+ if (kvm_enabled()) {
+ kvm_riscv_init_user_properties(obj);
}
+#endif
+ riscv_cpu_add_misa_properties(obj);
+
+ for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
- riscv_add_satp_mode_properties(obj);
+ if (kvm_enabled()) {
+ /* Check if KVM created the property already */
+ if (object_property_find(obj, prop->name)) {
+ continue;
+ }
+
+ /*
+ * Set the default to disabled for every extension
+ * unknown to KVM and error out if the user attempts
+ * to enable any of them.
+ *
+ * We're giving a pass for non-bool properties since they're
+ * not related to the availability of extensions and can be
+ * safely ignored as is.
+ */
+ if (prop->info == &qdev_prop_bool) {
+ object_property_add(obj, prop->name, "bool",
+ NULL, cpu_set_cfg_unavailable,
+ NULL, (void *)prop->name);
+ continue;
+ }
+ }
#endif
+ qdev_property_add_static(dev, prop);
+ }
}
static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
- DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
- DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
- DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),
-
#ifndef CONFIG_USER_ONLY
DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif
#endif /* !CONFIG_USER_ONLY */
};
+static bool riscv_cpu_is_dynamic(Object *cpu_obj)
+{
+ return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
+}
+
+static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint32_t prev_val = cpu->cfg.mvendorid;
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ if (!dynamic_cpu && prev_val != value) {
+ error_setg(errp, "Unable to change %s mvendorid (0x%x)",
+ object_get_typename(obj), prev_val);
+ return;
+ }
+
+ cpu->cfg.mvendorid = value;
+}
+
+static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
+
+ visit_type_uint32(v, name, &value, errp);
+}
+
+static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint64_t prev_val = cpu->cfg.mimpid;
+ uint64_t value;
+
+ if (!visit_type_uint64(v, name, &value, errp)) {
+ return;
+ }
+
+ if (!dynamic_cpu && prev_val != value) {
+ error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
+ object_get_typename(obj), prev_val);
+ return;
+ }
+
+ cpu->cfg.mimpid = value;
+}
+
+static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
+static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint64_t prev_val = cpu->cfg.marchid;
+ uint64_t value, invalid_val;
+ uint32_t mxlen = 0;
+
+ if (!visit_type_uint64(v, name, &value, errp)) {
+ return;
+ }
+
+ if (!dynamic_cpu && prev_val != value) {
+ error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
+ object_get_typename(obj), prev_val);
+ return;
+ }
+
+ switch (riscv_cpu_mxl(&cpu->env)) {
+ case MXL_RV32:
+ mxlen = 32;
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ mxlen = 64;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ invalid_val = 1ULL << (mxlen - 1);
+
+ if (value == invalid_val) {
+ error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
+ "and the remaining bits zero", mxlen);
+ return;
+ }
+
+ cpu->cfg.marchid = value;
+}
+
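
The reserved-value check above rejects exactly one encoding per XLEN: MSB set with all other bits clear. A standalone sketch of the rule:

#include <stdbool.h>
#include <stdint.h>

static bool marchid_is_reserved(uint64_t val, unsigned mxlen)
{
    /* MSB set with the remaining bits zero is the reserved encoding. */
    uint64_t invalid_val = 1ULL << (mxlen - 1);
    return val == invalid_val;
}

/* marchid_is_reserved(0x8000000000000000ull, 64) -> true  (rejected)
 * marchid_is_reserved(0x8000000000000001ull, 64) -> false (accepted) */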
+static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint64_t value = RISCV_CPU(obj)->cfg.marchid;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
cc->tcg_ops = &riscv_tcg_ops;
+ object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
+ cpu_set_mvendorid, NULL, NULL);
+
+ object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
+ cpu_set_mimpid, NULL, NULL);
+
+ object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
+ cpu_set_marchid, NULL, NULL);
+
device_class_set_props(dc, riscv_cpu_properties);
}
int i;
for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
- if (cpu->env.priv_ver >= isa_edata_arr[i].min_version &&
- isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
+ if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
g_free(old);
old = new;
#define RV(x) ((target_ulong)1 << (x - 'A'))
-/* Consider updating misa_ext_cfgs[] when adding new MISA bits here */
+/*
+ * Consider updating misa_ext_info_arr[] and misa_ext_cfgs[]
+ * when adding new MISA bits here.
+ */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVJ RV('J')
#define RVG RV('G')
+const char *riscv_get_misa_ext_name(uint32_t bit);
+const char *riscv_get_misa_ext_description(uint32_t bit);
/* Privileged specification version */
enum {
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
+FIELD(TB_FLAGS, AXL, 26, 2)
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
return &env_archcpu(env)->cfg;
}
-#if defined(TARGET_RISCV32)
-#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
-#else
-static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
+#if !defined(CONFIG_USER_ONLY)
+static inline int cpu_address_mode(CPURISCVState *env)
+{
+ int mode = env->priv;
+
+ if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
+ mode = get_field(env->mstatus, MSTATUS_MPP);
+ }
+ return mode;
+}
+
+static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
RISCVMXL xl = env->misa_mxl;
-#if !defined(CONFIG_USER_ONLY)
/*
* When emulating a 32-bit-only cpu, use RV32.
* When emulating a 64-bit cpu, and MXL has been reduced to RV32,
* back to RV64 for lower privs.
*/
if (xl != MXL_RV32) {
- switch (env->priv) {
+ switch (mode) {
case PRV_M:
break;
case PRV_U:
break;
}
}
-#endif
return xl;
}
#endif
+#if defined(TARGET_RISCV32)
+#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
+#else
+static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ return cpu_get_xl(env, env->priv);
+#else
+ return env->misa_mxl;
+#endif
+}
+#endif
+
+#if defined(TARGET_RISCV32)
+#define cpu_address_xl(env) ((void)(env), MXL_RV32)
+#else
+static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
+{
+#ifdef CONFIG_USER_ONLY
+ return env->xl;
+#else
+ int mode = cpu_address_mode(env);
+
+ return cpu_get_xl(env, mode);
+#endif
+}
+#endif
+
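A minimal sketch of the rule these helpers implement, using the architectural privilege encodings (U=0, S=1, M=3): in M-mode with mstatus.MPRV set, data accesses are translated with the privilege held in mstatus.MPP, so the address XLEN can differ from the execution XLEN.

static int effective_address_mode(int priv, bool mprv, int mpp)
{
    return (priv == 3 /* M */ && mprv) ? mpp : priv;
}

/* Example: effective_address_mode(3, true, 1) == 1 (S-mode), so on an
 * RV64 hart whose mstatus.SXL has been reduced to RV32,
 * cpu_address_xl() reports MXL_RV32 while cpu_recompute_xl() still
 * reports MXL_RV64 for M-mode execution. */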
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
return 16 << env->xl;
bool ext_svpbmt;
bool ext_zdinx;
bool ext_zawrs;
+ bool ext_zfa;
+ bool ext_zfbfmin;
bool ext_zfh;
bool ext_zfhmin;
bool ext_zfinx;
bool ext_zve64f;
bool ext_zve64d;
bool ext_zmmul;
+ bool ext_zvfbfmin;
+ bool ext_zvfbfwma;
bool ext_zvfh;
bool ext_zvfhmin;
bool ext_smaia;
};
typedef struct RISCVCPUConfig RISCVCPUConfig;
+
+/* Helper functions to test for extensions. */
+
+static inline bool always_true_p(const RISCVCPUConfig *cfg __attribute__((__unused__)))
+{
+ return true;
+}
+
+static inline bool has_xthead_p(const RISCVCPUConfig *cfg)
+{
+ return cfg->ext_xtheadba || cfg->ext_xtheadbb ||
+ cfg->ext_xtheadbs || cfg->ext_xtheadcmo ||
+ cfg->ext_xtheadcondmov ||
+ cfg->ext_xtheadfmemidx || cfg->ext_xtheadfmv ||
+ cfg->ext_xtheadmac || cfg->ext_xtheadmemidx ||
+ cfg->ext_xtheadmempair || cfg->ext_xtheadsync;
+}
+
+#define MATERIALISE_EXT_PREDICATE(ext) \
+ static inline bool has_ ## ext ## _p(const RISCVCPUConfig *cfg) \
+ { \
+ return cfg->ext_ ## ext ; \
+ }
+
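For reference, each instantiation below expands to a one-line config test; the first one becomes:

static inline bool has_xtheadba_p(const RISCVCPUConfig *cfg)
{
    return cfg->ext_xtheadba;
}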
+MATERIALISE_EXT_PREDICATE(xtheadba)
+MATERIALISE_EXT_PREDICATE(xtheadbb)
+MATERIALISE_EXT_PREDICATE(xtheadbs)
+MATERIALISE_EXT_PREDICATE(xtheadcmo)
+MATERIALISE_EXT_PREDICATE(xtheadcondmov)
+MATERIALISE_EXT_PREDICATE(xtheadfmemidx)
+MATERIALISE_EXT_PREDICATE(xtheadfmv)
+MATERIALISE_EXT_PREDICATE(xtheadmac)
+MATERIALISE_EXT_PREDICATE(xtheadmemidx)
+MATERIALISE_EXT_PREDICATE(xtheadmempair)
+MATERIALISE_EXT_PREDICATE(xtheadsync)
+MATERIALISE_EXT_PREDICATE(XVentanaCondOps)
+
#endif
if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
mode = get_field(env->mstatus, MSTATUS_MPP);
- virt = get_field(env->mstatus, MSTATUS_MPV);
+ virt = get_field(env->mstatus, MSTATUS_MPV) &&
+ (mode != PRV_M);
if (virt) {
status = env->vsstatus;
}
flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
+ flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
if (env->cur_pmmask != 0) {
flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
}
void riscv_cpu_update_mask(CPURISCVState *env)
{
target_ulong mask = 0, base = 0;
+ RISCVMXL xl = env->xl;
/*
* TODO: Current RVJ spec does not specify
* how the extension interacts with XLEN.
*/
#ifndef CONFIG_USER_ONLY
+ int mode = cpu_address_mode(env);
+ xl = cpu_get_xl(env, mode);
if (riscv_has_ext(env, RVJ)) {
- switch (env->priv) {
+ switch (mode) {
case PRV_M:
if (env->mmte & M_PM_ENABLE) {
mask = env->mpmmask;
}
}
#endif
- if (env->xl == MXL_RV32) {
+ if (xl == MXL_RV32) {
env->cur_pmmask = mask & UINT32_MAX;
env->cur_pmbase = base & UINT32_MAX;
} else {
if (ret == TRANSLATE_G_STAGE_FAIL) {
first_stage_error = false;
two_stage_indirect_error = true;
- access_type = MMU_DATA_LOAD;
}
qemu_log_mask(CPU_LOG_MMU,
}
if (xl != MXL_RV32 || env->debugger) {
- /*
- * RV32: MPV and GVA are not in mstatus. The current plan is to
- * add them to mstatush. For now, we just don't support it.
- */
- mask |= MSTATUS_MPV | MSTATUS_GVA;
+ if (riscv_has_ext(env, RVH)) {
+ mask |= MSTATUS_MPV | MSTATUS_GVA;
+ }
if ((val & MSTATUS64_UXL) != 0) {
mask |= MSTATUS64_UXL;
}
mstatus = (mstatus & ~mask) | (val & mask);
- if (xl > MXL_RV32) {
- /* SXL field is for now read only */
- mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
- }
env->mstatus = mstatus;
/*
*/
if (env->debugger) {
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
}
+
+ riscv_cpu_update_mask(env);
return RISCV_EXCP_NONE;
}
target_ulong val)
{
uint64_t valh = (uint64_t)val << 32;
- uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
+ uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
env->mstatus = (env->mstatus & ~mask) | (valh & mask);
uint64_t mstatus;
env->mpmmask = val;
- if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
env->cur_pmmask = val;
}
env->mmte |= EXT_STATUS_DIRTY;
return RISCV_EXCP_NONE;
}
env->spmmask = val;
- if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
env->cur_pmmask = val;
+ if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
+ env->cur_pmmask &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
return RISCV_EXCP_NONE;
}
env->upmmask = val;
- if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
env->cur_pmmask = val;
+ if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
+ env->cur_pmmask &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
uint64_t mstatus;
env->mpmbase = val;
- if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
env->cur_pmbase = val;
}
env->mmte |= EXT_STATUS_DIRTY;
return RISCV_EXCP_NONE;
}
env->spmbase = val;
- if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
env->cur_pmbase = val;
+ if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
+ env->cur_pmbase &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
return RISCV_EXCP_NONE;
}
env->upmbase = val;
- if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
env->cur_pmbase = val;
+ if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
+ env->cur_pmbase &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
float32_minimum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fminm_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ float32 ret = float32_min(frs1, frs2, &env->fp_status);
+ return nanbox_s(env, ret);
+}
+
uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(env, rs1);
float32_maximum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fmaxm_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ float32 ret = float32_max(frs1, frs2, &env->fp_status);
+ return nanbox_s(env, ret);
+}
+
uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(env, rs1);
return float32_le(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fleq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ return float32_le_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(env, rs1);
return float32_lt(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fltq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ return float32_lt_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(env, rs1);
return fclass_s(frs1);
}
+uint64_t helper_fround_s(CPURISCVState *env, uint64_t rs1)
+{
+ float_status *fs = &env->fp_status;
+ uint16_t nx_old = get_float_exception_flags(fs) & float_flag_inexact;
+ float32 frs1 = check_nanbox_s(env, rs1);
+
+ frs1 = float32_round_to_int(frs1, fs);
+
+ /* Restore the original NX flag. */
+ uint16_t flags = get_float_exception_flags(fs);
+ flags &= ~float_flag_inexact;
+ flags |= nx_old;
+ set_float_exception_flags(flags, fs);
+
+ return nanbox_s(env, frs1);
+}
+
+uint64_t helper_froundnx_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ frs1 = float32_round_to_int(frs1, &env->fp_status);
+ return nanbox_s(env, frs1);
+}
+
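The fround/froundnx pair mirrors the C99 split between nearbyint() (inexact suppressed) and rint() (inexact raised). A standalone analogue, assuming a toolchain and libm that honour fenv access:

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    volatile double x = 1.5, r;

    feclearexcept(FE_ALL_EXCEPT);
    r = nearbyint(x);               /* fround-like: NX stays clear */
    printf("nearbyint NX=%d\n", fetestexcept(FE_INEXACT) != 0);

    feclearexcept(FE_ALL_EXCEPT);
    r = rint(x);                    /* froundnx-like: NX is raised */
    printf("rint NX=%d\n", fetestexcept(FE_INEXACT) != 0);

    (void)r;
    return 0;
}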
uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_add(frs1, frs2, &env->fp_status);
float64_minimum_number(frs1, frs2, &env->fp_status);
}
+uint64_t helper_fminm_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_min(frs1, frs2, &env->fp_status);
+}
+
uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return env->priv_ver < PRIV_VERSION_1_11_0 ?
float64_maximum_number(frs1, frs2, &env->fp_status);
}
+uint64_t helper_fmaxm_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_max(frs1, frs2, &env->fp_status);
+}
+
uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
{
return nanbox_s(env, float64_to_float32(rs1, &env->fp_status));
return float64_le(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fleq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_le_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_lt(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fltq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_lt_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_eq_quiet(frs1, frs2, &env->fp_status);
return float64_to_int32(frs1, &env->fp_status);
}
+uint64_t helper_fcvtmod_w_d(CPURISCVState *env, uint64_t value)
+{
+ return float64_to_int32_modulo(value, float_round_to_zero, &env->fp_status);
+}
+
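FCVTMOD.W.D truncates toward zero and keeps the low 32 bits of the result, wrapping modulo 2^32 instead of saturating. A rough reference model, valid only for finite inputs whose truncation fits in 64 bits (the real float64_to_int32_modulo() also covers huge magnitudes, infinities and NaN):

#include <stdint.h>

static int32_t fcvtmod_w_d_ref(double x)
{
    int64_t t = (int64_t)x;      /* truncate toward zero; sketch only */
    return (int32_t)(uint32_t)t; /* keep the low 32 bits (mod 2^32)   */
}

/* fcvtmod_w_d_ref(2147483648.0)  -> -2147483648 (wraps, no saturate)
 * fcvtmod_w_d_ref(-2147483649.0) ->  2147483647                      */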
target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1)
{
return (int32_t)float64_to_uint32(frs1, &env->fp_status);
return fclass_d(frs1);
}
+uint64_t helper_fround_d(CPURISCVState *env, uint64_t frs1)
+{
+ float_status *fs = &env->fp_status;
+ uint16_t nx_old = get_float_exception_flags(fs) & float_flag_inexact;
+
+ frs1 = float64_round_to_int(frs1, fs);
+
+ /* Restore the original NX flag. */
+ uint16_t flags = get_float_exception_flags(fs);
+ flags &= ~float_flag_inexact;
+ flags |= nx_old;
+ set_float_exception_flags(flags, fs);
+
+ return frs1;
+}
+
+uint64_t helper_froundnx_d(CPURISCVState *env, uint64_t frs1)
+{
+ return float64_round_to_int(frs1, &env->fp_status);
+}
+
uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
float16_minimum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fminm_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ float16 ret = float16_min(frs1, frs2, &env->fp_status);
+ return nanbox_h(env, ret);
+}
+
uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
float16_maximum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fmaxm_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ float16 ret = float16_max(frs1, frs2, &env->fp_status);
+ return nanbox_h(env, ret);
+}
+
uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(env, rs1);
return float16_le(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fleq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ return float16_le_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
return float16_lt(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fltq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ return float16_lt_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
return fclass_h(frs1);
}
+uint64_t helper_fround_h(CPURISCVState *env, uint64_t rs1)
+{
+ float_status *fs = &env->fp_status;
+ uint16_t nx_old = get_float_exception_flags(fs) & float_flag_inexact;
+ float16 frs1 = check_nanbox_h(env, rs1);
+
+ frs1 = float16_round_to_int(frs1, fs);
+
+ /* Restore the original NX flag. */
+ uint16_t flags = get_float_exception_flags(fs);
+ flags &= ~float_flag_inexact;
+ flags |= nx_old;
+ set_float_exception_flags(flags, fs);
+
+ return nanbox_h(env, frs1);
+}
+
+uint64_t helper_froundnx_h(CPURISCVState *env, uint64_t rs1)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ frs1 = float16_round_to_int(frs1, &env->fp_status);
+ return nanbox_h(env, frs1);
+}
+
target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs1 = check_nanbox_h(env, rs1);
return float16_to_float64(frs1, true, &env->fp_status);
}
+
+uint64_t helper_fcvt_bf16_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ return nanbox_h(env, float32_to_bfloat16(frs1, &env->fp_status));
+}
+
+uint64_t helper_fcvt_s_bf16(CPURISCVState *env, uint64_t rs1)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ return nanbox_s(env, bfloat16_to_float32(frs1, &env->fp_status));
+}
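
bfloat16 is, in effect, the high half of an IEEE binary32, which is why the widening direction above is exact and only narrowing needs rounding. A standalone round-to-nearest-even sketch that ignores the NaN quieting the softfloat code performs:

#include <stdint.h>
#include <string.h>

static uint16_t f32_to_bf16_rne(float f)
{
    uint32_t u;
    memcpy(&u, &f, sizeof(u));
    u += 0x7fff + ((u >> 16) & 1);  /* round to nearest, ties to even */
    return (uint16_t)(u >> 16);
}

static float bf16_to_f32(uint16_t h)
{
    uint32_t u = (uint32_t)h << 16; /* widening is exact */
    float f;
    memcpy(&f, &u, sizeof(f));
    return f;
}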
DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fminm_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxm_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fleq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fltq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, tl, env, i64)
+DEF_HELPER_FLAGS_2(fround_s, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(froundnx_s, TCG_CALL_NO_RWG, i64, env, i64)
/* Floating Point - Double Precision */
DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fminm_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxm_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_s_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_d_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fleq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fltq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvtmod_w_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64)
+DEF_HELPER_FLAGS_2(fround_d, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(froundnx_d, TCG_CALL_NO_RWG, i64, env, i64)
/* Bitmanip */
DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_FLAGS_3(fmul_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fminm_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxm_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fsqrt_h, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fleq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fltq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_s_h, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_h_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64)
+DEF_HELPER_FLAGS_2(fround_h, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(froundnx_h, TCG_CALL_NO_RWG, i64, env, i64)
/* Cache-block operations */
DEF_HELPER_2(cbo_clean_flush, void, env, tl)
/* Zce helper */
DEF_HELPER_FLAGS_2(cm_jalt, TCG_CALL_NO_WG, tl, env, i32)
+
+/* BF16 functions */
+DEF_HELPER_FLAGS_2(fcvt_bf16_s, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_s_bf16, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_5(vfncvtbf16_f_f_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvtbf16_f_f_v, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vfwmaccbf16_vv, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmaccbf16_vf, void, ptr, ptr, i64, ptr, env, i32)
bset 0010100 .......... 001 ..... 0110011 @r
bseti 00101. ........... 001 ..... 0010011 @sh
+# *** Zfa Standard Extension ***
+fli_s 1111000 00001 ..... 000 ..... 1010011 @r2
+fli_d 1111001 00001 ..... 000 ..... 1010011 @r2
+fli_h 1111010 00001 ..... 000 ..... 1010011 @r2
+fminm_s 0010100 ..... ..... 010 ..... 1010011 @r
+fmaxm_s 0010100 ..... ..... 011 ..... 1010011 @r
+fminm_d 0010101 ..... ..... 010 ..... 1010011 @r
+fmaxm_d 0010101 ..... ..... 011 ..... 1010011 @r
+fminm_h 0010110 ..... ..... 010 ..... 1010011 @r
+fmaxm_h 0010110 ..... ..... 011 ..... 1010011 @r
+fround_s 0100000 00100 ..... ... ..... 1010011 @r2_rm
+froundnx_s 0100000 00101 ..... ... ..... 1010011 @r2_rm
+fround_d 0100001 00100 ..... ... ..... 1010011 @r2_rm
+froundnx_d 0100001 00101 ..... ... ..... 1010011 @r2_rm
+fround_h 0100010 00100 ..... ... ..... 1010011 @r2_rm
+froundnx_h 0100010 00101 ..... ... ..... 1010011 @r2_rm
+fcvtmod_w_d 1100001 01000 ..... 001 ..... 1010011 @r2
+fmvh_x_d 1110001 00001 ..... 000 ..... 1010011 @r2
+fmvp_d_x 1011001 ..... ..... 000 ..... 1010011 @r
+fleq_s 1010000 ..... ..... 100 ..... 1010011 @r
+fltq_s 1010000 ..... ..... 101 ..... 1010011 @r
+fleq_d 1010001 ..... ..... 100 ..... 1010011 @r
+fltq_d 1010001 ..... ..... 101 ..... 1010011 @r
+fleq_h 1010010 ..... ..... 100 ..... 1010011 @r
+fltq_h 1010010 ..... ..... 101 ..... 1010011 @r
+
# *** RV32 Zfh Extension ***
flh ............ ..... 001 ..... 0000111 @i
fsh ....... ..... ..... 001 ..... 0100111 @s
# *** RV32 Zicond Standard Extension ***
czero_eqz 0000111 ..... ..... 101 ..... 0110011 @r
czero_nez 0000111 ..... ..... 111 ..... 0110011 @r
+
+# *** Zfbfmin Standard Extension ***
+fcvt_bf16_s 0100010 01000 ..... ... ..... 1010011 @r2_rm
+fcvt_s_bf16 0100000 00110 ..... ... ..... 1010011 @r2_rm
+
+# *** Zvfbfmin Standard Extension ***
+vfncvtbf16_f_f_w 010010 . ..... 11101 001 ..... 1010111 @r2_vm
+vfwcvtbf16_f_f_v 010010 . ..... 01101 001 ..... 1010111 @r2_vm
+
+# *** Zvfbfwma Standard Extension ***
+vfwmaccbf16_vv 111011 . ..... ..... 001 ..... 1010111 @r_vm
+vfwmaccbf16_vf 111011 . ..... ..... 101 ..... 1010111 @r_vm
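
Each decodetree line above fixes some opcode bits and leaves the dotted fields as wildcards. As an illustrative hand-derived check for the fli_s pattern (mask and value computed from the funct7/rs2/funct3/opcode fields; treat them as an assumption):

#include <stdbool.h>
#include <stdint.h>

static bool is_fli_s(uint32_t insn)
{
    /* fli_s: funct7=1111000, rs2=00001, funct3=000, opcode=1010011;
     * rd and the rs1 immediate-index field stay wildcarded. */
    return (insn & 0xfff0707fu) == 0xf0100053u;
}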
--- /dev/null
+/*
+ * RISC-V translation routines for the BF16 Standard Extensions.
+ *
+ * Copyright (c) 2020-2023 PLCT Lab
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZFBFMIN(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfbfmin) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZVFBFMIN(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zvfbfmin) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZVFBFWMA(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zvfbfwma) { \
+ return false; \
+ } \
+} while (0)
+
+static bool trans_fcvt_bf16_s(DisasContext *ctx, arg_fcvt_bf16_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFBFMIN(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_bf16_s(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_s_bf16(DisasContext *ctx, arg_fcvt_s_bf16 *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFBFMIN(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_s_bf16(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_vfncvtbf16_f_f_w(DisasContext *ctx, arg_vfncvtbf16_f_f_w *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFMIN(ctx);
+
+ if (opfv_narrow_check(ctx, a) && (ctx->sew == MO_16)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+
+ gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
+ vreg_ofs(ctx, a->rs2), cpu_env,
+ ctx->cfg_ptr->vlen / 8,
+ ctx->cfg_ptr->vlen / 8, data,
+ gen_helper_vfncvtbf16_f_f_w);
+ mark_vs_dirty(ctx);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+static bool trans_vfwcvtbf16_f_f_v(DisasContext *ctx, arg_vfwcvtbf16_f_f_v *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFMIN(ctx);
+
+ if (opfv_widen_check(ctx, a) && (ctx->sew == MO_16)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+
+ gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
+ vreg_ofs(ctx, a->rs2), cpu_env,
+ ctx->cfg_ptr->vlen / 8,
+ ctx->cfg_ptr->vlen / 8, data,
+ gen_helper_vfwcvtbf16_f_f_v);
+ mark_vs_dirty(ctx);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFWMA(ctx);
+
+ if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) &&
+ vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+
+ gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ tcg_gen_gvec_4_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
+ vreg_ofs(ctx, a->rs1),
+ vreg_ofs(ctx, a->rs2), cpu_env,
+ ctx->cfg_ptr->vlen / 8,
+ ctx->cfg_ptr->vlen / 8, data,
+ gen_helper_vfwmaccbf16_vv);
+ mark_vs_dirty(ctx);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFWMA(ctx);
+
+ if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) &&
+ vext_check_ds(ctx, a->rd, a->rs2, a->vm)) {
+ uint32_t data = 0;
+
+ gen_set_rm(ctx, RISCV_FRM_DYN);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ return opfvf_trans(a->rd, a->rs1, a->rs2, data,
+ gen_helper_vfwmaccbf16_vf, ctx);
+ }
+
+ return false;
+}
--- /dev/null
+/*
+ * RISC-V translation routines for the Zfa Standard Extension.
+ *
+ * Copyright (c) 2023 Christoph Müllner, christoph.muellner@vrull.eu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZFA(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfa) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZFH(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfh) { \
+ return false; \
+ } \
+} while (0)
+
+static bool trans_fli_s(DisasContext *ctx, arg_fli_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ /* Values below are NaN-boxed to avoid a gen_nanbox_s(). */
+ static const uint64_t fli_s_table[] = {
+ 0xffffffffbf800000, /* -1.0 */
+ 0xffffffff00800000, /* minimum positive normal */
+ 0xffffffff37800000, /* 1.0 * 2^-16 */
+ 0xffffffff38000000, /* 1.0 * 2^-15 */
+ 0xffffffff3b800000, /* 1.0 * 2^-8 */
+ 0xffffffff3c000000, /* 1.0 * 2^-7 */
+ 0xffffffff3d800000, /* 1.0 * 2^-4 */
+ 0xffffffff3e000000, /* 1.0 * 2^-3 */
+ 0xffffffff3e800000, /* 0.25 */
+ 0xffffffff3ea00000, /* 0.3125 */
+ 0xffffffff3ec00000, /* 0.375 */
+ 0xffffffff3ee00000, /* 0.4375 */
+ 0xffffffff3f000000, /* 0.5 */
+ 0xffffffff3f200000, /* 0.625 */
+ 0xffffffff3f400000, /* 0.75 */
+ 0xffffffff3f600000, /* 0.875 */
+ 0xffffffff3f800000, /* 1.0 */
+ 0xffffffff3fa00000, /* 1.25 */
+ 0xffffffff3fc00000, /* 1.5 */
+ 0xffffffff3fe00000, /* 1.75 */
+ 0xffffffff40000000, /* 2.0 */
+ 0xffffffff40200000, /* 2.5 */
+ 0xffffffff40400000, /* 3 */
+ 0xffffffff40800000, /* 4 */
+ 0xffffffff41000000, /* 8 */
+ 0xffffffff41800000, /* 16 */
+ 0xffffffff43000000, /* 2^7 */
+ 0xffffffff43800000, /* 2^8 */
+ 0xffffffff47000000, /* 2^15 */
+ 0xffffffff47800000, /* 2^16 */
+ 0xffffffff7f800000, /* +inf */
+ 0xffffffff7fc00000, /* Canonical NaN */
+ };
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ tcg_gen_movi_i64(dest, fli_s_table[a->rs1]);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fli_d(DisasContext *ctx, arg_fli_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ static const uint64_t fli_d_table[] = {
+ 0xbff0000000000000, /* -1.0 */
+ 0x0010000000000000, /* minimum positive normal */
+ 0x3ef0000000000000, /* 1.0 * 2^-16 */
+ 0x3f00000000000000, /* 1.0 * 2^-15 */
+ 0x3f70000000000000, /* 1.0 * 2^-8 */
+ 0x3f80000000000000, /* 1.0 * 2^-7 */
+ 0x3fb0000000000000, /* 1.0 * 2^-4 */
+ 0x3fc0000000000000, /* 1.0 * 2^-3 */
+ 0x3fd0000000000000, /* 0.25 */
+ 0x3fd4000000000000, /* 0.3125 */
+ 0x3fd8000000000000, /* 0.375 */
+ 0x3fdc000000000000, /* 0.4375 */
+ 0x3fe0000000000000, /* 0.5 */
+ 0x3fe4000000000000, /* 0.625 */
+ 0x3fe8000000000000, /* 0.75 */
+ 0x3fec000000000000, /* 0.875 */
+ 0x3ff0000000000000, /* 1.0 */
+ 0x3ff4000000000000, /* 1.25 */
+ 0x3ff8000000000000, /* 1.5 */
+ 0x3ffc000000000000, /* 1.75 */
+ 0x4000000000000000, /* 2.0 */
+ 0x4004000000000000, /* 2.5 */
+ 0x4008000000000000, /* 3 */
+ 0x4010000000000000, /* 4 */
+ 0x4020000000000000, /* 8 */
+ 0x4030000000000000, /* 16 */
+ 0x4060000000000000, /* 2^7 */
+ 0x4070000000000000, /* 2^8 */
+ 0x40e0000000000000, /* 2^15 */
+ 0x40f0000000000000, /* 2^16 */
+ 0x7ff0000000000000, /* +inf */
+ 0x7ff8000000000000, /* Canonical NaN */
+ };
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ tcg_gen_movi_i64(dest, fli_d_table[a->rs1]);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fli_h(DisasContext *ctx, arg_fli_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ /* Values below are NaN-boxed to avoid a gen_nanbox_h(). */
+ static const uint64_t fli_h_table[] = {
+ 0xffffffffffffbc00, /* -1.0 */
+ 0xffffffffffff0400, /* minimum positive normal */
+ 0xffffffffffff0100, /* 1.0 * 2^-16 */
+ 0xffffffffffff0200, /* 1.0 * 2^-15 */
+ 0xffffffffffff1c00, /* 1.0 * 2^-8 */
+ 0xffffffffffff2000, /* 1.0 * 2^-7 */
+ 0xffffffffffff2c00, /* 1.0 * 2^-4 */
+ 0xffffffffffff3000, /* 1.0 * 2^-3 */
+ 0xffffffffffff3400, /* 0.25 */
+ 0xffffffffffff3500, /* 0.3125 */
+ 0xffffffffffff3600, /* 0.375 */
+ 0xffffffffffff3700, /* 0.4375 */
+ 0xffffffffffff3800, /* 0.5 */
+ 0xffffffffffff3900, /* 0.625 */
+ 0xffffffffffff3a00, /* 0.75 */
+ 0xffffffffffff3b00, /* 0.875 */
+ 0xffffffffffff3c00, /* 1.0 */
+ 0xffffffffffff3d00, /* 1.25 */
+ 0xffffffffffff3e00, /* 1.5 */
+ 0xffffffffffff3f00, /* 1.75 */
+ 0xffffffffffff4000, /* 2.0 */
+ 0xffffffffffff4100, /* 2.5 */
+ 0xffffffffffff4200, /* 3 */
+ 0xffffffffffff4400, /* 4 */
+ 0xffffffffffff4800, /* 8 */
+ 0xffffffffffff4c00, /* 16 */
+ 0xffffffffffff5800, /* 2^7 */
+ 0xffffffffffff5c00, /* 2^8 */
+ 0xffffffffffff7800, /* 2^15 */
+ 0xffffffffffff7c00, /* 2^16 */
+ 0xffffffffffff7c00, /* +inf */
+ 0xffffffffffff7e00, /* Canonical NaN */
+ };
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ tcg_gen_movi_i64(dest, fli_h_table[a->rs1]);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fminm_s(DisasContext *ctx, arg_fminm_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fminm_s(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmaxm_s(DisasContext *ctx, arg_fmaxm_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fmaxm_s(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fminm_d(DisasContext *ctx, arg_fminm_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
+
+ gen_helper_fminm_d(dest, cpu_env, src1, src2);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmaxm_d(DisasContext *ctx, arg_fmaxm_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
+
+ gen_helper_fmaxm_d(dest, cpu_env, src1, src2);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fminm_h(DisasContext *ctx, arg_fminm_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fminm_h(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmaxm_h(DisasContext *ctx, arg_fmaxm_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fmaxm_h(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fround_s(DisasContext *ctx, arg_fround_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fround_s(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_froundnx_s(DisasContext *ctx, arg_froundnx_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_froundnx_s(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fround_d(DisasContext *ctx, arg_fround_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fround_d(dest, cpu_env, src1);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_froundnx_d(DisasContext *ctx, arg_froundnx_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_froundnx_d(dest, cpu_env, src1);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fround_h(DisasContext *ctx, arg_fround_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fround_h(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_froundnx_h(DisasContext *ctx, arg_froundnx_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_froundnx_h(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvtmod_w_d(DisasContext *ctx, arg_fcvtmod_w_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dst = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ /* Rounding mode is RTZ. */
+ gen_set_rm(ctx, RISCV_FRM_RTZ);
+ gen_helper_fcvtmod_w_d(t1, cpu_env, src1);
+ tcg_gen_trunc_i64_tl(dst, t1);
+ gen_set_gpr(ctx, a->rd, dst);
+
+ return true;
+}
+
+static bool trans_fmvh_x_d(DisasContext *ctx, arg_fmvh_x_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+ REQUIRE_32BIT(ctx);
+
+ TCGv dst = dest_gpr(ctx, a->rd);
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, cpu_fpr[a->rs1], 32);
+ tcg_gen_trunc_i64_tl(dst, t1);
+ gen_set_gpr(ctx, a->rd, dst);
+ return true;
+}
+
+static bool trans_fmvp_d_x(DisasContext *ctx, arg_fmvp_d_x *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+ REQUIRE_32BIT(ctx);
+
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+ tcg_gen_concat_tl_i64(cpu_fpr[a->rd], src1, src2);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
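Taken together, fmvh.x.d and fmvp.d.x let RV32+D code move a 64-bit double through a pair of 32-bit GPRs, which is what the REQUIRE_32BIT() guards above encode. In plain C, the two TCG sequences compute:

#include <stdint.h>

static uint64_t fmvp_d_x_ref(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo; /* tcg_gen_concat_tl_i64 */
}

static int32_t fmvh_x_d_ref(uint64_t d)
{
    return (int32_t)(d >> 32);        /* sari by 32, then truncate */
}

/* The low half is already reachable via fmv.x.w, so only the
 * high-half move is new. */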
+static bool trans_fleq_s(DisasContext *ctx, arg_fleq_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fleq_s(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fltq_s(DisasContext *ctx, arg_fltq_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fltq_s(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fleq_d(DisasContext *ctx, arg_fleq_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
+
+ gen_helper_fleq_d(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fltq_d(DisasContext *ctx, arg_fltq_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
+
+ gen_helper_fltq_d(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fleq_h(DisasContext *ctx, arg_fleq_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fleq_h(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fltq_h(DisasContext *ctx, arg_fltq_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fltq_h(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
} \
} while (0)
-#define REQUIRE_ZFHMIN(ctx) do { \
- if (!ctx->cfg_ptr->ext_zfhmin) { \
+#define REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfhmin && !ctx->cfg_ptr->ext_zfbfmin) { \
return false; \
} \
} while (0)
TCGv t0;
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
TCGv t0;
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
{
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a)
{
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO);
#include <linux/kvm.h>
#include "qemu/timer.h"
+#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
+#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
do { \
- int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, time), ®); \
+ int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \
if (ret) { \
abort(); \
} \
} while (0)
+typedef struct KVMCPUConfig {
+ const char *name;
+ const char *description;
+ target_ulong offset;
+ int kvm_reg_id;
+ bool user_set;
+ bool supported;
+} KVMCPUConfig;
+
+#define KVM_MISA_CFG(_bit, _reg_id) \
+ {.offset = _bit, .kvm_reg_id = _reg_id}
+
+/* KVM ISA extensions */
+static KVMCPUConfig kvm_misa_ext_cfgs[] = {
+ KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
+ KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
+ KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
+ KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
+ KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
+ KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
+ KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
+};
+
+static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ KVMCPUConfig *misa_ext_cfg = opaque;
+ target_ulong misa_bit = misa_ext_cfg->offset;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ CPURISCVState *env = &cpu->env;
+ bool value, host_bit;
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ host_bit = env->misa_ext_mask & misa_bit;
+
+ if (value == host_bit) {
+ return;
+ }
+
+ if (!value) {
+ misa_ext_cfg->user_set = true;
+ return;
+ }
+
+ /*
+ * Forbid users from enabling extensions that aren't
+ * available in the hart.
+ */
+ error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
+ "enabled in the host", misa_ext_cfg->name);
+}
+
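Distilled decision logic of the setter above, as a sketch:

typedef enum { KEEP, DISABLE_AT_INIT, REJECT } MisaExtAction;

static MisaExtAction misa_ext_action(bool requested, bool host_has)
{
    if (requested == host_has) {
        return KEEP;            /* matches the host: nothing to do */
    }
    if (!requested) {
        return DISABLE_AT_INIT; /* user_set = true; bit cleared later
                                 * by kvm_riscv_update_cpu_misa_ext() */
    }
    return REJECT;              /* can't enable what the host lacks */
}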
+static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
+{
+ CPURISCVState *env = &cpu->env;
+ uint64_t id, reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
+ KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
+ target_ulong misa_bit = misa_cfg->offset;
+
+ if (!misa_cfg->user_set) {
+ continue;
+ }
+
+ /* If we're here, we're going to disable the MISA bit */
+ reg = 0;
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
+ misa_cfg->kvm_reg_id);
+ ret = kvm_set_one_reg(cs, id, ®);
+ if (ret != 0) {
+ /*
+ * We're not checking for -EINVAL because if the bit is about
+ * to be disabled, it means that it was already enabled by
+ * KVM. We determined that by fetching the 'isa' register
+ * during init() time. Any error at this point is worth
+ * aborting.
+ */
+ error_report("Unable to set KVM reg %s, error %d",
+ misa_cfg->name, ret);
+ exit(EXIT_FAILURE);
+ }
+ env->misa_ext &= ~misa_bit;
+ }
+}
+
+#define CPUCFG(_prop) offsetof(struct RISCVCPUConfig, _prop)
+
+#define KVM_EXT_CFG(_name, _prop, _reg_id) \
+ {.name = _name, .offset = CPUCFG(_prop), \
+ .kvm_reg_id = _reg_id}
+
+static KVMCPUConfig kvm_multi_ext_cfgs[] = {
+ KVM_EXT_CFG("zicbom", ext_icbom, KVM_RISCV_ISA_EXT_ZICBOM),
+ KVM_EXT_CFG("zicboz", ext_icboz, KVM_RISCV_ISA_EXT_ZICBOZ),
+ KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
+ KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
+ KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
+ KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
+ KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
+ KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
+};
+
+static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
+{
+ return (void *)&cpu->cfg + kvmcfg->offset;
+}
+
+static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
+ uint32_t val)
+{
+ bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
+
+ *ext_enabled = val;
+}
+
+static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
+ KVMCPUConfig *multi_ext)
+{
+ bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
+
+ return *ext_enabled;
+}
+
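The offset indirection resolves to a plain struct member. For the "zbb" entry, .offset is offsetof(struct RISCVCPUConfig, ext_zbb), so the accessor pair is equivalent to (sketch):

static void kvm_cpu_cfg_set_zbb(RISCVCPU *cpu, bool val)
{
    cpu->cfg.ext_zbb = val;   /* what kvmconfig_get_cfg_addr() computes */
}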
+static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ KVMCPUConfig *multi_ext_cfg = opaque;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ bool value, host_val;
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
+
+ /*
+ * Ignore if the user is setting the same value
+ * as the host.
+ */
+ if (value == host_val) {
+ return;
+ }
+
+ if (!multi_ext_cfg->supported) {
+ /*
+ * Error out if the user is trying to enable an
+ * extension that KVM doesn't support; ignore
+ * the option otherwise.
+ */
+ if (value) {
+ error_setg(errp, "KVM does not support disabling extension %s",
+ multi_ext_cfg->name);
+ }
+
+ return;
+ }
+
+ multi_ext_cfg->user_set = true;
+ kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
+}
+
+static KVMCPUConfig kvm_cbom_blocksize = {
+ .name = "cbom_blocksize",
+ .offset = CPUCFG(cbom_blocksize),
+ .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
+};
+
+static KVMCPUConfig kvm_cboz_blocksize = {
+ .name = "cboz_blocksize",
+ .offset = CPUCFG(cboz_blocksize),
+ .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
+};
+
+static void kvm_cpu_set_cbomz_blksize(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ KVMCPUConfig *cbomz_cfg = opaque;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint16_t value, *host_val;
+
+ if (!visit_type_uint16(v, name, &value, errp)) {
+ return;
+ }
+
+ host_val = kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
+
+ if (value != *host_val) {
+ error_report("Unable to set %s to a different value than "
+ "the host (%u)",
+ cbomz_cfg->name, *host_val);
+ exit(EXIT_FAILURE);
+ }
+
+ cbomz_cfg->user_set = true;
+}
+
+static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
+{
+ CPURISCVState *env = &cpu->env;
+ uint64_t id, reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
+ KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
+
+ if (!multi_ext_cfg->user_set) {
+ continue;
+ }
+
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
+ multi_ext_cfg->kvm_reg_id);
+ reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
+ ret = kvm_set_one_reg(cs, id, ®);
+ if (ret != 0) {
+ error_report("Unable to %s extension %s in KVM, error %d",
+ reg ? "enable" : "disable",
+ multi_ext_cfg->name, ret);
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
+ KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
+ int bit = misa_cfg->offset;
+
+ misa_cfg->name = riscv_get_misa_ext_name(bit);
+ misa_cfg->description = riscv_get_misa_ext_description(bit);
+
+ object_property_add(cpu_obj, misa_cfg->name, "bool",
+ NULL,
+ kvm_cpu_set_misa_ext_cfg,
+ NULL, misa_cfg);
+ object_property_set_description(cpu_obj, misa_cfg->name,
+ misa_cfg->description);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
+ KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];
+
+ object_property_add(cpu_obj, multi_cfg->name, "bool",
+ NULL,
+ kvm_cpu_set_multi_ext_cfg,
+ NULL, multi_cfg);
+ }
+
+ object_property_add(cpu_obj, "cbom_blocksize", "uint16",
+ NULL, kvm_cpu_set_cbomz_blksize,
+ NULL, &kvm_cbom_blocksize);
+
+ object_property_add(cpu_obj, "cboz_blocksize", "uint16",
+ NULL, kvm_cpu_set_cbomz_blksize,
+ NULL, &kvm_cboz_blocksize);
+}
+
static int kvm_riscv_get_regs_core(CPUState *cs)
{
int ret = 0;
env->kvm_timer_dirty = false;
}
+typedef struct KVMScratchCPU {
+ int kvmfd;
+ int vmfd;
+ int cpufd;
+} KVMScratchCPU;
+
+/*
+ * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
+ * from target/arm/kvm.c.
+ */
+static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
+{
+ int kvmfd = -1, vmfd = -1, cpufd = -1;
+
+ kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
+ if (kvmfd < 0) {
+ goto err;
+ }
+ do {
+ vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
+ } while (vmfd == -1 && errno == EINTR);
+ if (vmfd < 0) {
+ goto err;
+ }
+ cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
+ if (cpufd < 0) {
+ goto err;
+ }
+
+ scratch->kvmfd = kvmfd;
+ scratch->vmfd = vmfd;
+ scratch->cpufd = cpufd;
+
+ return true;
+
+ err:
+ if (cpufd >= 0) {
+ close(cpufd);
+ }
+ if (vmfd >= 0) {
+ close(vmfd);
+ }
+ if (kvmfd >= 0) {
+ close(kvmfd);
+ }
+
+ return false;
+}
+
+static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
+{
+ close(scratch->cpufd);
+ close(scratch->vmfd);
+ close(scratch->kvmfd);
+}
+
+static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+{
+ CPURISCVState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mvendorid));
+ reg.addr = (uint64_t)&cpu->cfg.mvendorid;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
+ if (ret != 0) {
+ error_report("Unable to retrieve mvendorid from host, error %d", ret);
+ }
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(marchid));
+ reg.addr = (uint64_t)&cpu->cfg.marchid;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
+ if (ret != 0) {
+ error_report("Unable to retrieve marchid from host, error %d", ret);
+ }
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mimpid));
+ reg.addr = (uint64_t)&cpu->cfg.mimpid;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
+ if (ret != 0) {
+ error_report("Unable to retrieve mimpid from host, error %d", ret);
+ }
+}
+
+static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
+ KVMScratchCPU *kvmcpu)
+{
+ CPURISCVState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(isa));
+ reg.addr = (uint64_t)&env->misa_ext_mask;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
+
+ if (ret) {
+ error_report("Unable to fetch ISA register from KVM, "
+ "error %d", ret);
+ kvm_riscv_destroy_scratch_vcpu(kvmcpu);
+ exit(EXIT_FAILURE);
+ }
+
+ env->misa_ext = env->misa_ext_mask;
+}
+
+static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
+ KVMCPUConfig *cbomz_cfg)
+{
+ CPURISCVState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ cbomz_cfg->kvm_reg_id);
+ reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
+ if (ret != 0) {
+ error_report("Unable to read KVM reg %s, error %d",
+ cbomz_cfg->name, ret);
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+{
+ CPURISCVState *env = &cpu->env;
+ uint64_t val;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
+ KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
+ struct kvm_one_reg reg;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
+ multi_ext_cfg->kvm_reg_id);
+ reg.addr = (uint64_t)&val;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
+ if (ret != 0) {
+ if (errno == EINVAL) {
+ /* Silently default to 'false' if KVM does not support it. */
+ multi_ext_cfg->supported = false;
+ val = false;
+ } else {
+ error_report("Unable to read ISA_EXT KVM register %s, "
+ "error %d", multi_ext_cfg->name, ret);
+ kvm_riscv_destroy_scratch_vcpu(kvmcpu);
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ multi_ext_cfg->supported = true;
+ }
+
+ kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
+ }
+
+ if (cpu->cfg.ext_icbom) {
+ kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
+ }
+
+ if (cpu->cfg.ext_icboz) {
+ kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
+ }
+}
+
+void kvm_riscv_init_user_properties(Object *cpu_obj)
+{
+ RISCVCPU *cpu = RISCV_CPU(cpu_obj);
+ KVMScratchCPU kvmcpu;
+
+ if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
+ return;
+ }
+
+ kvm_riscv_add_cpu_user_properties(cpu_obj);
+ kvm_riscv_init_machine_ids(cpu, &kvmcpu);
+ kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
+ kvm_riscv_init_multiext_cfg(cpu, &kvmcpu);
+
+ kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
+}
+
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
{
}
-int kvm_arch_init_vcpu(CPUState *cs)
+static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
- int ret = 0;
- target_ulong isa;
- RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
uint64_t id;
+ int ret;
- qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mvendorid));
+ ret = kvm_set_one_reg(cs, id, &cpu->cfg.mvendorid);
+ if (ret != 0) {
+ return ret;
+ }
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(isa));
- ret = kvm_get_one_reg(cs, id, &isa);
- if (ret) {
+ KVM_REG_RISCV_CONFIG_REG(marchid));
+ ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
+ if (ret != 0) {
return ret;
}
- env->misa_ext = isa;
+
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mimpid));
+ ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
+
+ return ret;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ int ret = 0;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+
+ qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
+
+ if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
+ ret = kvm_vcpu_set_machine_ids(cpu, cs);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ kvm_riscv_update_cpu_misa_ext(cpu, cs);
+ kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);
return ret;
}
#ifndef QEMU_KVM_RISCV_H
#define QEMU_KVM_RISCV_H
+void kvm_riscv_init_user_properties(Object *cpu_obj);
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
}
- target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV);
+ target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
+ (prev_priv != PRV_M);
mstatus = set_field(mstatus, MSTATUS_MIE,
get_field(mstatus, MSTATUS_MPIE));
mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
target_ulong priv_ver;
RISCVMXL misa_mxl_max;
RISCVMXL xl;
+ RISCVMXL address_xl;
uint32_t misa_ext;
uint32_t opcode;
RISCVExtStatus mstatus_fs;
return ctx->misa_ext & ext;
}
-static bool always_true_p(DisasContext *ctx __attribute__((__unused__)))
-{
- return true;
-}
-
-static bool has_xthead_p(DisasContext *ctx __attribute__((__unused__)))
-{
- return ctx->cfg_ptr->ext_xtheadba || ctx->cfg_ptr->ext_xtheadbb ||
- ctx->cfg_ptr->ext_xtheadbs || ctx->cfg_ptr->ext_xtheadcmo ||
- ctx->cfg_ptr->ext_xtheadcondmov ||
- ctx->cfg_ptr->ext_xtheadfmemidx || ctx->cfg_ptr->ext_xtheadfmv ||
- ctx->cfg_ptr->ext_xtheadmac || ctx->cfg_ptr->ext_xtheadmemidx ||
- ctx->cfg_ptr->ext_xtheadmempair || ctx->cfg_ptr->ext_xtheadsync;
-}
-
-#define MATERIALISE_EXT_PREDICATE(ext) \
- static bool has_ ## ext ## _p(DisasContext *ctx) \
- { \
- return ctx->cfg_ptr->ext_ ## ext ; \
- }
-
-MATERIALISE_EXT_PREDICATE(XVentanaCondOps);
-
#ifdef TARGET_RISCV32
#define get_xl(ctx) MXL_RV32
#elif defined(CONFIG_USER_ONLY)
#define get_xl(ctx) ((ctx)->xl)
#endif
+#ifdef TARGET_RISCV32
+#define get_address_xl(ctx) MXL_RV32
+#elif defined(CONFIG_USER_ONLY)
+#define get_address_xl(ctx) MXL_RV64
+#else
+#define get_address_xl(ctx) ((ctx)->address_xl)
+#endif
+
/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
{
tcg_gen_addi_tl(addr, src1, imm);
if (ctx->pm_mask_enabled) {
tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_xl(ctx) == MXL_RV32) {
+ } else if (get_address_xl(ctx) == MXL_RV32) {
tcg_gen_ext32u_tl(addr, addr);
}
if (ctx->pm_base_enabled) {
tcg_gen_or_tl(addr, addr, pm_base);
}
+
return addr;
}
#include "insn_trans/trans_rvzicond.c.inc"
#include "insn_trans/trans_rvzawrs.c.inc"
#include "insn_trans/trans_rvzicbo.c.inc"
+#include "insn_trans/trans_rvzfa.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_rvk.c.inc"
#include "insn_trans/trans_privileged.c.inc"
#include "insn_trans/trans_svinval.c.inc"
+#include "insn_trans/trans_rvbf16.c.inc"
#include "decode-xthead.c.inc"
#include "insn_trans/trans_xthead.c.inc"
#include "insn_trans/trans_xventanacondops.c.inc"
* that are tested in-order until a decoder matches onto the opcode.
*/
static const struct {
- bool (*guard_func)(DisasContext *);
+ bool (*guard_func)(const RISCVCPUConfig *);
bool (*decode_func)(DisasContext *, uint32_t);
} decoders[] = {
{ always_true_p, decode_insn32 },
ctx->opcode = opcode32;
for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) {
- if (decoders[i].guard_func(ctx) &&
+ if (decoders[i].guard_func(ctx->cfg_ptr) &&
decoders[i].decode_func(ctx, opcode32)) {
return;
}
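With the guard signature switched to take the CPU config directly, a predicate no longer needs a DisasContext. A sketch of a guard under the new type, reusing fields from the removed has_xthead_p above (the helper's final home and exact name may differ):

static bool has_xthead_p(const RISCVCPUConfig *cfg)
{
    /* Any of the T-Head vendor extensions enables the decoder. */
    return cfg->ext_xtheadba || cfg->ext_xtheadbb || cfg->ext_xtheadbs;
}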
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
ctx->misa_mxl_max = env->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
+ ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;
ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
xlen - 1 - R_VTYPE_RESERVED_SHIFT);
if (lmul & 4) {
- /* Fractional LMUL. */
+ /* Fractional LMUL - check LMUL * VLEN >= SEW */
if (lmul == 4 ||
- cpu->cfg.elen >> (8 - lmul) < sew) {
+ cpu->cfg.vlen >> (8 - lmul) < sew) {
vill = true;
}
}
cpu_mmu_index(env, false));
if (host) {
#ifdef CONFIG_USER_ONLY
- if (page_check_range(addr, offset, PAGE_READ) < 0) {
+ if (!page_check_range(addr, offset, PAGE_READ)) {
vl = i;
goto ProbeSuccess;
}
GEN_VEXT_VF(vfwmacc_vf_h, 4)
GEN_VEXT_VF(vfwmacc_vf_w, 8)
+static uint32_t fwmaccbf16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(bfloat16_to_float32(a, s),
+ bfloat16_to_float32(b, s), d, 0, s);
+}
+
+RVVCALL(OPFVV3, vfwmaccbf16_vv, WOP_UUU_H, H4, H2, H2, fwmaccbf16)
+GEN_VEXT_VV_ENV(vfwmaccbf16_vv, 4)
+RVVCALL(OPFVF3, vfwmaccbf16_vf, WOP_UUU_H, H4, H2, fwmaccbf16)
+GEN_VEXT_VF(vfwmaccbf16_vf, 4)
+
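Since bfloat16 is by construction the top half of an IEEE-754 binary32, the widening step in fwmaccbf16 above is conceptually a 16-bit left shift. A minimal standalone sketch of that idea (illustrative only; names are hypothetical, and it ignores the NaN quieting and float_status plumbing QEMU's softfloat performs):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for bfloat16_to_float32: widen by padding
 * 16 zero fraction bits. Real softfloat also handles NaNs and
 * exception flags via float_status. */
static float bf16_to_f32(uint16_t h)
{
    uint32_t bits = (uint32_t)h << 16;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void)
{
    uint16_t a = 0x3fc0; /* 1.5 in bfloat16 */
    uint16_t b = 0x4000; /* 2.0 in bfloat16 */
    float d = 1.0f;
    /* QEMU's fused form is float32_muladd(a', b', d, 0, s). */
    printf("%f\n", bf16_to_f32(a) * bf16_to_f32(b) + d); /* prints 4.0 */
    return 0;
}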
static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
return float32_muladd(float16_to_float32(a, true, s),
GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 4)
GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 8)
+RVVCALL(OPFVV1, vfwcvtbf16_f_f_v, WOP_UU_H, H4, H2, bfloat16_to_float32)
+GEN_VEXT_V_ENV(vfwcvtbf16_f_f_v, 4)
+
/* Narrowing Floating-Point/Integer Type-Convert Instructions */
/* (TD, T2, TX2) */
#define NOP_UU_B uint8_t, uint16_t, uint32_t
GEN_VEXT_V_ENV(vfncvt_f_f_w_h, 2)
GEN_VEXT_V_ENV(vfncvt_f_f_w_w, 4)
+RVVCALL(OPFVV1, vfncvtbf16_f_f_w, NOP_UU_H, H2, H4, float32_to_bfloat16)
+GEN_VEXT_V_ENV(vfncvtbf16_f_f_w, 2)
+
/*
* Vector Reduction Operations
*/
#include "s390x-internal.h"
#include "elf.h"
#include "sysemu/dump.h"
-#include "hw/s390x/pv.h"
#include "kvm/kvm_s390x.h"
+#include "target/s390x/kvm/pv.h"
struct S390xUserRegsStruct {
uint64_t psw[2];
#include "qapi/qapi-visit-run-state.h"
#include "sysemu/hw_accel.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "qemu/module.h"
#include "cpu_features.h"
#ifndef CONFIG_USER_ONLY
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#endif
#define DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC) \
#include "qemu/qemu-print.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#endif
#define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \
#include "sysemu/cpus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/s390-virtio-ccw.h"
-#include "hw/s390x/pv.h"
#include "sysemu/kvm.h"
#include "kvm/kvm_s390x.h"
+#include "target/s390x/kvm/pv.h"
#include "qemu/error-report.h"
#include "gdbstub/helpers.h"
#include "qemu/timer.h"
#include "hw/s390x/ioinst.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
DEF_HELPER_FLAGS_4(ipte, TCG_CALL_NO_RWG, void, env, i64, i64, i32)
DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_1(purge, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_2(lra, i64, env, i64)
+DEF_HELPER_3(lra, i64, env, i64, i64)
DEF_HELPER_1(per_check_exception, void, env)
DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64)
DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64)
#include "hw/s390x/ioinst.h"
#include "trace.h"
#include "hw/s390x/s390-pci-bus.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
/* All I/O instructions but chsc use the s format */
static uint64_t get_address_from_regs(CPUS390XState *env, uint32_t ipb,
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
-#include "hw/s390x/pv.h"
+#include "target/s390x/kvm/pv.h"
#ifndef DEBUG_KVM
#define DEBUG_KVM 0
s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
+ 'pv.c',
'kvm.c'
), if_false: files(
'stubs.c'
--- /dev/null
+/*
+ * Protected Virtualization functions
+ *
+ * Copyright IBM Corp. 2020
+ * Author(s):
+ * Janosch Frank <frankja@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#include "qemu/osdep.h"
+
+#include <linux/kvm.h>
+
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "sysemu/cpus.h"
+#include "qom/object_interfaces.h"
+#include "exec/confidential-guest-support.h"
+#include "hw/s390x/ipl.h"
+#include "hw/s390x/sclp.h"
+#include "target/s390x/kvm/kvm_s390x.h"
+#include "target/s390x/kvm/pv.h"
+
+static bool info_valid;
+static struct kvm_s390_pv_info_vm info_vm;
+static struct kvm_s390_pv_info_dump info_dump;
+
+static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data)
+{
+ struct kvm_pv_cmd pv_cmd = {
+ .cmd = cmd,
+ .data = (uint64_t)data,
+ };
+ int rc;
+
+ do {
+ rc = kvm_vm_ioctl(kvm_state, KVM_S390_PV_COMMAND, &pv_cmd);
+ } while (rc == -EINTR);
+
+ if (rc) {
+ error_report("KVM PV command %d (%s) failed: header rc %x rrc %x "
+ "IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc,
+ rc);
+ }
+ return rc;
+}
+
+/*
+ * This macro lets us pass the command as a string to the function so
+ * we can print it on an error.
+ */
+#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data)
+#define s390_pv_cmd_exit(cmd, data) \
+{ \
+ int rc; \
+ \
+ rc = __s390_pv_cmd(cmd, #cmd, data);\
+ if (rc) { \
+ exit(1); \
+ } \
+}
+
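For example, s390_pv_cmd(KVM_PV_ENABLE, NULL) expands to __s390_pv_cmd(KVM_PV_ENABLE, "KVM_PV_ENABLE", NULL), so a failure is logged with the human-readable command name rather than just its numeric value.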
+int s390_pv_query_info(void)
+{
+ struct kvm_s390_pv_info info = {
+ .header.id = KVM_PV_INFO_VM,
+ .header.len_max = sizeof(info.header) + sizeof(info.vm),
+ };
+ int rc;
+
+ /* The Info API's first user is the dump support, so they are bundled. */
+ if (!kvm_s390_get_protected_dump()) {
+ return 0;
+ }
+
+ rc = s390_pv_cmd(KVM_PV_INFO, &info);
+ if (rc) {
+ error_report("KVM PV INFO cmd %x failed: %s",
+ info.header.id, strerror(-rc));
+ return rc;
+ }
+ memcpy(&info_vm, &info.vm, sizeof(info.vm));
+
+ info.header.id = KVM_PV_INFO_DUMP;
+ info.header.len_max = sizeof(info.header) + sizeof(info.dump);
+ rc = s390_pv_cmd(KVM_PV_INFO, &info);
+ if (rc) {
+ error_report("KVM PV INFO cmd %x failed: %s",
+ info.header.id, strerror(-rc));
+ return rc;
+ }
+
+ memcpy(&info_dump, &info.dump, sizeof(info.dump));
+ info_valid = true;
+
+ return rc;
+}
+
+int s390_pv_vm_enable(void)
+{
+ return s390_pv_cmd(KVM_PV_ENABLE, NULL);
+}
+
+void s390_pv_vm_disable(void)
+{
+ s390_pv_cmd_exit(KVM_PV_DISABLE, NULL);
+}
+
+static void *s390_pv_do_unprot_async_fn(void *p)
+{
+ s390_pv_cmd_exit(KVM_PV_ASYNC_CLEANUP_PERFORM, NULL);
+ return NULL;
+}
+
+bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms)
+{
+ /*
+ * t is only needed to create the thread; once qemu_thread_create
+ * returns, it can safely be discarded.
+ */
+ QemuThread t;
+
+ /*
+ * If the feature is not present or if the VM is not larger than 2 GiB,
+ * KVM_PV_ASYNC_CLEANUP_PREPARE will fail; no point in attempting it.
+ */
+ if ((MACHINE(ms)->maxram_size <= 2 * GiB) ||
+ !kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) {
+ return false;
+ }
+ if (s390_pv_cmd(KVM_PV_ASYNC_CLEANUP_PREPARE, NULL) != 0) {
+ return false;
+ }
+
+ qemu_thread_create(&t, "async_cleanup", s390_pv_do_unprot_async_fn, NULL,
+ QEMU_THREAD_DETACHED);
+
+ return true;
+}
+
+int s390_pv_set_sec_parms(uint64_t origin, uint64_t length)
+{
+ struct kvm_s390_pv_sec_parm args = {
+ .origin = origin,
+ .length = length,
+ };
+
+ return s390_pv_cmd(KVM_PV_SET_SEC_PARMS, &args);
+}
+
+/*
+ * Called for each component in the SE type IPL parameter block 0.
+ */
+int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak)
+{
+ struct kvm_s390_pv_unp args = {
+ .addr = addr,
+ .size = size,
+ .tweak = tweak,
+ };
+
+ return s390_pv_cmd(KVM_PV_UNPACK, &args);
+}
+
+void s390_pv_prep_reset(void)
+{
+ s390_pv_cmd_exit(KVM_PV_PREP_RESET, NULL);
+}
+
+int s390_pv_verify(void)
+{
+ return s390_pv_cmd(KVM_PV_VERIFY, NULL);
+}
+
+void s390_pv_unshare(void)
+{
+ s390_pv_cmd_exit(KVM_PV_UNSHARE_ALL, NULL);
+}
+
+void s390_pv_inject_reset_error(CPUState *cs)
+{
+ int r1 = (cs->kvm_run->s390_sieic.ipa & 0x00f0) >> 4;
+ CPUS390XState *env = &S390_CPU(cs)->env;
+
+ /* Report that we are unable to enter protected mode */
+ env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV;
+}
+
+uint64_t kvm_s390_pv_dmp_get_size_cpu(void)
+{
+ return info_dump.dump_cpu_buffer_len;
+}
+
+uint64_t kvm_s390_pv_dmp_get_size_completion_data(void)
+{
+ return info_dump.dump_config_finalize_len;
+}
+
+uint64_t kvm_s390_pv_dmp_get_size_mem_state(void)
+{
+ return info_dump.dump_config_mem_buffer_per_1m;
+}
+
+bool kvm_s390_pv_info_basic_valid(void)
+{
+ return info_valid;
+}
+
+static int s390_pv_dump_cmd(uint64_t subcmd, uint64_t uaddr, uint64_t gaddr,
+ uint64_t len)
+{
+ struct kvm_s390_pv_dmp dmp = {
+ .subcmd = subcmd,
+ .buff_addr = uaddr,
+ .buff_len = len,
+ .gaddr = gaddr,
+ };
+ int ret;
+
+ ret = s390_pv_cmd(KVM_PV_DUMP, (void *)&dmp);
+ if (ret) {
+ error_report("KVM DUMP command %ld failed", subcmd);
+ }
+ return ret;
+}
+
+int kvm_s390_dump_cpu(S390CPU *cpu, void *buff)
+{
+ struct kvm_s390_pv_dmp dmp = {
+ .subcmd = KVM_PV_DUMP_CPU,
+ .buff_addr = (uint64_t)buff,
+ .gaddr = 0,
+ .buff_len = info_dump.dump_cpu_buffer_len,
+ };
+ struct kvm_pv_cmd pv = {
+ .cmd = KVM_PV_DUMP,
+ .data = (uint64_t)&dmp,
+ };
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_S390_PV_CPU_COMMAND, &pv);
+}
+
+int kvm_s390_dump_init(void)
+{
+ return s390_pv_dump_cmd(KVM_PV_DUMP_INIT, 0, 0, 0);
+}
+
+int kvm_s390_dump_mem_state(uint64_t gaddr, size_t len, void *dest)
+{
+ return s390_pv_dump_cmd(KVM_PV_DUMP_CONFIG_STOR_STATE, (uint64_t)dest,
+ gaddr, len);
+}
+
+int kvm_s390_dump_completion_data(void *buff)
+{
+ return s390_pv_dump_cmd(KVM_PV_DUMP_COMPLETE, (uint64_t)buff, 0,
+ info_dump.dump_config_finalize_len);
+}
+
+#define TYPE_S390_PV_GUEST "s390-pv-guest"
+OBJECT_DECLARE_SIMPLE_TYPE(S390PVGuest, S390_PV_GUEST)
+
+/**
+ * S390PVGuest:
+ *
+ * The S390PVGuest object is basically a dummy used to tell the
+ * confidential guest support system to use s390's PV mechanism.
+ *
+ * # $QEMU \
+ * -object s390-pv-guest,id=pv0 \
+ * -machine ...,confidential-guest-support=pv0
+ */
+struct S390PVGuest {
+ ConfidentialGuestSupport parent_obj;
+};
+
+typedef struct S390PVGuestClass S390PVGuestClass;
+
+struct S390PVGuestClass {
+ ConfidentialGuestSupportClass parent_class;
+};
+
+/*
+ * If protected virtualization is enabled, the amount of data that the
+ * Read SCP Info Service Call can use is limited to one page. The
+ * available space also depends on the Extended-Length SCCB (ELS)
+ * feature which can take more buffer space to store feature
+ * information. This impacts the maximum number of CPUs supported in
+ * the machine.
+ */
+static uint32_t s390_pv_get_max_cpus(void)
+{
+ int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ?
+ offsetof(ReadInfo, entries) : SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET;
+
+ return (TARGET_PAGE_SIZE - offset_cpu) / sizeof(CPUEntry);
+}
+
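As a purely illustrative calculation (the real offsets depend on the SCLP structures): with a 4 KiB page, a 128-byte CPU-entry offset and 16-byte CPU entries, the cap would be (4096 - 128) / 16 = 248 CPUs.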
+static bool s390_pv_check_cpus(Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ uint32_t pv_max_cpus = s390_pv_get_max_cpus();
+
+ if (ms->smp.max_cpus > pv_max_cpus) {
+ error_setg(errp, "Protected VMs support a maximum of %d CPUs",
+ pv_max_cpus);
+ return false;
+ }
+
+ return true;
+}
+
+static bool s390_pv_guest_check(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ return s390_pv_check_cpus(errp);
+}
+
+int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ if (!object_dynamic_cast(OBJECT(cgs), TYPE_S390_PV_GUEST)) {
+ return 0;
+ }
+
+ if (!s390_has_feat(S390_FEAT_UNPACK)) {
+ error_setg(errp,
+ "CPU model does not support Protected Virtualization");
+ return -1;
+ }
+
+ if (!s390_pv_guest_check(cgs, errp)) {
+ return -1;
+ }
+
+ cgs->ready = true;
+
+ return 0;
+}
+
+OBJECT_DEFINE_TYPE_WITH_INTERFACES(S390PVGuest,
+ s390_pv_guest,
+ S390_PV_GUEST,
+ CONFIDENTIAL_GUEST_SUPPORT,
+ { TYPE_USER_CREATABLE },
+ { NULL })
+
+static void s390_pv_guest_class_init(ObjectClass *oc, void *data)
+{
+}
+
+static void s390_pv_guest_init(Object *obj)
+{
+}
+
+static void s390_pv_guest_finalize(Object *obj)
+{
+}
--- /dev/null
+/*
+ * Protected Virtualization header
+ *
+ * Copyright IBM Corp. 2020
+ * Author(s):
+ * Janosch Frank <frankja@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#ifndef HW_S390_PV_H
+#define HW_S390_PV_H
+
+#include "qapi/error.h"
+#include "sysemu/kvm.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+
+#ifdef CONFIG_KVM
+#include "cpu.h"
+
+static inline bool s390_is_pv(void)
+{
+ static S390CcwMachineState *ccw;
+ Object *obj;
+
+ if (ccw) {
+ return ccw->pv;
+ }
+
+ /* we have to bail out for the "none" machine */
+ obj = object_dynamic_cast(qdev_get_machine(),
+ TYPE_S390_CCW_MACHINE);
+ if (!obj) {
+ return false;
+ }
+ ccw = S390_CCW_MACHINE(obj);
+ return ccw->pv;
+}
+
+int s390_pv_query_info(void);
+int s390_pv_vm_enable(void);
+void s390_pv_vm_disable(void);
+bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms);
+int s390_pv_set_sec_parms(uint64_t origin, uint64_t length);
+int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak);
+void s390_pv_prep_reset(void);
+int s390_pv_verify(void);
+void s390_pv_unshare(void);
+void s390_pv_inject_reset_error(CPUState *cs);
+uint64_t kvm_s390_pv_dmp_get_size_cpu(void);
+uint64_t kvm_s390_pv_dmp_get_size_mem_state(void);
+uint64_t kvm_s390_pv_dmp_get_size_completion_data(void);
+bool kvm_s390_pv_info_basic_valid(void);
+int kvm_s390_dump_init(void);
+int kvm_s390_dump_cpu(S390CPU *cpu, void *buff);
+int kvm_s390_dump_mem_state(uint64_t addr, size_t len, void *dest);
+int kvm_s390_dump_completion_data(void *buff);
+#else /* CONFIG_KVM */
+static inline bool s390_is_pv(void) { return false; }
+static inline int s390_pv_query_info(void) { return 0; }
+static inline int s390_pv_vm_enable(void) { return 0; }
+static inline void s390_pv_vm_disable(void) {}
+static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; }
+static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; }
+static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; }
+static inline void s390_pv_prep_reset(void) {}
+static inline int s390_pv_verify(void) { return 0; }
+static inline void s390_pv_unshare(void) {}
+static inline void s390_pv_inject_reset_error(CPUState *cs) {}
+static inline uint64_t kvm_s390_pv_dmp_get_size_cpu(void) { return 0; }
+static inline uint64_t kvm_s390_pv_dmp_get_size_mem_state(void) { return 0; }
+static inline uint64_t kvm_s390_pv_dmp_get_size_completion_data(void) { return 0; }
+static inline bool kvm_s390_pv_info_basic_valid(void) { return false; }
+static inline int kvm_s390_dump_init(void) { return 0; }
+static inline int kvm_s390_dump_cpu(S390CPU *cpu, void *buff) { return 0; }
+static inline int kvm_s390_dump_mem_state(uint64_t addr, size_t len,
+ void *dest) { return 0; }
+static inline int kvm_s390_dump_completion_data(void *buff) { return 0; }
+#endif /* CONFIG_KVM */
+
+int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp);
+static inline int s390_pv_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ if (!cgs) {
+ return 0;
+ }
+ if (kvm_enabled()) {
+ return s390_pv_kvm_init(cgs, errp);
+ }
+
+ error_setg(errp, "Protected Virtualization requires KVM");
+ return -1;
+}
+
+#endif /* HW_S390_PV_H */
vaddr &= TARGET_PAGE_MASK;
- if (!(env->psw.mask & PSW_MASK_DAT)) {
+ if (rw != MMU_S390_LRA && !(env->psw.mask & PSW_MASK_DAT)) {
*raddr = vaddr;
goto nodat;
}
/* 64/32-bit FP multiplication */
uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
{
+ float64 f1_64 = float32_to_float64(f1, &env->fpu_status);
float64 ret = float32_to_float64(f2, &env->fpu_status);
- ret = float64_mul(f1, ret, &env->fpu_status);
+ ret = float64_mul(f1_64, ret, &env->fpu_status);
handle_exceptions(env, false, GETPC());
return ret;
}
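The bug being fixed: f1 holds a raw float32 operand, but the old code handed it straight to float64_mul as if it were already a float64; both inputs must be widened before the multiply. The instruction-table change below (f1 -> e1 for the first operand) matches this by loading it as a 32-bit value.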
F(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0, IF_BFP)
F(0xb31c, MDBR, RRE, Z, f1, f2, new, f1, mdb, 0, IF_BFP)
F(0xb34c, MXBR, RRE, Z, x1, x2, new_x, x1, mxb, 0, IF_BFP)
- F(0xb30c, MDEBR, RRE, Z, f1, e2, new, f1, mdeb, 0, IF_BFP)
+ F(0xb30c, MDEBR, RRE, Z, e1, e2, new, f1, mdeb, 0, IF_BFP)
F(0xb307, MXDBR, RRE, Z, f1, f2, new_x, x1, mxdb, 0, IF_BFP)
F(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0, IF_BFP)
F(0xed1c, MDB, RXE, Z, f1, m2_64, new, f1, mdb, 0, IF_BFP)
- F(0xed0c, MDEB, RXE, Z, f1, m2_32u, new, f1, mdeb, 0, IF_BFP)
+ F(0xed0c, MDEB, RXE, Z, e1, m2_32u, new, f1, mdeb, 0, IF_BFP)
F(0xed07, MXDB, RXE, Z, f1, m2_64, new_x, x1, mxdb, 0, IF_BFP)
/* MULTIPLY HALFWORD */
C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0)
int32_t i;
/* MVCRL always copies one more byte than specified - maximum is 256 */
+ l &= 0xff;
l++;
access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
}
/* load real address */
-uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
+uint64_t HELPER(lra)(CPUS390XState *env, uint64_t r1, uint64_t addr)
{
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
uint64_t ret, tec;
exc = mmu_translate(env, addr, MMU_S390_LRA, asc, &ret, &flags, &tec);
if (exc) {
cc = 3;
- ret = exc | 0x80000000;
+ ret = (r1 & 0xFFFFFFFF00000000ULL) | exc | 0x80000000;
} else {
cc = 0;
ret |= addr & ~TARGET_PAGE_MASK;
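Rationale, as the helper signature change suggests: when translation fails, LRA stores the exception code in bits 32-63 of R1 while bits 0-31 must be left unchanged, so the helper now receives the old R1 value and merges its high half into the result.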
int r1 = get_field(s, r1);
int r2 = get_field(s, r2);
TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 t_cc = tcg_temp_new_i64();
/* Note the "subsequently" in the PoO, which implies a defined result
if r1 == r2. Thus we cannot defer these writes to an output hook. */
+ gen_op_calc_cc(s);
+ tcg_gen_extu_i32_i64(t_cc, cc_op);
tcg_gen_shri_i64(t, psw_mask, 32);
+ tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
store_reg32_i64(r1, t);
if (r2 != 0) {
store_reg32_i64(r2, psw_mask);
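The deposit offsets follow from the PSW layout: the condition code occupies PSW bits 18-19, which after the 32-bit right shift land at bits 13..12 of the low word, hence the deposit at offset 12, length 2.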
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
- gen_helper_lra(o->out, cpu_env, o->in2);
+ gen_helper_lra(o->out, cpu_env, o->out, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
disas_jdest(s, i2, is_imm, imm, ri2);
if (is_imm) {
- ri2 = tcg_constant_i64(s->base.pc_next + imm * 2);
+ ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
}
return ri2;
case ASI_PNFL: /* Primary no-fault LE */
case ASI_SNF: /* Secondary no-fault */
case ASI_SNFL: /* Secondary no-fault LE */
- if (page_check_range(addr, size, PAGE_READ) == -1) {
+ if (!page_check_range(addr, size, PAGE_READ)) {
ret = 0;
break;
}
*
* C_N1_Im(...) defines a constraint set with 1 output and <m> inputs,
* except that the output must use a new register.
+ *
+ * C_Nn_Om_Ik(...) defines a constraint set with <n + m> outputs and <k>
+ * inputs, except that the first <n> outputs must use new registers.
*/
C_O0_I1(r)
C_O0_I2(L, L)
C_O2_I2(a, d, a, r)
C_O2_I2(r, r, L, L)
C_O2_I3(a, d, 0, 1, r)
-C_O2_I4(r, r, 0, 1, re, re)
+C_N1_O1_I4(r, r, 0, 1, re, re)
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, 0, 1, re, re);
+ return C_N1_O1_I4(r, r, 0, 1, re, re);
case INDEX_op_ctz_i32:
case INDEX_op_ctz_i64:
ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
- /* Direct branch will be patched by tb_target_set_jmp_target. */
+ /* TODO: Use direct branches when possible. */
set_jmp_insn_offset(s, which);
tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
- /* When branch is out of range, fall through to indirect. */
tcg_out32(s, BCCTR | BO_ALWAYS);
/* For the unlinked case, need to reset TCG_REG_TB. */
intptr_t diff = addr - jmp_rx;
tcg_insn_unit insn;
+ if (USE_REG_TB) {
+ return;
+ }
+
if (in_range_b(diff)) {
insn = B | (diff & 0x3fffffc);
- } else if (USE_REG_TB) {
- insn = MTSPR | RS(TCG_REG_TB) | CTR;
} else {
insn = NOP;
}
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
+ *
+ * C_Nn_Om_Ik(...) defines a constraint set with <n + m> outputs and <k>
+ * inputs, except that the first <n> outputs must use new registers.
*/
C_O0_I1(r)
C_O0_I2(r, r)
C_O2_I2(o, m, 0, r)
C_O2_I2(o, m, r, r)
C_O2_I3(o, m, 0, 1, r)
-C_O2_I4(r, r, 0, 1, rA, r)
-C_O2_I4(r, r, 0, 1, ri, r)
-C_O2_I4(r, r, 0, 1, r, r)
+C_N1_O1_I4(r, r, 0, 1, ri, r)
+C_N1_O1_I4(r, r, 0, 1, rA, r)
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
- return C_O2_I4(r, r, 0, 1, ri, r);
+ return C_N1_O1_I4(r, r, 0, 1, ri, r);
case INDEX_op_add2_i64:
case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, 0, 1, rA, r);
+ return C_N1_O1_I4(r, r, 0, 1, rA, r);
case INDEX_op_st_vec:
return C_O0_I2(v, r);
#else
# define WITH_ATOMIC64(X)
#endif
-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),
typedef enum {
#include "tcg-target-con-set.h"
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
+#undef C_N1_O1_I4
/* Put all of the constraint sets into an array, indexed by the enum. */
#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
+#undef C_N1_O1_I4
/* Expand the enumerator to be returned from tcg_target_op_def(). */
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)
#include "tcg-target.c.inc"
.ref_slot = cum->ref_slot + i,
};
}
- cum->info_in_idx += n;
+ cum->info_in_idx += n - 1; /* i=0 accounted for in layout_arg_1 */
cum->ref_slot += n;
}
* dead after the instruction, we must allocate a new
* register and move it.
*/
- if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
+ if (temp_readonly(ts) || !IS_DEAD_ARG(i)
+ || def->args_ct[arg_ct->alias_index].newreg) {
allocate_new_reg = true;
} else if (ts->val_type == TEMP_VAL_REG) {
/*
import os
-from avocado import skip
from avocado import skipUnless
from avocado.utils import archive
"sbsa-ref",
)
+ @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is not reliable')
def test_sbsaref_edk2_firmware(self):
"""
:avocado: tags=cpu:cortex-a57
# later. See the COPYING file in the top-level directory.
from avocado_qemu import QemuSystemTest
-from avocado import skip
from avocado_qemu import wait_for_console_pattern
class RiscvOpenSBI(QemuSystemTest):
wait_for_console_pattern(self, 'Platform Name')
wait_for_console_pattern(self, 'Boot HART MEDELEG')
- @skip("requires OpenSBI fix to work")
def test_riscv32_spike(self):
"""
:avocado: tags=arch:riscv32
numactl-dev \
openssh-client \
pcre-dev \
+ pipewire-dev \
pixman-dev \
pkgconf \
pulseaudio-dev \
openssh-clients \
pam-devel \
pcre-static \
+ pipewire-devel \
pixman-devel \
pkgconfig \
pulseaudio-libs-devel \
libnfs-dev:amd64 \
libnuma-dev:amd64 \
libpam0g-dev:amd64 \
+ libpipewire-0.3-dev:amd64 \
libpixman-1-dev:amd64 \
libpmem-dev:amd64 \
libpng-dev:amd64 \
libnuma-dev \
libpam0g-dev \
libpcre2-dev \
+ libpipewire-0.3-dev \
libpixman-1-dev \
libpmem-dev \
libpng-dev \
libnfs-dev:arm64 \
libnuma-dev:arm64 \
libpam0g-dev:arm64 \
+ libpipewire-0.3-dev:arm64 \
libpixman-1-dev:arm64 \
libpng-dev:arm64 \
libpulse-dev:arm64 \
libnfs-dev:armel \
libnuma-dev:armel \
libpam0g-dev:armel \
+ libpipewire-0.3-dev:armel \
libpixman-1-dev:armel \
libpng-dev:armel \
libpulse-dev:armel \
libnfs-dev:armhf \
libnuma-dev:armhf \
libpam0g-dev:armhf \
+ libpipewire-0.3-dev:armhf \
libpixman-1-dev:armhf \
libpng-dev:armhf \
libpulse-dev:armhf \
libnfs-dev:mips64el \
libnuma-dev:mips64el \
libpam0g-dev:mips64el \
+ libpipewire-0.3-dev:mips64el \
libpixman-1-dev:mips64el \
libpng-dev:mips64el \
libpulse-dev:mips64el \
libnfs-dev:mipsel \
libnuma-dev:mipsel \
libpam0g-dev:mipsel \
+ libpipewire-0.3-dev:mipsel \
libpixman-1-dev:mipsel \
libpng-dev:mipsel \
libpulse-dev:mipsel \
libnfs-dev:ppc64el \
libnuma-dev:ppc64el \
libpam0g-dev:ppc64el \
+ libpipewire-0.3-dev:ppc64el \
libpixman-1-dev:ppc64el \
libpng-dev:ppc64el \
libpulse-dev:ppc64el \
libnfs-dev:s390x \
libnuma-dev:s390x \
libpam0g-dev:s390x \
+ libpipewire-0.3-dev:s390x \
libpixman-1-dev:s390x \
libpng-dev:s390x \
libpulse-dev:s390x \
openssh-clients \
pam-devel \
pcre-static \
+ pipewire-devel \
pixman-devel \
pkgconfig \
pulseaudio-libs-devel \
openssh \
pam-devel \
pcre-devel-static \
+ pipewire-devel \
pkgconfig \
python39-base \
python39-pip \
libnuma-dev \
libpam0g-dev \
libpcre2-dev \
+ libpipewire-0.3-dev \
libpixman-1-dev \
libpmem-dev \
libpng-dev \
-Subproject commit b0f44f929a81c0a604fb7fbf8afc34d37ab0eae9
+Subproject commit 9bff3b763b5531a1490e238bfbf77306dc3a6dbb
- pam
- pcre-static
- pixman
+ - pipewire
- pkg-config
- pulseaudio
- python3
generate(filename, cmd, trailer)
+def generate_pkglist(vm, target):
+ filename = Path(src_dir, "tests", "vm", "generated", vm + ".json")
+ cmd = lcitool_cmd + ["variables", "--format", "json", target, "qemu"]
+ generate(filename, cmd, None)
+
+
# Netmap still needs to be manually built as it is yet to be packaged
# into a distro. We also add cscope and gtags which are used in the CI
# test
generate_cirrus("freebsd-13")
generate_cirrus("macos-12")
+ #
+ # VM packages lists
+ #
+ generate_pkglist("freebsd", "freebsd-13")
+
sys.exit(0)
except Exception as ex:
print(str(ex), file=sys.stderr)
" -device pci-testdev,bus=nohprp,acpi-index=501"
" -device pcie-root-port,id=nohprpint,port=0x0,chassis=3,hotplug=off,"
"multifunction=on,addr=8.0"
- " -device pci-testdev,bus=nohprpint,acpi-index=601,addr=8.1"
+ " -device pci-testdev,bus=nohprpint,acpi-index=601,addr=0.1"
" -device pcie-root-port,id=hprp2,port=0x0,chassis=4,bus=nohprpint,"
- "addr=9.0"
+ "addr=0.2"
" -device pci-testdev,bus=hprp2,acpi-index=602"
, &data);
free_test_data(&data);
test_override(args, "pc", expected);
}
-static void setup_pci_bridge(TestArgs *args, const char *id, const char *rootid)
+static void setup_pci_bridge(TestArgs *args, const char *id)
{
- char *root, *br;
- root = g_strdup_printf("-device pcie-root-port,id=%s", rootid);
- br = g_strdup_printf("-device pcie-pci-bridge,bus=%s,id=%s", rootid, id);
+ char *br;
+ br = g_strdup_printf("-device pcie-pci-bridge,bus=pcie.0,id=%s", id);
- args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, root);
args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, br);
}
add_drive_with_mbr(args, empty_mbr, 1);
add_drive_with_mbr(args, empty_mbr, 1);
add_drive_with_mbr(args, empty_mbr, 1);
- setup_pci_bridge(args, "pcie.0", "br");
- add_scsi_controller(args, "lsi53c895a", "br", 3);
+ setup_pci_bridge(args, "pcie-pci-br");
+ add_scsi_controller(args, "lsi53c895a", "pcie-pci-br", 3);
add_scsi_disk(args, 0, 0, 0, 0, 0, 10000, 120, 30);
add_scsi_disk(args, 1, 0, 0, 1, 0, 9000, 120, 30);
add_scsi_disk(args, 2, 0, 0, 2, 0, 1, 0, 0);
};
add_drive_with_mbr(args, empty_mbr, 1);
add_drive_with_mbr(args, empty_mbr, 1);
- setup_pci_bridge(args, "pcie.0", "br");
- add_virtio_disk(args, 0, "br", 3, 10000, 120, 30);
- add_virtio_disk(args, 1, "br", 4, 9000, 120, 30);
+ setup_pci_bridge(args, "pcie-pci-br");
+ add_virtio_disk(args, 0, "pcie-pci-br", 3, 10000, 120, 30);
+ add_virtio_disk(args, 1, "pcie-pci-br", 4, 9000, 120, 30);
test_override(args, "q35", expected);
}
migrate_allocator(&from->alloc, &to->alloc);
}
-bool have_qemu_img(void)
-{
- char *rpath;
- const char *path = getenv("QTEST_QEMU_IMG");
- if (!path) {
- return false;
- }
-
- rpath = realpath(path, NULL);
- if (!rpath) {
- return false;
- } else {
- free(rpath);
- return true;
- }
-}
-
-void mkimg(const char *file, const char *fmt, unsigned size_mb)
-{
- gchar *cli;
- bool ret;
- int rc;
- GError *err = NULL;
- char *qemu_img_path;
- gchar *out, *out2;
- char *qemu_img_abs_path;
-
- qemu_img_path = getenv("QTEST_QEMU_IMG");
- g_assert(qemu_img_path);
- qemu_img_abs_path = realpath(qemu_img_path, NULL);
- g_assert(qemu_img_abs_path);
-
- cli = g_strdup_printf("%s create -f %s %s %uM", qemu_img_abs_path,
- fmt, file, size_mb);
- ret = g_spawn_command_line_sync(cli, &out, &out2, &rc, &err);
- if (err || !g_spawn_check_exit_status(rc, &err)) {
- fprintf(stderr, "%s\n", err->message);
- g_error_free(err);
- }
- g_assert(ret && !err);
-
- g_free(out);
- g_free(out2);
- g_free(cli);
- free(qemu_img_abs_path);
-}
-
void mkqcow2(const char *file, unsigned size_mb)
{
- return mkimg(file, "qcow2", size_mb);
+ g_assert_true(mkimg(file, "qcow2", size_mb));
}
void prepare_blkdebug_script(const char *debug_fn, const char *event)
G_GNUC_PRINTF(2, 3);
void qtest_common_shutdown(QOSState *qs);
void qtest_shutdown(QOSState *qs);
-bool have_qemu_img(void);
-void mkimg(const char *file, const char *fmt, unsigned size_mb);
void mkqcow2(const char *file, unsigned size_mb);
void migrate(QOSState *from, QOSState *to, const char *uri);
void prepare_blkdebug_script(const char *debug_fn, const char *event);
'virtio-serial.c',
'virtio-iommu.c',
'virtio-gpio.c',
+ 'virtio-scmi.c',
'generic-pcihost.c',
# qgraph machines:
--- /dev/null
+/*
+ * virtio-scmi nodes for testing
+ *
+ * SPDX-FileCopyrightText: Linaro Ltd
+ * SPDX-FileCopyrightText: Red Hat, Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Based on virtio-gpio.c, doing basically the same thing.
+ */
+
+#include "qemu/osdep.h"
+#include "standard-headers/linux/virtio_config.h"
+#include "../libqtest.h"
+#include "qemu/module.h"
+#include "qgraph.h"
+#include "virtio-scmi.h"
+
+static QGuestAllocator *alloc;
+
+static void virtio_scmi_cleanup(QVhostUserSCMI *scmi)
+{
+ QVirtioDevice *vdev = scmi->vdev;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ qvirtqueue_cleanup(vdev->bus, scmi->queues[i], alloc);
+ }
+ g_free(scmi->queues);
+}
+
+/*
+ * This handles the VirtIO setup from the point of view of the driver
+ * frontend and therefore doesn't present any vhost specific features
+ * and in fact masks off the re-used bit.
+ */
+static void virtio_scmi_setup(QVhostUserSCMI *scmi)
+{
+ QVirtioDevice *vdev = scmi->vdev;
+ uint64_t features;
+ int i;
+
+ features = qvirtio_get_features(vdev);
+ features &= ~QVIRTIO_F_BAD_FEATURE;
+ qvirtio_set_features(vdev, features);
+
+ scmi->queues = g_new(QVirtQueue *, 2);
+ for (i = 0; i < 2; i++) {
+ scmi->queues[i] = qvirtqueue_setup(vdev, alloc, i);
+ }
+ qvirtio_set_driver_ok(vdev);
+}
+
+static void *qvirtio_scmi_get_driver(QVhostUserSCMI *v_scmi,
+ const char *interface)
+{
+ if (!g_strcmp0(interface, "vhost-user-scmi")) {
+ return v_scmi;
+ }
+ if (!g_strcmp0(interface, "virtio")) {
+ return v_scmi->vdev;
+ }
+
+ g_assert_not_reached();
+}
+
+static void *qvirtio_scmi_device_get_driver(void *object,
+ const char *interface)
+{
+ QVhostUserSCMIDevice *v_scmi = object;
+ return qvirtio_scmi_get_driver(&v_scmi->scmi, interface);
+}
+
+/* virtio-scmi (mmio) */
+static void qvirtio_scmi_device_destructor(QOSGraphObject *obj)
+{
+ QVhostUserSCMIDevice *scmi_dev = (QVhostUserSCMIDevice *) obj;
+ virtio_scmi_cleanup(&scmi_dev->scmi);
+}
+
+static void qvirtio_scmi_device_start_hw(QOSGraphObject *obj)
+{
+ QVhostUserSCMIDevice *scmi_dev = (QVhostUserSCMIDevice *) obj;
+ virtio_scmi_setup(&scmi_dev->scmi);
+}
+
+static void *virtio_scmi_device_create(void *virtio_dev,
+ QGuestAllocator *t_alloc,
+ void *addr)
+{
+ QVhostUserSCMIDevice *virtio_device = g_new0(QVhostUserSCMIDevice, 1);
+ QVhostUserSCMI *interface = &virtio_device->scmi;
+
+ interface->vdev = virtio_dev;
+ alloc = t_alloc;
+
+ virtio_device->obj.get_driver = qvirtio_scmi_device_get_driver;
+ virtio_device->obj.start_hw = qvirtio_scmi_device_start_hw;
+ virtio_device->obj.destructor = qvirtio_scmi_device_destructor;
+
+ return &virtio_device->obj;
+}
+
+/* virtio-scmi-pci */
+static void qvirtio_scmi_pci_destructor(QOSGraphObject *obj)
+{
+ QVhostUserSCMIPCI *scmi_pci = (QVhostUserSCMIPCI *) obj;
+ QOSGraphObject *pci_vobj = &scmi_pci->pci_vdev.obj;
+
+ virtio_scmi_cleanup(&scmi_pci->scmi);
+ qvirtio_pci_destructor(pci_vobj);
+}
+
+static void qvirtio_scmi_pci_start_hw(QOSGraphObject *obj)
+{
+ QVhostUserSCMIPCI *scmi_pci = (QVhostUserSCMIPCI *) obj;
+ QOSGraphObject *pci_vobj = &scmi_pci->pci_vdev.obj;
+
+ qvirtio_pci_start_hw(pci_vobj);
+ virtio_scmi_setup(&scmi_pci->scmi);
+}
+
+static void *qvirtio_scmi_pci_get_driver(void *object, const char *interface)
+{
+ QVhostUserSCMIPCI *v_scmi = object;
+
+ if (!g_strcmp0(interface, "pci-device")) {
+ return v_scmi->pci_vdev.pdev;
+ }
+ return qvirtio_scmi_get_driver(&v_scmi->scmi, interface);
+}
+
+static void *virtio_scmi_pci_create(void *pci_bus, QGuestAllocator *t_alloc,
+ void *addr)
+{
+ QVhostUserSCMIPCI *virtio_spci = g_new0(QVhostUserSCMIPCI, 1);
+ QVhostUserSCMI *interface = &virtio_spci->scmi;
+ QOSGraphObject *obj = &virtio_spci->pci_vdev.obj;
+
+ virtio_pci_init(&virtio_spci->pci_vdev, pci_bus, addr);
+ interface->vdev = &virtio_spci->pci_vdev.vdev;
+ alloc = t_alloc;
+
+ obj->get_driver = qvirtio_scmi_pci_get_driver;
+ obj->start_hw = qvirtio_scmi_pci_start_hw;
+ obj->destructor = qvirtio_scmi_pci_destructor;
+
+ return obj;
+}
+
+static void virtio_scmi_register_nodes(void)
+{
+ QPCIAddress addr = {
+ .devfn = QPCI_DEVFN(4, 0),
+ };
+
+ QOSGraphEdgeOptions edge_opts = { };
+
+ /* vhost-user-scmi-device */
+ edge_opts.extra_device_opts = "id=scmi,chardev=chr-vhost-user-test "
+ "-global virtio-mmio.force-legacy=false";
+ qos_node_create_driver("vhost-user-scmi-device",
+ virtio_scmi_device_create);
+ qos_node_consumes("vhost-user-scmi-device", "virtio-bus", &edge_opts);
+ qos_node_produces("vhost-user-scmi-device", "vhost-user-scmi");
+
+ /* virtio-scmi-pci */
+ edge_opts.extra_device_opts = "id=scmi,addr=04.0,chardev=chr-vhost-user-test";
+ add_qpci_address(&edge_opts, &addr);
+ qos_node_create_driver("vhost-user-scmi-pci", virtio_scmi_pci_create);
+ qos_node_consumes("vhost-user-scmi-pci", "pci-bus", &edge_opts);
+ qos_node_produces("vhost-user-scmi-pci", "vhost-user-scmi");
+}
+
+libqos_init(virtio_scmi_register_nodes);
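In qgraph terms, qos_node_consumes declares the bus each driver needs (virtio-bus or pci-bus) and qos_node_produces exposes the vhost-user-scmi interface, so any test consuming vhost-user-scmi can be scheduled on either transport without naming a machine explicitly.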
--- /dev/null
+/*
+ * virtio-scmi structures
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TESTS_LIBQOS_VIRTIO_SCMI_H
+#define TESTS_LIBQOS_VIRTIO_SCMI_H
+
+#include "qgraph.h"
+#include "virtio.h"
+#include "virtio-pci.h"
+
+typedef struct QVhostUserSCMI QVhostUserSCMI;
+typedef struct QVhostUserSCMIPCI QVhostUserSCMIPCI;
+typedef struct QVhostUserSCMIDevice QVhostUserSCMIDevice;
+
+struct QVhostUserSCMI {
+ QVirtioDevice *vdev;
+ QVirtQueue **queues;
+};
+
+struct QVhostUserSCMIPCI {
+ QVirtioPCIDevice pci_vdev;
+ QVhostUserSCMI scmi;
+};
+
+struct QVhostUserSCMIDevice {
+ QOSGraphObject obj;
+ QVhostUserSCMI scmi;
+};
+
+#endif
return b;
}
+
+bool have_qemu_img(void)
+{
+ char *rpath;
+ const char *path = getenv("QTEST_QEMU_IMG");
+ if (!path) {
+ return false;
+ }
+
+ rpath = realpath(path, NULL);
+ if (!rpath) {
+ return false;
+ } else {
+ free(rpath);
+ return true;
+ }
+}
+
+bool mkimg(const char *file, const char *fmt, unsigned size_mb)
+{
+ gchar *cli;
+ bool ret;
+ int rc;
+ GError *err = NULL;
+ char *qemu_img_path;
+ gchar *out, *out2;
+ char *qemu_img_abs_path;
+
+ qemu_img_path = getenv("QTEST_QEMU_IMG");
+ if (!qemu_img_path) {
+ return false;
+ }
+ qemu_img_abs_path = realpath(qemu_img_path, NULL);
+ if (!qemu_img_abs_path) {
+ return false;
+ }
+
+ cli = g_strdup_printf("%s create -f %s %s %uM", qemu_img_abs_path,
+ fmt, file, size_mb);
+ ret = g_spawn_command_line_sync(cli, &out, &out2, &rc, &err);
+ if (err || !g_spawn_check_exit_status(rc, &err)) {
+ fprintf(stderr, "%s\n", err->message);
+ g_error_free(err);
+ }
+
+ g_free(out);
+ g_free(out2);
+ g_free(cli);
+ free(qemu_img_abs_path);
+
+ return ret && !err;
+}
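A usage sketch from a hypothetical test (assumes the harness exports QTEST_QEMU_IMG; the file name is illustrative):

    if (!have_qemu_img()) {
        g_test_skip("qemu-img not found in QTEST_QEMU_IMG");
        return;
    }
    g_assert_true(mkimg("/tmp/test.qcow2", "qcow2", 64));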
*/
pid_t qtest_pid(QTestState *s);
+/**
+ * have_qemu_img:
+ *
+ * Returns: true if "qemu-img" is available.
+ */
+bool have_qemu_img(void);
+
+/**
+ * mkimg:
+ * @file: File name of the image that should be created
+ * @fmt: Format, e.g. "qcow2" or "raw"
+ * @size_mb: Size of the image in megabytes
+ *
+ * Create a disk image with qemu-img. Note that the QTEST_QEMU_IMG
+ * environment variable must point to the qemu-img file.
+ *
+ * Returns: true if the image has been created successfully.
+ */
+bool mkimg(const char *file, const char *fmt, unsigned size_mb);
+
#endif
'cpu-plug-test',
'migration-test']
+qtests_riscv32 = \
+ (config_all_devices.has_key('CONFIG_SIFIVE_E_AON') ? ['sifive-e-aon-watchdog-test'] : [])
+
qos_test_ss = ss.source_set()
qos_test_ss.add(
'ac97-test.c',
static bool got_src_stop;
static bool got_dst_resume;
+/*
+ * An initial 3 MB offset is used as that corresponds
+ * to ~1 sec of data transfer with our bandwidth setting.
+ */
+#define MAGIC_OFFSET_BASE (3 * 1024 * 1024)
+/*
+ * A further 1k is added to ensure we're not a multiple
+ * of TEST_MEM_PAGE_SIZE, thus avoiding clashes with writes
+ * from the migration guest workload.
+ */
+#define MAGIC_OFFSET_SHUFFLE 1024
+#define MAGIC_OFFSET (MAGIC_OFFSET_BASE + MAGIC_OFFSET_SHUFFLE)
+#define MAGIC_MARKER 0xFEED12345678CAFEULL
+
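Concretely, MAGIC_OFFSET = 3 * 1024 * 1024 + 1024 = 3146752; assuming the test's 4 KiB page stride (TEST_MEM_PAGE_SIZE), that is 768.25 pages, so the marker can never coincide with one of the workload's per-page writes.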
/*
* Dirtylimit stop working if dirty page rate error
* value less than DIRTYLIMIT_TOLERANCE_RANGE
migrate_set_parameter_int(who, "downtime-limit", 30 * 1000);
}
+/*
+ * Our goal is to ensure that we run a single full migration
+ * iteration, and also dirty memory, ensuring that at least
+ * one further iteration is required.
+ *
+ * We can't directly synchronize with the start of a migration
+ * so we have to apply some tricks monitoring memory that is
+ * transferred.
+ *
+ * Initially we set the migration bandwidth to an insanely
+ * low value, with tiny max downtime too. This basically
+ * guarantees migration will never complete.
+ *
+ * This will result in a test that is unacceptably slow though,
+ * so we can't let the entire migration pass run at this speed.
+ * Our intent is to let it run just long enough that we can
+ * prove data prior to the marker has been transferred *AND*
+ * also prove this transferred data is dirty again.
+ *
+ * Before migration starts, we write a 64-bit magic marker
+ * into a fixed location in the src VM RAM.
+ *
+ * Then watch dst memory until the marker appears. This is
+ * proof that start_address -> MAGIC_OFFSET_BASE has been
+ * transferred.
+ *
+ * Finally we go back to the source and read a byte just
+ * before the marker until we see it flip in value. This
+ * is proof that start_address -> MAGIC_OFFSET_BASE
+ * is now dirty again.
+ *
+ * IOW, we're guaranteed at least a 2nd migration pass
+ * at this point.
+ *
+ * We can now let migration run at full speed to finish
+ * the test.
+ */
+static void migrate_prepare_for_dirty_mem(QTestState *from)
+{
+ /*
+ * The guest workflow iterates from start_address to
+ * end_address, writing 1 byte every TEST_MEM_PAGE_SIZE
+ * bytes.
+ *
+ * IOW, if we write to mem at a point which is NOT
+ * a multiple of TEST_MEM_PAGE_SIZE, our write won't
+ * conflict with the migration workflow.
+ *
+ * We put in a marker here, that we'll use to determine
+ * when the data has been transferred to the dst.
+ */
+ qtest_writeq(from, start_address + MAGIC_OFFSET, MAGIC_MARKER);
+}
+
+static void migrate_wait_for_dirty_mem(QTestState *from,
+ QTestState *to)
+{
+ uint64_t watch_address = start_address + MAGIC_OFFSET_BASE;
+ uint64_t marker_address = start_address + MAGIC_OFFSET;
+ uint8_t watch_byte;
+
+ /*
+ * Wait for the MAGIC_MARKER to get transferred, as an
+ * indicator that a migration pass has made some known
+ * amount of progress.
+ */
+ do {
+ usleep(1000 * 10);
+ } while (qtest_readq(to, marker_address) != MAGIC_MARKER);
+
+ /*
+ * Now ensure that already transferred bytes are
+ * dirty again from the guest workload. Note the
+ * guest byte value will wrap around and by chance
+ * match the original watch_byte. This is harmless
+ * as we'll eventually see a different value if we
+ * keep watching
+ */
+ watch_byte = qtest_readb(from, watch_address);
+ do {
+ usleep(1000 * 10);
+ } while (qtest_readb(from, watch_address) == watch_byte);
+}
+
+
static void migrate_pause(QTestState *who)
{
qtest_qmp_assert_success(who, "{ 'execute': 'migrate-pause' }");
MIG_TEST_FAIL_DEST_QUIT_ERR,
} result;
- /* Optional: set number of migration passes to wait for, if live==true */
+ /*
+ * Optional: set number of migration passes to wait for, if live==true.
+ * If zero, then merely wait for a few MB of dirty data
+ */
unsigned int iterations;
/*
migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
+
/* Wait for the first serial output from the source */
wait_for_serial("src_serial");
migrate_qmp(from, uri, "{}");
- wait_for_migration_pass(from);
+ migrate_wait_for_dirty_mem(from, to);
*from_ptr = from;
*to_ptr = to;
}
if (args->live) {
- /*
- * Testing live migration, we want to ensure that some
- * memory is re-dirtied after being transferred, so that
- * we exercise logic for dirty page handling. We achieve
- * this with a ridiculosly low bandwidth that guarantees
- * non-convergance.
- */
migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
} else {
/*
* Testing non-live migration, we allow it to run at
}
} else {
if (args->live) {
- if (args->iterations) {
- while (args->iterations--) {
- wait_for_migration_pass(from);
- }
- } else {
+ /*
+ * For initial iteration(s) we must do a full pass,
+ * but for the final iteration, we need only wait
+ * for some dirty mem before switching to converge.
+ */
+ while (args->iterations > 1) {
wait_for_migration_pass(from);
+ args->iterations--;
}
+ migrate_wait_for_dirty_mem(from, to);
migrate_ensure_converge(from);
return;
}
+ migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
+
migrate_set_capability(from, "x-ignore-shared", true);
migrate_set_capability(to, "x-ignore-shared", true);
migrate_qmp(from, uri, "{}");
- wait_for_migration_pass(from);
+ migrate_wait_for_dirty_mem(from, to);
if (!got_src_stop) {
qtest_qmp_eventwait(from, "STOP");
}
migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
migrate_set_parameter_int(from, "multifd-channels", 16);
migrate_set_parameter_int(to, "multifd-channels", 16);
migrate_qmp(from, uri, "{}");
- wait_for_migration_pass(from);
+ migrate_wait_for_dirty_mem(from, to);
migrate_cancel(from);
wait_for_migration_status(from, "cancelled", NULL);
- migrate_ensure_converge(from);
+ migrate_ensure_non_converge(from);
migrate_qmp(from, uri, "{}");
- wait_for_migration_pass(from);
+ migrate_wait_for_dirty_mem(from, to2);
+
+ migrate_ensure_converge(from);
if (!got_src_stop) {
qtest_qmp_eventwait(from, "STOP");
return qts;
}
-static void test_x86_memdev_resp(QObject *res)
+static void test_x86_memdev_resp(QObject *res, const char *mem_id, int size)
{
Visitor *v;
g_autoptr(MemdevList) memdevs = NULL;
g_assert(!memdevs->next);
memdev = memdevs->value;
- g_assert_cmpstr(memdev->id, ==, "ram");
- g_assert_cmpint(memdev->size, ==, 200 * MiB);
+ g_assert_cmpstr(memdev->id, ==, mem_id);
+ g_assert_cmpint(memdev->size, ==, size * MiB);
visit_free(v);
}
qts = qtest_init_with_config(cfgdata);
/* Test valid command */
resp = qtest_qmp(qts, "{ 'execute': 'query-memdev' }");
- test_x86_memdev_resp(qdict_get(resp, "return"));
+ test_x86_memdev_resp(qdict_get(resp, "return"), "ram", 200);
qobject_unref(resp);
qtest_quit(qts);
qtest_quit(qts);
}
+#if defined(CONFIG_POSIX) && defined(CONFIG_SLIRP)
+
+static char *make_temp_img(const char *template, const char *format, int size)
+{
+ GError *error = NULL;
+ char *temp_name;
+ int fd;
+
+ /* Create a temporary image name */
+ fd = g_file_open_tmp(template, &temp_name, &error);
+ if (fd == -1) {
+ fprintf(stderr, "unable to create file: %s\n", error->message);
+ g_error_free(error);
+ return NULL;
+ }
+ close(fd);
+
+ if (!mkimg(temp_name, format, size)) {
+ fprintf(stderr, "qemu-img failed to create %s\n", temp_name);
+ g_free(temp_name);
+ return NULL;
+ }
+
+ return temp_name;
+}
+
+struct device {
+ const char *name;
+ const char *type;
+};
+
+static void test_docs_q35(const char *input_file, struct device *devices)
+{
+ QTestState *qts;
+ QDict *resp;
+ QObject *qobj;
+ int ret, i;
+ g_autofree char *cfg_file = NULL, *sedcmd = NULL;
+ g_autofree char *hd_file = NULL, *cd_file = NULL;
+
+ /* Check that all the devices are available in the QEMU binary */
+ for (i = 0; devices[i].name; i++) {
+ if (!qtest_has_device(devices[i].type)) {
+ g_test_skip("one of the required devices is not available");
+ return;
+ }
+ }
+
+ hd_file = make_temp_img("qtest_disk_XXXXXX.qcow2", "qcow2", 1);
+ cd_file = make_temp_img("qtest_cdrom_XXXXXX.iso", "raw", 1);
+ if (!hd_file || !cd_file) {
+ g_test_skip("could not create disk images");
+ goto cleanup;
+ }
+
+ /* Create a temporary config file where we replace the disk image names */
+ ret = g_file_open_tmp("q35-emulated-XXXXXX.cfg", &cfg_file, NULL);
+ if (ret == -1) {
+ g_test_skip("could not create temporary config file");
+ goto cleanup;
+ }
+ close(ret);
+
+ sedcmd = g_strdup_printf("sed -e 's,guest.qcow2,%s,' -e 's,install.iso,%s,'"
+ " %s %s > '%s'",
+ hd_file, cd_file,
+ !qtest_has_accel("kvm") ? "-e '/accel/d'" : "",
+ input_file, cfg_file);
+ ret = system(sedcmd);
+ if (ret) {
+ g_test_skip("could not modify temporary config file");
+ goto cleanup;
+ }
+
+ qts = qtest_initf("-machine none -nodefaults -readconfig %s", cfg_file);
+
+ /* Check memory size */
+ resp = qtest_qmp(qts, "{ 'execute': 'query-memdev' }");
+ test_x86_memdev_resp(qdict_get(resp, "return"), "pc.ram", 1024);
+ qobject_unref(resp);
+
+ resp = qtest_qmp(qts, "{ 'execute': 'qom-list',"
+ " 'arguments': {'path': '/machine/peripheral' }}");
+ qobj = qdict_get(resp, "return");
+
+ /* Check that all the devices have been created */
+ for (i = 0; devices[i].name; i++) {
+ test_object_available(qobj, devices[i].name, devices[i].type);
+ }
+
+ qobject_unref(resp);
+
+ qtest_quit(qts);
+
+cleanup:
+ if (hd_file) {
+ unlink(hd_file);
+ }
+ if (cd_file) {
+ unlink(cd_file);
+ }
+ if (cfg_file) {
+ unlink(cfg_file);
+ }
+}
+
+static void test_docs_q35_emulated(void)
+{
+ struct device devices[] = {
+ { "ich9-pcie-port-1", "ioh3420" },
+ { "ich9-pcie-port-2", "ioh3420" },
+ { "ich9-pcie-port-3", "ioh3420" },
+ { "ich9-pcie-port-4", "ioh3420" },
+ { "ich9-pci-bridge", "i82801b11-bridge" },
+ { "ich9-ehci-1", "ich9-usb-ehci1" },
+ { "ich9-ehci-2", "ich9-usb-ehci2" },
+ { "ich9-uhci-1", "ich9-usb-uhci1" },
+ { "ich9-uhci-2", "ich9-usb-uhci2" },
+ { "ich9-uhci-3", "ich9-usb-uhci3" },
+ { "ich9-uhci-4", "ich9-usb-uhci4" },
+ { "ich9-uhci-5", "ich9-usb-uhci5" },
+ { "ich9-uhci-6", "ich9-usb-uhci6" },
+ { "sata-disk", "ide-hd" },
+ { "sata-optical-disk", "ide-cd" },
+ { "net", "e1000" },
+ { "video", "VGA" },
+ { "ich9-hda-audio", "ich9-intel-hda" },
+ { "ich9-hda-duplex", "hda-duplex" },
+ { NULL, NULL }
+ };
+
+ test_docs_q35("docs/config/q35-emulated.cfg", devices);
+}
+
+static void test_docs_q35_virtio_graphical(void)
+{
+ struct device devices[] = {
+ { "pcie.1", "pcie-root-port" },
+ { "pcie.2", "pcie-root-port" },
+ { "pcie.3", "pcie-root-port" },
+ { "pcie.4", "pcie-root-port" },
+ { "pcie.5", "pcie-root-port" },
+ { "pcie.6", "pcie-root-port" },
+ { "pcie.7", "pcie-root-port" },
+ { "pcie.8", "pcie-root-port" },
+ { "scsi", "virtio-scsi-pci" },
+ { "scsi-disk", "scsi-hd" },
+ { "scsi-optical-disk", "scsi-cd" },
+ { "net", "virtio-net-pci" },
+ { "usb", "nec-usb-xhci" },
+ { "tablet", "usb-tablet" },
+ { "video", "qxl-vga" },
+ { "sound", "ich9-intel-hda" },
+ { "duplex", "hda-duplex" },
+ { NULL, NULL }
+ };
+
+ test_docs_q35("docs/config/q35-virtio-graphical.cfg", devices);
+}
+
+static void test_docs_q35_virtio_serial(void)
+{
+ struct device devices[] = {
+ { "pcie.1", "pcie-root-port" },
+ { "pcie.2", "pcie-root-port" },
+ { "pcie.3", "pcie-root-port" },
+ { "pcie.4", "pcie-root-port" },
+ { "pcie.5", "pcie-root-port" },
+ { "pcie.6", "pcie-root-port" },
+ { "pcie.7", "pcie-root-port" },
+ { "pcie.8", "pcie-root-port" },
+ { "scsi", "virtio-scsi-pci" },
+ { "scsi-disk", "scsi-hd" },
+ { "scsi-optical-disk", "scsi-cd" },
+ { "net", "virtio-net-pci" },
+ { NULL, NULL }
+ };
+
+ test_docs_q35("docs/config/q35-virtio-serial.cfg", devices);
+}
+
+#endif /* CONFIG_POSIX && CONFIG_SLIRP */
+
int main(int argc, char *argv[])
{
const char *arch;
qtest_has_device("ich9-usb-uhci1")) {
qtest_add_func("readconfig/x86/ich9-ehci-uhci", test_docs_config_ich9);
}
+#if defined(CONFIG_POSIX) && defined(CONFIG_SLIRP)
+ qtest_add_func("readconfig/x86/q35-emulated", test_docs_q35_emulated);
+ qtest_add_func("readconfig/x86/q35-virtio-graphical",
+ test_docs_q35_virtio_graphical);
+ if (g_test_slow()) {
+ /*
+ * q35-virtio-serial.cfg is a subset of q35-virtio-graphical.cfg,
+ * so we can skip the test in quick mode
+ */
+ qtest_add_func("readconfig/x86/q35-virtio-serial",
+ test_docs_q35_virtio_serial);
+ }
+#endif
}
#if defined(CONFIG_SPICE) && !defined(__FreeBSD__)
qtest_add_func("readconfig/spice", test_spice);
--- /dev/null
+/*
+ * QTest testcase for the watchdog timer of HiFive 1 rev b.
+ *
+ * Copyright (c) 2023 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "qemu/bitops.h"
+#include "libqtest.h"
+#include "hw/registerfields.h"
+#include "hw/misc/sifive_e_aon.h"
+
+FIELD(AON_WDT_WDOGCFG, SCALE, 0, 4)
+FIELD(AON_WDT_WDOGCFG, RSVD0, 4, 4)
+FIELD(AON_WDT_WDOGCFG, RSTEN, 8, 1)
+FIELD(AON_WDT_WDOGCFG, ZEROCMP, 9, 1)
+FIELD(AON_WDT_WDOGCFG, RSVD1, 10, 2)
+FIELD(AON_WDT_WDOGCFG, EN_ALWAYS, 12, 1)
+FIELD(AON_WDT_WDOGCFG, EN_CORE_AWAKE, 13, 1)
+FIELD(AON_WDT_WDOGCFG, RSVD2, 14, 14)
+FIELD(AON_WDT_WDOGCFG, IP0, 28, 1)
+FIELD(AON_WDT_WDOGCFG, RSVD3, 29, 3)
+
+#define WDOG_BASE (0x10000000)
+#define WDOGCFG (0x0)
+#define WDOGCOUNT (0x8)
+#define WDOGS (0x10)
+#define WDOGFEED (0x18)
+#define WDOGKEY (0x1c)
+#define WDOGCMP0 (0x20)
+
+#define SIFIVE_E_AON_WDOGKEY (0x51F15E)
+#define SIFIVE_E_AON_WDOGFEED (0xD09F00D)
+#define SIFIVE_E_LFCLK_DEFAULT_FREQ (32768)
+
+static void test_init(QTestState *qts)
+{
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, 0);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, 0);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCMP0, 0xBEEF);
+}
+
+static void test_wdogcount(void)
+{
+ uint64_t tmp;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ tmp = qtest_readl(qts, WDOG_BASE + WDOGCOUNT);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, 0xBEEF);
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCOUNT) == tmp);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, 0xBEEF);
+ g_assert(0xBEEF == qtest_readl(qts, WDOG_BASE + WDOGCOUNT));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, 0xAAAAAAAA);
+ g_assert(0x2AAAAAAA == qtest_readl(qts, WDOG_BASE + WDOGCOUNT));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGFEED, 0xAAAAAAAA);
+ g_assert(0x2AAAAAAA == qtest_readl(qts, WDOG_BASE + WDOGCOUNT));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGFEED, SIFIVE_E_AON_WDOGFEED);
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGCOUNT));
+
+ qtest_quit(qts);
+}
+
+static void test_wdogcfg(void)
+{
+ uint32_t tmp_cfg;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ tmp_cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, 0xFFFFFFFF);
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCFG) == tmp_cfg);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, 0xFFFFFFFF);
+ g_assert(0xFFFFFFFF == qtest_readl(qts, WDOG_BASE + WDOGCFG));
+
+ tmp_cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(15 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(1 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(1 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(1 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(1 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(1 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, 0);
+ tmp_cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(0 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(0 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(0 == FIELD_EX32(tmp_cfg, AON_WDT_WDOGCFG, IP0));
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGCFG));
+
+ qtest_quit(qts);
+}
+
+static void test_wdogcmp0(void)
+{
+ uint32_t tmp;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ tmp = qtest_readl(qts, WDOG_BASE + WDOGCMP0);
+ qtest_writel(qts, WDOG_BASE + WDOGCMP0, 0xBEEF);
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCMP0) == tmp);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCMP0, 0xBEEF);
+ g_assert(0xBEEF == qtest_readl(qts, WDOG_BASE + WDOGCMP0));
+
+ qtest_quit(qts);
+}
+
+static void test_wdogkey(void)
+{
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGKEY));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, 0xFFFF);
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGKEY));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ g_assert(1 == qtest_readl(qts, WDOG_BASE + WDOGKEY));
+
+ qtest_writel(qts, WDOG_BASE + WDOGFEED, 0xAAAAAAAA);
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGKEY));
+
+ qtest_quit(qts);
+}
+
+static void test_wdogfeed(void)
+{
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGFEED));
+
+ qtest_writel(qts, WDOG_BASE + WDOGFEED, 0xFFFF);
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGFEED));
+
+ qtest_quit(qts);
+}
+
+static void test_scaled_wdogs(void)
+{
+ uint32_t cfg;
+ uint32_t fake_count = 0x12345678;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, fake_count);
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCOUNT) == fake_count);
+ g_assert((uint16_t)qtest_readl(qts, WDOG_BASE + WDOGS) ==
+ (uint16_t)fake_count);
+
+ for (int i = 0; i < 16; i++) {
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, i);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+ g_assert((uint16_t)qtest_readl(qts, WDOG_BASE + WDOGS) ==
+ (uint16_t)(fake_count >>
+ FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE)));
+ }
+
+ qtest_quit(qts);
+}
+
+static void test_watchdog(void)
+{
+ uint32_t cfg;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCMP0, SIFIVE_E_LFCLK_DEFAULT_FREQ);
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 0);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS, 1);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+
+ qtest_clock_step(qts, NANOSECONDS_PER_SECOND);
+
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCOUNT) ==
+ SIFIVE_E_LFCLK_DEFAULT_FREQ);
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGS) ==
+ SIFIVE_E_LFCLK_DEFAULT_FREQ);
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, 0);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_quit(qts);
+}
+
+static void test_scaled_watchdog(void)
+{
+ uint32_t cfg;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCMP0, 10);
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 15);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS, 1);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+
+ qtest_clock_step(qts, NANOSECONDS_PER_SECOND * 10);
+
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCOUNT) ==
+ SIFIVE_E_LFCLK_DEFAULT_FREQ * 10);
+
+ g_assert(10 == qtest_readl(qts, WDOG_BASE + WDOGS));
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(15 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, 0);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_quit(qts);
+}
+
+static void test_periodic_int(void)
+{
+ uint32_t cfg;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCMP0, SIFIVE_E_LFCLK_DEFAULT_FREQ);
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 0);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, ZEROCMP, 1);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS, 1);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+
+ qtest_clock_step(qts, NANOSECONDS_PER_SECOND);
+
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGCOUNT));
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGS));
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_clock_step(qts, NANOSECONDS_PER_SECOND);
+
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGCOUNT));
+ g_assert(0 == qtest_readl(qts, WDOG_BASE + WDOGS));
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_quit(qts);
+}
+
+static void test_enable_disable(void)
+{
+ uint32_t cfg;
+ QTestState *qts = qtest_init("-machine sifive_e");
+
+ test_init(qts);
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCMP0, 10);
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 15);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS, 1);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+
+ qtest_clock_step(qts, NANOSECONDS_PER_SECOND * 2);
+
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCOUNT) ==
+ SIFIVE_E_LFCLK_DEFAULT_FREQ * 2);
+ g_assert(2 == qtest_readl(qts, WDOG_BASE + WDOGS));
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(15 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS, 0);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+
+ qtest_clock_step(qts, NANOSECONDS_PER_SECOND * 8);
+
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCOUNT) ==
+ SIFIVE_E_LFCLK_DEFAULT_FREQ * 2);
+ g_assert(2 == qtest_readl(qts, WDOG_BASE + WDOGS));
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(15 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS, 1);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+
+ qtest_clock_step(qts, NANOSECONDS_PER_SECOND * 8);
+
+ g_assert(qtest_readl(qts, WDOG_BASE + WDOGCOUNT) ==
+ SIFIVE_E_LFCLK_DEFAULT_FREQ * 10);
+ g_assert(10 == qtest_readl(qts, WDOG_BASE + WDOGS));
+
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(15 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, SCALE));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, RSTEN));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, ZEROCMP));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_ALWAYS));
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, EN_CORE_AWAKE));
+ g_assert(1 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCOUNT, 0);
+ cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0);
+ qtest_writel(qts, WDOG_BASE + WDOGKEY, SIFIVE_E_AON_WDOGKEY);
+ qtest_writel(qts, WDOG_BASE + WDOGCFG, cfg);
+ cfg = qtest_readl(qts, WDOG_BASE + WDOGCFG);
+ g_assert(0 == FIELD_EX32(cfg, AON_WDT_WDOGCFG, IP0));
+
+ qtest_quit(qts);
+}
+
+int main(int argc, char *argv[])
+{
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/sifive-e-aon-watchdog-test/wdogcount",
+ test_wdogcount);
+ qtest_add_func("/sifive-e-aon-watchdog-test/wdogcfg",
+ test_wdogcfg);
+ qtest_add_func("/sifive-e-aon-watchdog-test/wdogcmp0",
+ test_wdogcmp0);
+ qtest_add_func("/sifive-e-aon-watchdog-test/wdogkey",
+ test_wdogkey);
+ qtest_add_func("/sifive-e-aon-watchdog-test/wdogfeed",
+ test_wdogfeed);
+ qtest_add_func("/sifive-e-aon-watchdog-test/scaled_wdogs",
+ test_scaled_wdogs);
+ qtest_add_func("/sifive-e-aon-watchdog-test/watchdog",
+ test_watchdog);
+ qtest_add_func("/sifive-e-aon-watchdog-test/scaled_watchdog",
+ test_scaled_watchdog);
+ qtest_add_func("/sifive-e-aon-watchdog-test/periodic_int",
+ test_periodic_int);
+ qtest_add_func("/sifive-e-aon-watchdog-test/enable_disable",
+ test_enable_disable);
+ return g_test_run();
+}
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_gpio.h"
+#include "standard-headers/linux/virtio_scmi.h"
#ifdef CONFIG_LINUX
#include <sys/vfs.h>
enum {
VHOST_USER_NET,
VHOST_USER_GPIO,
+ VHOST_USER_SCMI,
};
typedef struct TestServer {
"vhost-user-gpio", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_gpio_test);
+
+static uint64_t vu_scmi_get_features(TestServer *s)
+{
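+    /* Advertise VIRTIO 1.0, SCMI P2A channels and protocol features. */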
+ return 0x1ULL << VIRTIO_F_VERSION_1 |
+ 0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS |
+ 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
+}
+
+static void vu_scmi_get_protocol_features(TestServer *s, CharBackend *chr,
+ VhostUserMsg *msg)
+{
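+    /* Send the supported protocol features (just MQ here) back to QEMU. */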
+ msg->flags |= VHOST_USER_REPLY_MASK;
+    msg->size = sizeof(msg->payload.u64);
+ msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ;
+
+ qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
+}
+
+static struct vhost_user_ops g_vu_scmi_ops = {
+ .type = VHOST_USER_SCMI,
+
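+    /* Reuse the gpio/net helpers; only feature negotiation is SCMI-specific. */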
+ .append_opts = append_vhost_gpio_opts,
+
+ .get_features = vu_scmi_get_features,
+ .set_features = vu_net_set_features,
+ .get_protocol_features = vu_scmi_get_protocol_features,
+};
+
+static void register_vhost_scmi_test(void)
+{
+ QOSGraphTestOptions opts = {
+ .before = vhost_user_test_setup,
+ .subprocess = true,
+ .arg = &g_vu_scmi_ops,
+ };
+
+ qemu_add_opts(&qemu_chardev_opts);
+
+ qos_add_test("scmi/read-guest-mem/memfile",
+ "vhost-user-scmi", test_read_guest_mem, &opts);
+}
+libqos_init(register_vhost_scmi_test);
TESTS += test-aes
run-test-aes: QEMU_OPTS += -cpu rv64,zk=on
+
+# Test for fcvtmod
+TESTS += test-fcvtmod
+test-fcvtmod: CFLAGS += -march=rv64imafdc
+test-fcvtmod: LDFLAGS += -static
+run-test-fcvtmod: QEMU_OPTS += -cpu rv64,d=true,Zfa=true
--- /dev/null
+#include <stdio.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#define FFLAG_NX_SHIFT 0 /* inexact */
+#define FFLAG_UF_SHIFT 1 /* underflow */
+#define FFLAG_OF_SHIFT 2 /* overflow */
+#define FFLAG_DZ_SHIFT 3 /* divide by zero */
+#define FFLAG_NV_SHIFT 4 /* invalid operation */
+
+#define FFLAG_NV (1UL << FFLAG_NV_SHIFT)
+#define FFLAG_DZ (1UL << FFLAG_DZ_SHIFT)
+#define FFLAG_OF (1UL << FFLAG_OF_SHIFT)
+#define FFLAG_UF (1UL << FFLAG_UF_SHIFT)
+#define FFLAG_NX (1UL << FFLAG_NX_SHIFT)
+
+typedef struct fp64_fcvt_fcvtmod_testcase {
+    const char *name;
+ union {
+ uint64_t inp_lu;
+ double inp_lf;
+ };
+ uint64_t exp_fcvt;
+ uint8_t exp_fcvt_fflags;
+ uint64_t exp_fcvtmod;
+ uint8_t exp_fcvtmod_fflags;
+} fp64_fcvt_fcvtmod_testcase_t;
+
+void print_fflags(uint8_t fflags)
+{
+ int set = 0;
+
+ if (fflags == 0) {
+ printf("-");
+ return;
+ }
+
+ if (fflags & FFLAG_NV) {
+ printf("%sFFLAG_NV", set ? " | " : "");
+ set = 1;
+ }
+ if (fflags & FFLAG_DZ) {
+ printf("%sFFLAG_DZ", set ? " | " : "");
+ set = 1;
+ }
+ if (fflags & FFLAG_OF) {
+ printf("%sFFLAG_OF", set ? " | " : "");
+ set = 1;
+ }
+ if (fflags & FFLAG_UF) {
+ printf("%sFFLAG_UF", set ? " | " : "");
+ set = 1;
+ }
+ if (fflags & FFLAG_NX) {
+ printf("%sFFLAG_NX", set ? " | " : "");
+ set = 1;
+ }
+}
+
+/* Clear all FP flags. */
+static inline void clear_fflags(void)
+{
+ __asm__ __volatile__("fsflags zero");
+}
+
+/* Read all FP flags. */
+static inline uint8_t get_fflags(void)
+{
+ uint64_t v;
+ __asm__ __volatile__("frflags %0" : "=r"(v));
+ return (uint8_t)v;
+}
+
+/* Move the input value (without conversion) into an FP register. */
+static inline double do_fmv_d_x(uint64_t inp)
+{
+ double fpr;
+ __asm__ __volatile__("fmv.d.x %0, %1" : "=f"(fpr) : "r"(inp));
+ return fpr;
+}
+
+static inline uint64_t do_fcvt_w_d(uint64_t inp, uint8_t *fflags)
+{
+ uint64_t ret;
+ double fpr = do_fmv_d_x(inp);
+
+ clear_fflags();
+
+ __asm__ __volatile__("fcvt.w.d %0, %1, rtz" : "=r"(ret) : "f"(fpr));
+
+ *fflags = get_fflags();
+
+ return ret;
+}
+
+static inline uint64_t do_fcvtmod_w_d(uint64_t inp, uint8_t *fflags)
+{
+ uint64_t ret;
+ double fpr = do_fmv_d_x(inp);
+
+ clear_fflags();
+
+ /* fcvtmod.w.d rd, rs1, rtz = 1100001 01000 rs1 001 rd 1010011 */
+    __asm__ __volatile__(".insn r 0x53, 0x1, 0x61, %0, %1, f8"
+                         : "=r"(ret) : "f"(fpr));
+
+ *fflags = get_fflags();
+
+ return ret;
+}
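+
+/*
+ * Illustrative cross-check of the .insn encoding above (not used by the
+ * tests): packing funct7=0x61, rs2=8, funct3=1 (rtz) and opcode=0x53 gives
+ * e.g. 0xc2851553 for fcvtmod.w.d a0, fa0, rtz (rd=x10, rs1=f10).
+ */
+static inline uint32_t encode_fcvtmod_w_d(uint32_t rd, uint32_t rs1)
+{
+    return (0x61u << 25) | (8u << 20) | (rs1 << 15) |
+           (0x1u << 12) | (rd << 7) | 0x53u;
+}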
+
+static const fp64_fcvt_fcvtmod_testcase_t tests[] = {
+ /* Zero (exp=0, frac=0) */
+ { .name = "+0.0",
+ .inp_lf = 0x0p0,
+ .exp_fcvt = 0x0000000000000000,
+ .exp_fcvt_fflags = 0,
+ .exp_fcvtmod = 0x0000000000000000,
+ .exp_fcvtmod_fflags = 0 },
+ { .name = "-0.0",
+ .inp_lf = -0x0p0,
+ .exp_fcvt = 0x0000000000000000,
+ .exp_fcvt_fflags = 0,
+ .exp_fcvtmod = 0x0000000000000000,
+ .exp_fcvtmod_fflags = 0 },
+
+ /* Subnormal: exp=0 frac!=0 */
+ { .name = "Subnormal frac=1",
+ .inp_lu = 0x0000000000000001,
+ .exp_fcvt = 0x0000000000000000,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+ { .name = "Subnormal frac=0xf..f",
+ .inp_lu = 0x0000ffffffffffff,
+ .exp_fcvt = 0x0000000000000000,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+ { .name = "Neg subnormal frac=1",
+      .inp_lu = 0x8000000000000001,
+ .exp_fcvt = 0x0000000000000000,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+ { .name = "Neg subnormal frac=0xf..f",
+ .inp_lu = 0x8000ffffffffffff,
+ .exp_fcvt = 0x0000000000000000,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+
+ /* Infinity: exp=0x7ff, frac=0 */
+ { .name = "+INF",
+ .inp_lu = 0x7ff0000000000000,
+ .exp_fcvt = 0x000000007fffffff, /* int32 max */
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NV },
+ { .name = "-INF",
+ .inp_lu = 0xfff0000000000000,
+ .exp_fcvt = 0xffffffff80000000, /* int32 min */
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NV },
+
+ /* NaN: exp=7ff, frac!=0 */
+ { .name = "canonical NaN",
+ .inp_lu = 0x7ff8000000000000,
+ .exp_fcvt = 0x000000007fffffff, /* int32 max */
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NV },
+ { .name = "non-canonical NaN",
+ .inp_lu = 0x7ff8000000100000,
+      .exp_fcvt = 0x000000007fffffff, /* int32 max */
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NV },
+
+ /* Normal numbers: exp!=0, exp!=7ff */
+ { .name = "+smallest normal value",
+ .inp_lu = 0x0010000000000000,
+ .exp_fcvt = 0,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+ { .name = "-smallest normal value",
+ .inp_lu = 0x8010000000000000,
+ .exp_fcvt = 0,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+
+ { .name = "+0.5",
+ .inp_lf = 0x1p-1,
+ .exp_fcvt = 0,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+ { .name = "-0.5",
+ .inp_lf = -0x1p-1,
+ .exp_fcvt = 0,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+
+ { .name = "+value just below 1.0",
+ .inp_lu = 0x3fefffffffffffff,
+ .exp_fcvt = 0,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+ { .name = "-value just above -1.0",
+ .inp_lu = 0xbfefffffffffffff,
+ .exp_fcvt = 0,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+
+ { .name = "+1.0",
+ .inp_lf = 0x1p0,
+ .exp_fcvt = 0x0000000000000001,
+ .exp_fcvt_fflags = 0,
+ .exp_fcvtmod = 0x0000000000000001,
+ .exp_fcvtmod_fflags = 0 },
+ { .name = "-1.0",
+ .inp_lf = -0x1p0,
+ .exp_fcvt = 0xffffffffffffffff,
+ .exp_fcvt_fflags = 0,
+ .exp_fcvtmod = 0xffffffffffffffff,
+ .exp_fcvtmod_fflags = 0 },
+
+ { .name = "+1.5",
+ .inp_lu = 0x3ff8000000000000,
+ .exp_fcvt = 1,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 1,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+ { .name = "-1.5",
+ .inp_lu = 0xbff8000000000000,
+ .exp_fcvt = 0xffffffffffffffff,
+ .exp_fcvt_fflags = FFLAG_NX,
+ .exp_fcvtmod = 0xffffffffffffffff,
+ .exp_fcvtmod_fflags = FFLAG_NX },
+
+ { .name = "+max int32 (2147483647)",
+ .inp_lu = 0x41dfffffffc00000,
+ .exp_fcvt = 0x000000007fffffff,
+ .exp_fcvt_fflags = 0,
+ .exp_fcvtmod = 0x000000007fffffff,
+ .exp_fcvtmod_fflags = 0 },
+ { .name = "+max int32 +1 (2147483648)",
+ .inp_lf = 0x1p31,
+ .exp_fcvt = 0x000000007fffffff,
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = (uint64_t)-2147483648l, /* int32 min */
+ .exp_fcvtmod_fflags = FFLAG_NV },
+ { .name = "+max int32 +2 (2147483649)",
+ .inp_lu = 0x41e0000000200000,
+ .exp_fcvt = 0x000000007fffffff,
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = (uint64_t)-2147483647l, /* int32 min +1 */
+ .exp_fcvtmod_fflags = FFLAG_NV },
+
+ { .name = "-max int32 (-2147483648)",
+ .inp_lf = -0x1p31,
+ .exp_fcvt = 0xffffffff80000000,
+ .exp_fcvt_fflags = 0,
+ .exp_fcvtmod = 0xffffffff80000000,
+ .exp_fcvtmod_fflags = 0 },
+ { .name = "-max int32 -1 (-2147483649)",
+ .inp_lf = -0x1.00000002p+31,
+ .exp_fcvt = 0xffffffff80000000,
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = 2147483647, /* int32 max */
+ .exp_fcvtmod_fflags = FFLAG_NV },
+ { .name = "-max int32 -2 (-2147483650)",
+ .inp_lf = -0x1.00000004p+31,
+ .exp_fcvt = 0xffffffff80000000,
+ .exp_fcvt_fflags = FFLAG_NV,
+ .exp_fcvtmod = 2147483646, /* int32 max -1 */
+ .exp_fcvtmod_fflags = FFLAG_NV },
+};
+
+int run_fcvtmod_tests(void)
+{
+ uint64_t act_fcvt;
+ uint8_t act_fcvt_fflags;
+ uint64_t act_fcvtmod;
+ uint8_t act_fcvtmod_fflags;
+
+ for (size_t i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) {
+ const fp64_fcvt_fcvtmod_testcase_t *t = &tests[i];
+
+ act_fcvt = do_fcvt_w_d(t->inp_lu, &act_fcvt_fflags);
+ int fcvt_correct = act_fcvt == t->exp_fcvt &&
+ act_fcvt_fflags == t->exp_fcvt_fflags;
+ act_fcvtmod = do_fcvtmod_w_d(t->inp_lu, &act_fcvtmod_fflags);
+ int fcvtmod_correct = act_fcvtmod == t->exp_fcvtmod &&
+ act_fcvtmod_fflags == t->exp_fcvtmod_fflags;
+
+ if (fcvt_correct && fcvtmod_correct) {
+ continue;
+ }
+
+ printf("Test %zu (%s) failed!\n", i, t->name);
+
+ double fpr = do_fmv_d_x(t->inp_lu);
+ printf("inp_lu: 0x%016lx == %lf\n", t->inp_lu, fpr);
+ printf("inp_lf: %lf\n", t->inp_lf);
+
+ uint32_t sign = (t->inp_lu >> 63);
+ uint32_t exp = (uint32_t)(t->inp_lu >> 52) & 0x7ff;
+ uint64_t frac = t->inp_lu & 0xfffffffffffffull; /* significand */
+ int true_exp = exp - 1023;
+ int shift = true_exp - 52;
+ uint64_t true_frac = frac | 1ull << 52;
+
+ printf("sign=%d, exp=0x%03x, frac=0x%012lx\n", sign, exp, frac);
+        printf("true_exp=%d, shift=%d, true_frac=0x%016lx\n",
+               true_exp, shift, true_frac);
+
+ if (!fcvt_correct) {
+ printf("act_fcvt: 0x%016lx == %li\n", act_fcvt, act_fcvt);
+ printf("exp_fcvt: 0x%016lx == %li\n", t->exp_fcvt, t->exp_fcvt);
+ printf("act_fcvt_fflags: "); print_fflags(act_fcvt_fflags); printf("\n");
+ printf("exp_fcvt_fflags: "); print_fflags(t->exp_fcvt_fflags); printf("\n");
+ }
+
+ if (!fcvtmod_correct) {
+ printf("act_fcvtmod: 0x%016lx == %li\n", act_fcvtmod, act_fcvtmod);
+ printf("exp_fcvtmod: 0x%016lx == %li\n", t->exp_fcvtmod, t->exp_fcvtmod);
+ printf("act_fcvtmod_fflags: "); print_fflags(act_fcvtmod_fflags); printf("\n");
+ printf("exp_fcvtmod_fflags: "); print_fflags(t->exp_fcvtmod_fflags); printf("\n");
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return run_fcvtmod_tests();
+}
sam \
lpsw \
lpswe-early \
+ lra \
ssm-early \
stosm-early \
unaligned-lowcore
TESTS+=ex-relative-long
TESTS+=ex-branch
TESTS+=mxdb
+TESTS+=epsw
+TESTS+=larl
+TESTS+=mdeb
cdsg: CFLAGS+=-pthread
cdsg: LDFLAGS+=-pthread
--- /dev/null
+/*
+ * Test the EPSW instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+int main(void)
+{
+ unsigned long r1 = 0x1234567887654321UL, r2 = 0x8765432112345678UL;
+
+ asm("cr %[r1],%[r2]\n" /* cc = 1 */
+ "epsw %[r1],%[r2]"
+ : [r1] "+r" (r1), [r2] "+r" (r2) : : "cc");
+
+ /* Do not check the R and RI bits. */
+ r1 &= ~0x40000008UL;
+ assert(r1 == 0x1234567807051001UL);
+ assert(r2 == 0x8765432180000000UL);
+
+ return EXIT_SUCCESS;
+}
gdb.execute("si")
report("larl\t" in gdb.execute("x/i $pc", False, True), "insn #2")
gdb.execute("si")
- report("lghi\t" in gdb.execute("x/i $pc", False, True), "insn #3")
+ report("lgrl\t" in gdb.execute("x/i $pc", False, True), "insn #3")
gdb.execute("si")
report("svc\t" in gdb.execute("x/i $pc", False, True), "insn #4")
gdb.execute("si")
/* puts("Hello, World!"); */
lghi %r2,1
larl %r3,foo
-lghi %r4,foo_end-foo
+lgrl %r4,foo_len
svc 4
/* exit(0); */
.align 2
foo: .asciz "Hello, World!\n"
foo_end:
+.align 8
+foo_len: .quad foo_end-foo
--- /dev/null
+/*
+ * Test the LARL instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <stdlib.h>
+
+int main(void)
+{
+ long algfi = (long)main;
+ long larl;
+
+ /*
+ * The compiler may emit larl for the C addition, so compute the expected
+ * value using algfi.
+ */
+ asm("algfi %[r],0xd0000000" : [r] "+r" (algfi) : : "cc");
+ asm("larl %[r],main+0xd0000000" : [r] "=r" (larl));
+
+ return algfi == larl ? EXIT_SUCCESS : EXIT_FAILURE;
+}
--- /dev/null
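+/* Test the LRA instruction; expects an ASCE-type translation exception. */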
+ .org 0x200 /* lowcore padding */
+ .globl _start
+_start:
+ lgrl %r1,initial_r1
+ lra %r1,0(%r1)
+ cgrl %r1,expected_r1
+ jne 1f
+ lpswe success_psw
+1:
+ lpswe failure_psw
+ .align 8
+initial_r1:
+ .quad 0x8765432112345678
+expected_r1:
+ .quad 0x8765432180000038 /* ASCE type exception */
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
--- /dev/null
+/*
+ * Test the MDEB and MDEBR instructions.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+int main(void)
+{
+ union {
+ float f[2];
+ double d;
+ } a;
+ float b;
+
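+    /*
+     * MDEB multiplies the short BFP value in the leftmost word of the FP
+     * register (a.f[0] on big-endian s390x) by the short operand b, giving
+     * a long BFP product: 1.2345f * 6.789f ~= 8.381.
+     */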
+ a.f[0] = 1.2345;
+ a.f[1] = 999;
+ b = 6.789;
+ asm("mdeb %[a],%[b]" : [a] "+f" (a.d) : [b] "R" (b));
+ assert(a.d > 8.38 && a.d < 8.39);
+
+ a.f[0] = 1.2345;
+ a.f[1] = 999;
+ b = 6.789;
+ asm("mdebr %[a],%[b]" : [a] "+f" (a.d) : [b] "f" (b));
+ assert(a.d > 8.38 && a.d < 8.39);
+
+ return EXIT_SUCCESS;
+}
+#include <stdbool.h>
#include <stdint.h>
+#include <stdlib.h>
#include <string.h>
-
-static inline void mvcrl_8(const char *dst, const char *src)
+static void mvcrl(const char *dst, const char *src, size_t len)
{
+ register long r0 asm("r0") = len;
+
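+    /* MVCRL takes its operand length implicitly in the low bits of r0. */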
asm volatile (
- "llill %%r0, 8\n"
".insn sse, 0xE50A00000000, 0(%[dst]), 0(%[src])"
- : : [dst] "d" (dst), [src] "d" (src)
- : "r0", "memory");
+ : : [dst] "d" (dst), [src] "d" (src), "r" (r0)
+ : "memory");
}
-
-int main(int argc, char *argv[])
+static bool test(void)
{
const char *alpha = "abcdefghijklmnop";
/* array missing 'i' */
- char tstr[17] = "abcdefghjklmnop\0" ;
+ char tstr[17] = "abcdefghjklmnop\0";
/* mvcrl reference use: 'open a hole in an array' */
- mvcrl_8(tstr + 9, tstr + 8);
+ mvcrl(tstr + 9, tstr + 8, 8);
/* place missing 'i' */
tstr[8] = 'i';
- return strncmp(alpha, tstr, 16ul);
+ return strncmp(alpha, tstr, 16ul) == 0;
+}
+
+static bool test_bad_r0(void)
+{
+ char src[256] = { 0 };
+
+ /*
+ * PoP says: Bits 32-55 of general register 0 should contain zeros;
+ * otherwise, the program may not operate compatibly in the future.
+ *
+ * Try it anyway in order to check whether this would crash QEMU itself.
+ */
+ mvcrl(src, src, (size_t)-1);
+
+ return true;
+}
+
+int main(void)
+{
+ bool ok = true;
+
+ ok &= test();
+ ok &= test_bad_r0();
+
+ return ok ? EXIT_SUCCESS : EXIT_FAILURE;
}
fixture_tear_down(&fix, NULL);
}
+static void test_qga_allowedrpcs(gconstpointer data)
+{
+ TestFixture fix;
+ QDict *ret, *error;
+ const gchar *class, *desc;
+
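+    /* -a (allow-rpcs) restricts the agent to exactly the listed commands. */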
+ fixture_setup(&fix, "-a guest-ping,guest-get-time", NULL);
+
+ /* check allowed RPCs */
+ ret = qmp_fd(fix.fd, "{'execute': 'guest-ping'}");
+ qmp_assert_no_error(ret);
+ qobject_unref(ret);
+
+ ret = qmp_fd(fix.fd, "{'execute': 'guest-get-time'}");
+ qmp_assert_no_error(ret);
+ qobject_unref(ret);
+
+ /* check something else */
+ ret = qmp_fd(fix.fd, "{'execute': 'guest-get-fsinfo'}");
+ g_assert_nonnull(ret);
+ error = qdict_get_qdict(ret, "error");
+ class = qdict_get_try_str(error, "class");
+ desc = qdict_get_try_str(error, "desc");
+ g_assert_cmpstr(class, ==, "CommandNotFound");
+ g_assert_nonnull(g_strstr_len(desc, -1, "has been disabled"));
+ qobject_unref(ret);
+
+ fixture_tear_down(&fix, NULL);
+}
+
static void test_qga_config(gconstpointer data)
{
GError *error = NULL;
test_qga_fsfreeze_status);
g_test_add_data_func("/qga/blockedrpcs", NULL, test_qga_blockedrpcs);
+ g_test_add_data_func("/qga/allowedrpcs", NULL, test_qga_allowedrpcs);
g_test_add_data_func("/qga/config", NULL, test_qga_config);
g_test_add_data_func("/qga/guest-exec", &fix, test_qga_guest_exec);
g_test_add_data_func("/qga/guest-exec-separated", &fix,
import multiprocessing
import traceback
import shlex
+import json
from qemu.machine import QEMUMachine
from qemu.utils import get_info_usernet_hostfwd_port, kvm_available
stderr=self._stdout)
return os.path.join(cidir, "cloud-init.iso")
+ def get_qemu_packages_from_lcitool_json(self, json_path=None):
+        """Parse an lcitool variables JSON file and return its 'pkgs' list."""
+ if json_path is None:
+ json_path = os.path.join(
+ os.path.dirname(__file__), "generated", self.name + ".json"
+ )
+ with open(json_path, "r") as fh:
+ return json.load(fh)["pkgs"]
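+        # For the FreeBSD image below, for instance, json_path resolves to
+        # generated/freebsd.json next to this file.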
+
+
def get_qemu_path(arch, build_path=None):
"""Fetch the path to the qemu binary."""
# If QEMU environment variable set, it takes precedence
link = "https://download.freebsd.org/releases/CI-IMAGES/13.2-RELEASE/amd64/Latest/FreeBSD-13.2-RELEASE-amd64-BASIC-CI.raw.xz"
csum = "a4fb3b6c7b75dd4d58fb0d75e4caf72844bffe0ca00e66459c028b198ffb3c0e"
size = "20G"
- pkgs = [
- # build tools
- "git",
- "pkgconf",
- "bzip2",
- "python39",
- "ninja",
-
- # gnu tools
- "bash",
- "gmake",
- "gsed",
- "gettext",
-
- # libs: crypto
- "gnutls",
-
- # libs: images
- "jpeg-turbo",
- "png",
-
- # libs: ui
- "sdl2",
- "gtk3",
- "libxkbcommon",
-
- # libs: opengl
- "libepoxy",
- "mesa-libs",
-
- # libs: migration
- "zstd",
-
- # libs: networking
- "libslirp",
-
- # libs: sndio
- "sndio",
- ]
BUILD_SCRIPT = """
set -e;
self.console_wait(prompt)
self.console_send("echo 'chmod 666 /dev/vtbd1' >> /etc/rc.local\n")
+ pkgs = self.get_qemu_packages_from_lcitool_json()
self.print_step("Installing packages")
- self.ssh_root_check("pkg install -y %s\n" % " ".join(self.pkgs))
+ self.ssh_root_check("pkg install -y %s\n" % " ".join(pkgs))
# shutdown
self.ssh_root(self.poweroff)
--- /dev/null
+# FILES IN THIS FOLDER WERE AUTO-GENERATED
+#
+# $ make lcitool-refresh
+#
+# https://gitlab.com/libvirt/libvirt-ci
--- /dev/null
+{
+ "ccache": "/usr/local/bin/ccache",
+ "cpan_pkgs": [],
+ "cross_pkgs": [],
+ "make": "/usr/local/bin/gmake",
+ "ninja": "/usr/local/bin/ninja",
+ "packaging_command": "pkg",
+ "pip3": "/usr/local/bin/pip-3.8",
+ "pkgs": [
+ "alsa-lib",
+ "bash",
+ "bison",
+ "bzip2",
+ "ca_root_nss",
+ "capstone4",
+ "ccache",
+ "cmocka",
+ "ctags",
+ "curl",
+ "cyrus-sasl",
+ "dbus",
+ "diffutils",
+ "dtc",
+ "flex",
+ "fusefs-libs3",
+ "gettext",
+ "git",
+ "glib",
+ "gmake",
+ "gnutls",
+ "gsed",
+ "gtk3",
+ "json-c",
+ "libepoxy",
+ "libffi",
+ "libgcrypt",
+ "libjpeg-turbo",
+ "libnfs",
+ "libslirp",
+ "libspice-server",
+ "libssh",
+ "libtasn1",
+ "llvm",
+ "lzo2",
+ "meson",
+ "mtools",
+ "ncurses",
+ "nettle",
+ "ninja",
+ "opencv",
+ "pixman",
+ "pkgconf",
+ "png",
+ "py39-numpy",
+ "py39-pillow",
+ "py39-pip",
+ "py39-sphinx",
+ "py39-sphinx_rtd_theme",
+ "py39-yaml",
+ "python3",
+ "rpm2cpio",
+ "sdl2",
+ "sdl2_image",
+ "snappy",
+ "sndio",
+ "socat",
+ "spice-protocol",
+ "tesseract",
+ "usbredir",
+ "virglrenderer",
+ "vte3",
+ "xorriso",
+ "zstd"
+ ],
+ "pypi_pkgs": [],
+ "python": "/usr/local/bin/python3"
+}
static const char placeholder_msg[] = "Display output is not active.";
DisplayState *s = con->ds;
DisplaySurface *old_surface = con->surface;
+ DisplaySurface *new_surface = surface;
DisplayChangeListener *dcl;
int width;
int height;
height = 480;
}
- surface = qemu_create_placeholder_surface(width, height, placeholder_msg);
+ new_surface = qemu_create_placeholder_surface(width, height, placeholder_msg);
}
- assert(old_surface != surface);
+ assert(old_surface != new_surface);
con->scanout.kind = SCANOUT_SURFACE;
- con->surface = surface;
- dpy_gfx_create_texture(con, surface);
+ con->surface = new_surface;
+ dpy_gfx_create_texture(con, new_surface);
QLIST_FOREACH(dcl, &s->listeners, next) {
if (con != (dcl->con ? dcl->con : active_console)) {
continue;
}
- displaychangelistener_gfx_switch(dcl, surface, FALSE);
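+        /* Force an update when the placeholder replaced a NULL surface. */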
+ displaychangelistener_gfx_switch(dcl, new_surface, surface ? FALSE : TRUE);
}
dpy_gfx_destroy_texture(con, old_surface);
qemu_free_displaysurface(old_surface);
backing_width, backing_height, x, y, w, h);
#ifdef CONFIG_GBM
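+    /*
+     * After the QemuDmaBuf field rename, width/height describe the visible
+     * scanout rectangle and backing_width/backing_height the texture size.
+     */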
QemuDmaBuf dmabuf = {
- .width = backing_width,
- .height = backing_height,
+ .width = w,
+ .height = h,
.y0_top = backing_y_0_top,
.x = x,
.y = y,
- .scanout_width = w,
- .scanout_height = h,
+ .backing_width = backing_width,
+ .backing_height = backing_height,
};
assert(tex_id);
if (src->dmabuf) {
x1 = src->dmabuf->x;
y1 = src->dmabuf->y;
- w = src->dmabuf->scanout_width;
- h = src->dmabuf->scanout_height;
+ w = src->dmabuf->width;
+ h = src->dmabuf->height;
}
w = (x1 + w) > src->width ? src->width - x1 : w;
}
attrs[i++] = EGL_WIDTH;
- attrs[i++] = dmabuf->width;
+ attrs[i++] = dmabuf->backing_width;
attrs[i++] = EGL_HEIGHT;
- attrs[i++] = dmabuf->height;
+ attrs[i++] = dmabuf->backing_height;
attrs[i++] = EGL_LINUX_DRM_FOURCC_EXT;
attrs[i++] = dmabuf->fourcc;
vc->gfx.scanout_mode = scanout;
if (!vc->gfx.scanout_mode) {
+ eglMakeCurrent(qemu_egl_display, vc->gfx.esurface,
+ vc->gfx.esurface, vc->gfx.ectx);
egl_fb_destroy(&vc->gfx.guest_fb);
if (vc->gfx.surface) {
surface_gl_destroy_texture(vc->gfx.gls, vc->gfx.ds);
vc->gfx.esurface, vc->gfx.ectx);
surface_gl_update_texture(vc->gfx.gls, vc->gfx.ds, x, y, w, h);
vc->gfx.glupdates++;
+ eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE,
+ EGL_NO_SURFACE, EGL_NO_CONTEXT);
}
void gd_egl_refresh(DisplayChangeListener *dcl)
gd_update_monitor_refresh_rate(
vc, vc->window ? vc->window : vc->gfx.drawing_area);
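+    /* A previously submitted dmabuf draw is still pending; skip this refresh. */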
+ if (vc->gfx.guest_fb.dmabuf && vc->gfx.guest_fb.dmabuf->draw_submitted) {
+ return;
+ }
+
if (!vc->gfx.esurface) {
gd_egl_init(vc);
if (!vc->gfx.esurface) {
eglMakeCurrent(qemu_egl_display, vc->gfx.esurface,
vc->gfx.esurface, vc->gfx.ectx);
- gtk_egl_set_scanout_mode(vc, true);
egl_fb_setup_for_tex(&vc->gfx.guest_fb, backing_width, backing_height,
backing_id, false);
}
}
gd_egl_scanout_texture(dcl, dmabuf->texture,
- dmabuf->y0_top, dmabuf->width, dmabuf->height,
- dmabuf->x, dmabuf->y, dmabuf->scanout_width,
- dmabuf->scanout_height, NULL);
+ dmabuf->y0_top,
+ dmabuf->backing_width, dmabuf->backing_height,
+ dmabuf->x, dmabuf->y, dmabuf->width,
+ dmabuf->height, NULL);
if (dmabuf->allow_fences) {
vc->gfx.guest_fb.dmabuf = dmabuf;
if (!dmabuf->texture) {
return;
}
- egl_fb_setup_for_tex(&vc->gfx.cursor_fb, dmabuf->width, dmabuf->height,
+ egl_fb_setup_for_tex(&vc->gfx.cursor_fb,
+ dmabuf->backing_width, dmabuf->backing_height,
dmabuf->texture, false);
} else {
egl_fb_destroy(&vc->gfx.cursor_fb);
if (vc->gfx.guest_fb.dmabuf && !vc->gfx.guest_fb.dmabuf->draw_submitted) {
graphic_hw_gl_block(vc->gfx.dcl.con, true);
vc->gfx.guest_fb.dmabuf->draw_submitted = true;
+ gtk_egl_set_scanout_mode(vc, true);
gtk_widget_queue_draw_area(area, x, y, w, h);
return;
}
vc->gfx.scanout_mode = scanout;
if (!vc->gfx.scanout_mode) {
+ gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area));
egl_fb_destroy(&vc->gfx.guest_fb);
if (vc->gfx.surface) {
surface_gl_destroy_texture(vc->gfx.gls, vc->gfx.ds);
gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area));
surface_gl_update_texture(vc->gfx.gls, vc->gfx.ds, x, y, w, h);
vc->gfx.glupdates++;
+ gdk_gl_context_clear_current();
}
void gd_gl_area_refresh(DisplayChangeListener *dcl)
gd_update_monitor_refresh_rate(vc, vc->window ? vc->window : vc->gfx.drawing_area);
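+    /* A previously submitted dmabuf draw is still pending; skip this refresh. */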
+ if (vc->gfx.guest_fb.dmabuf && vc->gfx.guest_fb.dmabuf->draw_submitted) {
+ return;
+ }
+
if (!vc->gfx.gls) {
if (!gtk_widget_get_realized(vc->gfx.drawing_area)) {
return;
return;
}
- gtk_gl_area_set_scanout_mode(vc, true);
egl_fb_setup_for_tex(&vc->gfx.guest_fb, backing_width, backing_height,
backing_id, false);
}
if (vc->gfx.guest_fb.dmabuf && !vc->gfx.guest_fb.dmabuf->draw_submitted) {
graphic_hw_gl_block(vc->gfx.dcl.con, true);
vc->gfx.guest_fb.dmabuf->draw_submitted = true;
+ gtk_gl_area_set_scanout_mode(vc, true);
}
gtk_gl_area_queue_render(GTK_GL_AREA(vc->gfx.drawing_area));
}
}
gd_gl_area_scanout_texture(dcl, dmabuf->texture,
- dmabuf->y0_top, dmabuf->width, dmabuf->height,
- dmabuf->x, dmabuf->y, dmabuf->scanout_width,
- dmabuf->scanout_height, NULL);
+ dmabuf->y0_top,
+ dmabuf->backing_width, dmabuf->backing_height,
+ dmabuf->x, dmabuf->y, dmabuf->width,
+ dmabuf->height, NULL);
if (dmabuf->allow_fences) {
vc->gfx.guest_fb.dmabuf = dmabuf;
ret = inflate(&stream, Z_FINISH);
switch (ret) {
case Z_OK:
- case Z_STREAM_END:
break;
+ case Z_STREAM_END:
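+        /* Clean end of stream: report the decompressed size and succeed. */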
+ *size = stream.total_out;
+ inflateEnd(&stream);
+ return out;
case Z_BUF_ERROR:
out_len <<= 1;
if (out_len > (1 << 20)) {
}
}
- *size = stream.total_out;
- inflateEnd(&stream);
-
- return out;
-
err_end:
inflateEnd(&stream);
err:
*/
#include "qemu/osdep.h"
-#include "qemu/config-file.h"
-#include "qemu/option.h"
-#include "qemu/module.h"
#include <dirent.h>
#include <sys/prctl.h>
#include <sched.h>
clone(async_teardown_fn, new_stack_for_clone(), CLONE_VM, NULL);
sigprocmask(SIG_SETMASK, &old_signals, NULL);
}
-
-static QemuOptsList qemu_run_with_opts = {
- .name = "run-with",
- .head = QTAILQ_HEAD_INITIALIZER(qemu_run_with_opts.head),
- .desc = {
- {
- .name = "async-teardown",
- .type = QEMU_OPT_BOOL,
- },
- { /* end of list */ }
- },
-};
-
-static void register_teardown(void)
-{
- qemu_add_opts(&qemu_run_with_opts);
-}
-opts_init(register_teardown);