#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
+#include "tcg/startup.h"
#include "target_mman.h"
#include <elf.h>
#include <endian.h>
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
-#include "tcg/tcg.h"
#include "cpu_loop-common.h"
#ifndef CLONE_IO
return target_brk;
}
- /* Release heap if necesary */
+ /* Release heap if necessary */
if (new_brk < old_brk) {
target_munmap(new_brk, old_brk - new_brk);
uint32_t *dst = (uint32_t *)data;
memcpy(dst, target_data, len);
- /* fix endianess of first 32-bit word */
+ /* fix endianness of first 32-bit word */
if (len >= sizeof(uint32_t)) {
*dst = tswap32(*dst);
}
unlock_user(results, optval_addr, 0);
return ret;
}
- /* swap host endianess to target endianess. */
+ /* Convert from host to target endianness. */
for (i = 0; i < (len / sizeof(uint32_t)); i++) {
results[i] = tswap32(results[i]);
}
}
#endif
-#define N_SHM_REGIONS 32
-
-static struct shm_region {
- abi_ulong start;
- abi_ulong size;
- bool in_use;
-} shm_regions[N_SHM_REGIONS];
-
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
return ret;
}
-#ifndef TARGET_FORCE_SHMLBA
-/* For most architectures, SHMLBA is the same as the page size;
- * some architectures have larger values, in which case they should
- * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
- * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
- * and defining its own value for SHMLBA.
- *
- * The kernel also permits SHMLBA to be set by the architecture to a
- * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
- * this means that addresses are rounded to the large size if
- * SHM_RND is set but addresses not aligned to that size are not rejected
- * as long as they are at least page-aligned. Since the only architecture
- * which uses this is ia64 this code doesn't provide for that oddity.
- */
-static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
-{
- return TARGET_PAGE_SIZE;
-}
-#endif
-
-static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
- abi_ulong shmaddr, int shmflg)
-{
- CPUState *cpu = env_cpu(cpu_env);
- abi_ulong raddr;
- void *host_raddr;
- struct shmid_ds shm_info;
- int i, ret;
- abi_ulong shmlba;
-
- /* shmat pointers are always untagged */
-
- /* find out the length of the shared memory segment */
- ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
- if (is_error(ret)) {
- /* can't get length, bail out */
- return ret;
- }
-
- shmlba = target_shmlba(cpu_env);
-
- if (shmaddr & (shmlba - 1)) {
- if (shmflg & SHM_RND) {
- shmaddr &= ~(shmlba - 1);
- } else {
- return -TARGET_EINVAL;
- }
- }
- if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
- return -TARGET_EINVAL;
- }
-
- mmap_lock();
-
- /*
- * We're mapping shared memory, so ensure we generate code for parallel
- * execution and flush old translations. This will work up to the level
- * supported by the host -- anything that requires EXCP_ATOMIC will not
- * be atomic with respect to an external process.
- */
- if (!(cpu->tcg_cflags & CF_PARALLEL)) {
- cpu->tcg_cflags |= CF_PARALLEL;
- tb_flush(cpu);
- }
-
- if (shmaddr)
- host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
- else {
- abi_ulong mmap_start;
-
- /* In order to use the host shmat, we need to honor host SHMLBA. */
- mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
-
- if (mmap_start == -1) {
- errno = ENOMEM;
- host_raddr = (void *)-1;
- } else
- host_raddr = shmat(shmid, g2h_untagged(mmap_start),
- shmflg | SHM_REMAP);
- }
-
- if (host_raddr == (void *)-1) {
- mmap_unlock();
- return get_errno((intptr_t)host_raddr);
- }
- raddr = h2g((uintptr_t)host_raddr);
-
- page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
- PAGE_VALID | PAGE_RESET | PAGE_READ |
- (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
-
- for (i = 0; i < N_SHM_REGIONS; i++) {
- if (!shm_regions[i].in_use) {
- shm_regions[i].in_use = true;
- shm_regions[i].start = raddr;
- shm_regions[i].size = shm_info.shm_segsz;
- break;
- }
- }
-
- mmap_unlock();
- return raddr;
-}
-
-static inline abi_long do_shmdt(abi_ulong shmaddr)
-{
- int i;
- abi_long rv;
-
- /* shmdt pointers are always untagged */
-
- mmap_lock();
-
- for (i = 0; i < N_SHM_REGIONS; ++i) {
- if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
- shm_regions[i].in_use = false;
- page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
- break;
- }
- }
- rv = get_errno(shmdt(g2h_untagged(shmaddr)));
-
- mmap_unlock();
-
- return rv;
-}
-
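+/* shmat and shmdt are now implemented as target_shmat/target_shmdt in mmap.c. */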
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
default:
{
abi_ulong raddr;
- raddr = do_shmat(cpu_env, first, ptr, second);
+ raddr = target_shmat(cpu_env, first, ptr, second);
if (is_error(raddr))
return get_errno(raddr);
if (put_user_ual(raddr, third))
}
break;
case IPCOP_shmdt:
- ret = do_shmdt(ptr);
+ ret = target_shmdt(ptr);
break;
case IPCOP_shmget:
{
void *gspec = argptr;
void *cur_data = host_data;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
- int spec_size = thunk_type_size(arg_type, 0);
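+ /* Named dm_arg_type so it does not shadow the handler's outer arg_type. */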
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
+ int spec_size = thunk_type_size(dm_arg_type, 0);
int i;
for (i = 0; i < host_dm->target_count; i++) {
uint32_t next;
int slen;
- thunk_convert(spec, gspec, arg_type, THUNK_HOST);
+ thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
slen = strlen((char*)gspec + spec_size) + 1;
next = spec->next;
spec->next = sizeof(*spec) + slen;
struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
uint32_t remaining_data = guest_data_size;
void *cur_data = argptr;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
int nl_size = 12; /* can't use thunk_size due to alignment */
while (1) {
host_dm->flags |= DM_BUFFER_FULL_FLAG;
break;
}
- thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
+ thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
strcpy(cur_data + nl_size, nl->name);
cur_data += nl->next;
remaining_data -= nl->next;
{
struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
void *cur_data = argptr;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
- int spec_size = thunk_type_size(arg_type, 0);
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
+ int spec_size = thunk_type_size(dm_arg_type, 0);
int i;
for (i = 0; i < host_dm->target_count; i++) {
host_dm->flags |= DM_BUFFER_FULL_FLAG;
break;
}
- thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
+ thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
strcpy(cur_data + spec_size, (char*)&spec[1]);
cur_data = argptr + spec->next;
spec = (void*)host_dm + host_dm->data_start + next;
struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
uint32_t remaining_data = guest_data_size;
void *cur_data = argptr;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
- int vers_size = thunk_type_size(arg_type, 0);
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
+ int vers_size = thunk_type_size(dm_arg_type, 0);
while (1) {
uint32_t next = vers->next;
host_dm->flags |= DM_BUFFER_FULL_FLAG;
break;
}
- thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
+ thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
strcpy(cur_data + vers_size, vers->name);
cur_data += vers->next;
remaining_data -= vers->next;
path = "[stack]";
} else if (start == info->brk) {
path = "[heap]";
+ } else if (start == info->vdso) {
+ path = "[vdso]";
}
/* Except null device (MAP_ANON), adjust offset for this fragment. */
"Private_Clean: 0 kB\n"
"Private_Dirty: 0 kB\n"
"Referenced: 0 kB\n"
- "Anonymous: 0 kB\n"
+ "Anonymous: %lu kB\n"
"LazyFree: 0 kB\n"
"AnonHugePages: 0 kB\n"
"ShmemPmdMapped: 0 kB\n"
"THPeligible: 0\n"
"VmFlags:%s%s%s%s%s%s%s%s\n",
size_kb, page_size_kb, page_size_kb,
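+ /* Anonymous: attribute the whole region when PAGE_ANON is set. */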
+ (flags & PAGE_ANON ? size_kb : 0),
(flags & PAGE_READ) ? " rd" : "",
(flags & PAGE_WRITE_ORG) ? " wr" : "",
(flags & PAGE_EXEC) ? " ex" : "",
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
#define RISCV_HWPROBE_IMA_FD (1 << 0)
#define RISCV_HWPROBE_IMA_C (1 << 1)
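+/* Bit values as defined by the Linux hwprobe UAPI. */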
+#define RISCV_HWPROBE_IMA_V (1 << 2)
+#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
+#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
+#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
riscv_has_ext(env, RVD) ?
RISCV_HWPROBE_IMA_FD : 0;
value |= riscv_has_ext(env, RVC) ?
- RISCV_HWPROBE_IMA_C : pair->value;
+ RISCV_HWPROBE_IMA_C : 0;
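+ /* Also report vector (V) and bit-manipulation (Zba/Zbb/Zbs) support. */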
+ value |= riscv_has_ext(env, RVV) ?
+ RISCV_HWPROBE_IMA_V : 0;
+ value |= cfg->ext_zba ?
+ RISCV_HWPROBE_EXT_ZBA : 0;
+ value |= cfg->ext_zbb ?
+ RISCV_HWPROBE_EXT_ZBB : 0;
+ value |= cfg->ext_zbs ?
+ RISCV_HWPROBE_EXT_ZBS : 0;
__put_user(value, &pair->value);
break;
case RISCV_HWPROBE_KEY_CPUPERF_0:
#endif
#ifdef TARGET_NR_shmat
case TARGET_NR_shmat:
- return do_shmat(cpu_env, arg1, arg2, arg3);
+ return target_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
case TARGET_NR_shmdt:
- return do_shmdt(arg1);
+ return target_shmdt(arg1);
#endif
case TARGET_NR_fsync:
return get_errno(fsync(arg1));
}
case TARGET_NR_getcpu:
{
- unsigned cpu, node;
- ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
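+ /* Named cpuid to avoid shadowing the function-scope "cpu". */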
+ unsigned cpuid, node;
+ ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
arg2 ? &node : NULL,
NULL));
if (is_error(ret)) {
return ret;
}
- if (arg1 && put_user_u32(cpu, arg1)) {
+ if (arg1 && put_user_u32(cpuid, arg1)) {
return -TARGET_EFAULT;
}
if (arg2 && put_user_u32(node, arg2)) {
case TARGET_NR_listxattr:
case TARGET_NR_llistxattr:
{
- void *p, *b = 0;
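+ /* Uses the function-scope "p" below rather than a shadowing local. */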
+ void *b = 0;
if (arg2) {
b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
if (!b) {
case TARGET_NR_setxattr:
case TARGET_NR_lsetxattr:
{
- void *p, *n, *v = 0;
+ void *n, *v = 0;
if (arg3) {
v = lock_user(VERIFY_READ, arg3, arg4, 1);
if (!v) {
case TARGET_NR_getxattr:
case TARGET_NR_lgetxattr:
{
- void *p, *n, *v = 0;
+ void *n, *v = 0;
if (arg3) {
v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
if (!v) {
case TARGET_NR_removexattr:
case TARGET_NR_lremovexattr:
{
- void *p, *n;
+ void *n;
p = lock_user_string(arg1);
n = lock_user_string(arg2);
if (p && n) {