--p; --tmp; --len;
if (--offset < 0) {
offset = p % TARGET_PAGE_SIZE;
- pag = (char *)page[p / TARGET_PAGE_SIZE];
+ pag = page[p / TARGET_PAGE_SIZE];
if (!pag) {
pag = g_try_malloc0(TARGET_PAGE_SIZE);
page[p / TARGET_PAGE_SIZE] = pag;
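/*
 * A minimal sketch (not the patched QEMU code) of the lazy page
 * allocation in the hunk above: a table of page pointers starts out
 * NULL and each page is allocated with g_try_malloc0() on first
 * touch. g_try_malloc0() already returns a gpointer (void *), so the
 * assignment needs no cast. Names and the page size are made up.
 */
#include <glib.h>

#define PAGE_SIZE 4096

static char *get_page(char **page, unsigned long p)
{
    char *pag = page[p / PAGE_SIZE];

    if (!pag) {
        pag = g_try_malloc0(PAGE_SIZE);     /* may return NULL on OOM */
        page[p / PAGE_SIZE] = pag;
    }
    return pag;
}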
g_mutex_lock(&l1_dcache_locks[cache_idx]);
hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr);
if (!hit_in_l1) {
- insn = (InsnData *) userdata;
+ insn = userdata;
__atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST);
l1_dcaches[cache_idx]->misses++;
}
g_mutex_lock(&l2_ucache_locks[cache_idx]);
if (!access_cache(l2_ucaches[cache_idx], effective_addr)) {
- insn = (InsnData *) userdata;
+ insn = userdata;
__atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
l2_ucaches[cache_idx]->misses++;
}
g_mutex_lock(&l1_icache_locks[cache_idx]);
hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr);
if (!hit_in_l1) {
- insn = (InsnData *) userdata;
+ insn = userdata;
__atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST);
l1_icaches[cache_idx]->misses++;
}
g_mutex_lock(&l2_ucache_locks[cache_idx]);
if (!access_cache(l2_ucaches[cache_idx], insn_addr)) {
- insn = (InsnData *) userdata;
+ insn = userdata;
__atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
l2_ucaches[cache_idx]->misses++;
}
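/*
 * The four hunks above all drop the same redundant cast: in C, a
 * void * converts implicitly to any object pointer type, so callback
 * user data can be assigned directly. A minimal sketch, with a
 * hypothetical stand-in for InsnData and the plugin callback:
 */
#include <stdint.h>

typedef struct {
    uint64_t l1_dmisses;
} CounterData;

static void on_data_miss(void *userdata)
{
    CounterData *insn = userdata;   /* implicit void * conversion */

    /* same atomic read-modify-write idiom as in the hunks above */
    __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST);
}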
#if defined(__linux__) && defined(BLKDISCARD) && defined(BLKZEROOUT)
VubDev *vdev_blk = req->vdev_blk;
- desc = (struct virtio_blk_discard_write_zeroes *)buf;
+ desc = buf;
uint64_t range[2] = { le64toh(desc->sector) << 9,
le32toh(desc->num_sectors) << 9 };
if (type == VIRTIO_BLK_T_DISCARD) {
Clock **clkp;
/* offset cannot be inside the DeviceState part */
assert(elem->offset > sizeof(DeviceState));
- clkp = (Clock **)(((void *) dev) + elem->offset);
+ clkp = ((void *)dev) + elem->offset;
if (elem->is_output) {
*clkp = qdev_init_clock_out(dev, elem->name);
} else {
goto out;
}
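/*
 * Sketch of the offset arithmetic above, assuming (as QEMU does) a
 * GNU C dialect where arithmetic on void * is allowed and steps in
 * bytes, like char *. The resulting void * then converts implicitly
 * to Clock ** (or any other object pointer), so only the (void *)
 * cast on the base pointer is needed. Types here are hypothetical.
 */
#include <stddef.h>

struct Base { int id; };
struct Derived { struct Base parent; void *member; };

static void **member_at(struct Base *base, size_t offset)
{
    return (void *)base + offset;   /* GNU C void * arithmetic */
}

/* usage: member_at(&d.parent, offsetof(struct Derived, member)) */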
msgdata = hv_msg->payload;
- msg = (struct vmbus_message_header *)msgdata;
+ msg = msgdata;
trace_vmbus_process_incoming_message(msg->message_type);
{
CadenceGEMState *s;
uint32_t retval;
- s = (CadenceGEMState *)opaque;
+ s = opaque;
offset >>= 2;
retval = s->regs[offset];
VirtioNetRscChain *chain;
VirtioNetRscUnit unit;
- chain = (VirtioNetRscChain *)opq;
+ chain = opq;
hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
nr_zones++;
}
}
- header = (NvmeZoneReportHeader *)buf;
+ header = buf;
header->nr_zones = cpu_to_le64(nr_zones);
buf_p = buf + sizeof(NvmeZoneReportHeader);
for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
zone = &ns->zone_array[zone_idx];
if (nvme_zone_matches_filter(zrasf, zone)) {
- z = (NvmeZoneDescr *)buf_p;
+ z = buf_p;
buf_p += sizeof(NvmeZoneDescr);
z->zt = zone->d.zt;
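/*
 * Sketch of the report-buffer layout above: a header struct is laid
 * down at the start of a raw buffer and fixed-size descriptors are
 * appended behind it. Both assignments rely on implicit conversion
 * from void *, and the pointer stepping relies on GNU C void *
 * arithmetic. Struct and field names here are hypothetical.
 */
#include <glib.h>
#include <stdint.h>

typedef struct { uint64_t nr_entries; } ReportHeader;
typedef struct { uint8_t type; } EntryDescr;

static void *build_report(unsigned nr)
{
    void *buf = g_malloc0(sizeof(ReportHeader) + nr * sizeof(EntryDescr));
    ReportHeader *header = buf;                 /* no cast needed */
    void *buf_p = buf + sizeof(ReportHeader);   /* byte-wise stepping */

    header->nr_entries = nr;
    for (unsigned i = 0; i < nr; i++) {
        EntryDescr *e = buf_p;
        e->type = 0;
        buf_p += sizeof(EntryDescr);
    }
    return buf;
}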
r = g_malloc(sizeof(*r));
*ring = r;
- r->ring_state = (PvrdmaRingState *)
- rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+ r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
if (!r->ring_state) {
rdma_error_report("Failed to map to CQ ring state");
*rings = sr;
/* Create send ring */
- sr->ring_state = (PvrdmaRingState *)
- rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+ sr->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
if (!sr->ring_state) {
rdma_error_report("Failed to map to QP ring state");
goto out_free_sr_mem;
r = g_malloc(sizeof(*r));
*ring = r;
- r->ring_state = (PvrdmaRingState *)
- rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+ r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
if (!r->ring_state) {
rdma_error_report("Failed to map tp SRQ ring state");
goto out_free_ring_mem;
ring = (PvrdmaRing *)qp->opaque;
- wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring);
+ wqe = pvrdma_ring_next_elem_read(ring);
while (wqe) {
CompHandlerCtx *comp_ctx;
ring = &((PvrdmaRing *)qp->opaque)[1];
- wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+ wqe = pvrdma_ring_next_elem_read(ring);
while (wqe) {
CompHandlerCtx *comp_ctx;
ring = (PvrdmaRing *)srq->opaque;
- wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+ wqe = pvrdma_ring_next_elem_read(ring);
while (wqe) {
CompHandlerCtx *comp_ctx;
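/*
 * The three ring-drain hunks above share one shape: the ring's
 * next-element accessor returns void *, so its result can be
 * assigned straight to the typed wqe pointer. A sketch with a
 * hypothetical stand-in for pvrdma_ring_next_elem_read():
 */
#include <stddef.h>

struct SqWqe { int opcode; };

static void *ring_next_elem_read(void *ring)
{
    (void)ring;
    return NULL;            /* empty ring, for the sketch */
}

static void drain_ring(void *ring)
{
    struct SqWqe *wqe = ring_next_elem_read(ring);  /* no cast */

    while (wqe) {
        /* ... post a completion for wqe ... */
        wqe = ring_next_elem_read(ring);
    }
}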
output_size = s->config.probe_size + sizeof(tail);
buf = g_malloc0(output_size);
- ptail = (struct virtio_iommu_req_tail *)
- (buf + s->config.probe_size);
+ ptail = buf + s->config.probe_size;
ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
break;
}
for (i = 0; i < se->nb_fields; i++) {
if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
assert(*field_types == TYPE_PTRVOID);
- target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
+ target_rt_dev_ptr = argptr + src_offsets[i];
host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
if (*target_rt_dev_ptr != 0) {
*host_rt_dev_ptr = (unsigned long)lock_user_string(
MemTxAttrs attrs = { 0 };
if (!df) {
- ptr = (uint8_t *) buffer;
+ ptr = buffer;
} else {
ptr = buffer + size * count - size;
}
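/*
 * Sketch of the direction-flag setup above: with DF clear, a string
 * I/O operation walks the buffer forwards from its first element;
 * with DF set it walks backwards, so the pointer starts at the last
 * element. buffer is assumed to be void *, which is why the forward
 * branch needed no cast. Names here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static uint8_t *string_op_start(void *buffer, size_t size,
                                size_t count, bool df)
{
    if (!df) {
        return buffer;                       /* forwards: no cast */
    }
    return buffer + size * count - size;     /* GNU C void * arithmetic */
}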
ml_printf("stack: %p <- %p\n", info.stack_limit, info.stack_base);
/* finally, check that we can read/write the heap */
- ptr_to_heap = (uint32_t *) info.heap_base;
+ ptr_to_heap = info.heap_base;
for (i = 0; i < 512; i++) {
*ptr_to_heap++ = i;
}
- ptr_to_heap = (uint32_t *) info.heap_base;
+ ptr_to_heap = info.heap_base;
for (i = 0; i < 512; i++) {
uint32_t tmp = *ptr_to_heap;
if (tmp != i) {
if (!cap->next) {
return;
}
- cap = (struct vfio_info_cap_header *)(buf + cap->next);
+ cap = buf + cap->next;
}
cap_iova_range = (struct vfio_iommu_type1_info_cap_iova_range *)cap;
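/*
 * Sketch of the capability walk ending above, following the layout
 * of struct vfio_info_cap_header in <linux/vfio.h>: each header
 * stores the byte offset of the next capability from the start of
 * the info buffer, with 0 terminating the chain. Note that the cast
 * kept on the last line above converts between two struct pointer
 * types, not from void *, so it is still required.
 */
#include <stdint.h>
#include <stddef.h>

struct cap_header {
    uint16_t id;
    uint16_t version;
    uint32_t next;          /* byte offset from buffer start, 0 = end */
};

static struct cap_header *find_cap(void *buf, uint32_t first, uint16_t id)
{
    struct cap_header *cap = buf + first;   /* GNU C void * arithmetic */

    for (;;) {
        if (cap->id == id) {
            return cap;
        }
        if (!cap->next) {
            return NULL;
        }
        cap = buf + cap->next;              /* no cast, as in the hunk */
    }
}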