--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ ... @@ xen_dma_sync_for_cpu
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
-void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+		phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (pfn_valid(PFN_DOWN(handle)))
 		arch_sync_dma_for_device(paddr, size, dir);
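
For orientation (not part of the hunk above): the context lines belong to the
arm implementation, which splits on whether the DMA handle points at local RAM
or at a foreign grant-mapped page. Below is a minimal sketch of that dispatch,
assuming the surrounding includes of arch/arm/xen/mm.c and its file-local
dma_cache_maint() helper; the exact direction-to-GNTTAB_CACHE_* mapping is an
assumption, not quoted from the patch:

	/* Sketch only. dma_cache_maint() is assumed to be the file-local
	 * helper that performs cache maintenance on foreign pages via a
	 * grant-table cache-flush operation; signature assumed below. */
	static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op);

	static void sketch_sync_for_device(struct device *dev, dma_addr_t handle,
					   phys_addr_t paddr, size_t size,
					   enum dma_data_direction dir)
	{
		if (pfn_valid(PFN_DOWN(handle)))
			/* Local page: regular architecture cache maintenance. */
			arch_sync_dma_for_device(paddr, size, dir);
		else if (dir == DMA_FROM_DEVICE)
			/* Device will write: invalidate stale CPU cache lines. */
			dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
		else
			/* Device will read: clean dirty lines out to memory. */
			dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
	}

Note that dev goes unused in the sketch, matching the mechanical nature of the
signature change.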
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ ... @@ xen_swiotlb_map_page
 done:
 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xen_dma_sync_for_device(dev_addr, phys, size, dir);
+		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
 	return dev_addr;
 }
@@ ... @@ xen_swiotlb_sync_single_for_device
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
 	if (!dev_is_dma_coherent(dev))
-		xen_dma_sync_for_device(dma_addr, paddr, size, dir);
+		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
 }
 
 /*
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ ... @@
 void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
 			  phys_addr_t paddr, size_t size,
 			  enum dma_data_direction dir);
-void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
-			     enum dma_data_direction dir);
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+			     phys_addr_t paddr, size_t size,
+			     enum dma_data_direction dir);
 
 extern int xen_swiotlb_init(int verbose, bool early);
 extern const struct dma_map_ops xen_swiotlb_dma_ops;
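
Taken together, the change is mechanical: every caller passes its struct
device * as a new first argument, and the parameter is otherwise unused at
this stage, presumably so that follow-up changes can perform per-device DMA
address translation. A sketch of the resulting call shape (hypothetical
caller and names, not from the patch):

	/* Hypothetical caller, for illustration only. It mirrors the pattern
	 * the patch establishes in xen_swiotlb_map_page() and
	 * xen_swiotlb_sync_single_for_device(). */
	static void example_flush_for_device(struct device *dev, dma_addr_t dma_addr,
					     phys_addr_t paddr, size_t size)
	{
		/* Coherent devices need no CPU cache maintenance. */
		if (dev_is_dma_coherent(dev))
			return;

		/* New signature: dev first, then the pre-existing arguments. */
		xen_dma_sync_for_device(dev, dma_addr, paddr, size, DMA_TO_DEVICE);
	}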