	BUG_ON(dir == DMA_NONE);
-	if (!iommu_need_mapping(dev))
-		return paddr;
-
	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return DMA_MAPPING_ERROR;
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 unsigned long attrs)
{
-	return __intel_map_single(dev, page_to_phys(page) + offset, size,
-			dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, page_to_phys(page) + offset,
+				size, dir, *dev->dma_mask);
+	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
}
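
For context (not part of the patch): this is roughly how a driver's streaming mapping reaches intel_map_page(). The example_dma_one_page() helper, its arguments and the DMA_TO_DEVICE direction are hypothetical; only dma_map_page(), dma_mapping_error() and dma_unmap_page() are real kernel API. When iommu_need_mapping() is false, the same driver call now lands in dma_direct_map_page(), so nothing changes from the driver's point of view.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Sketch only: one page of streaming DMA through the generic DMA API. */
static int example_dma_one_page(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	/* with intel_dma_ops installed this dispatches to intel_map_page() */
	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle', wait for the transfer ... */

	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}
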
static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
-	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, phys_addr, size, dir,
+				*dev->dma_mask);
+	return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
	struct intel_iommu *iommu;
	struct page *freelist;
-	if (!iommu_need_mapping(dev))
-		return;
-
	domain = find_domain(dev);
	BUG_ON(!domain);
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
-	intel_unmap(dev, dev_addr, size);
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
+	else
+		dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
}
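
The bypass branch of intel_unmap_resource() deliberately does nothing: dma_direct_map_resource() essentially hands back the physical address (after checking it against the device's DMA mask), so there is no per-mapping state to release. A hedged driver-side sketch; example_dma_resource(), mmio_phys and the bidirectional direction are chosen purely for illustration.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: mapping a physical (MMIO) region for device-to-device DMA. */
static int example_dma_resource(struct device *dev, phys_addr_t mmio_phys,
				size_t len)
{
	dma_addr_t handle;

	/* translated devices get an IOVA; bypass devices get mmio_phys back */
	handle = dma_map_resource(dev, mmio_phys, len, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... point the DMA engine at 'handle' ... */

	dma_unmap_resource(dev, handle, len, DMA_BIDIRECTIONAL, 0);
	return 0;
}
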
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	struct page *page = NULL;
	int order;
+	if (!iommu_need_mapping(dev))
+		return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+
	size = PAGE_ALIGN(size);
	order = get_order(size);
-
-	if (iommu_need_mapping(dev))
-		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
-		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-			flags |= GFP_DMA;
-		else
-			flags |= GFP_DMA32;
-	}
+	flags &= ~(GFP_DMA | GFP_DMA32);
	if (gfpflags_allow_blocking(flags)) {
		unsigned int count = size >> PAGE_SHIFT;
		page = dma_alloc_from_contiguous(dev, count, order,
						 flags & __GFP_NOWARN);
-		if (page && !iommu_need_mapping(dev) &&
-		    page_to_phys(page) + size > dev->coherent_dma_mask) {
-			dma_release_from_contiguous(dev, page, count);
-			page = NULL;
-		}
	}
	if (!page)
	int order;
	struct page *page = virt_to_page(vaddr);
+	if (!iommu_need_mapping(dev))
+		return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+
	size = PAGE_ALIGN(size);
	order = get_order(size);
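
The coherent path now splits the same way: bypass devices go straight to dma_direct_alloc()/dma_direct_free(), which carry the zone-selection and CMA-versus-mask handling that the removed GFP_DMA/GFP_DMA32 lines used to open-code. A minimal sketch of the driver side; the example_ring structure and helpers are hypothetical, only dma_alloc_coherent()/dma_free_coherent() are real API.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Sketch only: a driver allocating a coherent descriptor ring. */
struct example_ring {
	void		*desc;
	dma_addr_t	desc_dma;
	size_t		len;
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring,
			      size_t len)
{
	/* with intel_dma_ops installed this ends up in intel_alloc_coherent() */
	ring->desc = dma_alloc_coherent(dev, len, &ring->desc_dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;
	ring->len = len;
	return 0;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	dma_free_coherent(dev, ring->len, ring->desc, ring->desc_dma);
}
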
	struct scatterlist *sg;
	int i;
+	if (!iommu_need_mapping(dev))
+		return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
+
	for_each_sg(sglist, sg, nelems, i) {
		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
	}
	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
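
Scatter-gather mappings follow suit: for a bypass device the sg entries are now filled in by dma_direct_map_sg(), which is expected to validate each segment against the DMA mask rather than blindly taking sg_phys() the way the intel_nontranslate_map_sg() helper removed below did. A sketch of the calling side; example_dma_sg() and the DMA_FROM_DEVICE direction are assumptions for illustration.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Sketch only: mapping and unmapping a scatterlist from a driver. */
static int example_dma_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -EIO;

	/* hand each mapped segment to the hardware */
	for_each_sg(sgl, sg, mapped, i)
		dev_dbg(dev, "seg %d: %pad + %u\n", i,
			&sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
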
-static int intel_nontranslate_map_sg(struct device *hddev,
-			struct scatterlist *sglist, int nelems, int dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
-		sg->dma_length = sg->length;
-	}
-	return nelems;
-}
-
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, unsigned long attrs)
{
	BUG_ON(dir == DMA_NONE);
	if (!iommu_need_mapping(dev))
-		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+		return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
	domain = get_valid_domain_for_dev(dev);
	if (!domain)
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.map_resource = intel_map_resource,
-	.unmap_resource = intel_unmap_page,
+	.unmap_resource = intel_unmap_resource,
	.dma_supported = dma_direct_supported,
};
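
Since .dma_supported points at dma_direct_supported() (unchanged by this patch), a driver's usual mask negotiation is handled by the generic "can the direct mapping address this much memory" check. A probe-time sketch; the function name and the 64-bit/32-bit fallback policy are illustrative, not from the patch.

#include <linux/dma-mapping.h>

/* Sketch only: typical DMA mask setup in a driver's probe path. */
static int example_probe_dma_setup(struct device *dev)
{
	/* try 64-bit DMA first, fall back to 32-bit addressing */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return 0;
}
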