if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
if (should_fail_iommu(dev))
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
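Both early exits in iommu_range_alloc() now return the generic DMA_MAPPING_ERROR instead of the powerpc-private IOMMU_MAPPING_ERROR (should_fail_iommu() is the CONFIG_FAIL_IOMMU fault-injection hook). Both constants are an all-ones dma_addr_t, so callers see no behavioural change; they are expected to test the result through dma_mapping_error() rather than compare against the constant directly. A minimal sketch of such a driver-side check, with example_map, dev, buf, and len as placeholder names:

```c
#include <linux/dma-mapping.h>

/*
 * Minimal sketch of a driver-side check; example_map, dev, buf, and
 * len are placeholder names.  dma_mapping_error() hides the sentinel
 * comparison behind the generic DMA API.
 */
static int example_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* no IOVA space, or fault injection hit */

	return 0;
}
```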
/*
* We don't need to disable preemption here because any CPU can
* safely use any IOMMU pool.
*/
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
}
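When every pool has been tried, the allocator drops the pool lock and gives up with the same sentinel. The constant itself comes from include/linux/dma-mapping.h; an all-ones bus address can never be a valid page-aligned mapping, which is what lets it double as an error code:

```c
/* include/linux/dma-mapping.h */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
```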
unsigned long attrs)
{
unsigned long entry;
- dma_addr_t ret = IOMMU_MAPPING_ERROR;
+ dma_addr_t ret = DMA_MAPPING_ERROR;
int build_fail;
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
- if (unlikely(entry == IOMMU_MAPPING_ERROR))
- return IOMMU_MAPPING_ERROR;
+ if (unlikely(entry == DMA_MAPPING_ERROR))
+ return DMA_MAPPING_ERROR;
entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << tbl->it_page_shift; /* Set the return dma address */
/* tbl->it_ops->set() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
- * IOMMU_MAPPING_ERROR. For all other errors the functionality is
+ * DMA_MAPPING_ERROR. For all other errors the functionality is
* not altered.
*/
if (unlikely(build_fail)) {
__iommu_free(tbl, ret, npages);
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/* Flush/invalidate TLB caches if necessary */
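In iommu_alloc(), the allocated table index is turned into a bus address by shifting with the table's page shift, and a transient tbl->it_ops->set() failure releases the just-reserved bitmap range before returning the sentinel. A toy illustration of the index-to-address arithmetic, assuming 4K IOMMU pages (it_page_shift == 12) and a made-up entry value:

```c
#include <linux/types.h>

/*
 * Illustration only: how iommu_alloc() turns a TCE index into a bus
 * address.  The 4K page shift and the entry value are assumptions.
 */
static dma_addr_t example_entry_to_dma(unsigned long entry)
{
	unsigned int it_page_shift = 12;	/* assumed 4K IOMMU pages */

	return (dma_addr_t)entry << it_page_shift;	/* 0x42 -> 0x42000 */
}
```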
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Handle failure */
- if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
+ if (unlikely(entry == DMA_MAPPING_ERROR)) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit())
dev_info(dev, "iommu_alloc failed, tbl %p "
*/
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = IOMMU_MAPPING_ERROR;
+ outs->dma_address = DMA_MAPPING_ERROR;
outs->dma_length = 0;
}
npages = iommu_num_pages(s->dma_address, s->dma_length,
IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
- s->dma_address = IOMMU_MAPPING_ERROR;
+ s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
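On partial failure, ppc_iommu_map_sg() terminates the output list by storing DMA_MAPPING_ERROR in the entry after the last mapped one, then walks back over the mapped entries, releasing their IOMMU pages and clearing dma_address/dma_length. A hedged sketch of an unwind loop keyed off that terminator; example_unwind, dev, sgl, nents, and dir are placeholders, and the real unwind lives inside ppc_iommu_map_sg() itself:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch only: stop unwinding at the DMA_MAPPING_ERROR terminator the
 * mapping path wrote.  dev, sgl, nents, and dir are placeholders.
 */
static void example_unwind(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sgl, s, nents, i) {
		if (s->dma_address == DMA_MAPPING_ERROR)
			break;	/* nothing past this entry was mapped */
		dma_unmap_page(dev, s->dma_address, s->dma_length, dir);
	}
}
```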
unsigned long mask, enum dma_data_direction direction,
unsigned long attrs)
{
- dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
+ dma_addr_t dma_handle = DMA_MAPPING_ERROR;
void *vaddr;
unsigned long uaddr;
unsigned int npages, align;
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> tbl->it_page_shift, align,
attrs);
- if (dma_handle == IOMMU_MAPPING_ERROR) {
+ if (dma_handle == DMA_MAPPING_ERROR) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit()) {
dev_info(dev, "iommu_alloc failed, tbl %p "
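iommu_map_page() reports failure the same way and, as in the scatterlist path, skips the rate-limited dev_info() when the caller passed DMA_ATTR_NO_WARN. A sketch of a caller that silences the warning because it has its own fallback; example_map_quiet, dev, page, and len are placeholders:

```c
#include <linux/dma-mapping.h>

/*
 * Sketch only: a caller with its own fallback can pass
 * DMA_ATTR_NO_WARN to suppress the rate-limited warning.
 * dev, page, and len are placeholders.
 */
static int example_map_quiet(struct device *dev, struct page *page,
			     size_t len)
{
	dma_addr_t handle = dma_map_page_attrs(dev, page, 0, len,
					       DMA_TO_DEVICE,
					       DMA_ATTR_NO_WARN);

	if (dma_mapping_error(dev, handle))
		return -EAGAIN;	/* fall back quietly */

	return 0;
}
```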
io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
- if (mapping == IOMMU_MAPPING_ERROR) {
+ if (mapping == DMA_MAPPING_ERROR) {
free_pages((unsigned long)ret, order);
return NULL;
}
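The coherent path is different: when iommu_alloc() fails, iommu_alloc_coherent() frees the backing pages and returns NULL, so no sentinel bus address ever reaches the caller. Consumers therefore only NULL-check the CPU pointer, as in this minimal sketch (example_alloc, dev, and size are placeholders):

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Sketch only: the coherent path signals failure with a NULL CPU
 * pointer, never with a sentinel handle.  dev and size are
 * placeholders.
 */
static int example_alloc(struct device *dev, size_t size)
{
	dma_addr_t dma;
	void *cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;	/* iommu_alloc() failed underneath */

	/* ... use cpu/dma, later dma_free_coherent(dev, size, cpu, dma) ... */
	return 0;
}
```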