static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
+static inline unsigned long iommu_nr_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen);
+		npages = iommu_nr_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)

			unsigned long vaddr, npages;
			vaddr = s->dma_address & IOMMU_PAGE_MASK;
-			npages = iommu_num_pages(s->dma_address, s->dma_length);
+			npages = iommu_nr_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;

		if (sg->dma_length == 0)
			break;
-		npages = iommu_num_pages(dma_handle, sg->dma_length);
+		npages = iommu_nr_pages(dma_handle, sg->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	BUG_ON(direction == DMA_NONE);
	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size);
+	npages = iommu_nr_pages(uaddr, size);
	if (tbl) {
		align = 0;

	BUG_ON(direction == DMA_NONE);
	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size);
+		npages = iommu_nr_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}
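
Every hunk above is a mechanical rename; the calculation itself does not change: round the buffer [vaddr, vaddr + slen) out to IOMMU page boundaries and count the pages in between. Below is a minimal standalone sketch of that calculation, not part of the patch; the IOMMU_PAGE_* macro values here are assumed (4K pages) purely for illustration, the real definitions come from the kernel's powerpc iommu headers.

#include <stdio.h>

#define IOMMU_PAGE_SHIFT	12			/* assumed 4K IOMMU pages */
#define IOMMU_PAGE_SIZE		(1UL << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK		(~(IOMMU_PAGE_SIZE - 1))
#define IOMMU_PAGE_ALIGN(x)	(((x) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

/* Same arithmetic as the renamed helper: number of IOMMU pages
 * spanned by a buffer of slen bytes starting at vaddr. */
static unsigned long iommu_nr_pages(unsigned long vaddr, unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

int main(void)
{
	/* A 100-byte buffer that straddles an IOMMU page boundary
	 * still needs two IOMMU pages mapped. */
	printf("%lu\n", iommu_nr_pages(0x1fd0, 100));	/* prints 2 */
	return 0;
}

The map and unmap paths in the hunks above all derive npages from the same (address, length) pair, so the number of IOMMU pages allocated and freed stays consistent across the rename.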