__dma_page_cpu_to_dev(page, offset, size, dir);
}
-struct dma_map_ops arm_dma_ops = {
+const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
-struct dma_map_ops arm_coherent_dma_ops = {
+const struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
.free = arm_coherent_dma_free,
.mmap = arm_coherent_dma_mmap,
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
- int coherent_flag);
+ int coherent_flag, gfp_t gfp);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
*/
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
- &page, atomic_pool_init, true, NORMAL);
+ &page, atomic_pool_init, true, NORMAL,
+ GFP_KERNEL);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
&page, atomic_pool_init, true);
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
- int coherent_flag)
+ int coherent_flag, gfp_t gfp)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
void *ptr = NULL;
- page = dma_alloc_from_contiguous(dev, count, order);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp);
if (!page)
return NULL;
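
The extra gfp parameter lets __alloc_from_contiguous() hand the caller's allocation context down to CMA instead of CMA hard-coding one. A minimal, hypothetical caller of the updated dma_alloc_from_contiguous() (dev and size here are placeholders, and the fallback is illustrative, not from this patch):

    size_t count = size >> PAGE_SHIFT;
    struct page *page;

    /* Hypothetical: let CMA honor __GFP_NOWARN so a failed large
     * allocation does not log a warning; fall back to the buddy
     * allocator afterwards. */
    page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                     GFP_KERNEL | __GFP_NOWARN);
    if (!page)
        page = alloc_pages(GFP_KERNEL, get_order(size));
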
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp) NULL
#define __free_from_pool(cpu_addr, size) do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
- args->want_vaddr, args->coherent_flag);
+ args->want_vaddr, args->coherent_flag,
+ args->gfp);
}
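
For reference, args->gfp comes from the allocator argument block that dma-mapping.c already threads through these callbacks; its shape is roughly the following (abridged sketch, field order per the source around this change):

    struct arm_dma_alloc_args {
        struct device *dev;
        size_t size;
        gfp_t gfp;              /* read by cma_allocator_alloc() above */
        pgprot_t prot;
        const void *caller;
        bool want_vaddr;
        int coherent_flag;
    };
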
static void cma_allocator_free(struct arm_dma_free_args *args)
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
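
All four scatter-gather helpers fetch the per-device ops table, so constifying struct dma_map_ops forces their local ops pointers to become const as well. For context, ARM's get_dma_ops() of this era looks roughly like this (simplified sketch; exactly where the pointer lives, dev->archdata versus struct device, shifted between kernel versions):

    static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
    {
        /* Per-device override if one was installed, else the default. */
        if (dev && dev->archdata.dma_ops)
            return dev->archdata.dma_ops;
        return &arm_dma_ops;
    }
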
#ifdef CONFIG_ARM_DMA_USE_IOMMU
+static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
+{
+ int prot = 0;
+
+ if (attrs & DMA_ATTR_PRIVILEGED)
+ prot |= IOMMU_PRIV;
+
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ return prot | IOMMU_READ | IOMMU_WRITE;
+ case DMA_TO_DEVICE:
+ return prot | IOMMU_READ;
+ case DMA_FROM_DEVICE:
+ return prot | IOMMU_WRITE;
+ default:
+ return prot;
+ }
+}
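
The new helper folds DMA_ATTR_PRIVILEGED into the direction-derived flags, replacing the direction-only __dma_direction_to_prot() removed below. A quick sanity check of the mapping (flag names per linux/dma-mapping.h and linux/iommu.h):

    __dma_info_to_prot(DMA_TO_DEVICE, 0)
        == IOMMU_READ
    __dma_info_to_prot(DMA_FROM_DEVICE, DMA_ATTR_PRIVILEGED)
        == IOMMU_PRIV | IOMMU_WRITE
    __dma_info_to_prot(DMA_BIDIRECTIONAL, 0)
        == IOMMU_READ | IOMMU_WRITE
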
+
/* IOMMU */
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
unsigned long order = get_order(size);
struct page *page;
- page = dma_alloc_from_contiguous(dev, count, order);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp);
if (!page)
goto error;
* Create a mapping in device IO address space for specified pages
*/
static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+ unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
len = (j - i) << PAGE_SHIFT;
ret = iommu_map(mapping->domain, iova, phys, len,
- IOMMU_READ|IOMMU_WRITE);
+ __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
if (ret < 0)
goto fail;
iova += len;
}
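
For context, the loop this hunk sits in coalesces physically contiguous pages so that each run costs only one iommu_map() call; roughly (simplified from the surrounding source):

    for (i = 0; i < count; ) {
        int ret;
        phys_addr_t phys = page_to_phys(pages[i]);
        unsigned int len, j;

        /* Extend the run while pages stay physically contiguous. */
        for (j = i + 1; j < count; j++)
            if (page_to_phys(pages[j]) != phys + ((j - i) << PAGE_SHIFT))
                break;

        len = (j - i) << PAGE_SHIFT;
        ret = iommu_map(mapping->domain, iova, phys, len,
                        __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
        if (ret < 0)
            goto fail;
        iova += len;
        i = j;
    }
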
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
- dma_addr_t *handle, int coherent_flag)
+ dma_addr_t *handle, int coherent_flag,
+ unsigned long attrs)
{
struct page *page;
void *addr;
if (!addr)
return NULL;
- *handle = __iommu_create_mapping(dev, &page, size);
+ *handle = __iommu_create_mapping(dev, &page, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_mapping;
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
return __iommu_alloc_simple(dev, size, gfp, handle,
- coherent_flag);
+ coherent_flag, attrs);
/*
* Following is a work-around (a.k.a. hack) to prevent pages
if (!pages)
return NULL;
- *handle = __iommu_create_mapping(dev, pages, size);
+ *handle = __iommu_create_mapping(dev, pages, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
GFP_KERNEL);
}
-static int __dma_direction_to_prot(enum dma_data_direction dir)
-{
- int prot;
-
- switch (dir) {
- case DMA_BIDIRECTIONAL:
- prot = IOMMU_READ | IOMMU_WRITE;
- break;
- case DMA_TO_DEVICE:
- prot = IOMMU_READ;
- break;
- case DMA_FROM_DEVICE:
- prot = IOMMU_WRITE;
- break;
- default:
- prot = 0;
- }
-
- return prot;
-}
-
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
- prot = __dma_direction_to_prot(dir);
+ prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
- prot = __dma_direction_to_prot(dir);
+ prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
- prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+ prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
if (ret < 0)
__dma_page_cpu_to_dev(page, offset, size, dir);
}
-struct dma_map_ops iommu_ops = {
+const struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
.unmap_resource = arm_iommu_unmap_resource,
};
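
iommu_ops (and iommu_coherent_ops below) are installed per device. A platform driver that manages its own IOVA window would typically use the standard ARM helpers, roughly as follows (the base address and window size are placeholder values):

    struct dma_iommu_mapping *mapping;
    int ret;

    /* Carve out a 128 MiB IOVA window for this device. */
    mapping = arm_iommu_create_mapping(&platform_bus_type, 0x40000000, SZ_128M);
    if (IS_ERR(mapping))
        return PTR_ERR(mapping);

    /* Attach: from here on, the device's DMA API calls are routed
     * through the IOMMU via iommu_ops/iommu_coherent_ops. */
    ret = arm_iommu_attach_device(dev, mapping);
    if (ret) {
        arm_iommu_release_mapping(mapping);
        return ret;
    }
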
-struct dma_map_ops iommu_coherent_ops = {
+const struct dma_map_ops iommu_coherent_ops = {
.alloc = arm_coherent_iommu_alloc_attrs,
.free = arm_coherent_iommu_free_attrs,
.mmap = arm_coherent_iommu_mmap_attrs,
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
return coherent ? &iommu_coherent_ops : &iommu_ops;
}
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
-static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))