#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			       struct iommu_ops *iommu, bool coherent);
+			       const struct iommu_ops *iommu, bool coherent);
#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
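/*
 * [Editor's illustration, not part of the patch] Why the const-ness
 * matters: once every consumer takes "const struct iommu_ops *", a
 * provider can declare its ops table const so it lives in read-only
 * data. A minimal userspace sketch of that pattern; every name below is
 * hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_ops {
	int (*map)(unsigned long iova, unsigned long pa, size_t size);
};

static int demo_map(unsigned long iova, unsigned long pa, size_t size)
{
	printf("map iova=%#lx -> pa=%#lx (%zu bytes)\n", iova, pa, size);
	return 0;
}

/* const: the table ends up in .rodata and cannot be modified at runtime */
static const struct demo_ops demo_ops_table = {
	.map = demo_map,
};

/* a consumer that, like the patched hooks, takes a pointer-to-const */
static void demo_setup(const struct demo_ops *ops)
{
	ops->map(0x1000, 0x80001000, 4096);
}

int main(void)
{
	demo_setup(&demo_ops_table);
	return 0;
}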
}
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    struct iommu_ops *iommu)
+				    const struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;
#else
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    struct iommu_ops *iommu)
+				    const struct iommu_ops *iommu)
{
	return false;
}
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			struct iommu_ops *iommu, bool coherent)
+			const struct iommu_ops *iommu, bool coherent)
{
	struct dma_map_ops *dma_ops;
}
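/*
 * [Editor's illustration] The #else branch above is the usual kernel
 * idiom for configured-out features: a stub with the same signature that
 * simply reports failure, so callers need no #ifdef of their own. A
 * userspace sketch (build with or without -DHAVE_IOMMU); names are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef HAVE_IOMMU
static bool setup_iommu_ops(const char *dev)
{
	printf("%s: IOMMU-backed DMA ops installed\n", dev);
	return true;
}
#else
/* feature disabled: trivial stub keeps the call site unconditional */
static bool setup_iommu_ops(const char *dev)
{
	(void)dev;
	return false;
}
#endif

int main(void)
{
	if (!setup_iommu_ops("dev0"))
		printf("dev0: falling back to plain DMA ops\n");
	return 0;
}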
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			struct iommu_ops *iommu, bool coherent);
+			const struct iommu_ops *iommu, bool coherent);
#define arch_setup_dma_ops arch_setup_dma_ops
#ifdef CONFIG_IOMMU_DMA
#else
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				  struct iommu_ops *iommu)
+				  const struct iommu_ops *iommu)
{ }
#endif /* CONFIG_IOMMU_DMA */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			struct iommu_ops *iommu, bool coherent)
+			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->archdata.dma_ops)
		dev->archdata.dma_ops = &swiotlb_dma_ops;
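/*
 * [Editor's illustration] The two lines above are a "default if unset"
 * pattern: swiotlb ops are installed only when nothing chose ops earlier,
 * so an explicit earlier assignment is never clobbered. Sketch below;
 * names are hypothetical.
 */
#include <stdio.h>

struct demo_dma_ops { const char *name; };

static const struct demo_dma_ops default_ops = { .name = "swiotlb-like" };
static const struct demo_dma_ops special_ops = { .name = "iommu-backed" };

struct demo_dev { const struct demo_dma_ops *ops; };

static void demo_setup_dma_ops(struct demo_dev *dev)
{
	if (!dev->ops)			/* keep ops installed earlier */
		dev->ops = &default_ops;
}

int main(void)
{
	struct demo_dev a = { .ops = NULL };
	struct demo_dev b = { .ops = &special_ops };

	demo_setup_dma_ops(&a);		/* gets the default */
	demo_setup_dma_ops(&b);		/* keeps its choice  */
	printf("a: %s, b: %s\n", a.ops->name, b.ops->name);
	return 0;
}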
struct of_iommu_node {
	struct list_head list;
	struct device_node *np;
-	struct iommu_ops *ops;
+	const struct iommu_ops *ops;
};
static LIST_HEAD(of_iommu_list);
static DEFINE_SPINLOCK(of_iommu_lock);
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
{
	struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	spin_unlock(&of_iommu_lock);
}
-struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
	struct of_iommu_node *node;
-	struct iommu_ops *ops = NULL;
+	const struct iommu_ops *ops = NULL;

	spin_lock(&of_iommu_lock);
	list_for_each_entry(node, &of_iommu_list, list)
	return ops;
}
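/*
 * [Editor's illustration] of_iommu_set_ops()/of_iommu_get_ops() form a
 * small lock-protected registry: each list node binds a device_node to a
 * const ops pointer. Userspace sketch with a pthread mutex standing in
 * for the spinlock; all names are hypothetical. The lookup compares keys
 * by pointer identity, just as the kernel code compares device_node
 * pointers.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ops { const char *name; };

struct demo_reg {
	struct demo_reg *next;
	const void *key;		/* stands in for device_node * */
	const struct demo_ops *ops;
};

static struct demo_reg *demo_list;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_set_ops(const void *key, const struct demo_ops *ops)
{
	struct demo_reg *n = calloc(1, sizeof(*n));

	if (!n)
		return;
	n->key = key;
	n->ops = ops;
	pthread_mutex_lock(&demo_lock);
	n->next = demo_list;
	demo_list = n;
	pthread_mutex_unlock(&demo_lock);
}

static const struct demo_ops *demo_get_ops(const void *key)
{
	const struct demo_ops *ops = NULL;
	struct demo_reg *n;

	pthread_mutex_lock(&demo_lock);
	for (n = demo_list; n; n = n->next) {
		if (n->key == key) {
			ops = n->ops;
			break;
		}
	}
	pthread_mutex_unlock(&demo_lock);
	return ops;
}

int main(void)
{
	static const struct demo_ops smmu_ops = { .name = "smmu-like" };
	static const char node[] = "/soc/iommu@40000000";
	const struct demo_ops *found;

	demo_set_ops(node, &smmu_ops);
	found = demo_get_ops(node);
	printf("lookup: %s\n", found ? found->name : "none");
	return 0;
}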
-struct iommu_ops *of_iommu_configure(struct device *dev,
-				     struct device_node *master_np)
+const struct iommu_ops *of_iommu_configure(struct device *dev,
+					   struct device_node *master_np)
{
	struct of_phandle_args iommu_spec;
	struct device_node *np;
-	struct iommu_ops *ops = NULL;
+	const struct iommu_ops *ops = NULL;
	int idx = 0;

	/*
	int ret;
	bool coherent;
	unsigned long offset;
-	struct iommu_ops *iommu;
+	const struct iommu_ops *iommu;

	/*
	 * Set default coherent_dma_mask to 32 bit. Drivers are expected to
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-				      u64 size, struct iommu_ops *iommu,
+				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif
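/*
 * [Editor's illustration] This #ifndef stub pairs with the
 * "#define arch_setup_dma_ops arch_setup_dma_ops" lines in the arch
 * headers: an architecture that implements the hook also defines a macro
 * of the same name, and generic code supplies the empty fallback only
 * when no such macro is visible. Self-recursive macros do not expand, so
 * calls still reach the real function. Minimal sketch (demo_hook is
 * hypothetical):
 */
#include <stdio.h>

/* --- what an "arch header" contributes when it has an implementation --- */
#ifdef ARCH_HAS_DEMO_HOOK
static void demo_hook(void) { printf("arch-specific hook\n"); }
#define demo_hook demo_hook	/* advertise that the hook exists */
#endif

/* --- generic fallback, used only when no arch version was declared --- */
#ifndef demo_hook
static void demo_hook(void) { }	/* empty stub */
#endif

int main(void)
{
	demo_hook();	/* a self-referential macro expands to itself */
	return 0;
}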
			     size_t *size);

extern void of_iommu_init(void);
-extern struct iommu_ops *of_iommu_configure(struct device *dev,
+extern const struct iommu_ops *of_iommu_configure(struct device *dev,
					struct device_node *master_np);
#else
}
static inline void of_iommu_init(void) { }
-static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
					 struct device_node *master_np)
{
	return NULL;
}
#endif /* CONFIG_OF_IOMMU */
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
-struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
extern struct of_device_id __iommu_of_table;
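/*
 * [Editor's illustration] End-to-end shape after the patch: a driver's
 * const ops table flows from of_iommu_configure() into
 * arch_setup_dma_ops() without any stage needing a mutable pointer.
 * Compressed userspace sketch; every name is hypothetical.
 */
#include <stdio.h>

struct demo_ops { const char *name; };

static const struct demo_ops driver_ops = { .name = "iommu-driver" };

/* models of_iommu_configure(): may find ops for the device, may not */
static const struct demo_ops *demo_configure(int behind_iommu)
{
	return behind_iommu ? &driver_ops : NULL;
}

/* models arch_setup_dma_ops(): only ever reads through the pointer */
static void demo_setup_dma(const struct demo_ops *ops)
{
	printf("dma ops: %s\n", ops ? ops->name : "direct");
}

int main(void)
{
	demo_setup_dma(demo_configure(1));
	demo_setup_dma(demo_configure(0));
	return 0;
}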