#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, void *ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, char *ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};
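
/*
 * Illustrative sketch (not part of the original header): a DMA backend
 * such as gart, swiotlb, or nommu fills in one of these tables and points
 * the global dma_ops at it during boot.  The example_* names below are
 * hypothetical placeholders for a backend's implementations:
 *
 *	static const struct dma_mapping_ops example_dma_ops = {
 *		.mapping_error	= example_mapping_error,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.is_phys	= 1,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */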
extern dma_addr_t bad_dma_address;
extern const struct dma_mapping_ops *dma_ops;
extern int iommu_merge;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);
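
/*
 * Illustrative sketch (driver-side usage assumed for this example):
 * allocating a page-sized coherent buffer that CPU and device can share
 * without explicit sync calls, where dev is the caller's struct device:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	... hand bus to the device, use cpu from the kernel ...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 */
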
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_single(dev, addr, size, direction);
}
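
/*
 * Illustrative sketch of the streaming mapping pattern these helpers
 * implement (dev, buf, and len are assumptions of the example):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... point the device at handle and start the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
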
#define dma_map_page(dev,page,offset,size,dir) \
	dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}
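
/*
 * Illustrative sketch (assumed usage): if the CPU must look at a buffer
 * while it stays mapped DMA_FROM_DEVICE, the access is bracketed by the
 * two sync calls above; handle and len come from the earlier mapping:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
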
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
						   offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
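
/*
 * Illustrative sketch (driver-side assumption): with iommu_merge the IOMMU
 * may coalesce scatterlist entries, so the device is programmed with the
 * returned count, while dma_unmap_sg() takes the original nents:
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 *	if (count == 0)
 *		return -EIO;
 *	... program the device with count (possibly merged) entries ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */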

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);
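
/*
 * Illustrative sketch (assumed probe-time usage): a device limited to
 * 32-bit addressing narrows its mask before creating any mappings, using
 * the DMA_32BIT_MASK constant from <linux/dma-mapping.h>:
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */
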
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */