#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bug.h>

/*
 * DMA memory is always cache-coherent on i386, so the "noncoherent"
 * allocators simply alias the coherent ones.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

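/*
 * Illustrative sketch (not part of the original header): a driver would
 * typically pair these calls as below.  "my_dev" and the PAGE_SIZE buffer
 * are hypothetical.
 *
 *	dma_addr_t handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(my_dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... give "handle" to the device, use "cpu_addr" from the CPU ...
 *	dma_free_coherent(my_dev, PAGE_SIZE, cpu_addr, handle);
 */
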
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	flush_write_buffers();
	/* No IOMMU on i386: the bus address is just the physical address. */
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	/* Nothing to tear down; the mapping was only a physical address. */
	BUG_ON(!valid_dma_direction(direction));
}

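/*
 * Illustrative sketch (not part of the original header): a streaming
 * mapping for a device-bound buffer.  "my_dev", "buf" and "len" are
 * hypothetical; note that this header's dma_mapping_error() takes only
 * the address.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -EIO;
 *	... start the transfer ...
 *	dma_unmap_single(my_dev, dma, len, DMA_TO_DEVICE);
 */
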
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		/* Again no IOMMU: each entry maps to its physical address. */
		sg->dma_address = sg_phys(sg);
	}

	flush_write_buffers();
	return nents;
}

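/*
 * Illustrative sketch (not part of the original header): mapping a
 * scatterlist for a receive DMA.  "my_dev", "sgl" and "count" are
 * hypothetical.
 *
 *	int mapped = dma_map_sg(my_dev, sgl, count, DMA_FROM_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(my_dev, sgl, count, DMA_FROM_DEVICE);
 */
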
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

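/*
 * Illustrative sketch (not part of the original header): page-based
 * mapping, e.g. for data sitting in a page cache page.  "my_dev" and
 * "page" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_page(my_dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	... run the transfer ...
 *	dma_unmap_page(my_dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */
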
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	/* DMA is cache-coherent on i386, so CPU-side sync is a no-op. */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	/* Make buffered CPU writes visible before the device looks. */
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

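/*
 * Illustrative sketch (not part of the original header): reusing a
 * streaming mapping across transfers.  "my_dev", "dma" and "len" are
 * hypothetical.
 *
 *	dma_sync_single_for_cpu(my_dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU inspects the buffer ...
 *	dma_sync_single_for_device(my_dev, dma, len, DMA_FROM_DEVICE);
 *	... hand the buffer back to the device ...
 */
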
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	/* Mappings cannot fail on this non-IOMMU path. */
	return 0;
}

extern int forbid_dac;

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA allows.
	 */
	if (mask < 0x00ffffff)
		return 0;

	/* Work around chipset bugs */
	if (forbid_dac > 0 && mask > 0xffffffffULL)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

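/*
 * Illustrative sketch (not part of the original header): probe-time mask
 * negotiation for a device limited to 32-bit addressing.  "my_dev" is
 * hypothetical.
 *
 *	if (dma_set_mask(my_dev, 0xffffffffULL))
 *		return -EIO;
 */
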
static inline int
dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_is_consistent(d, h)	(1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

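/*
 * Illustrative sketch (not part of the original header): dedicating a
 * chunk of device-local memory to coherent allocations.  "my_dev" and
 * the addresses/size are hypothetical; in this era the call returns
 * nonzero on success.
 *
 *	if (!dma_declare_coherent_memory(my_dev, bus_addr, device_addr,
 *					 SZ_64K, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 *	... dma_alloc_coherent() on my_dev now draws from this region ...
 *	dma_release_declared_memory(my_dev);
 */
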
#endif