drivers/media/pci/intel/ipu-dma.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2020 Intel Corporation

#include <asm/cacheflush.h>

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/dma-map-ops.h>

#include "ipu-dma.h"
#include "ipu-bus.h"
#include "ipu-mmu.h"

struct vm_info {
        struct list_head list;
        struct page **pages;
        void *vaddr;
        unsigned long size;
};

static struct vm_info *get_vm_info(struct ipu_mmu *mmu, void *vaddr)
{
        struct vm_info *info, *save;

        list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
                if (info->vaddr == vaddr)
                        return info;
        }

        return NULL;
}

/* Begin of things adapted from arch/arm/mm/dma-mapping.c */
static void __dma_clear_buffer(struct page *page, size_t size,
                               unsigned long attrs)
{
        /*
         * Ensure that the allocated pages are zeroed, and that any data
         * lurking in the kernel direct-mapped region is invalidated.
         */
        void *ptr = page_address(page);

        memset(ptr, 0, size);
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                clflush_cache_range(ptr, size);
}

static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
                                        gfp_t gfp,
                                        unsigned long attrs)
{
        struct page **pages;
        int count = size >> PAGE_SHIFT;
        int array_size = count * sizeof(struct page *);
        int i = 0;

        pages = kvzalloc(array_size, GFP_KERNEL);
        if (!pages)
                return NULL;

        gfp |= __GFP_NOWARN;

        while (count) {
                int j, order = __fls(count);

                pages[i] = alloc_pages(gfp, order);
                while (!pages[i] && order)
                        pages[i] = alloc_pages(gfp, --order);
                if (!pages[i])
                        goto error;

                if (order) {
                        split_page(pages[i], order);
                        j = 1 << order;
                        while (--j)
                                pages[i + j] = pages[i] + j;
                }

                __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
                i += 1 << order;
                count -= 1 << order;
        }

        return pages;
error:
        while (i--)
                if (pages[i])
                        __free_pages(pages[i], 0);
        kvfree(pages);
        return NULL;
}
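
/*
 * Worked example (editorial, not from the original source): for a 13-page
 * request, __dma_alloc_buffer() tries progressively smaller orders:
 * __fls(13) = 3, so it first allocates an order-3 block (8 pages), then
 * __fls(5) = 2 gives an order-2 block (4 pages), and finally a single
 * order-0 page. Each high-order block is split with split_page(), so the
 * result is a flat array of 13 individual struct page pointers that
 * __dma_free_buffer() can release one page at a time.
 */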

static int __dma_free_buffer(struct device *dev, struct page **pages,
                             size_t size,
                             unsigned long attrs)
{
        int count = size >> PAGE_SHIFT;
        int i;

        for (i = 0; i < count; i++) {
                if (pages[i]) {
                        __dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
                        __free_pages(pages[i], 0);
                }
        }

        kvfree(pages);
        return 0;
}

/* End of things adapted from arch/arm/mm/dma-mapping.c */

static void ipu_dma_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dma_handle,
                                        size_t size,
                                        enum dma_data_direction dir)
{
        struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
        unsigned long pa = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info,
                                                dma_handle);

        clflush_cache_range(phys_to_virt(pa), size);
}

static void ipu_dma_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sglist,
                                    int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sglist, sg, nents, i)
                clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
}
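
/*
 * Note (editorial): the use of clflush_cache_range() for both directions
 * suggests the IPU is not cache coherent with the CPU, so "for_cpu" and
 * "for_device" syncs reduce to the same cache flush of the backing memory;
 * ipu_dma_ops at the bottom of this file accordingly points the
 * *_for_device hooks at these *_for_cpu handlers.
 */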

static void *ipu_dma_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t gfp,
                           unsigned long attrs)
{
        struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
        struct page **pages;
        struct iova *iova;
        struct vm_info *info;
        int i;
        int rval;
        unsigned long count;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return NULL;

        size = PAGE_ALIGN(size);
        count = size >> PAGE_SHIFT;

        iova = alloc_iova(&mmu->dmap->iovad, count,
                          dma_get_mask(dev) >> PAGE_SHIFT, 0);
        if (!iova) {
                kfree(info);
                return NULL;
        }

        pages = __dma_alloc_buffer(dev, size, gfp, attrs);
        if (!pages)
                goto out_free_iova;

        for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
                rval = ipu_mmu_map(mmu->dmap->mmu_info,
                                   (iova->pfn_lo + i) << PAGE_SHIFT,
                                   page_to_phys(pages[i]), PAGE_SIZE);
                if (rval)
                        goto out_unmap;
        }

        info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
        if (!info->vaddr)
                goto out_unmap;

        *dma_handle = iova->pfn_lo << PAGE_SHIFT;

        mmu->tlb_invalidate(mmu);

        info->pages = pages;
        info->size = size;
        list_add(&info->list, &mmu->vma_list);

        return info->vaddr;

out_unmap:
        for (i--; i >= 0; i--) {
                ipu_mmu_unmap(mmu->dmap->mmu_info,
                              (iova->pfn_lo + i) << PAGE_SHIFT, PAGE_SIZE);
        }
        __dma_free_buffer(dev, pages, size, attrs);

out_free_iova:
        __free_iova(&mmu->dmap->iovad, iova);
        kfree(info);

        return NULL;
}
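
/*
 * Usage sketch (editorial, not part of the original file): drivers on the
 * IPU bus do not call ipu_dma_alloc() directly; they go through the generic
 * DMA API, which dispatches to ipu_dma_ops for the device. Something along
 * these lines exercises the alloc/free pair above ("ipu_dev" and "fw_size"
 * are placeholders):
 *
 *      dma_addr_t iova;
 *      void *cpu;
 *
 *      cpu = dma_alloc_attrs(&ipu_dev->dev, fw_size, &iova, GFP_KERNEL, 0);
 *      if (!cpu)
 *              return -ENOMEM;
 *      // ... hand "iova" to the IPU, access the buffer through "cpu" ...
 *      dma_free_attrs(&ipu_dev->dev, fw_size, cpu, iova, 0);
 */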

static void ipu_dma_free(struct device *dev, size_t size, void *vaddr,
                         dma_addr_t dma_handle,
                         unsigned long attrs)
{
        struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
        struct page **pages;
        struct vm_info *info;
        struct iova *iova = find_iova(&mmu->dmap->iovad,
                                      dma_handle >> PAGE_SHIFT);

        if (WARN_ON(!iova))
                return;

        info = get_vm_info(mmu, vaddr);
        if (WARN_ON(!info))
                return;

        if (WARN_ON(!info->vaddr))
                return;

        if (WARN_ON(!info->pages))
                return;

        list_del(&info->list);

        size = PAGE_ALIGN(size);

        pages = info->pages;

        vunmap(vaddr);

        ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
                      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);

        __dma_free_buffer(dev, pages, size, attrs);

        __free_iova(&mmu->dmap->iovad, iova);

        mmu->tlb_invalidate(mmu);

        kfree(info);
}

static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *addr, dma_addr_t iova, size_t size,
                        unsigned long attrs)
{
        struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
        struct vm_info *info;
        size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t i;

        info = get_vm_info(mmu, addr);
        if (!info)
                return -EFAULT;

        if (!info->vaddr)
                return -EFAULT;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        if (size > info->size)
                return -EFAULT;

        for (i = 0; i < count; i++)
                vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
                               info->pages[i]);

        return 0;
}
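
/*
 * Usage sketch (editorial): ipu_dma_mmap() is reached through
 * dma_mmap_attrs() (or dma_mmap_coherent()), typically from a character
 * device or V4L2 mmap handler, e.g. with a hypothetical "buf" holding the
 * results of an earlier dma_alloc_attrs() on the same IPU device:
 *
 *      static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return dma_mmap_attrs(buf->dev, vma, buf->cpu, buf->iova,
 *                                    buf->size, 0);
 *      }
 *
 * Note that the loop above ignores vm_insert_page() failures, so a partial
 * mapping would still report success.
 */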

static void ipu_dma_unmap_sg(struct device *dev,
                             struct scatterlist *sglist,
                             int nents, enum dma_data_direction dir,
                             unsigned long attrs)
{
        struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
        struct iova *iova = find_iova(&mmu->dmap->iovad,
                                      sg_dma_address(sglist) >> PAGE_SHIFT);

        if (!nents)
                return;

        if (WARN_ON(!iova))
                return;

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

        ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
                      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);

        mmu->tlb_invalidate(mmu);

        __free_iova(&mmu->dmap->iovad, iova);
}

static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nents, enum dma_data_direction dir,
                          unsigned long attrs)
{
        struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
        struct scatterlist *sg;
        struct iova *iova;
        size_t size = 0;
        u32 iova_addr;
        int i;

        for_each_sg(sglist, sg, nents, i)
                size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;

        dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size);

        iova = alloc_iova(&mmu->dmap->iovad, size,
                          dma_get_mask(dev) >> PAGE_SHIFT, 0);
        if (!iova)
                return 0;

        dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
                iova->pfn_hi);

        iova_addr = iova->pfn_lo;

        for_each_sg(sglist, sg, nents, i) {
                int rval;

                dev_dbg(dev, "mapping entry %d: iova 0x%8.8x,phy 0x%16.16llx\n",
                        i, iova_addr << PAGE_SHIFT,
                        (unsigned long long)page_to_phys(sg_page(sg)));
                rval = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
                                   page_to_phys(sg_page(sg)),
                                   PAGE_ALIGN(sg->length));
                if (rval)
                        goto out_fail;
                sg_dma_address(sg) = iova_addr << PAGE_SHIFT;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg_dma_len(sg) = sg->length;
#endif /* CONFIG_NEED_SG_DMA_LENGTH */

                iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
        }

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

        mmu->tlb_invalidate(mmu);

        return nents;

out_fail:
        ipu_dma_unmap_sg(dev, sglist, i, dir, attrs);

        return 0;
}
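
/*
 * Usage sketch (editorial): callers reach ipu_dma_map_sg() through the
 * generic dma_map_sg() helper. Each entry is mapped at the next page-aligned
 * offset inside one contiguous IOVA allocation, so a three-entry list with
 * segment lengths of 4 KiB, 16 KiB and 8 KiB lands at iova, iova + 4 KiB and
 * iova + 20 KiB. A hypothetical caller ("ipu_dev", "sgt" are placeholders):
 *
 *      int n = dma_map_sg(&ipu_dev->dev, sgt->sgl, sgt->nents,
 *                         DMA_BIDIRECTIONAL);
 *      if (!n)
 *              return -ENOMEM;
 *      // sg_dma_address(sgt->sgl) is the start of the contiguous range
 *      dma_unmap_sg(&ipu_dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 */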

/*
 * Create scatter-list for the already allocated DMA buffer
 */
static int ipu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t handle, size_t size,
                               unsigned long attrs)
{
        struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
        struct vm_info *info;
        int n_pages;
        int ret = 0;

        info = get_vm_info(mmu, cpu_addr);
        if (!info)
                return -EFAULT;

        if (!info->vaddr)
                return -EFAULT;

        if (WARN_ON(!info->pages))
                return -ENOMEM;

        n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
                                        GFP_KERNEL);
        if (ret)
                dev_dbg(dev, "IPU get sgt table fail\n");

        return ret;
}
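
/*
 * Usage sketch (editorial): this is the .get_sgtable hook behind
 * dma_get_sgtable_attrs(), which lets an exporter (dma-buf, videobuf2, ...)
 * rebuild an sg_table for a buffer obtained from ipu_dma_alloc(). "cpu",
 * "iova" and "size" are assumed to come from an earlier dma_alloc_attrs()
 * on the same device:
 *
 *      struct sg_table sgt;
 *
 *      if (dma_get_sgtable_attrs(dev, &sgt, cpu, iova, size, 0))
 *              return -ENOMEM;
 *      // ... use sgt, then release it ...
 *      sg_free_table(&sgt);
 */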

const struct dma_map_ops ipu_dma_ops = {
        .alloc = ipu_dma_alloc,
        .free = ipu_dma_free,
        .mmap = ipu_dma_mmap,
        .map_sg = ipu_dma_map_sg,
        .unmap_sg = ipu_dma_unmap_sg,
        .sync_single_for_cpu = ipu_dma_sync_single_for_cpu,
        .sync_single_for_device = ipu_dma_sync_single_for_cpu,
        .sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu,
        .sync_sg_for_device = ipu_dma_sync_sg_for_cpu,
        .get_sgtable = ipu_dma_get_sgtable,
};
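
/*
 * Editorial note: ipu_dma_ops is not installed here. Given the
 * to_ipu_bus_device() calls above, the expectation is that the IPU bus code
 * (ipu-bus.c) attaches this table to its child devices, e.g. via
 * set_dma_ops(), so that the generic dma_*() helpers used by the rest of
 * the IPU driver end up in the functions in this file.
 */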