// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2021 Intel Corporation

#include <asm/cacheflush.h>

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/dma-map-ops.h>

#include "ipu-dma.h"
#include "ipu-bus.h"
#include "ipu-mmu.h"

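/*
 * Bookkeeping for a coherent allocation made by ipu_dma_alloc(): the
 * backing page array, the vmap()ed kernel address and the page-aligned
 * size, linked into the owning MMU's vma_list.
 */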
struct vm_info {
	struct list_head list;
	struct page **pages;
	void *vaddr;
	unsigned long size;
};

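/* Look up the vm_info for a CPU address returned by ipu_dma_alloc() */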
static struct vm_info *get_vm_info(struct ipu_mmu *mmu, void *vaddr)
{
	struct vm_info *info, *save;

	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
		if (info->vaddr == vaddr)
			return info;
	}

	return NULL;
}

/* Start of code adapted from arch/arm/mm/dma-mapping.c */
static void __dma_clear_buffer(struct page *page, size_t size,
			       unsigned long attrs)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	void *ptr = page_address(page);

	memset(ptr, 0, size);
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		clflush_cache_range(ptr, size);
}

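/*
 * Allocate enough 0-order pages to back @size bytes and return them as
 * a page array.  Higher-order allocations are tried first and split
 * into single pages, falling back to lower orders when memory is
 * fragmented.
 */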
static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
					gfp_t gfp,
					unsigned long attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	pages = kvzalloc(array_size, GFP_KERNEL);
	if (!pages)
		return NULL;

	gfp |= __GFP_NOWARN;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

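/* Clear and free a page array allocated by __dma_alloc_buffer() */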
static int __dma_free_buffer(struct device *dev, struct page **pages,
			     size_t size,
			     unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	for (i = 0; i < count; i++) {
		if (pages[i]) {
			__dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
			__free_pages(pages[i], 0);
		}
	}

	kvfree(pages);
	return 0;
}

/* End of code adapted from arch/arm/mm/dma-mapping.c */

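/*
 * Sync by flushing CPU caches with clflush; the IOVA is first walked
 * back to a physical address through the IPU MMU page tables.
 */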
static void ipu_dma_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle,
					size_t size,
					enum dma_data_direction dir)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	unsigned long pa = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info,
						dma_handle);

	clflush_cache_range(phys_to_virt(pa), size);
}

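/* Flush CPU caches for every page backing the scatterlist */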
static void ipu_dma_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sglist,
				    int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
}

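/*
 * Coherent allocation: reserve an IOVA range, allocate backing pages,
 * map them one page at a time into the IPU MMU and vmap() them into a
 * contiguous CPU virtual address.  The allocation is recorded on
 * mmu->vma_list so that mmap(), get_sgtable() and free() can find it.
 */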
static void *ipu_dma_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct page **pages;
	struct iova *iova;
	struct vm_info *info;
	int i;
	int rval;
	unsigned long count;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	size = PAGE_ALIGN(size);
	count = size >> PAGE_SHIFT;

	iova = alloc_iova(&mmu->dmap->iovad, count,
			  dma_get_mask(dev) >> PAGE_SHIFT, 0);
	if (!iova)
		goto out_kfree;

	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		goto out_free_iova;

	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
		rval = ipu_mmu_map(mmu->dmap->mmu_info,
				   (iova->pfn_lo + i) << PAGE_SHIFT,
				   page_to_phys(pages[i]), PAGE_SIZE);
		if (rval)
			goto out_unmap;
	}

	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!info->vaddr)
		goto out_unmap;

	*dma_handle = iova->pfn_lo << PAGE_SHIFT;

	info->pages = pages;
	info->size = size;
	list_add(&info->list, &mmu->vma_list);

	return info->vaddr;

out_unmap:
	for (i--; i >= 0; i--) {
		ipu_mmu_unmap(mmu->dmap->mmu_info,
			      (iova->pfn_lo + i) << PAGE_SHIFT, PAGE_SIZE);
	}
	__dma_free_buffer(dev, pages, size, attrs);

out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
out_kfree:
	kfree(info);

	return NULL;
}

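/*
 * Tear down an ipu_dma_alloc() allocation: drop the CPU mapping, unmap
 * the IOVA range from the IPU MMU, free the backing pages, invalidate
 * the IPU TLB and release the IOVA.
 */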
static void ipu_dma_free(struct device *dev, size_t size, void *vaddr,
			 dma_addr_t dma_handle,
			 unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct page **pages;
	struct vm_info *info;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      dma_handle >> PAGE_SHIFT);

	if (WARN_ON(!iova))
		return;

	info = get_vm_info(mmu, vaddr);
	if (WARN_ON(!info))
		return;

	if (WARN_ON(!info->vaddr))
		return;

	if (WARN_ON(!info->pages))
		return;

	list_del(&info->list);

	size = PAGE_ALIGN(size);

	pages = info->pages;

	vunmap(vaddr);

	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);

	__dma_free_buffer(dev, pages, size, attrs);

	mmu->tlb_invalidate(mmu);

	__free_iova(&mmu->dmap->iovad, iova);

	kfree(info);
}

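/*
 * Map a coherent allocation into user space by inserting its backing
 * pages into the VMA one by one.
 */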
static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *addr, dma_addr_t iova, size_t size,
			unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct vm_info *info;
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t i;

	info = get_vm_info(mmu, addr);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	if (size > info->size)
		return -EFAULT;

	for (i = 0; i < count; i++)
		vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
			       info->pages[i]);

	return 0;
}

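/*
 * Undo ipu_dma_map_sg(): unmap the whole contiguous IOVA range,
 * invalidate the IPU TLB and release the IOVA.
 */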
static void ipu_dma_unmap_sg(struct device *dev,
			     struct scatterlist *sglist,
			     int nents, enum dma_data_direction dir,
			     unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      sg_dma_address(sglist) >> PAGE_SHIFT);

	if (!nents)
		return;

	if (WARN_ON(!iova))
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);

	mmu->tlb_invalidate(mmu);

	__free_iova(&mmu->dmap->iovad, iova);
}

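/*
 * Map a scatterlist for DMA: a single IOVA range covering all entries
 * is reserved and each entry is mapped back to back into it, so the
 * device sees one contiguous IOVA region.
 */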
static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct scatterlist *sg;
	struct iova *iova;
	size_t size = 0;
	u32 iova_addr;
	int i;

	for_each_sg(sglist, sg, nents, i)
		size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;

	dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size);

	iova = alloc_iova(&mmu->dmap->iovad, size,
			  dma_get_mask(dev) >> PAGE_SHIFT, 0);
	if (!iova)
		return 0;

	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
		iova->pfn_hi);

	iova_addr = iova->pfn_lo;

	for_each_sg(sglist, sg, nents, i) {
		int rval;

		dev_dbg(dev, "mapping entry %d: iova 0x%8.8x,phy 0x%16.16llx\n",
			i, iova_addr << PAGE_SHIFT,
			(unsigned long long)page_to_phys(sg_page(sg)));
		rval = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
				   page_to_phys(sg_page(sg)),
				   PAGE_ALIGN(sg->length));
		if (rval)
			goto out_fail;
		sg_dma_address(sg) = iova_addr << PAGE_SHIFT;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg_dma_len(sg) = sg->length;
#endif /* CONFIG_NEED_SG_DMA_LENGTH */

		iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
	}

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

	mmu->tlb_invalidate(mmu);

	return nents;

out_fail:
	ipu_dma_unmap_sg(dev, sglist, i, dir, attrs);

	return 0;
}

/*
 * Create scatter-list for the already allocated DMA buffer
 */
static int ipu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size,
			       unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct vm_info *info;
	int n_pages;
	int ret = 0;

	info = get_vm_info(mmu, cpu_addr);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (WARN_ON(!info->pages))
		return -ENOMEM;

	n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		dev_dbg(dev, "IPU get sgt table fail\n");

	return ret;
}

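/*
 * DMA mapping ops for devices behind the IPU MMU.  The *_for_device
 * sync callbacks reuse the *_for_cpu implementations: clflush both
 * writes back and invalidates the affected cache lines, which covers
 * either direction.
 */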
const struct dma_map_ops ipu_dma_ops = {
	.alloc = ipu_dma_alloc,
	.free = ipu_dma_free,
	.mmap = ipu_dma_mmap,
	.map_sg = ipu_dma_map_sg,
	.unmap_sg = ipu_dma_unmap_sg,
	.sync_single_for_cpu = ipu_dma_sync_single_for_cpu,
	.sync_single_for_device = ipu_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu,
	.sync_sg_for_device = ipu_dma_sync_sg_for_cpu,
	.get_sgtable = ipu_dma_get_sgtable,
};