// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2020 Intel Corporation

#include <asm/cacheflush.h>

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/dma-map-ops.h>

#include "ipu-dma.h"
#include "ipu-bus.h"
#include "ipu-mmu.h"
struct vm_info {
	struct list_head list;
	struct page **pages;
	void *vaddr;
	unsigned long size;
};
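/* Look up the vm_info entry that tracks the buffer mapped at @vaddr. */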
static struct vm_info *get_vm_info(struct ipu_mmu *mmu, void *vaddr)
{
	struct vm_info *info, *save;

	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
		if (info->vaddr == vaddr)
			return info;
	}

	return NULL;
}
/* Begin of things adapted from arch/arm/mm/dma-mapping.c */
static void __dma_clear_buffer(struct page *page, size_t size,
			       unsigned long attrs)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	void *ptr = page_address(page);

	memset(ptr, 0, size);
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		clflush_cache_range(ptr, size);
}
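/*
 * Allocate the backing store for a DMA buffer as an array of 0-order
 * pages: grab the largest block that still fits the remaining count,
 * fall back to smaller orders on failure, and split whatever was
 * obtained so that every page can later be freed individually.
 */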
static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
					gfp_t gfp, unsigned long attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	pages = kvzalloc(array_size, GFP_KERNEL);
	if (!pages)
		return NULL;

	gfp |= __GFP_NOWARN;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;

error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}
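/* Free a page array built by __dma_alloc_buffer(), flushing each page first. */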
static int __dma_free_buffer(struct device *dev, struct page **pages,
			     size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	for (i = 0; i < count; i++) {
		if (pages[i]) {
			__dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
			__free_pages(pages[i], 0);
		}
	}

	kvfree(pages);
	return 0;
}

/* End of things adapted from arch/arm/mm/dma-mapping.c */
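/*
 * These sync callbacks flush the CPU cache lines covering a mapping
 * with clflush.  Since clflush both writes back and invalidates, the
 * same helpers can serve the for-CPU and for-device directions alike
 * (see ipu_dma_ops at the end of this file).
 */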
static void ipu_dma_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle,
					size_t size,
					enum dma_data_direction dir)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	unsigned long pa = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info,
						dma_handle);

	clflush_cache_range(phys_to_virt(pa), size);
}
static void ipu_dma_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sglist,
				    int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
}
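/*
 * ipu_dma_alloc() builds a buffer in four steps: reserve an IOVA range,
 * allocate backing pages, map every page into the IPU MMU at the
 * reserved addresses, and vmap the pages to get a contiguous kernel
 * virtual view.  Failure at any step unwinds the earlier ones.
 */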
static void *ipu_dma_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct page **pages;
	struct iova *iova;
	struct vm_info *info;
	unsigned long count;
	int i;
	int rval;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	size = PAGE_ALIGN(size);
	count = size >> PAGE_SHIFT;

	iova = alloc_iova(&mmu->dmap->iovad, count,
			  dma_get_mask(dev) >> PAGE_SHIFT, 0);
	if (!iova)
		goto out_kfree;

	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		goto out_free_iova;

	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
		rval = ipu_mmu_map(mmu->dmap->mmu_info,
				   (iova->pfn_lo + i) << PAGE_SHIFT,
				   page_to_phys(pages[i]), PAGE_SIZE);
		if (rval)
			goto out_unmap;
	}

	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!info->vaddr)
		goto out_unmap;

	*dma_handle = iova->pfn_lo << PAGE_SHIFT;

	mmu->tlb_invalidate(mmu);

	info->pages = pages;
	info->size = size;
	list_add(&info->list, &mmu->vma_list);

	return info->vaddr;

out_unmap:
	for (i--; i >= 0; i--) {
		ipu_mmu_unmap(mmu->dmap->mmu_info,
			      (iova->pfn_lo + i) << PAGE_SHIFT, PAGE_SIZE);
	}
	__dma_free_buffer(dev, pages, size, attrs);
out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
out_kfree:
	kfree(info);

	return NULL;
}
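/*
 * ipu_dma_free() reverses ipu_dma_alloc(): drop the vmap, unmap the
 * IOVA range from the IPU MMU, release the pages and the IOVA, and
 * finally invalidate the IPU TLB.
 */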
static void ipu_dma_free(struct device *dev, size_t size, void *vaddr,
			 dma_addr_t dma_handle,
			 unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct page **pages;
	struct vm_info *info;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      dma_handle >> PAGE_SHIFT);

	if (WARN_ON(!iova))
		return;

	info = get_vm_info(mmu, vaddr);
	if (WARN_ON(!info))
		return;

	if (WARN_ON(!info->vaddr))
		return;

	if (WARN_ON(!info->pages))
		return;

	list_del(&info->list);

	size = PAGE_ALIGN(size);
	pages = info->pages;

	vunmap(vaddr);

	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);

	__dma_free_buffer(dev, pages, size, attrs);

	__free_iova(&mmu->dmap->iovad, iova);

	mmu->tlb_invalidate(mmu);

	kfree(info);
}
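/*
 * Support userspace mmap() of a buffer from ipu_dma_alloc() by
 * inserting the already-allocated pages into the caller's VMA one
 * page at a time.
 */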
static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *addr, dma_addr_t iova, size_t size,
			unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct vm_info *info;
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t i;

	info = get_vm_info(mmu, addr);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	if (size > info->size)
		return -EFAULT;

	for (i = 0; i < count; i++)
		vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
			       info->pages[i]);

	return 0;
}
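/*
 * Unmapping an sg list flushes the CPU caches first (unless the caller
 * set DMA_ATTR_SKIP_CPU_SYNC), then releases the single IOVA range
 * that ipu_dma_map_sg() reserved for the whole list.
 */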
static void ipu_dma_unmap_sg(struct device *dev,
			     struct scatterlist *sglist,
			     int nents, enum dma_data_direction dir,
			     unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      sg_dma_address(sglist) >> PAGE_SHIFT);

	if (!nents)
		return;

	if (WARN_ON(!iova))
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);

	mmu->tlb_invalidate(mmu);

	__free_iova(&mmu->dmap->iovad, iova);
}
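/*
 * Map an sg list through the IPU MMU: reserve one IOVA range large
 * enough for all page-aligned entries, map the entries into it back
 * to back, and store the resulting DMA address in each entry.
 */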
static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct scatterlist *sg;
	struct iova *iova;
	size_t size = 0;
	u32 iova_addr;
	int i;

	for_each_sg(sglist, sg, nents, i)
		size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;

	dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size);

	iova = alloc_iova(&mmu->dmap->iovad, size,
			  dma_get_mask(dev) >> PAGE_SHIFT, 0);
	if (!iova)
		return 0;

	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
		iova->pfn_hi);

	iova_addr = iova->pfn_lo;

	for_each_sg(sglist, sg, nents, i) {
		int rval;

		dev_dbg(dev, "mapping entry %d: iova 0x%8.8x, phy 0x%16.16llx\n",
			i, iova_addr << PAGE_SHIFT,
			(unsigned long long)page_to_phys(sg_page(sg)));
		rval = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
				   page_to_phys(sg_page(sg)),
				   PAGE_ALIGN(sg->length));
		if (rval)
			goto out_fail;

		sg_dma_address(sg) = iova_addr << PAGE_SHIFT;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg_dma_len(sg) = sg->length;
#endif /* CONFIG_NEED_SG_DMA_LENGTH */

		iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
	}

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

	mmu->tlb_invalidate(mmu);

	return nents;

out_fail:
	ipu_dma_unmap_sg(dev, sglist, i, dir, attrs);

	return 0;
}
/*
 * Create a scatter-list for the already allocated DMA buffer
 */
static int ipu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size,
			       unsigned long attrs)
{
	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
	struct vm_info *info;
	int n_pages;
	int ret;

	info = get_vm_info(mmu, cpu_addr);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (WARN_ON(!info->pages))
		return -ENOMEM;

	n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		dev_dbg(dev, "failed to create an sg table for the IPU buffer\n");

	return ret;
}
const struct dma_map_ops ipu_dma_ops = {
	.alloc = ipu_dma_alloc,
	.free = ipu_dma_free,
	.mmap = ipu_dma_mmap,
	.map_sg = ipu_dma_map_sg,
	.unmap_sg = ipu_dma_unmap_sg,
	.sync_single_for_cpu = ipu_dma_sync_single_for_cpu,
	.sync_single_for_device = ipu_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu,
	.sync_sg_for_device = ipu_dma_sync_sg_for_cpu,
	.get_sgtable = ipu_dma_get_sgtable,
};