// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation
 * Copyright 2018 Google LLC.
 *
 * Author: Tomasz Figa <tfiga@chromium.org>
 * Author: Yong Zhi <yong.zhi@intel.com>
 */

#include <linux/vmalloc.h>

#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"

/*
 * Free a buffer allocated by imgu_dmamap_alloc_buffer()
 */
static void imgu_dmamap_free_buffer(struct page **pages,
				    size_t size)
{
	int count = size >> PAGE_SHIFT;

	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

/*
 * Based on the implementation of __iommu_dma_alloc_pages()
 * defined in drivers/iommu/dma-iommu.c
 */
static struct page **imgu_dmamap_alloc_buffer(size_t size,
					      unsigned long order_mask,
					      gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, count = size >> PAGE_SHIFT;
	const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;

	/* Allocate mem for array of page ptrs */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);

	if (!pages)
		return NULL;

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask) {
		/* No usable allocation order; don't leak the page array */
		kvfree(pages);
		return NULL;
	}

	gfp |= __GFP_HIGHMEM | __GFP_ZERO;

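	/*
	 * Allocate the buffer piecewise: for each remaining chunk, try the
	 * largest order still permitted by order_mask, fall back to smaller
	 * orders on failure, and split non-compound high-order pages so the
	 * result is a plain array of order-0 pages.
	 */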
	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | high_order_gfp : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			}

			__free_pages(page, order);
		}
		if (!page) {
			imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}

	return pages;
}

/**
 * imgu_dmamap_alloc - allocate and map a buffer into KVA
 * @imgu: struct imgu_device pointer
 * @map: struct to store mapping variables
 * @len: size required
 *
 * Returns:
 *  KVA on success
 *  %NULL on failure
 */
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
			size_t len)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	unsigned int alloc_sizes = imgu->mmu->pgsize_bitmap;
	struct device *dev = &imgu->pci_dev->dev;
	size_t size = PAGE_ALIGN(len);
	struct page **pages;
	dma_addr_t iovaddr;
	struct iova *iova;
	int i, rval;

	dev_dbg(dev, "%s: allocating %zu\n", __func__, size);

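	/* Reserve a contiguous IOVA range covering the whole buffer */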
	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return NULL;

	pages = imgu_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
					 GFP_KERNEL);
	if (!pages)
		goto out_free_iova;

	/* Call IOMMU driver to setup pgt */
	iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
	for (i = 0; i < size / PAGE_SIZE; ++i) {
		rval = imgu_mmu_map(imgu->mmu, iovaddr,
				    page_to_phys(pages[i]), PAGE_SIZE);
		if (rval)
			goto out_unmap;

		iovaddr += PAGE_SIZE;
	}

	/* Now grab a virtual region */
	map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END);
	if (!map->vma)
		goto out_unmap;

	map->vma->pages = pages;
	/* And map it in KVA */
	if (map_vm_area(map->vma, PAGE_KERNEL, pages))
		goto out_vunmap;

	map->size = size;
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
	map->vaddr = map->vma->addr;

	dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
		size, &map->daddr, map->vma->addr);

	return map->vma->addr;

out_vunmap:
	vunmap(map->vma->addr);

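	/*
	 * Error path: only the first i pages were mapped into the MMU;
	 * release the page array and tear those mappings down.
	 */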
out_unmap:
	imgu_dmamap_free_buffer(pages, size);
	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       i * PAGE_SIZE);
	map->vma = NULL;

out_free_iova:
	__free_iova(&imgu->iova_domain, iova);

	return NULL;
}

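/*
 * Unmap a buffer from the device: tear down the MMU mappings and release
 * the IOVA region, without freeing the backing pages or the kernel mapping.
 */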
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
	struct iova *iova;

	iova = find_iova(&imgu->iova_domain,
			 iova_pfn(&imgu->iova_domain, map->daddr));
	if (WARN_ON(!iova))
		return;

	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       iova_size(iova) << iova_shift(&imgu->iova_domain));

	__free_iova(&imgu->iova_domain, iova);
}

/*
 * Counterpart of imgu_dmamap_alloc
 */
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
	struct vm_struct *area = map->vma;

	dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
		__func__, map->size, &map->daddr, map->vaddr);

	if (!map->vaddr)
		return;

	imgu_dmamap_unmap(imgu, map);

	if (WARN_ON(!area) || WARN_ON(!area->pages))
		return;

	imgu_dmamap_free_buffer(area->pages, map->size);
	vunmap(map->vaddr);
	map->vaddr = NULL;
}

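/*
 * Map a scatter-gather list into the device's IOVA space and record the
 * resulting DMA address and size in @map. The backing pages are provided
 * by the caller; only the MMU mapping is created here.
 */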
int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
		       int nents, struct imgu_css_map *map)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct scatterlist *sg;
	struct iova *iova;
	size_t size = 0;
	int i;

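	/*
	 * The entries are mapped back to back into one contiguous IOVA
	 * range, so each must start at offset 0 and all but the last must
	 * cover a whole number of pages.
	 */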
	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset)
			return -EINVAL;

		if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
			return -EINVAL;

		size += sg->length;
	}

	size = iova_align(&imgu->iova_domain, size);
	dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
		nents, size >> shift);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return -ENOMEM;

	dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);

	if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
			    sglist, nents) < size)
		goto out_fail;

	memset(map, 0, sizeof(*map));
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
	map->size = size;

	return 0;

out_fail:
	__free_iova(&imgu->iova_domain, iova);

	return -EFAULT;
}

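/*
 * Initialize the IOVA domain used for device address allocation, with a
 * granule equal to the smallest page size supported by the imgu MMU.
 */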
int imgu_dmamap_init(struct imgu_device *imgu)
{
	unsigned long order, base_pfn;
	int ret = iova_cache_get();

	if (ret)
		return ret;

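	/*
	 * Use the smallest supported page size as the IOVA granule and
	 * start allocations at the aperture base (but never at pfn 0).
	 */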
	order = __ffs(imgu->mmu->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);

	return 0;
}

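/*
 * Counterpart of imgu_dmamap_init: release the IOVA domain and drop the
 * reference on the IOVA cache.
 */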
void imgu_dmamap_exit(struct imgu_device *imgu)
{
	put_iova_domain(&imgu->iova_domain);
	iova_cache_put();
}