// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2020 Intel Corporation

#include <asm/cacheflush.h>

#include <linux/device.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include "ipu.h"
#include "ipu-platform.h"
#include "ipu-dma.h"
#include "ipu-mmu.h"
#include "ipu-platform-regs.h"

#define ISP_PAGE_SHIFT 12
#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT)
#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1))

#define ISP_L1PT_SHIFT 22
#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1))

#define ISP_L2PT_SHIFT 12
#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES 1024
#define ISP_L2PT_PTES 1024

#define ISP_PADDR_SHIFT 12

#define REG_TLB_INVALIDATE 0x0000

#define REG_L1_PHYS 0x0004 /* 27-bit pfn */
#define REG_INFO 0x0008

/* The range of stream ID i in L1 cache is from 0 to 15 */
#define MMUV2_REG_L1_STREAMID(i) (0x0c + ((i) * 4))

/* The range of stream ID i in L2 cache is from 0 to 15 */
#define MMUV2_REG_L2_STREAMID(i) (0x4c + ((i) * 4))

/* ZLW Enable for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_EN_SID(i) (0x100 + ((i) * 0x20))

/* ZLW 1D mode Enable for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_1DMODE_SID(i) (0x100 + ((i) * 0x20) + 0x0004)

/* Set ZLW insertion N pages ahead per stream 1D where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(i) (0x100 + ((i) * 0x20) + 0x0008)

/* ZLW 2D mode Enable for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_2DMODE_SID(i) (0x100 + ((i) * 0x20) + 0x0010)

/* ZLW Insertion for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_INSERTION(i) (0x100 + ((i) * 0x20) + 0x000c)

#define MMUV2_AT_REG_L1_FW_ZLW_FIFO (0x100 + \
        (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20) + 0x003c)

/* FW ZLW has priority - needed for ZLW invalidations */
#define MMUV2_AT_REG_L1_FW_ZLW_PRIO (0x100 + \
        (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20))

#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT)
#define TBL_VIRT_ADDR(a) phys_to_virt(TBL_PHYS_ADDR(a))

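/*
 * Invalidate the ZLW-enabled L1 streams by triggering prefetches of the
 * trash buffer translation, then poll the FW ZLW FIFO until the AT reports
 * ready (a read value of 2).
 */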
static void zlw_invalidate(struct ipu_mmu *mmu, struct ipu_mmu_hw *mmu_hw)
{
        unsigned int retry = 0;
        unsigned int i, j;
        int ret;

        for (i = 0; i < mmu_hw->nr_l1streams; i++) {
                /* We need to invalidate only the zlw enabled stream IDs */
                if (mmu_hw->l1_zlw_en[i]) {
                        /*
                         * Maximum 16 blocks per L1 stream.
                         * Write the trash buffer iova offset to the FW_ZLW
                         * register. This triggers pre-fetching of the next 16
                         * pages from the page table, so we need to increment
                         * the iova address by 16 * 4K to trigger the next 16
                         * pages. Once this loop is completed, the L1 cache
                         * will be filled with the trash buffer translation.
                         *
                         * TODO: instead of the maximum of 16 blocks, use the
                         * allocated block size.
                         */
                        for (j = 0; j < mmu_hw->l1_block_sz[i]; j++)
                                writel(mmu->iova_addr_trash +
                                       j * MMUV2_TRASH_L1_BLOCK_OFFSET,
                                       mmu_hw->base +
                                       MMUV2_AT_REG_L1_ZLW_INSERTION(i));

                        /*
                         * Now we need to fill the L2 cache entry. L2 cache
                         * entries will be automatically updated, based on the
                         * L1 entry. The above loop for L1 will update only one
                         * of the two entries in L2 as the L1 is under 4MB
                         * range. To force the other entry in L2 to update, we
                         * just need to trigger another pre-fetch which is
                         * outside the above 4MB range.
                         */
                        writel(mmu->iova_addr_trash +
                               MMUV2_TRASH_L2_BLOCK_OFFSET,
                               mmu_hw->base +
                               MMUV2_AT_REG_L1_ZLW_INSERTION(0));
                }
        }

        /*
         * Wait until the AT is ready. A FIFO read should return 2 when the AT
         * is ready. The retry limit of 1000 is just guesswork to avoid an
         * endless loop.
         */
        do {
                if (retry > 1000) {
                        dev_err(mmu->dev, "zlw invalidation failed\n");
                        return;
                }
                ret = readl(mmu_hw->base + MMUV2_AT_REG_L1_FW_ZLW_FIFO);
                retry++;
        } while (ret != 2);
}

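/*
 * Invalidate the TLB of every MMU HW block. This is a no-op until the MMU
 * has been marked ready by ipu_mmu_hw_init().
 */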
static void tlb_invalidate(struct ipu_mmu *mmu)
{
        unsigned int i;
        unsigned long flags;

        spin_lock_irqsave(&mmu->ready_lock, flags);
        if (!mmu->ready) {
                spin_unlock_irqrestore(&mmu->ready_lock, flags);
                return;
        }

        for (i = 0; i < mmu->nr_mmus; i++) {
                /*
                 * To avoid a HW bug induced deadlock in some of the IPU MMUs
                 * on successive invalidate calls, we need to first do a read
                 * of the page table base before writing the invalidate
                 * register. MMUs which need this workaround have the
                 * insert_read_before_invalidate flag set to true.
                 * Disregard the return value of the read.
                 */
                if (mmu->mmu_hw[i].insert_read_before_invalidate)
                        readl(mmu->mmu_hw[i].base + REG_L1_PHYS);

                /* Normal invalidate or zlw invalidate */
                if (mmu->mmu_hw[i].zlw_invalidate) {
                        /* trash buffer must be mapped by now, just in case! */
                        WARN_ON(!mmu->iova_addr_trash);

                        zlw_invalidate(mmu, &mmu->mmu_hw[i]);
                } else {
                        writel(0xffffffff, mmu->mmu_hw[i].base +
                               REG_TLB_INVALIDATE);
                }
        }
        spin_unlock_irqrestore(&mmu->ready_lock, flags);
}

#ifdef DEBUG
static void page_table_dump(struct ipu_mmu_info *mmu_info)
{
        u32 l1_idx;

        pr_debug("begin IOMMU page table dump\n");

        for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
                u32 l2_idx;
                u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;

                if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
                        continue;
                pr_debug("l1 entry %u; iovas 0x%8.8x--0x%8.8x, at %p\n",
                         l1_idx, iova, iova + ISP_PAGE_SIZE,
                         (void *)TBL_PHYS_ADDR(mmu_info->pgtbl[l1_idx]));

                for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
                        u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
                        u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);

                        if (l2_pt[l2_idx] == mmu_info->dummy_page)
                                continue;

                        pr_debug("\tl2 entry %u; iova 0x%8.8x, phys %p\n",
                                 l2_idx, iova2,
                                 (void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
                }
        }

        pr_debug("end IOMMU page table dump\n");
}
#endif /* DEBUG */

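/*
 * Allocate one zeroed page for an L1 or L2 page table and initialise every
 * entry to the dummy L2 table (for L1) or the dummy page (for L2).
 */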
static u32 *alloc_page_table(struct ipu_mmu_info *mmu_info, bool l1)
{
        u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
        int i;

        if (!pt)
                return NULL;

        pr_debug("get_zeroed_page() == %p\n", pt);

        for (i = 0; i < ISP_L1PT_PTES; i++)
                pt[i] = l1 ? mmu_info->dummy_l2_tbl : mmu_info->dummy_page;

        return pt;
}

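/*
 * Map a single page at @iova: allocate the L2 page table on demand and set
 * the corresponding L2 entry to @paddr.
 */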
static int l2_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
                  phys_addr_t paddr, size_t size)
{
        u32 l1_idx = iova >> ISP_L1PT_SHIFT;
        u32 l1_entry = mmu_info->pgtbl[l1_idx];
        u32 *l2_pt;
        u32 iova_start = iova;
        unsigned int l2_idx;
        unsigned long flags;

        pr_debug("mapping l2 page table for l1 index %u (iova %8.8x)\n",
                 l1_idx, (u32)iova);

        spin_lock_irqsave(&mmu_info->lock, flags);
        if (l1_entry == mmu_info->dummy_l2_tbl) {
                u32 *l2_virt = alloc_page_table(mmu_info, false);

                if (!l2_virt) {
                        spin_unlock_irqrestore(&mmu_info->lock, flags);
                        return -ENOMEM;
                }

                l1_entry = virt_to_phys(l2_virt) >> ISP_PADDR_SHIFT;
                pr_debug("allocated page for l1_idx %u\n", l1_idx);

                if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl) {
                        mmu_info->pgtbl[l1_idx] = l1_entry;
#ifdef CONFIG_X86
                        clflush_cache_range(&mmu_info->pgtbl[l1_idx],
                                            sizeof(mmu_info->pgtbl[l1_idx]));
#endif /* CONFIG_X86 */
                } else {
                        free_page((unsigned long)TBL_VIRT_ADDR(l1_entry));
                }
        }

        l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);

        pr_debug("l2_pt at %p\n", l2_pt);

        paddr = ALIGN(paddr, ISP_PAGE_SIZE);

        l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;

        pr_debug("l2_idx %u, phys 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);
        if (l2_pt[l2_idx] != mmu_info->dummy_page) {
                spin_unlock_irqrestore(&mmu_info->lock, flags);
                return -EBUSY;
        }

        l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

#ifdef CONFIG_X86
        clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
#endif /* CONFIG_X86 */
        spin_unlock_irqrestore(&mmu_info->lock, flags);

        pr_debug("l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);

        return 0;
}

static int __ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
                         phys_addr_t paddr, size_t size)
{
        u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
        u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

        pr_debug("mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
                 iova_start, iova_end, size, paddr);

        return l2_map(mmu_info, iova_start, paddr, size);
}

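/*
 * Reset the L2 entries covering @iova..@iova + @size back to the dummy page
 * and return the number of bytes unmapped.
 */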
static size_t l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
                       phys_addr_t dummy, size_t size)
{
        u32 l1_idx = iova >> ISP_L1PT_SHIFT;
        u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
        u32 iova_start = iova;
        unsigned int l2_idx;
        size_t unmapped = 0;

        pr_debug("unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
                 l1_idx, iova);

        if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
                return 0;       /* nothing mapped in this L1 range */

        pr_debug("l2_pt at %p\n", l2_pt);

        for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
             (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
             < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
                unsigned long flags;

                pr_debug("l2 index %u unmapped, was 0x%10.10llx\n",
                         l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
                spin_lock_irqsave(&mmu_info->lock, flags);
                l2_pt[l2_idx] = mmu_info->dummy_page;
                spin_unlock_irqrestore(&mmu_info->lock, flags);
#ifdef CONFIG_X86
                clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
#endif /* CONFIG_X86 */
                unmapped++;
        }

        return unmapped << ISP_PAGE_SHIFT;
}

static size_t __ipu_mmu_unmap(struct ipu_mmu_info *mmu_info,
                              unsigned long iova, size_t size)
{
        return l2_unmap(mmu_info, iova, 0, size);
}

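/*
 * Reserve an IPU_MMUV2_TRASH_RANGE sized iova range and map all of it to the
 * single physical trash page. The resulting iova is saved for use by the ZLW
 * invalidation sequence.
 */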
static int allocate_trash_buffer(struct ipu_mmu *mmu)
{
        unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT;
        struct iova *iova;
        u32 iova_addr;
        unsigned int i;
        int ret;

        /* Allocate 8MB in iova range */
        iova = alloc_iova(&mmu->dmap->iovad, n_pages,
                          mmu->dmap->mmu_info->aperture_end >> PAGE_SHIFT, 0);
        if (!iova) {
                dev_err(mmu->dev, "cannot allocate iova range for trash\n");
                return -ENOMEM;
        }

        /*
         * Map the 8MB iova address range to the same physical trash page
         * mmu->trash_page, which was already reserved at probe time.
         */
        iova_addr = iova->pfn_lo;
        for (i = 0; i < n_pages; i++) {
                ret = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
                                  page_to_phys(mmu->trash_page), PAGE_SIZE);
                if (ret) {
                        dev_err(mmu->dev,
                                "mapping trash buffer range failed\n");
                        goto out_unmap;
                }

                iova_addr++;
        }

        /* save the address for the ZLW invalidation */
        mmu->iova_addr_trash = iova->pfn_lo << PAGE_SHIFT;
        dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
                mmu->mmid, (unsigned int)mmu->iova_addr_trash);
        return 0;

out_unmap:
        ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
                      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
        __free_iova(&mmu->dmap->iovad, iova);
        return ret;
}

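/*
 * Program every MMU HW block (page table base, info bits, L1/L2 TLB stream
 * configuration), set up the trash buffer and mark the MMU as ready.
 */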
int ipu_mmu_hw_init(struct ipu_mmu *mmu)
{
        unsigned int i;
        unsigned long flags;
        struct ipu_mmu_info *mmu_info;

        dev_dbg(mmu->dev, "mmu hw init\n");

        mmu_info = mmu->dmap->mmu_info;

        /* Initialise each MMU HW block */
        for (i = 0; i < mmu->nr_mmus; i++) {
                struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
                unsigned int j;
                u16 block_addr;

                /* Write page table address per MMU */
                writel((phys_addr_t)virt_to_phys(mmu_info->pgtbl)
                       >> ISP_PADDR_SHIFT,
                       mmu->mmu_hw[i].base + REG_L1_PHYS);

                /* Set info bits per MMU */
                writel(mmu->mmu_hw[i].info_bits,
                       mmu->mmu_hw[i].base + REG_INFO);

                /* Configure MMU TLB stream configuration for L1 */
                for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
                     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
                        if (block_addr > IPU_MAX_LI_BLOCK_ADDR) {
                                dev_err(mmu->dev, "invalid L1 configuration\n");
                                return -EINVAL;
                        }

                        /* Write block start address for each stream */
                        writel(block_addr, mmu_hw->base +
                               mmu_hw->l1_stream_id_reg_offset + 4 * j);
                }

                /* Configure MMU TLB stream configuration for L2 */
                for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
                     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
                        if (block_addr > IPU_MAX_L2_BLOCK_ADDR) {
                                dev_err(mmu->dev, "invalid L2 configuration\n");
                                return -EINVAL;
                        }

                        writel(block_addr, mmu_hw->base +
                               mmu_hw->l2_stream_id_reg_offset + 4 * j);
                }
        }

        /*
         * Allocate 1 page of physical memory for the trash buffer.
         */
        if (!mmu->trash_page) {
                mmu->trash_page = alloc_page(GFP_KERNEL);
                if (!mmu->trash_page) {
                        dev_err(mmu->dev, "insufficient memory for trash buffer\n");
                        return -ENOMEM;
                }
        }

        /* Allocate the trash buffer, if not yet allocated. Only once per MMU */
        if (!mmu->iova_addr_trash) {
                int ret;

                ret = allocate_trash_buffer(mmu);
                if (ret) {
                        __free_page(mmu->trash_page);
                        mmu->trash_page = NULL;
                        dev_err(mmu->dev, "trash buffer allocation failed\n");
                        return ret;
                }
        }

        spin_lock_irqsave(&mmu->ready_lock, flags);
        mmu->ready = true;
        spin_unlock_irqrestore(&mmu->ready_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ipu_mmu_hw_init);

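/*
 * Allocate and initialise the MMU address space: aperture limits, the dummy
 * page, the dummy L2 page table and the L1 page table.
 */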
static struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp)
{
        struct ipu_mmu_info *mmu_info;
        void *ptr;

        mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
        if (!mmu_info)
                return NULL;

        mmu_info->aperture_start = 0;
        mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
                                              IPU_MMU_ADDRESS_BITS :
                                              IPU_MMU_ADDRESS_BITS_NON_SECURE);
        mmu_info->pgsize_bitmap = SZ_4K;

        ptr = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
        if (!ptr)
                goto err_mem;

        mmu_info->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;

        ptr = alloc_page_table(mmu_info, false);
        if (!ptr)
                goto err;

        mmu_info->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;

        /*
         * We always map the L1 page table (a single page), as well as
         * the L2 page tables.
         */
        mmu_info->pgtbl = alloc_page_table(mmu_info, true);
        if (!mmu_info->pgtbl)
                goto err;

        spin_lock_init(&mmu_info->lock);

        pr_debug("domain initialised\n");

        return mmu_info;

err:
        free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
        free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
err_mem:
        kfree(mmu_info);

        return NULL;
}

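/* Mark the MMU as not ready so that further TLB invalidations are skipped. */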
int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu)
{
        unsigned long flags;

        spin_lock_irqsave(&mmu->ready_lock, flags);
        mmu->ready = false;
        spin_unlock_irqrestore(&mmu->ready_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ipu_mmu_hw_cleanup);

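/*
 * Allocate the DMA mapping used by the MMU: the page table structures plus
 * the iova domain for address allocation.
 */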
static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp)
{
        struct ipu_dma_mapping *dmap;

        dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
        if (!dmap)
                return NULL;

        dmap->mmu_info = ipu_mmu_alloc(isp);
        if (!dmap->mmu_info) {
                kfree(dmap);
                return NULL;
        }
        init_iova_domain(&dmap->iovad, SZ_4K, 1);
        dmap->mmu_info->dmap = dmap;

        kref_init(&dmap->ref);

        pr_debug("alloc mapping\n");

        iova_cache_get();

        return dmap;
}

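/*
 * Translate an iova to a physical address by walking the two-level page
 * table.
 */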
phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
                                 dma_addr_t iova)
{
        u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[iova >> ISP_L1PT_SHIFT]);

        return (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]
               << ISP_PAGE_SHIFT;
}

/*
 * The following functions are modelled on drivers/iommu/iommu.c,
 * starting from iommu_pgsize().
 */
static size_t ipu_mmu_pgsize(unsigned long pgsize_bitmap,
                             unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        size_t pgsize;

        /* Max page size that still fits into 'size' */
        pgsize_idx = __fls(size);

        /* need to consider alignment requirements? */
        if (likely(addr_merge)) {
                /* Max page size allowed by address */
                unsigned int align_pgsize_idx = __ffs(addr_merge);

                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
        }

        /* build a mask of acceptable page sizes */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        /* throw away page sizes not supported by the hardware */
        pgsize &= pgsize_bitmap;

        /* make sure we're still sane */
        WARN_ON(!pgsize);

        /* pick the biggest page */
        pgsize_idx = __fls(pgsize);
        pgsize = 1UL << pgsize_idx;

        return pgsize;
}

/* drivers/iommu/iommu.c/iommu_unmap() */
size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
                     size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                        iova, size, min_pagesz);
                return -EINVAL;
        }

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
                                               iova, size - unmapped);

                unmapped_page = __ipu_mmu_unmap(mmu_info, iova, pgsize);
                if (!unmapped_page)
                        break;

                dev_dbg(NULL, "unmapped: iova 0x%lx size 0x%zx\n",
                        iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}

/* drivers/iommu/iommu.c/iommu_map() */
int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
                phys_addr_t paddr, size_t size)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (mmu_info->pgsize_bitmap == 0UL)
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
                                               iova | paddr, size);

                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);

                ret = __ipu_mmu_map(mmu_info, iova, paddr, pgsize);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                ipu_mmu_unmap(mmu_info, orig_iova, orig_size - size);

        return ret;
}

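/*
 * Tear down the MMU address space: release the trash buffer mapping and free
 * the L2 page tables, the dummy page, the dummy L2 table and the L1 page
 * table.
 */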
static void ipu_mmu_destroy(struct ipu_mmu *mmu)
{
        struct ipu_dma_mapping *dmap = mmu->dmap;
        struct ipu_mmu_info *mmu_info = dmap->mmu_info;
        struct iova *iova;
        u32 l1_idx;

        if (mmu->iova_addr_trash) {
                iova = find_iova(&dmap->iovad,
                                 mmu->iova_addr_trash >> PAGE_SHIFT);
                if (iova) {
                        /* unmap and free the trash buffer iova */
                        ipu_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT,
                                      (iova->pfn_hi - iova->pfn_lo + 1) <<
                                      PAGE_SHIFT);
                        __free_iova(&dmap->iovad, iova);
                } else {
                        dev_err(mmu->dev, "trash buffer iova not found.\n");
                }

                mmu->iova_addr_trash = 0;
        }

        if (mmu->trash_page)
                __free_page(mmu->trash_page);

        for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++)
                if (mmu_info->pgtbl[l1_idx] != mmu_info->dummy_l2_tbl)
                        free_page((unsigned long)
                                  TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]));

        free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
        free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
        free_page((unsigned long)mmu_info->pgtbl);
        kfree(mmu_info);
}

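/*
 * Create an ipu_mmu instance for one hardware variant: copy the per-MMU HW
 * descriptors, resolve their register base addresses and allocate the DMA
 * mapping.
 */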
struct ipu_mmu *ipu_mmu_init(struct device *dev,
                             void __iomem *base, int mmid,
                             const struct ipu_hw_variants *hw)
{
        struct ipu_mmu *mmu;
        struct ipu_mmu_pdata *pdata;
        struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
        unsigned int i;

        if (hw->nr_mmus > IPU_MMU_MAX_DEVICES)
                return ERR_PTR(-EINVAL);

        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < hw->nr_mmus; i++) {
                struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
                const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i];

                if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
                    src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
                        return ERR_PTR(-EINVAL);

                *pdata_mmu = *src_mmu;
                pdata_mmu->base = base + src_mmu->offset;
        }

        mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        mmu->mmid = mmid;
        mmu->mmu_hw = pdata->mmu_hw;
        mmu->nr_mmus = hw->nr_mmus;
        mmu->tlb_invalidate = tlb_invalidate;
        mmu->ready = false;
        INIT_LIST_HEAD(&mmu->vma_list);
        spin_lock_init(&mmu->ready_lock);

        mmu->dmap = alloc_dma_mapping(isp);
        if (!mmu->dmap) {
                dev_err(dev, "can't alloc dma mapping\n");
                return ERR_PTR(-ENOMEM);
        }

        return mmu;
}
EXPORT_SYMBOL(ipu_mmu_init);

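/*
 * Destroy the MMU address space and release the DMA mapping and its iova
 * domain.
 */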
void ipu_mmu_cleanup(struct ipu_mmu *mmu)
{
        struct ipu_dma_mapping *dmap = mmu->dmap;

        ipu_mmu_destroy(mmu);
        mmu->dmap = NULL;
        iova_cache_put();
        put_iova_domain(&dmap->iovad);
        kfree(dmap);
}
EXPORT_SYMBOL(ipu_mmu_cleanup);

MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel ipu mmu driver");