// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2020 Intel Corporation

#include <asm/cacheflush.h>

#include <linux/device.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include "ipu.h"
#include "ipu-platform.h"
#include "ipu-dma.h"
#include "ipu-mmu.h"
#include "ipu-platform-regs.h"
#define ISP_PAGE_SHIFT		12
#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1))

#define ISP_L1PT_SHIFT		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))

#define ISP_L2PT_SHIFT		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES		1024
#define ISP_L2PT_PTES		1024

#define ISP_PADDR_SHIFT		12

#define REG_TLB_INVALIDATE	0x0000

#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
#define REG_INFO		0x0008
/* The range of stream ID i in L1 cache is from 0 to 15 */
#define MMUV2_REG_L1_STREAMID(i)	(0x0c + ((i) * 4))

/* The range of stream ID i in L2 cache is from 0 to 15 */
#define MMUV2_REG_L2_STREAMID(i)	(0x4c + ((i) * 4))

/* ZLW Enable for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_EN_SID(i)		(0x100 + ((i) * 0x20))

/* ZLW 1D mode Enable for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_1DMODE_SID(i)	(0x100 + ((i) * 0x20) + 0x0004)

/* Set ZLW insertion N pages ahead per stream 1D where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(i)	(0x100 + ((i) * 0x20) + 0x0008)

/* ZLW 2D mode Enable for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_2DMODE_SID(i)	(0x100 + ((i) * 0x20) + 0x0010)

/* ZLW Insertion for each stream in L1 MMU AT where i : 0..15 */
#define MMUV2_AT_REG_L1_ZLW_INSERTION(i)	(0x100 + ((i) * 0x20) + 0x000c)

#define MMUV2_AT_REG_L1_FW_ZLW_FIFO	(0x100 + \
					 (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20) + 0x003c)

/* FW ZLW has priority - needed for ZLW invalidations */
#define MMUV2_AT_REG_L1_FW_ZLW_PRIO	(0x100 + \
					 (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20))

#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)
#define TBL_VIRT_ADDR(a)	phys_to_virt(TBL_PHYS_ADDR(a))
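
/*
 * Fill the L1/L2 translation caches of one MMU HW unit with translations
 * pointing at the trash buffer by issuing zero-length-write (ZLW) prefetch
 * insertions, which effectively invalidates any previously cached
 * translations for the ZLW-enabled streams.
 */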
static void zlw_invalidate(struct ipu_mmu *mmu, struct ipu_mmu_hw *mmu_hw)
{
	unsigned int retry = 0;
	unsigned int i, j;
	int ret;

	for (i = 0; i < mmu_hw->nr_l1streams; i++) {
		/* We need to invalidate only the zlw enabled stream IDs */
		if (mmu_hw->l1_zlw_en[i]) {
			/*
			 * Maximum 16 blocks per L1 stream
			 * Write trash buffer iova offset to the FW_ZLW
			 * register. This will trigger pre-fetching of next 16
			 * pages from the page table. So we need to increment
			 * iova address by 16 * 4K to trigger the next 16 pages.
			 * Once this loop is completed, the L1 cache will be
			 * filled with trash buffer translation.
			 *
			 * TODO: Instead of maximum 16 blocks, use the allocated
			 * block size
			 */
			for (j = 0; j < mmu_hw->l1_block_sz[i]; j++)
				writel(mmu->iova_addr_trash +
				       j * MMUV2_TRASH_L1_BLOCK_OFFSET,
				       mmu_hw->base +
				       MMUV2_AT_REG_L1_ZLW_INSERTION(i));

			/*
			 * Now we need to fill the L2 cache entry. L2 cache
			 * entries will be automatically updated, based on the
			 * L1 entry. The above loop for L1 will update only one
			 * of the two entries in L2 as the L1 is under 4MB
			 * range. To force the other entry in L2 to update, we
			 * just need to trigger another pre-fetch which is
			 * outside the above 4MB range.
			 */
			writel(mmu->iova_addr_trash +
			       MMUV2_TRASH_L2_BLOCK_OFFSET,
			       mmu_hw->base +
			       MMUV2_AT_REG_L1_ZLW_INSERTION(0));
		}
	}

	/*
	 * Wait until AT is ready. FIFO read should return 2 when AT is ready.
	 * Retry value of 1000 is just by guess work to avoid the forever loop.
	 */
	do {
		if (retry > 1000) {
			dev_err(mmu->dev, "zlw invalidation failed\n");
			return;
		}
		ret = readl(mmu_hw->base + MMUV2_AT_REG_L1_FW_ZLW_FIFO);
		retry++;
	} while (ret != 2);
}
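
/*
 * Invalidate the TLBs of all MMU HW units belonging to this MMU. Units
 * flagged with zlw_invalidate are flushed via ZLW prefetches to the trash
 * buffer; the rest get a plain write to the invalidate register.
 */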
static void tlb_invalidate(struct ipu_mmu *mmu)
{
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	if (!mmu->ready) {
		spin_unlock_irqrestore(&mmu->ready_lock, flags);
		return;
	}

	for (i = 0; i < mmu->nr_mmus; i++) {
		/*
		 * To avoid the HW bug induced dead lock in some of the IPU
		 * MMUs on successive invalidate calls, we need to first do a
		 * read to the page table base before writing the invalidate
		 * register. MMUs which need to implement this WA, will have
		 * the insert_read_before_invalidate flag set as true.
		 * Disregard the return value of the read.
		 */
		if (mmu->mmu_hw[i].insert_read_before_invalidate)
			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);

		/* Normal invalidate or zlw invalidate */
		if (mmu->mmu_hw[i].zlw_invalidate) {
			/* trash buffer must be mapped by now, just in case! */
			WARN_ON(!mmu->iova_addr_trash);

			zlw_invalidate(mmu, &mmu->mmu_hw[i]);
		} else {
			writel(0xffffffff, mmu->mmu_hw[i].base +
			       REG_TLB_INVALIDATE);
		}
	}
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
static void page_table_dump(struct ipu_mmu_info *mmu_info)
{
	u32 l1_idx;

	pr_debug("begin IOMMU page table dump\n");

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		u32 l2_idx;
		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;

		if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
			continue;
		pr_debug("l1 entry %u; iovas 0x%8.8x--0x%8.8x, at %p\n",
			 l1_idx, iova, iova + ISP_PAGE_SIZE,
			 (void *)TBL_PHYS_ADDR(mmu_info->pgtbl[l1_idx]));

		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
			u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
			u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);

			if (l2_pt[l2_idx] == mmu_info->dummy_page)
				continue;

			pr_debug("\tl2 entry %u; iova 0x%8.8x, phys %p\n",
				 l2_idx, iova2,
				 (void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
		}
	}

	pr_debug("end IOMMU page table dump\n");
}
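
/*
 * Allocate one zeroed page for an L1 or L2 page table and initialise all of
 * its entries: an L1 table points at the dummy L2 table, an L2 table points
 * at the dummy page.
 */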
static u32 *alloc_page_table(struct ipu_mmu_info *mmu_info, bool l1)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	int i;

	if (!pt)
		return NULL;

	pr_debug("get_zeroed_page() == %p\n", pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = l1 ? mmu_info->dummy_l2_tbl : mmu_info->dummy_page;

	return pt;
}
static int l2_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
		  phys_addr_t paddr, size_t size)
{
	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
	u32 l1_entry = mmu_info->pgtbl[l1_idx];
	u32 *l2_pt;
	u32 iova_start = iova;
	unsigned int l2_idx;
	unsigned long flags;

	pr_debug("mapping l2 page table for l1 index %u (iova %8.8x)\n",
		 l1_idx, (u32)iova);

	spin_lock_irqsave(&mmu_info->lock, flags);
	if (l1_entry == mmu_info->dummy_l2_tbl) {
		u32 *l2_virt = alloc_page_table(mmu_info, false);

		if (!l2_virt) {
			spin_unlock_irqrestore(&mmu_info->lock, flags);
			return -ENOMEM;
		}

		l1_entry = virt_to_phys(l2_virt) >> ISP_PADDR_SHIFT;
		pr_debug("allocated page for l1_idx %u\n", l1_idx);

		if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl) {
			mmu_info->pgtbl[l1_idx] = l1_entry;
#ifdef CONFIG_X86
			clflush_cache_range(&mmu_info->pgtbl[l1_idx],
					    sizeof(mmu_info->pgtbl[l1_idx]));
#endif /* CONFIG_X86 */
		} else {
			free_page((unsigned long)TBL_VIRT_ADDR(l1_entry));
		}
	}

	l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);

	pr_debug("l2_pt at %p\n", l2_pt);

	paddr = ALIGN(paddr, ISP_PAGE_SIZE);

	l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;

	pr_debug("l2_idx %u, phys 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);
	if (l2_pt[l2_idx] != mmu_info->dummy_page) {
		spin_unlock_irqrestore(&mmu_info->lock, flags);
		return -EBUSY;
	}

	l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

#ifdef CONFIG_X86
	clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
#endif /* CONFIG_X86 */
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	pr_debug("l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);

	return 0;
}
static int __ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
			 phys_addr_t paddr, size_t size)
{
	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

	pr_debug("mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
		 iova_start, iova_end, size, paddr);

	return l2_map(mmu_info, iova_start, paddr, size);
}
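
/*
 * Point the L2 entries covering [iova, iova + size) back at the dummy page
 * and return the number of bytes actually unmapped.
 */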
static size_t l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
		       phys_addr_t dummy, size_t size)
{
	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
	u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
	u32 iova_start = iova;
	unsigned int l2_idx;
	size_t unmapped = 0;
	unsigned long flags;

	pr_debug("unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
		 l1_idx, iova);

	if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
		return -EINVAL;

	pr_debug("l2_pt at %p\n", l2_pt);

	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
	     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
		pr_debug("l2 index %u unmapped, was 0x%10.10llx\n",
			 l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
		spin_lock_irqsave(&mmu_info->lock, flags);
		l2_pt[l2_idx] = mmu_info->dummy_page;
		spin_unlock_irqrestore(&mmu_info->lock, flags);
#ifdef CONFIG_X86
		clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
#endif /* CONFIG_X86 */
		unmapped++;
	}

	return unmapped << ISP_PAGE_SHIFT;
}
static size_t __ipu_mmu_unmap(struct ipu_mmu_info *mmu_info,
			      unsigned long iova, size_t size)
{
	return l2_unmap(mmu_info, iova, 0, size);
}
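
/*
 * Reserve an IPU_MMUV2_TRASH_RANGE sized iova range and map every page in
 * it to the single physical trash page. The range is later used as the
 * target of ZLW prefetches during TLB invalidation.
 */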
static int allocate_trash_buffer(struct ipu_mmu *mmu)
{
	unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT;
	struct iova *iova;
	u32 iova_addr;
	unsigned int i;
	int ret;

	/* Allocate 8MB in iova range */
	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
			  mmu->dmap->mmu_info->aperture_end >> PAGE_SHIFT, 0);
	if (!iova) {
		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
		return -ENOMEM;
	}

	/*
	 * Map the 8MB iova address range to the same physical trash page
	 * mmu->trash_page which is already reserved at the probe
	 */
	iova_addr = iova->pfn_lo;
	for (i = 0; i < n_pages; i++) {
		ret = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
				  page_to_phys(mmu->trash_page), PAGE_SIZE);
		if (ret) {
			dev_err(mmu->dev,
				"mapping trash buffer range failed\n");
			goto out_unmap;
		}

		iova_addr++;
	}

	/* save the address for the ZLW invalidation */
	mmu->iova_addr_trash = iova->pfn_lo << PAGE_SHIFT;
	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
		mmu->mmid, (unsigned int)mmu->iova_addr_trash);
	return 0;

out_unmap:
	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
	__free_iova(&mmu->dmap->iovad, iova);
	return ret;
}
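
/*
 * Program every MMU HW unit: page table base address, info bits and the
 * per-stream L1/L2 TLB block addresses. On first init also reserve the
 * trash page and its iova range, then mark the MMU as ready.
 */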
int ipu_mmu_hw_init(struct ipu_mmu *mmu)
{
	unsigned int i;
	unsigned long flags;
	struct ipu_mmu_info *mmu_info;

	dev_dbg(mmu->dev, "mmu hw init\n");

	mmu_info = mmu->dmap->mmu_info;

	/* Initialise each MMU HW block */
	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j;
		u16 block_addr;

		/* Write page table address per MMU */
		writel((phys_addr_t)virt_to_phys(mmu_info->pgtbl)
		       >> ISP_PADDR_SHIFT,
		       mmu->mmu_hw[i].base + REG_L1_PHYS);

		/* Set info bits per MMU */
		writel(mmu->mmu_hw[i].info_bits,
		       mmu->mmu_hw[i].base + REG_INFO);

		/* Configure MMU TLB stream configuration for L1 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
			if (block_addr > IPU_MAX_LI_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L1 configuration\n");
				return -EINVAL;
			}

			/* Write block start address for each stream */
			writel(block_addr, mmu_hw->base +
			       mmu_hw->l1_stream_id_reg_offset + 4 * j);
		}

		/* Configure MMU TLB stream configuration for L2 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
			if (block_addr > IPU_MAX_L2_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L2 configuration\n");
				return -EINVAL;
			}

			writel(block_addr, mmu_hw->base +
			       mmu_hw->l2_stream_id_reg_offset + 4 * j);
		}
	}

	/*
	 * Allocate 1 page of physical memory for the trash buffer.
	 */
	if (!mmu->trash_page) {
		mmu->trash_page = alloc_page(GFP_KERNEL);
		if (!mmu->trash_page) {
			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
			return -ENOMEM;
		}
	}

	/* Allocate trash buffer, if not allocated. Only once per MMU */
	if (!mmu->iova_addr_trash) {
		int ret;

		ret = allocate_trash_buffer(mmu);
		if (ret) {
			__free_page(mmu->trash_page);
			mmu->trash_page = NULL;
			dev_err(mmu->dev, "trash buffer allocation failed\n");
			return ret;
		}
	}

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = true;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipu_mmu_hw_init);
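
/*
 * Allocate and initialise an ipu_mmu_info: the dummy page, the dummy L2
 * table and the L1 page table, with the aperture sized according to the
 * secure or non-secure MMU address width.
 */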
static struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp)
{
	struct ipu_mmu_info *mmu_info;
	void *ptr;

	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
	if (!mmu_info)
		return NULL;

	mmu_info->aperture_start = 0;
	mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
					      IPU_MMU_ADDRESS_BITS :
					      IPU_MMU_ADDRESS_BITS_NON_SECURE);
	mmu_info->pgsize_bitmap = SZ_4K;

	ptr = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!ptr)
		goto err_mem;

	mmu_info->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;

	ptr = alloc_page_table(mmu_info, false);
	if (!ptr)
		goto err;

	mmu_info->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;

	/*
	 * We always map the L1 page table (a single page as well as
	 * the L2 page tables).
	 */
	mmu_info->pgtbl = alloc_page_table(mmu_info, true);
	if (!mmu_info->pgtbl)
		goto err;

	spin_lock_init(&mmu_info->lock);

	pr_debug("domain initialised\n");

	return mmu_info;

err:
	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
err_mem:
	kfree(mmu_info);

	return NULL;
}
int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipu_mmu_hw_cleanup);
static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp)
{
	struct ipu_dma_mapping *dmap;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	init_iova_domain(&dmap->iovad, SZ_4K, 1);
	dmap->mmu_info->dmap = dmap;

	kref_init(&dmap->ref);

	pr_debug("alloc mapping\n");

	return dmap;
}
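
/*
 * Translate an iova to the physical address currently programmed in the
 * page tables by walking the L1 and L2 tables in software.
 */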
phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
				 dma_addr_t iova)
{
	u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[iova >> ISP_L1PT_SHIFT]);

	return (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]
	    << ISP_PAGE_SHIFT;
}
/*
 * The following four functions are implemented based on iommu.c
 * drivers/iommu/iommu.c/iommu_pgsize().
 */
static size_t ipu_mmu_pgsize(unsigned long pgsize_bitmap,
			     unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= pgsize_bitmap;

	/* make sure we're still sane */
	WARN_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
/* drivers/iommu/iommu.c/iommu_unmap() */
size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
		     size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return -EINVAL;
	}

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
					       iova, size - unmapped);

		unmapped_page = __ipu_mmu_unmap(mmu_info, iova, pgsize);
		if (!unmapped_page)
			break;

		dev_dbg(NULL, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
/* drivers/iommu/iommu.c/iommu_map() */
int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
		phys_addr_t paddr, size_t size)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
					       iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = __ipu_mmu_map(mmu_info, iova, paddr, pgsize);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		ipu_mmu_unmap(mmu_info, orig_iova, orig_size - size);

	return ret;
}
static void ipu_mmu_destroy(struct ipu_mmu *mmu)
{
	struct ipu_dma_mapping *dmap = mmu->dmap;
	struct ipu_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_addr_trash) {
		iova = find_iova(&dmap->iovad,
				 mmu->iova_addr_trash >> PAGE_SHIFT);
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT,
				      (iova->pfn_hi - iova->pfn_lo + 1) <<
				      PAGE_SHIFT);
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_addr_trash = 0;
	}

	if (mmu->trash_page)
		__free_page(mmu->trash_page);

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++)
		if (mmu_info->pgtbl[l1_idx] != mmu_info->dummy_l2_tbl)
			free_page((unsigned long)
				  TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]));

	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
	free_page((unsigned long)mmu_info->pgtbl);
	kfree(mmu_info);
}
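
/*
 * Create the ipu_mmu instance for one IPU MMU: copy the per-unit HW
 * descriptors from the HW variant table, resolve their register bases from
 * the mapped MMIO base and set up the DMA mapping (iova domain plus page
 * tables).
 */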
struct ipu_mmu *ipu_mmu_init(struct device *dev,
			     void __iomem *base, int mmid,
			     const struct ipu_hw_variants *hw)
{
	struct ipu_mmu *mmu;
	struct ipu_mmu_pdata *pdata;
	struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
	unsigned int i;

	if (hw->nr_mmus > IPU_MMU_MAX_DEVICES)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < hw->nr_mmus; i++) {
		struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
		const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i];

		if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
		    src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
			return ERR_PTR(-EINVAL);

		*pdata_mmu = *src_mmu;
		pdata_mmu->base = base + src_mmu->offset;
	}

	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->mmid = mmid;
	mmu->mmu_hw = pdata->mmu_hw;
	mmu->nr_mmus = hw->nr_mmus;
	mmu->tlb_invalidate = tlb_invalidate;
	mmu->ready = false;
	INIT_LIST_HEAD(&mmu->vma_list);
	spin_lock_init(&mmu->ready_lock);

	mmu->dmap = alloc_dma_mapping(isp);
	if (!mmu->dmap) {
		dev_err(dev, "can't alloc dma mapping\n");
		return ERR_PTR(-ENOMEM);
	}

	return mmu;
}
EXPORT_SYMBOL(ipu_mmu_init);
void ipu_mmu_cleanup(struct ipu_mmu *mmu)
{
	struct ipu_dma_mapping *dmap = mmu->dmap;

	ipu_mmu_destroy(mmu);
	mmu->dmap = NULL;

	put_iova_domain(&dmap->iovad);
	kfree(dmap);
}
EXPORT_SYMBOL(ipu_mmu_cleanup);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel ipu mmu driver");