// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2021 Intel Corporation

#include <asm/cacheflush.h>

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipu.h"
#include "ipu-platform.h"
#include "ipu-dma.h"
#include "ipu-mmu.h"
#include "ipu-platform-regs.h"
#define ISP_PAGE_SHIFT		12
#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1))

#define ISP_L1PT_SHIFT		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))

#define ISP_L2PT_SHIFT		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES		1024
#define ISP_L2PT_PTES		1024

#define ISP_PADDR_SHIFT		12

#define REG_TLB_INVALIDATE	0x0000

#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
#define REG_INFO		0x0008

/* The range of stream ID i in L1 cache is from 0 to 15 */
#define MMUV2_REG_L1_STREAMID(i)	(0x0c + ((i) * 4))

/* The range of stream ID i in L2 cache is from 0 to 15 */
#define MMUV2_REG_L2_STREAMID(i)	(0x4c + ((i) * 4))

#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)
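
/*
 * Address layout implied by the definitions above: a 32-bit IOVA splits into
 * a 10-bit L1 index (bits 31..22), a 10-bit L2 index (bits 21..12) and a
 * 12-bit page offset, so each L1 entry covers 4 MiB and each L2 entry one
 * 4 KiB page. Page table entries store page frame numbers, hence the
 * TBL_PHYS_ADDR()/ISP_PADDR_SHIFT conversions between PTE values and
 * physical addresses.
 */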
static void tlb_invalidate(struct ipu_mmu *mmu)
{
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	if (!mmu->ready) {
		spin_unlock_irqrestore(&mmu->ready_lock, flags);
		return;
	}

	for (i = 0; i < mmu->nr_mmus; i++) {
		/*
		 * To avoid the HW bug induced dead lock in some of the IPU
		 * MMUs on successive invalidate calls, we need to first do a
		 * read to the page table base before writing the invalidate
		 * register. MMUs which need to implement this WA will have
		 * the insert_read_before_invalidate flag set to true.
		 * Disregard the return value of the read.
		 */
		if (mmu->mmu_hw[i].insert_read_before_invalidate)
			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);

		writel(0xffffffff, mmu->mmu_hw[i].base +
		       REG_TLB_INVALIDATE);

		/*
		 * The TLB invalidation is a "single cycle" (IOMMU clock
		 * cycles). When the actual MMIO write reaches the IPU TLB
		 * Invalidate register, wmb() will force the TLB invalidate
		 * out if the CPU attempts to update the IOMMU page table
		 * (or sooner).
		 */
		wmb();
	}
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
static void page_table_dump(struct ipu_mmu_info *mmu_info)
{
	u32 l1_idx;

	dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		u32 l2_idx;
		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
			continue;
		dev_dbg(mmu_info->dev,
			"l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %p\n",
			l1_idx, iova, iova + ISP_PAGE_SIZE,
			(void *)TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));

		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
			u32 *l2_pt = mmu_info->l2_pts[l1_idx];
			u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);

			if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
				continue;

			dev_dbg(mmu_info->dev,
				"\tl2 entry %u; iova 0x%8.8x, phys %p\n",
				l2_idx, iova2,
				(void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
		}
	}

	dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
}
static dma_addr_t map_single(struct ipu_mmu_info *mmu_info, void *ptr)
{
	dma_addr_t dma;

	dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu_info->dev, dma))
		return 0;

	return dma;
}
static int get_dummy_page(struct ipu_mmu_info *mmu_info)
{
	dma_addr_t dma;
	void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map dummy page\n");
		goto err_free_page;
	}

	mmu_info->dummy_page = pt;
	mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}
static void free_dummy_page(struct ipu_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_page);
}
static int alloc_dummy_l2_pt(struct ipu_mmu_info *mmu_info)
{
	dma_addr_t dma;
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	int i;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l2pt page\n");
		goto err_free_page;
	}

	for (i = 0; i < ISP_L2PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	mmu_info->dummy_l2_pt = pt;
	mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}
static void free_dummy_l2_pt(struct ipu_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
}
static u32 *alloc_l1_pt(struct ipu_mmu_info *mmu_info)
{
	dma_addr_t dma;
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_l2_pteval;

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l1pt page\n");
		goto err_free_page;
	}

	mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);

	return pt;

err_free_page:
	free_page((unsigned long)pt);
	return NULL;
}
static u32 *alloc_l2_pt(struct ipu_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	return pt;
}
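
/*
 * Mapping path: l2_map() below installs one 4 KiB translation. It looks up
 * the L1 entry for the IOVA; if that entry still points at the dummy L2
 * table, a real L2 table is allocated, DMA-mapped and hooked in first. The
 * page frame number of the target physical address is then written into the
 * L2 slot, and the touched cache lines are flushed with
 * clflush_cache_range() so the IPU MMU walker sees the update.
 */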
static int l2_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
		  phys_addr_t paddr, size_t size)
{
	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
	u32 l1_entry;
	u32 *l2_pt, *l2_virt;
	u32 iova_start = iova;
	unsigned int l2_idx;
	unsigned long flags;
	dma_addr_t dma;

	dev_dbg(mmu_info->dev,
		"mapping l2 page table for l1 index %u (iova %8.8x)\n",
		l1_idx, (u32)iova);

	spin_lock_irqsave(&mmu_info->lock, flags);
	l1_entry = mmu_info->l1_pt[l1_idx];
	if (l1_entry == mmu_info->dummy_l2_pteval) {
		l2_virt = mmu_info->l2_pts[l1_idx];
		if (likely(!l2_virt)) {
			l2_virt = alloc_l2_pt(mmu_info);
			if (!l2_virt) {
				spin_unlock_irqrestore(&mmu_info->lock, flags);
				return -ENOMEM;
			}
		}

		dma = map_single(mmu_info, l2_virt);
		if (!dma) {
			dev_err(mmu_info->dev, "Failed to map l2pt page\n");
			free_page((unsigned long)l2_virt);
			spin_unlock_irqrestore(&mmu_info->lock, flags);
			return -EINVAL;
		}

		l1_entry = dma >> ISP_PADDR_SHIFT;

		dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n",
			l1_idx, l2_virt);
		mmu_info->l1_pt[l1_idx] = l1_entry;
		mmu_info->l2_pts[l1_idx] = l2_virt;
		clflush_cache_range(&mmu_info->l1_pt[l1_idx],
				    sizeof(mmu_info->l1_pt[l1_idx]));
	}

	l2_pt = mmu_info->l2_pts[l1_idx];

	dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry);

	paddr = ALIGN(paddr, ISP_PAGE_SIZE);

	l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;

	dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx,
		l2_pt[l2_idx]);
	if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) {
		spin_unlock_irqrestore(&mmu_info->lock, flags);
		return -EINVAL;
	}

	l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

	clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
		l2_pt[l2_idx]);

	return 0;
}
static int __ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
			 phys_addr_t paddr, size_t size)
{
	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

	dev_dbg(mmu_info->dev,
		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
		iova_start, iova_end, size, paddr);

	return l2_map(mmu_info, iova_start, paddr, size);
}
static size_t l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
		       phys_addr_t dummy, size_t size)
{
	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
	u32 *l2_pt;
	u32 iova_start = iova;
	unsigned int l2_idx;
	size_t unmapped = 0;
	unsigned long flags;

	dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
		l1_idx, iova);

	spin_lock_irqsave(&mmu_info->lock, flags);
	if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
		spin_unlock_irqrestore(&mmu_info->lock, flags);
		dev_err(mmu_info->dev,
			"unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
			iova, l1_idx);
		return 0;
	}

	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
	     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
		l2_pt = mmu_info->l2_pts[l1_idx];
		dev_dbg(mmu_info->dev,
			"unmap l2 index %u with pteval 0x%10.10llx\n",
			l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
		l2_pt[l2_idx] = mmu_info->dummy_page_pteval;

		clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
		unmapped++;
	}
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return unmapped << ISP_PAGE_SHIFT;
}
static size_t __ipu_mmu_unmap(struct ipu_mmu_info *mmu_info,
			      unsigned long iova, size_t size)
{
	return l2_unmap(mmu_info, iova, 0, size);
}
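
/*
 * Trash buffer: an IOVA range of IPU_MMUV2_TRASH_RANGE bytes (8 MB, per the
 * comments below) in which every page translates to the single physical
 * mmu->trash_page, presumably so that stray or late device accesses always
 * hit a valid, harmless translation.
 */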
static int allocate_trash_buffer(struct ipu_mmu *mmu)
{
	unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT;
	struct iova *iova;
	unsigned long iova_addr;
	unsigned int i;
	dma_addr_t dma;
	int ret;

	/* Allocate 8MB in iova range */
	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
			  mmu->dmap->mmu_info->aperture_end >> PAGE_SHIFT, 0);
	if (!iova) {
		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
		return -ENOMEM;
	}

	dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
			   PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
		dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
		ret = -ENOMEM;
		goto out_free_iova;
	}

	mmu->pci_trash_page = dma;

	/*
	 * Map the 8MB iova address range to the same physical trash page
	 * mmu->trash_page which is already reserved at the probe
	 */
	iova_addr = iova->pfn_lo;
	for (i = 0; i < n_pages; i++) {
		ret = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
				  mmu->pci_trash_page, PAGE_SIZE);
		if (ret) {
			dev_err(mmu->dev,
				"mapping trash buffer range failed\n");
			goto out_unmap;
		}

		iova_addr++;
	}

	mmu->iova_trash_page = iova->pfn_lo << PAGE_SHIFT;
	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
		mmu->mmid, (unsigned int)mmu->iova_trash_page);
	return 0;

out_unmap:
	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
	dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
	return ret;
}
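
/*
 * ipu_mmu_hw_init() programs each MMU HW instance: the L1 page table base
 * (as a pfn), the info bits, and the per-stream L1/L2 TLB block start
 * addresses. It then sets up the trash buffer on first use and marks the
 * MMU ready, so tlb_invalidate() starts acting on it.
 */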
int ipu_mmu_hw_init(struct ipu_mmu *mmu)
{
	unsigned int i;
	unsigned long flags;
	struct ipu_mmu_info *mmu_info;

	dev_dbg(mmu->dev, "mmu hw init\n");

	mmu_info = mmu->dmap->mmu_info;

	/* Initialise each MMU HW block */
	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j, block_addr;

		/* Write page table address per MMU */
		writel((phys_addr_t)mmu_info->l1_pt_dma,
		       mmu->mmu_hw[i].base + REG_L1_PHYS);

		/* Set info bits per MMU */
		writel(mmu->mmu_hw[i].info_bits,
		       mmu->mmu_hw[i].base + REG_INFO);

		/* Configure MMU TLB stream configuration for L1 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
			if (block_addr > IPU_MAX_LI_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L1 configuration\n");
				return -EINVAL;
			}

			/* Write block start address for each stream */
			writel(block_addr, mmu_hw->base +
			       mmu_hw->l1_stream_id_reg_offset + 4 * j);
		}

		/* Configure MMU TLB stream configuration for L2 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
			if (block_addr > IPU_MAX_L2_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L2 configuration\n");
				return -EINVAL;
			}

			writel(block_addr, mmu_hw->base +
			       mmu_hw->l2_stream_id_reg_offset + 4 * j);
		}
	}

	if (!mmu->trash_page) {
		int ret;

		mmu->trash_page = alloc_page(GFP_KERNEL);
		if (!mmu->trash_page) {
			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
			return -ENOMEM;
		}

		ret = allocate_trash_buffer(mmu);
		if (ret) {
			__free_page(mmu->trash_page);
			mmu->trash_page = NULL;
			dev_err(mmu->dev, "trash buffer allocation failed\n");
			return ret;
		}
	}

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = true;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipu_mmu_hw_init);
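
/*
 * ipu_mmu_alloc() builds the software page table state: a dummy page and a
 * dummy L2 table (whose PTEs all point at the dummy page), a vzalloc'ed
 * array of L2 table pointers, and the L1 table with every entry initially
 * referencing the dummy L2 table. The aperture size depends on whether the
 * device runs in secure mode.
 */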
static struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp)
{
	struct ipu_mmu_info *mmu_info;
	int ret;

	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
	if (!mmu_info)
		return NULL;

	mmu_info->aperture_start = 0;
	mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
					      IPU_MMU_ADDRESS_BITS :
					      IPU_MMU_ADDRESS_BITS_NON_SECURE);
	mmu_info->pgsize_bitmap = SZ_4K;
	mmu_info->dev = &isp->pdev->dev;

	ret = get_dummy_page(mmu_info);
	if (ret)
		goto err_free_info;

	ret = alloc_dummy_l2_pt(mmu_info);
	if (ret)
		goto err_free_dummy_page;

	mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
	if (!mmu_info->l2_pts)
		goto err_free_dummy_l2_pt;

	/*
	 * We always map the L1 page table (a single page), as well as
	 * the L2 page tables.
	 */
	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
	if (!mmu_info->l1_pt)
		goto err_free_l2_pts;

	spin_lock_init(&mmu_info->lock);

	dev_dbg(mmu_info->dev, "domain initialised\n");

	return mmu_info;

err_free_l2_pts:
	vfree(mmu_info->l2_pts);
err_free_dummy_l2_pt:
	free_dummy_l2_pt(mmu_info);
err_free_dummy_page:
	free_dummy_page(mmu_info);
err_free_info:
	kfree(mmu_info);

	return NULL;
}
int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipu_mmu_hw_cleanup);
static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp)
{
	struct ipu_dma_mapping *dmap;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	init_iova_domain(&dmap->iovad, SZ_4K, 1);
	dmap->mmu_info->dmap = dmap;

	kref_init(&dmap->ref);

	dev_dbg(&isp->pdev->dev, "alloc mapping\n");

	return dmap;
}
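
/*
 * IOVA-to-physical lookup mirrors the table layout: index the L2 table
 * pointer array with iova >> ISP_L1PT_SHIFT, index that L2 table with
 * (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT, and shift the stored pfn left
 * by ISP_PAGE_SHIFT. Note that the page offset of the IOVA is not added
 * back by this helper.
 */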
phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
				 dma_addr_t iova)
{
	unsigned long flags;
	u32 *l2_pt;
	phys_addr_t phy_addr;

	spin_lock_irqsave(&mmu_info->lock, flags);
	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
	phy_addr <<= ISP_PAGE_SHIFT;
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return phy_addr;
}
/*
 * The following four functions are implemented based on iommu.c,
 * drivers/iommu/iommu.c:iommu_pgsize().
 */
static size_t ipu_mmu_pgsize(unsigned long pgsize_bitmap,
			     unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= pgsize_bitmap;

	/* make sure we're still sane */
	WARN_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
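
/*
 * Example (with pgsize_bitmap == SZ_4K as set in ipu_mmu_alloc()): for a
 * 16 KiB request at a 4 KiB-aligned address, __fls(size) picks 16 KiB, the
 * alignment check may lower that, and masking with the bitmap leaves only
 * the 4 KiB bit, so the map/unmap loops always advance one 4 KiB page at a
 * time.
 */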
/* drivers/iommu/iommu.c:iommu_unmap() */
size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
		     size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware.
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return 0;
	}

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
					       iova, size - unmapped);

		unmapped_page = __ipu_mmu_unmap(mmu_info, iova, pgsize);
		if (!unmapped_page)
			break;

		dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
/* drivers/iommu/iommu.c:iommu_map() */
int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
		phys_addr_t paddr, size_t size)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
			iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	while (size) {
		size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
					       iova | paddr, size);

		dev_dbg(mmu_info->dev,
			"mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			iova, &paddr, pgsize);

		ret = __ipu_mmu_map(mmu_info, iova, paddr, pgsize);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		ipu_mmu_unmap(mmu_info, orig_iova, orig_size - size);

	return ret;
}
static void ipu_mmu_destroy(struct ipu_mmu *mmu)
{
	struct ipu_dma_mapping *dmap = mmu->dmap;
	struct ipu_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_trash_page) {
		iova = find_iova(&dmap->iovad,
				 mmu->iova_trash_page >> PAGE_SHIFT);
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT,
				      (iova->pfn_hi - iova->pfn_lo + 1) <<
				      PAGE_SHIFT);
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_trash_page = 0;
		dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		mmu->pci_trash_page = 0;
		__free_page(mmu->trash_page);
	}

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
			dma_unmap_single(mmu_info->dev,
					 TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
					 PAGE_SIZE, DMA_BIDIRECTIONAL);
			free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
		}
	}

	free_dummy_page(mmu_info);
	dma_unmap_single(mmu_info->dev, mmu_info->l1_pt_dma,
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
	free_page((unsigned long)mmu_info->l1_pt);
	kfree(mmu_info);
}
struct ipu_mmu *ipu_mmu_init(struct device *dev,
			     void __iomem *base, int mmid,
			     const struct ipu_hw_variants *hw)
{
	struct ipu_mmu *mmu;
	struct ipu_mmu_pdata *pdata;
	struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
	unsigned int i;

	if (hw->nr_mmus > IPU_MMU_MAX_DEVICES)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < hw->nr_mmus; i++) {
		struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
		const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i];

		if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
		    src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
			return ERR_PTR(-EINVAL);

		*pdata_mmu = *src_mmu;
		pdata_mmu->base = base + src_mmu->offset;
	}

	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->dev = dev;
	mmu->mmid = mmid;
	mmu->mmu_hw = pdata->mmu_hw;
	mmu->nr_mmus = hw->nr_mmus;
	mmu->tlb_invalidate = tlb_invalidate;

	INIT_LIST_HEAD(&mmu->vma_list);
	spin_lock_init(&mmu->ready_lock);

	mmu->dmap = alloc_dma_mapping(isp);
	if (!mmu->dmap) {
		dev_err(dev, "can't alloc dma mapping\n");
		return ERR_PTR(-ENOMEM);
	}

	return mmu;
}
EXPORT_SYMBOL(ipu_mmu_init);
void ipu_mmu_cleanup(struct ipu_mmu *mmu)
{
	struct ipu_dma_mapping *dmap = mmu->dmap;

	ipu_mmu_destroy(mmu);
	mmu->dmap = NULL;

	put_iova_domain(&dmap->iovad);
	kfree(dmap);
}
EXPORT_SYMBOL(ipu_mmu_cleanup);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel ipu mmu driver");