// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

struct panfrost_mmu {
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	struct mutex lock;
};

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	u32 val;
	int ret;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
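	/* poll AS_STATUS every 10us, giving up after 1000us */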
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;

	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */
	size = round_up(size, PAGE_SIZE);
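
	/*
	 * e.g. size = 3 pages: fls(3) = 2, so region_width = 12; since
	 * 3 != 1 << (12 - 11), the check below bumps it to 13, rounding
	 * the lock region up to the next power of two (4 pages).
	 */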
	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
			       u64 iova, size_t size, u32 op)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	ret = wait_ready(pfdev, as_nr);

	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);

	return ret;
}

void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
{
	struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
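
/*
 * Pick the largest page size a mapping can use: 2MB when the address is
 * 2MB-aligned and at least 2MB remains (e.g. get_pgsize(SZ_2M, SZ_4M)
 * returns SZ_2M), otherwise fall back to 4K.
 */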
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	unsigned int count;
	struct scatterlist *sgl;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(bo->is_mapped))
		return 0;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&pfdev->mmu->lock);
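
	/*
	 * Map each scatterlist segment, letting get_pgsize() choose 2MB
	 * blocks whenever both the IOVA and the physical address are
	 * suitably aligned, and 4K pages otherwise.
	 */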
	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;
	int ret;

	if (WARN_ON(!bo->is_mapped))
		return;

	dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	mutex_lock(&pfdev->mmu->lock);
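
	/*
	 * Walk the range one entry at a time; ops->unmap() tears down at
	 * most one 4K or 2MB entry per call and returns how much it
	 * actually unmapped, which advances the cursor.
	 */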
	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped_len += unmapped_page;
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{
	struct panfrost_device *pfdev = cookie;

	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;

	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk	= mmu_tlb_flush_walk,
	.tlb_flush_leaf	= mmu_tlb_flush_leaf,
};

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_STAT);
	int i;

	if (!status)
		return IRQ_NONE;

	dev_err(pfdev->dev, "mmu irq status=%x\n", status);

	for (i = 0; status; i++) {
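		/* each address space owns a pair of status bits: bit i in
		 * the low half and bit (i + 16) in the high half, so clear
		 * and handle both together */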
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	struct io_pgtable_ops *pgtbl_ops;
	int err, irq;

	pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
	if (!pfdev->mmu)
		return -ENOMEM;

	mutex_init(&pfdev->mmu->lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
			       IRQF_SHARED, "mmu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
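
	/*
	 * mmu_features packs the supported address widths: the low byte
	 * carries the input (virtual) address bits and the next byte the
	 * output (physical) address bits, extracted here with FIELD_GET.
	 */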
	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
					 pfdev);
	if (!pgtbl_ops)
		return -ENOMEM;

	pfdev->mmu->pgtbl_ops = pgtbl_ops;

	panfrost_mmu_enable(pfdev, 0);

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
	mmu_disable(pfdev, 0);

	free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
}