/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define pr_fmt(fmt)	"%s(): " fmt, __func__
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)
#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
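
/*
 * Note: the page table is not memory-mapped. Each PTE is accessed
 * indirectly through a register pair: the IOVA is written to
 * GART_ENTRY_ADDR to select a slot, then the PTE value is written to or
 * read from GART_ENTRY_DATA (see gart_set_pte()/gart_read_pte() below).
 */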
struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;

	struct iommu_device	iommu;		/* IOMMU Core handle */
};
struct gart_domain {
	struct iommu_domain domain;		/* generic domain handle */
	struct gart_device *gart;		/* link to gart device */
};
static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;
#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
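
/*
 * Illustrative example: with PAGE_SHIFT == 12, mapping physical page
 * 0x12345000 gives pfn 0x12345, so GART_PTE(0x12345) ==
 * 0x80000000 | 0x12345000 == 0x92345000, i.e. the page frame address
 * with bit 31 set as the "valid" flag.
 */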
static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}
/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))
#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}
static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}
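
/*
 * do_gart_setup() (re)programs every PTE in the aperture: with a NULL
 * data pointer it clears all entries, otherwise it restores a saved
 * table. Writing 1 to GART_CONFIG then enables translation, and the
 * read-back flush orders the whole sequence.
 */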
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}
#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif
static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}
static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find\n");
out:
	spin_unlock(&gart->client_lock);
}
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		spin_lock(&gart->client_lock);
		if (!list_empty(&gart->client)) {
			struct gart_client *c;

			list_for_each_entry(c, &gart->client, list)
				gart_iommu_detach_dev(domain, c->dev);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}
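
/*
 * Map a single 4 KiB page: validate that the IOVA falls inside the
 * aperture and that the physical page exists, then program one PTE.
 * The "entry already in use" check is only made when the gart_debug
 * module parameter is set, since it costs an extra PTE read-back per
 * mapping.
 */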
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;
	unsigned long pte;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	if (gart_debug) {
		pte = gart_read_pte(gart, iova);
		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
			spin_unlock_irqrestore(&gart->pte_lock, flags);
			dev_err(gart->dev, "Page entry is in-use\n");
			return -EBUSY;
		}
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}
static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}
static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}
static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}
static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};
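
/*
 * The GART does not retain its page table across a suspend/resume
 * cycle, so the whole table is snapshotted into gart->savedata on
 * suspend (one u32 per remappable page) and written back, with
 * translation re-enabled, by do_gart_setup() on resume.
 */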
static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}
static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}
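
/*
 * Probe expects two MEM resources: index 0 is the GART register window
 * and index 1 is the remappable aperture. After registering with the
 * IOMMU core, the suspend/resume save area is sized at one u32 per
 * remappable page and all PTEs are cleared via do_gart_setup(gart, NULL).
 */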
static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;
	int ret;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret) {
		dev_err(dev, "Failed to register IOMMU in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);

	ret = iommu_device_register(&gart->iommu);
	if (ret) {
		dev_err(dev, "Failed to register IOMMU\n");
		iommu_device_sysfs_remove(&gart->iommu);
		return ret;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return 0;
}
static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};
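
/*
 * Illustrative device tree node; the unit addresses below are an
 * assumption based on a typical Tegra20 layout, not taken from this
 * driver. The first reg entry is the register window, the second the
 * remappable aperture (matching probe's MEM resources 0 and 1):
 *
 *	gart: gart@7000f024 {
 *		compatible = "nvidia,tegra20-gart";
 *		reg = <0x7000f024 0x00000018>,
 *		      <0x58000000 0x02000000>;
 *	};
 */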
static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};
static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.driver = {
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table	= tegra_gart_of_match,
		.suppress_bind_attrs = true,
	},
};
static int __init tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}
subsys_initcall(tegra_gart_init);
module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");