// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 */

#define dev_fmt(fmt)	"gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)

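/*
 * Each GART entry is a 32-bit word: bit 31 marks the entry valid and
 * bits 30:12 hold the physical address of the 4KiB page backing the
 * IOVA page (see GART_PAGE_MASK below).
 */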
#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	unsigned long		iovmm_base;	/* offset to vmm_area start */
	unsigned long		iovmm_end;	/* offset to vmm_area end */
	spinlock_t		pte_lock;	/* for pagetable */
	spinlock_t		dom_lock;	/* for active domain */
	unsigned int		active_devices;	/* number of active devices */
	struct iommu_domain	*active_domain;	/* current active domain */
	struct iommu_device	iommu;		/* IOMMU Core handle */
	struct device		*dev;
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)

static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}

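/*
 * Program every PTE in the aperture (restoring from @data, or clearing
 * the entries when @data is NULL) and then enable GART translation.
 */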
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
			iova + bytes > gart->iovmm_end);
}

static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}

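/*
 * The GART provides a single translation aperture, so only one domain can
 * be active at a time and all attached devices share it. Attaching a
 * device to a second domain while another is active fails with -EBUSY.
 */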
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev->archdata.iommu != domain) {
		dev->archdata.iommu = domain;
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev->archdata.iommu == domain) {
		dev->archdata.iommu = NULL;

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}

static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart_handle->iovmm_base;
		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}

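/*
 * Write a single GART_PAGE_SIZE entry; the IOMMU core splits larger map
 * requests into 4KiB chunks according to pgsize_bitmap. When gart_debug
 * is set, overwriting an already-valid entry is reported as an error
 * instead of being silently replaced.
 */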
static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}

static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, 0);

	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

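/*
 * Only devices that carry an IOMMU firmware specification (i.e. reference
 * the GART in the device tree) are translated; everything else is
 * rejected here.
 */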
static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->iommu_fwspec)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}

static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}

static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

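/*
 * PTE updates use relaxed writes, so read a GART register back to make
 * sure all pending mapping updates have reached the hardware before DMA
 * through the new mappings may begin.
 */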
static void gart_iommu_sync_map(struct iommu_domain *domain)
{
	FLUSH_GART_REGS(gart_handle);
}

static void gart_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather)
{
	gart_iommu_sync_map(domain);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync_map,
	.iotlb_sync	= gart_iommu_sync,
};

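/*
 * Suspend/resume helpers, called from the Tegra memory controller driver.
 * The GART page table is assumed not to survive suspend, so every PTE is
 * saved here and rewritten by tegra_gart_resume().
 */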
int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}

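/*
 * The GART registers live inside the Tegra memory controller's MMIO
 * window, so probing is driven by the memory controller driver, which
 * passes in its register mapping and exposes the GART aperture as a
 * second MEM resource.
 */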
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	err = iommu_device_register(&gart->iommu);
	if (err)
		goto remove_sysfs;

	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");