]>
Commit | Line | Data |
---|---|---|
d94d71cb | 1 | // SPDX-License-Identifier: GPL-2.0-only |
695093e3 | 2 | /* |
695093e3 VS |
3 | * |
4 | * Copyright (C) 2013 Freescale Semiconductor, Inc. | |
5 | * Author: Varun Sethi <varun.sethi@freescale.com> | |
695093e3 VS |
6 | */ |
7 | ||
8 | #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ | |
9 | ||
695093e3 | 10 | #include "fsl_pamu_domain.h" |
695093e3 | 11 | |
cd70d465 EM |
12 | #include <sysdev/fsl_pci.h> |
13 | ||
695093e3 VS |
14 | /* |
15 | * Global spinlock that needs to be held while | |
16 | * configuring PAMU. | |
17 | */ | |
18 | static DEFINE_SPINLOCK(iommu_lock); | |
19 | ||
20 | static struct kmem_cache *fsl_pamu_domain_cache; | |
21 | static struct kmem_cache *iommu_devinfo_cache; | |
22 | static DEFINE_SPINLOCK(device_domain_lock); | |
23 | ||
3ff2dcc0 JR |
24 | struct iommu_device pamu_iommu; /* IOMMU core code handle */ |
25 | ||
/* Recover the driver's private domain from the embedded generic iommu_domain. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
30 | ||
695093e3 VS |
31 | static int __init iommu_init_mempool(void) |
32 | { | |
695093e3 | 33 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", |
cd70d465 EM |
34 | sizeof(struct fsl_dma_domain), |
35 | 0, | |
36 | SLAB_HWCACHE_ALIGN, | |
37 | NULL); | |
695093e3 VS |
38 | if (!fsl_pamu_domain_cache) { |
39 | pr_debug("Couldn't create fsl iommu_domain cache\n"); | |
40 | return -ENOMEM; | |
41 | } | |
42 | ||
43 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", | |
cd70d465 EM |
44 | sizeof(struct device_domain_info), |
45 | 0, | |
46 | SLAB_HWCACHE_ALIGN, | |
47 | NULL); | |
695093e3 VS |
48 | if (!iommu_devinfo_cache) { |
49 | pr_debug("Couldn't create devinfo cache\n"); | |
50 | kmem_cache_destroy(fsl_pamu_domain_cache); | |
51 | return -ENOMEM; | |
52 | } | |
53 | ||
54 | return 0; | |
55 | } | |
56 | ||
695093e3 | 57 | static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, |
cd70d465 | 58 | u32 val) |
695093e3 | 59 | { |
84b6269c | 60 | int ret = 0; |
695093e3 VS |
61 | unsigned long flags; |
62 | ||
63 | spin_lock_irqsave(&iommu_lock, flags); | |
ba58d121 CH |
64 | ret = pamu_update_paace_stash(liodn, val); |
65 | if (ret) { | |
84b6269c | 66 | pr_debug("Failed to update SPAACE for liodn %d\n ", liodn); |
695093e3 | 67 | spin_unlock_irqrestore(&iommu_lock, flags); |
ba58d121 | 68 | return ret; |
695093e3 VS |
69 | } |
70 | ||
71 | spin_unlock_irqrestore(&iommu_lock, flags); | |
72 | ||
73 | return ret; | |
74 | } | |
75 | ||
76 | /* Set the geometry parameters for a LIODN */ | |
dae7747a CH |
77 | static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev, |
78 | int liodn) | |
695093e3 | 79 | { |
695093e3 VS |
80 | u32 omi_index = ~(u32)0; |
81 | unsigned long flags; | |
ba58d121 | 82 | int ret; |
695093e3 VS |
83 | |
84 | /* | |
85 | * Configure the omi_index at the geometry setup time. | |
86 | * This is a static value which depends on the type of | |
87 | * device and would not change thereafter. | |
88 | */ | |
89 | get_ome_index(&omi_index, dev); | |
90 | ||
695093e3 VS |
91 | spin_lock_irqsave(&iommu_lock, flags); |
92 | ret = pamu_disable_liodn(liodn); | |
dae7747a CH |
93 | if (ret) |
94 | goto out_unlock; | |
57fa44be | 95 | ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0); |
dae7747a CH |
96 | if (ret) |
97 | goto out_unlock; | |
57fa44be | 98 | ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id, |
dae7747a CH |
99 | PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE); |
100 | out_unlock: | |
695093e3 VS |
101 | spin_unlock_irqrestore(&iommu_lock, flags); |
102 | if (ret) { | |
ba58d121 CH |
103 | pr_debug("PAACE configuration failed for liodn %d\n", |
104 | liodn); | |
695093e3 | 105 | } |
695093e3 VS |
106 | return ret; |
107 | } | |
108 | ||
/*
 * Tear down one device<->domain binding: unlink it from the domain's
 * device list, disable its LIODN in the PAMU hardware and free the
 * bookkeeping record.
 *
 * NOTE(review): list_del() is done outside any lock here, so the caller
 * is expected to hold the domain lock (detach_device() does) — confirm
 * before calling from elsewhere.
 */
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	/* Hardware access is serialized by the global PAMU lock. */
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	/* Clearing the device's priv pointer races with attach_device(). */
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
122 | ||
/*
 * Detach @dev from @dma_domain, dropping every LIODN binding it owns.
 * A NULL @dev means "detach all devices" (used on domain teardown).
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
136 | ||
137 | static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev) | |
138 | { | |
139 | struct device_domain_info *info, *old_domain_info; | |
140 | unsigned long flags; | |
141 | ||
142 | spin_lock_irqsave(&device_domain_lock, flags); | |
143 | /* | |
144 | * Check here if the device is already attached to domain or not. | |
145 | * If the device is already attached to a domain detach it. | |
146 | */ | |
2263d818 | 147 | old_domain_info = dev_iommu_priv_get(dev); |
695093e3 VS |
148 | if (old_domain_info && old_domain_info->domain != dma_domain) { |
149 | spin_unlock_irqrestore(&device_domain_lock, flags); | |
150 | detach_device(dev, old_domain_info->domain); | |
151 | spin_lock_irqsave(&device_domain_lock, flags); | |
152 | } | |
153 | ||
154 | info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC); | |
155 | ||
156 | info->dev = dev; | |
157 | info->liodn = liodn; | |
158 | info->domain = dma_domain; | |
159 | ||
160 | list_add(&info->link, &dma_domain->devices); | |
161 | /* | |
162 | * In case of devices with multiple LIODNs just store | |
163 | * the info for the first LIODN as all | |
164 | * LIODNs share the same domain | |
165 | */ | |
2263d818 JR |
166 | if (!dev_iommu_priv_get(dev)) |
167 | dev_iommu_priv_set(dev, info); | |
695093e3 | 168 | spin_unlock_irqrestore(&device_domain_lock, flags); |
695093e3 VS |
169 | } |
170 | ||
171 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, | |
cd70d465 | 172 | dma_addr_t iova) |
695093e3 | 173 | { |
cd70d465 EM |
174 | if (iova < domain->geometry.aperture_start || |
175 | iova > domain->geometry.aperture_end) | |
695093e3 | 176 | return 0; |
376dfd2a | 177 | return iova; |
695093e3 VS |
178 | } |
179 | ||
b7eb6785 | 180 | static bool fsl_pamu_capable(enum iommu_cap cap) |
695093e3 VS |
181 | { |
182 | return cap == IOMMU_CAP_CACHE_COHERENCY; | |
183 | } | |
184 | ||
/* Release a domain: detach every device still bound to it, then free it. */
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
193 | ||
8d4bfe40 | 194 | static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) |
695093e3 VS |
195 | { |
196 | struct fsl_dma_domain *dma_domain; | |
197 | ||
8d4bfe40 JR |
198 | if (type != IOMMU_DOMAIN_UNMANAGED) |
199 | return NULL; | |
200 | ||
c8224508 CH |
201 | dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); |
202 | if (!dma_domain) | |
8d4bfe40 | 203 | return NULL; |
c8224508 CH |
204 | |
205 | dma_domain->stash_id = ~(u32)0; | |
c8224508 CH |
206 | INIT_LIST_HEAD(&dma_domain->devices); |
207 | spin_lock_init(&dma_domain->domain_lock); | |
208 | ||
209 | /* default geometry 64 GB i.e. maximum system address */ | |
8d4bfe40 JR |
210 | dma_domain->iommu_domain. geometry.aperture_start = 0; |
211 | dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1; | |
212 | dma_domain->iommu_domain.geometry.force_aperture = true; | |
695093e3 | 213 | |
8d4bfe40 | 214 | return &dma_domain->iommu_domain; |
695093e3 VS |
215 | } |
216 | ||
/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	/* Stop at the first LIODN that fails; earlier updates are kept. */
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}
231 | ||
/*
 * Attach @dev to @domain: look up its "fsl,liodn" property and, for each
 * LIODN listed, record the binding and program + enable the PAACE entry.
 * For PCI devices the LIODN of the hosting PCI controller is used instead.
 * Returns 0 on success or a negative errno; on a mid-loop failure the
 * already-attached LIODNs are left attached.
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* len is in bytes; the property is an array of u32 LIODNs. */
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}
284 | ||
/*
 * Detach @dev from @domain.  Mirrors fsl_pamu_attach_device(): for PCI
 * devices the hosting PCI controller carries the LIODN, so the controller
 * device is the one detached.
 */
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	/* Only devices that actually carry a LIODN can be detached. */
	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}
315 | ||
695093e3 | 316 | /* Set the domain stash attribute */ |
4eeb96f6 | 317 | int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu) |
695093e3 | 318 | { |
4eeb96f6 | 319 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
695093e3 VS |
320 | unsigned long flags; |
321 | int ret; | |
322 | ||
323 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | |
4eeb96f6 | 324 | dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu); |
695093e3 VS |
325 | if (dma_domain->stash_id == ~(u32)0) { |
326 | pr_debug("Invalid stash attributes\n"); | |
327 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | |
328 | return -EINVAL; | |
329 | } | |
695093e3 | 330 | ret = update_domain_stash(dma_domain, dma_domain->stash_id); |
695093e3 VS |
331 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
332 | ||
333 | return ret; | |
334 | } | |
335 | ||
/*
 * Return the device's iommu group, allocating a fresh one if the device
 * has none yet.
 */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	return group ? group : iommu_group_alloc();
}
346 | ||
347 | static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl) | |
348 | { | |
349 | u32 version; | |
350 | ||
351 | /* Check the PCI controller version number by readding BRR1 register */ | |
352 | version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); | |
353 | version &= PCI_FSL_BRR1_VER; | |
cd70d465 EM |
354 | /* If PCI controller version is >= 0x204 we can partition endpoints */ |
355 | return version >= 0x204; | |
695093e3 VS |
356 | } |
357 | ||
358 | /* Get iommu group information from peer devices or devices on the parent bus */ | |
359 | static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev) | |
360 | { | |
361 | struct pci_dev *tmp; | |
362 | struct iommu_group *group; | |
363 | struct pci_bus *bus = pdev->bus; | |
364 | ||
9ed43662 | 365 | /* |
695093e3 VS |
366 | * Traverese the pci bus device list to get |
367 | * the shared iommu group. | |
368 | */ | |
369 | while (bus) { | |
370 | list_for_each_entry(tmp, &bus->devices, bus_list) { | |
371 | if (tmp == pdev) | |
372 | continue; | |
373 | group = iommu_group_get(&tmp->dev); | |
374 | if (group) | |
375 | return group; | |
376 | } | |
377 | ||
378 | bus = bus->parent; | |
379 | } | |
380 | ||
381 | return NULL; | |
382 | } | |
383 | ||
/*
 * Work out the iommu group for a PCI device.  If the controller can
 * partition endpoints each device gets its own (pci_device_group) group;
 * otherwise all devices behind the controller share one group, which is
 * inherited from the controller device or from an already-probed sibling.
 * Returns an ERR_PTR(-ENODEV) if no group could be determined.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a paritionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}
426 | ||
d5e58297 | 427 | static struct iommu_group *fsl_pamu_device_group(struct device *dev) |
695093e3 | 428 | { |
3170447c | 429 | struct iommu_group *group = ERR_PTR(-ENODEV); |
d5e58297 | 430 | int len; |
695093e3 VS |
431 | |
432 | /* | |
433 | * For platform devices we allocate a separate group for | |
434 | * each of the devices. | |
435 | */ | |
d5e58297 JR |
436 | if (dev_is_pci(dev)) |
437 | group = get_pci_device_group(to_pci_dev(dev)); | |
438 | else if (of_get_property(dev->of_node, "fsl,liodn", &len)) | |
439 | group = get_device_iommu_group(dev); | |
695093e3 | 440 | |
d5e58297 JR |
441 | return group; |
442 | } | |
695093e3 | 443 | |
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	/* There is a single PAMU instance; every device shares it. */
	return &pamu_iommu;
}
448 | ||
static void fsl_pamu_release_device(struct device *dev)
{
	/* probe_device() allocates nothing per device, so nothing to free. */
}
452 | ||
/* IOMMU core callbacks implemented by the PAMU driver. */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free	= fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.probe_device	= fsl_pamu_probe_device,
	.release_device	= fsl_pamu_release_device,
	.device_group	= fsl_pamu_device_group,
};
464 | ||
cd70d465 | 465 | int __init pamu_domain_init(void) |
695093e3 VS |
466 | { |
467 | int ret = 0; | |
468 | ||
469 | ret = iommu_init_mempool(); | |
470 | if (ret) | |
471 | return ret; | |
472 | ||
3ff2dcc0 JR |
473 | ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0"); |
474 | if (ret) | |
475 | return ret; | |
476 | ||
2d471b20 | 477 | ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL); |
3ff2dcc0 JR |
478 | if (ret) { |
479 | iommu_device_sysfs_remove(&pamu_iommu); | |
480 | pr_err("Can't register iommu device\n"); | |
481 | return ret; | |
482 | } | |
483 | ||
695093e3 VS |
484 | bus_set_iommu(&platform_bus_type, &fsl_pamu_ops); |
485 | bus_set_iommu(&pci_bus_type, &fsl_pamu_ops); | |
486 | ||
487 | return ret; | |
488 | } |