/*
 * Copyright (C) 2016, Semihalf
 * Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of the I/O mapping
 * reported to the OS through firmware via the I/O Remapping Table (IORT).
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};

static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}
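
/*
 * Note: the three helpers above maintain a global <IORT node, fwnode>
 * mapping, so that platform devices created from SMMU IORT nodes (see
 * iort_init_platform_devices() below) can later be matched back to the
 * table entries that described them.
 */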

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register a domain token and the related
 *				  ITS ID so that they can be retrieved later
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if the list element allocation failed
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
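
/*
 * Together, the three functions above form the glue between an ITS
 * translation ID found in the IORT and the irqdomain token registered
 * for that ITS by the interrupt controller driver (the GICv3 ITS driver
 * is the expected registrant here).
 */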

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
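
/*
 * The walk above is purely linear: each IORT node carries its own
 * length, so stepping by iort_node->length visits all node_count
 * entries, and the WARN_TAINT() guards against a firmware table whose
 * declared node count overruns its declared length.
 */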

static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
	return AE_OK;
}

bool iort_node_match(u8 type)
{
	struct acpi_iort_node *node;

	node = iort_scan_node(type, iort_match_type_callback, NULL);

	return node != NULL;
}
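
/*
 * Since iort_match_type_callback() accepts unconditionally,
 * iort_node_match() simply reports whether at least one node of the
 * given type exists in the table.
 */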

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev) {
			status = AE_NOT_FOUND;
			goto out;
		}

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	} else {
		status = AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* A single mapping does not care about the input ID */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}
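
/*
 * Worked example: for a range mapping with input_base = 0x0,
 * id_count = 0xffff and output_base = 0x10000, an input RID of 0x42
 * falls within the window and translates to
 * 0x10000 + (0x42 - 0x0) = 0x10042.
 */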

static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
						u32 rid_in, u32 *rid_out,
						u8 type)
{
	u32 rid = rid_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (node->type == type) {
			if (rid_out)
				*rid_out = rid;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the RID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, rid, &rid))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input RID to output RID unchanged on mapping failure */
	if (rid_out)
		*rid_out = rid_in;

	return NULL;
}
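
/*
 * In other words: each loop iteration translates the RID through one
 * node's mapping array and then follows map->output_reference up to the
 * parent node, until a node of the requested type (an ITS group, for
 * instance) is reached or no further mapping applies.
 */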

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev))
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_rid(node, req_id, &dev_id, ACPI_IORT_NODE_ITS_GROUP);
	return dev_id;
}
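
/*
 * Usage sketch (hypothetical caller, not taken from this file): a PCI
 * device's requester ID is its bus/devfn pair, so a caller could do
 *
 *	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
 *	u32 dev_id = iort_msi_map_rid(&pdev->dev, rid);
 *
 * to obtain the device ID that the ITS will see for this device's MSIs.
 */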

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: The device requester ID.
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_rid(node, req_id, NULL, ACPI_IORT_NODE_ITS_GROUP);
	if (!node)
		return -ENXIO;

	/*
	 * Move to ITS specific data. Note the >= check: identifiers[] holds
	 * its_count entries, so idx == its_count is already out of range.
	 */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
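
/*
 * The expected caller here is the PCI/MSI layer: when it needs an MSI
 * irqdomain for a device, it maps the requester ID to an ITS group and
 * looks up the irqdomain registered for that ITS via the domain token
 * list above.
 */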

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + SZ_128K - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;

	if (smmu->event_gsiv)
		acpi_iort_register_irq(smmu->event_gsiv, "eventq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->pri_gsiv)
		acpi_iort_register_irq(smmu->pri_gsiv, "priq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->gerr_gsiv)
		acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->sync_gsiv)
		acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);
}
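
/*
 * Note that the resource layout built above must stay in step with
 * arm_smmu_v3_count_resources(): one MMIO region followed by one IRQ
 * resource for each wired GSIV.
 */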

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}
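
/*
 * For example, an SMMU with four context interrupts yields
 * 4 + 2 = 6 resources: the MMIO region, the global fault interrupt and
 * the four context interrupts.
 */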

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_iommu_config {
	const char *name;
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
	int (*iommu_count_resources)(struct acpi_iort_node *node);
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};

static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}
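
/*
 * Supporting a new IORT-described IOMMU thus comes down to adding a new
 * iort_iommu_config instance and a matching case in the switch above.
 */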

/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 on failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	if (!ops)
		return -ENODEV;

	/*
	 * platform_device_alloc() returns NULL on failure, not an ERR_PTR,
	 * so return -ENOMEM rather than PTR_ERR(pdev) here.
	 */
	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMU set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			     DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
		    (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();

	acpi_probe_device_table(iort);
}
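
/*
 * acpi_iort_init() is expected to run once during early ACPI bring-up;
 * the mapped table is never released, so the static iort_table pointer
 * stays valid for every helper above for the lifetime of the system.
 */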