mirror_ubuntu-zesty-kernel.git: drivers/of/address.c
1
2 #define pr_fmt(fmt) "OF: " fmt
3
4 #include <linux/device.h>
5 #include <linux/fwnode.h>
6 #include <linux/io.h>
7 #include <linux/ioport.h>
8 #include <linux/libio.h>
9 #include <linux/module.h>
10 #include <linux/of_address.h>
11 #include <linux/pci.h>
12 #include <linux/pci_regs.h>
13 #include <linux/sizes.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16
17 /* Max address size we deal with */
18 #define OF_MAX_ADDR_CELLS 4
19 #define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
20 #define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
21
22 static struct of_bus *of_match_bus(struct device_node *np);
23 static int __of_address_to_resource(struct device_node *dev,
24 const __be32 *addrp, u64 size, unsigned int flags,
25 const char *name, struct resource *r);
26
27 /* Debug utility */
28 #ifdef DEBUG
29 static void of_dump_addr(const char *s, const __be32 *addr, int na)
30 {
31 pr_debug("%s", s);
32 while (na--)
33 pr_cont(" %08x", be32_to_cpu(*(addr++)));
34 pr_cont("\n");
35 }
36 #else
37 static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
38 #endif
39
40 /* Callbacks for bus specific translators */
41 struct of_bus {
42 const char *name;
43 const char *addresses;
44 int (*match)(struct device_node *parent);
45 void (*count_cells)(struct device_node *child,
46 int *addrc, int *sizec);
47 u64 (*map)(__be32 *addr, const __be32 *range,
48 int na, int ns, int pna);
49 int (*translate)(__be32 *addr, u64 offset, int na);
50 unsigned int (*get_flags)(const __be32 *addr);
51 };
52
53 /*
54 * Default translator (generic bus)
55 */
56
57 static void of_bus_default_count_cells(struct device_node *dev,
58 int *addrc, int *sizec)
59 {
60 if (addrc)
61 *addrc = of_n_addr_cells(dev);
62 if (sizec)
63 *sizec = of_n_size_cells(dev);
64 }
65
66 static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
67 int na, int ns, int pna)
68 {
69 u64 cp, s, da;
70
71 cp = of_read_number(range, na);
72 s = of_read_number(range + na + pna, ns);
73 da = of_read_number(addr, na);
74
75 pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
76 (unsigned long long)cp, (unsigned long long)s,
77 (unsigned long long)da);
78
79 if (da < cp || da >= (cp + s))
80 return OF_BAD_ADDR;
81 return da - cp;
82 }
83
84 static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
85 {
86 u64 a = of_read_number(addr, na);
87 memset(addr, 0, na * 4);
88 a += offset;
89 if (na > 1)
90 addr[na - 2] = cpu_to_be32(a >> 32);
91 addr[na - 1] = cpu_to_be32(a & 0xffffffffu);
92
93 return 0;
94 }
95
96 static unsigned int of_bus_default_get_flags(const __be32 *addr)
97 {
98 return IORESOURCE_MEM;
99 }
100
101 #ifdef CONFIG_OF_ADDRESS_PCI
102 /*
103 * PCI bus specific translator
104 */
105
106 static int of_bus_pci_match(struct device_node *np)
107 {
108 /*
109 * "pciex" is PCI Express
110 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
111 * "ht" is hypertransport
112 */
113 return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
114 !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
115 }
116
117 static void of_bus_pci_count_cells(struct device_node *np,
118 int *addrc, int *sizec)
119 {
120 if (addrc)
121 *addrc = 3;
122 if (sizec)
123 *sizec = 2;
124 }
125
126 static unsigned int of_bus_pci_get_flags(const __be32 *addr)
127 {
128 unsigned int flags = 0;
129 u32 w = be32_to_cpup(addr);
130
131 switch((w >> 24) & 0x03) {
132 case 0x01:
133 flags |= IORESOURCE_IO;
134 break;
135 case 0x02: /* 32 bits */
136 case 0x03: /* 64 bits */
137 flags |= IORESOURCE_MEM;
138 break;
139 }
140 if (w & 0x40000000)
141 flags |= IORESOURCE_PREFETCH;
142 return flags;
143 }
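/*
 * For illustration (the two values below are made up), example phys.hi
 * cells decode as follows in of_bus_pci_get_flags() above:
 *
 *	0x81000000: ss bits ((w >> 24) & 0x03) == 0x01 -> IORESOURCE_IO
 *	0xc2000010: ss bits == 0x02 and bit 30 set     -> IORESOURCE_MEM |
 *	                                                  IORESOURCE_PREFETCH
 */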
144
145 static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
146 int pna)
147 {
148 u64 cp, s, da;
149 unsigned int af, rf;
150
151 af = of_bus_pci_get_flags(addr);
152 rf = of_bus_pci_get_flags(range);
153
154 /* Check address type match */
155 if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
156 return OF_BAD_ADDR;
157
158 /* Read address values, skipping high cell */
159 cp = of_read_number(range + 1, na - 1);
160 s = of_read_number(range + na + pna, ns);
161 da = of_read_number(addr + 1, na - 1);
162
163 pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
164 (unsigned long long)cp, (unsigned long long)s,
165 (unsigned long long)da);
166
167 if (da < cp || da >= (cp + s))
168 return OF_BAD_ADDR;
169 return da - cp;
170 }
171
172 static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
173 {
174 return of_bus_default_translate(addr + 1, offset, na - 1);
175 }
176 #endif /* CONFIG_OF_ADDRESS_PCI */
177
178 #ifdef CONFIG_PCI
179 const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
180 unsigned int *flags)
181 {
182 const __be32 *prop;
183 unsigned int psize;
184 struct device_node *parent;
185 struct of_bus *bus;
186 int onesize, i, na, ns;
187
188 /* Get parent & match bus type */
189 parent = of_get_parent(dev);
190 if (parent == NULL)
191 return NULL;
192 bus = of_match_bus(parent);
193 if (strcmp(bus->name, "pci")) {
194 of_node_put(parent);
195 return NULL;
196 }
197 bus->count_cells(dev, &na, &ns);
198 of_node_put(parent);
199 if (!OF_CHECK_ADDR_COUNT(na))
200 return NULL;
201
202 /* Get "reg" or "assigned-addresses" property */
203 prop = of_get_property(dev, bus->addresses, &psize);
204 if (prop == NULL)
205 return NULL;
206 psize /= 4;
207
208 onesize = na + ns;
209 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
210 u32 val = be32_to_cpu(prop[0]);
211 if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
212 if (size)
213 *size = of_read_number(prop + na, ns);
214 if (flags)
215 *flags = bus->get_flags(prop);
216 return prop;
217 }
218 }
219 return NULL;
220 }
221 EXPORT_SYMBOL(of_get_pci_address);
222
223 int of_pci_address_to_resource(struct device_node *dev, int bar,
224 struct resource *r)
225 {
226 const __be32 *addrp;
227 u64 size;
228 unsigned int flags;
229
230 addrp = of_get_pci_address(dev, bar, &size, &flags);
231 if (addrp == NULL)
232 return -EINVAL;
233 return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
234 }
235 EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
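/*
 * Rough usage sketch for looking up BAR 1 of a PCI device node; "pdn" is
 * only a placeholder name for the example:
 *
 *	struct resource r;
 *
 *	if (!of_pci_address_to_resource(pdn, 1, &r))
 *		pr_info("BAR1 at %pR\n", &r);
 */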
236
237 int of_pci_range_parser_init(struct of_pci_range_parser *parser,
238 struct device_node *node)
239 {
240 const int na = 3, ns = 2;
241 int rlen;
242
243 parser->node = node;
244 parser->pna = of_n_addr_cells(node);
245 parser->np = parser->pna + na + ns;
246
247 parser->range = of_get_property(node, "ranges", &rlen);
248 if (parser->range == NULL)
249 return -ENOENT;
250
251 parser->end = parser->range + rlen / sizeof(__be32);
252
253 return 0;
254 }
255 EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
256
257 struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
258 struct of_pci_range *range)
259 {
260 const int na = 3, ns = 2;
261
262 if (!range)
263 return NULL;
264
265 if (!parser->range || parser->range + parser->np > parser->end)
266 return NULL;
267
268 range->pci_space = parser->range[0];
269 range->flags = of_bus_pci_get_flags(parser->range);
270 range->pci_addr = of_read_number(parser->range + 1, ns);
271 range->cpu_addr = of_translate_address(parser->node,
272 parser->range + na);
273 range->size = of_read_number(parser->range + parser->pna + na, ns);
274
275 parser->range += parser->np;
276
277 /* Now consume following elements while they are contiguous */
278 while (parser->range + parser->np <= parser->end) {
279 u32 flags, pci_space;
280 u64 pci_addr, cpu_addr, size;
281
282 pci_space = be32_to_cpup(parser->range);
283 flags = of_bus_pci_get_flags(parser->range);
284 pci_addr = of_read_number(parser->range + 1, ns);
285 cpu_addr = of_translate_address(parser->node,
286 parser->range + na);
287 size = of_read_number(parser->range + parser->pna + na, ns);
288
289 if (flags != range->flags)
290 break;
291 if (pci_addr != range->pci_addr + range->size ||
292 cpu_addr != range->cpu_addr + range->size)
293 break;
294
295 range->size += size;
296 parser->range += parser->np;
297 }
298
299 return range;
300 }
301 EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
302
303 /*
304 * of_pci_range_to_resource - Create a resource from an of_pci_range
305 * @range: the PCI range that describes the resource
306 * @np: device node the range belongs to
307 * @res: pointer to a valid resource that will be updated to
308 * reflect the values contained in the range.
309 *
310 * Returns -EINVAL if the range cannot be converted to a resource.
311 *
312 * Note that if the range is an IO range, the resource will be converted
313 * using pci_address_to_pio() which can fail if it is called too early or
314 * if the range cannot be matched to any host bridge IO space (our case here).
315 * To guard against that we try to register the IO range first.
316 * If that fails we know that pci_address_to_pio() will fail too.
317 */
318 int of_pci_range_to_resource(struct of_pci_range *range,
319 struct device_node *np, struct resource *res)
320 {
321 int err;
322 res->flags = range->flags;
323 res->parent = res->child = res->sibling = NULL;
324 res->name = np->full_name;
325
326 if (res->flags & IORESOURCE_IO) {
327 unsigned long port;
328 err = pci_register_io_range(&np->fwnode, range->cpu_addr,
329 range->size);
330 if (err)
331 goto invalid_range;
332 port = pci_address_to_pio(range->cpu_addr);
333 if (port == (unsigned long)-1) {
334 err = -EINVAL;
335 goto invalid_range;
336 }
337 res->start = port;
338 } else {
339 if ((sizeof(resource_size_t) < 8) &&
340 upper_32_bits(range->cpu_addr)) {
341 err = -EINVAL;
342 goto invalid_range;
343 }
344
345 res->start = range->cpu_addr;
346 }
347 res->end = res->start + range->size - 1;
348 return 0;
349
350 invalid_range:
351 res->start = (resource_size_t)OF_BAD_ADDR;
352 res->end = (resource_size_t)OF_BAD_ADDR;
353 return err;
354 }
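/*
 * Rough usage sketch for the range parser together with
 * of_pci_range_to_resource(), assuming the for_each_of_pci_range()
 * helper from <linux/of_address.h> and a caller-provided host bridge
 * node "np":
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *	struct resource res;
 *
 *	if (of_pci_range_parser_init(&parser, np))
 *		return -ENOENT;
 *	for_each_of_pci_range(&parser, &range) {
 *		if (of_pci_range_to_resource(&range, np, &res))
 *			continue;
 *		...
 *	}
 *
 * Each successful iteration leaves "res" describing one contiguous host
 * bridge window.
 */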
355 #endif /* CONFIG_PCI */
356
357 /*
358 * ISA bus specific translator
359 */
360
361 static int of_bus_isa_match(struct device_node *np)
362 {
363 return !strcmp(np->name, "isa");
364 }
365
366 static void of_bus_isa_count_cells(struct device_node *child,
367 int *addrc, int *sizec)
368 {
369 if (addrc)
370 *addrc = 2;
371 if (sizec)
372 *sizec = 1;
373 }
374
375 static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
376 int pna)
377 {
378 u64 cp, s, da;
379
380 /* Check address type match */
381 if ((addr[0] ^ range[0]) & cpu_to_be32(1))
382 return OF_BAD_ADDR;
383
384 /* Read address values, skipping high cell */
385 cp = of_read_number(range + 1, na - 1);
386 s = of_read_number(range + na + pna, ns);
387 da = of_read_number(addr + 1, na - 1);
388
389 pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
390 (unsigned long long)cp, (unsigned long long)s,
391 (unsigned long long)da);
392
393 if (da < cp || da >= (cp + s))
394 return OF_BAD_ADDR;
395 return da - cp;
396 }
397
398 static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
399 {
400 return of_bus_default_translate(addr + 1, offset, na - 1);
401 }
402
403 static unsigned int of_bus_isa_get_flags(const __be32 *addr)
404 {
405 unsigned int flags = 0;
406 u32 w = be32_to_cpup(addr);
407
408 if (w & 1)
409 flags |= IORESOURCE_IO;
410 else
411 flags |= IORESOURCE_MEM;
412 return flags;
413 }
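/*
 * For illustration, a typical ISA "reg" entry (two address cells, one
 * size cell) such as
 *
 *	reg = <0x1 0x3f8 0x08>;
 *
 * has bit 0 of the first cell set, so of_bus_isa_get_flags() reports
 * IORESOURCE_IO: eight bytes of I/O space at port 0x3f8. A first cell
 * of 0 would select memory space instead.
 */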
414
415 /*
416 * Array of bus specific translators
417 */
418
419 static struct of_bus of_busses[] = {
420 #ifdef CONFIG_OF_ADDRESS_PCI
421 /* PCI */
422 {
423 .name = "pci",
424 .addresses = "assigned-addresses",
425 .match = of_bus_pci_match,
426 .count_cells = of_bus_pci_count_cells,
427 .map = of_bus_pci_map,
428 .translate = of_bus_pci_translate,
429 .get_flags = of_bus_pci_get_flags,
430 },
431 #endif /* CONFIG_OF_ADDRESS_PCI */
432 /* ISA */
433 {
434 .name = "isa",
435 .addresses = "reg",
436 .match = of_bus_isa_match,
437 .count_cells = of_bus_isa_count_cells,
438 .map = of_bus_isa_map,
439 .translate = of_bus_isa_translate,
440 .get_flags = of_bus_isa_get_flags,
441 },
442 /* Default */
443 {
444 .name = "default",
445 .addresses = "reg",
446 .match = NULL,
447 .count_cells = of_bus_default_count_cells,
448 .map = of_bus_default_map,
449 .translate = of_bus_default_translate,
450 .get_flags = of_bus_default_get_flags,
451 },
452 };
453
454 static struct of_bus *of_match_bus(struct device_node *np)
455 {
456 int i;
457
458 for (i = 0; i < ARRAY_SIZE(of_busses); i++)
459 if (!of_busses[i].match || of_busses[i].match(np))
460 return &of_busses[i];
461 BUG();
462 return NULL;
463 }
464
465 static int of_empty_ranges_quirk(struct device_node *np)
466 {
467 if (IS_ENABLED(CONFIG_PPC)) {
468 /* To save cycles, we cache the result for global "Mac" setting */
469 static int quirk_state = -1;
470
471 /* PA-SEMI sdc DT bug */
472 if (of_device_is_compatible(np, "1682m-sdc"))
473 return true;
474
475 /* Make quirk cached */
476 if (quirk_state < 0)
477 quirk_state =
478 of_machine_is_compatible("Power Macintosh") ||
479 of_machine_is_compatible("MacRISC");
480 return quirk_state;
481 }
482 return false;
483 }
484
485 static int of_translate_one(struct device_node *parent, struct of_bus *bus,
486 struct of_bus *pbus, __be32 *addr,
487 int na, int ns, int pna, const char *rprop)
488 {
489 const __be32 *ranges;
490 unsigned int rlen;
491 int rone;
492 u64 offset = OF_BAD_ADDR;
493
494 /*
495 * Normally, an absence of a "ranges" property means we are
496 * crossing a non-translatable boundary, and thus the addresses
497 * below the current node cannot be converted to CPU physical ones.
498 * Unfortunately, while this is very clear in the spec, it's not
499 * what Apple understood, and they do have things like /uni-n or
500 * /ht nodes with no "ranges" property and a lot of perfectly
501 * useable mapped devices below them. Thus we treat the absence of
502 * "ranges" as equivalent to an empty "ranges" property which means
503 * a 1:1 translation at that level. It's up to the caller not to try
504 * to translate addresses that aren't supposed to be translated in
505 * the first place. --BenH.
506 *
507 * As far as we know, this damage only exists on Apple machines, so
508 * this code is only enabled on powerpc. --gcl
509 */
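/*
 * As a worked example (values are illustrative only): with one address
 * cell and one size cell on both levels, a parent "ranges" entry of
 *
 *	ranges = <0x0 0xe0000000 0x100000>;
 *
 * maps child address 0x0 onto parent address 0xe0000000 for 0x100000
 * bytes. For a child address of 0x4000, bus->map() returns offset
 * 0x4000, the memcpy() below rewrites the address as the parent base
 * 0xe0000000, and pbus->translate() then adds the offset, giving
 * 0xe0004000.
 */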
510 ranges = of_get_property(parent, rprop, &rlen);
511 if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
512 pr_debug("no ranges; cannot translate\n");
513 return 1;
514 }
515 if (ranges == NULL || rlen == 0) {
516 offset = of_read_number(addr, na);
517 memset(addr, 0, pna * 4);
518 pr_debug("empty ranges; 1:1 translation\n");
519 goto finish;
520 }
521
522 pr_debug("walking ranges...\n");
523
524 /* Now walk through the ranges */
525 rlen /= 4;
526 rone = na + pna + ns;
527 for (; rlen >= rone; rlen -= rone, ranges += rone) {
528 offset = bus->map(addr, ranges, na, ns, pna);
529 if (offset != OF_BAD_ADDR)
530 break;
531 }
532 if (offset == OF_BAD_ADDR) {
533 pr_debug("not found !\n");
534 return 1;
535 }
536 memcpy(addr, ranges + na, 4 * pna);
537
538 finish:
539 of_dump_addr("parent translation for:", addr, pna);
540 pr_debug("with offset: %llx\n", (unsigned long long)offset);
541
542 /* Translate it into parent bus space */
543 return pbus->translate(addr, offset, pna);
544 }
545
546 /*
547 * Translate an address from the device-tree into a CPU physical address,
548 * this walks up the tree and applies the various bus mappings on the
549 * way.
550 *
551 * Note: We consider that crossing any level with #size-cells == 0 to mean
552 * that translation is impossible (that is we are not dealing with a value
553 * that can be mapped to a cpu physical address). This is not really specified
554 * that way, but this is traditionally the way IBM at least does things.
555 *
556 * Whenever the translation fails, the *host pointer will be set to the
557 * device that had registered the logical PIO mapping, and the returned
558 * address is relative to that node.
559 */
560 static u64 __of_translate_address(struct device_node *dev,
561 const __be32 *in_addr, const char *rprop,
562 struct device_node **host)
563 {
564 struct device_node *parent = NULL;
565 struct of_bus *bus, *pbus;
566 __be32 addr[OF_MAX_ADDR_CELLS];
567 int na, ns, pna, pns;
568 u64 result = OF_BAD_ADDR;
569
570 pr_debug("** translation for device %s **\n", of_node_full_name(dev));
571
572 /* Increase refcount at current level */
573 of_node_get(dev);
574
575 *host = NULL;
576 /* Get parent & match bus type */
577 parent = of_get_parent(dev);
578 if (parent == NULL)
579 goto bail;
580 bus = of_match_bus(parent);
581
582 /* Count address cells & copy address locally */
583 bus->count_cells(dev, &na, &ns);
584 if (!OF_CHECK_COUNTS(na, ns)) {
585 pr_debug("Bad cell count for %s\n", of_node_full_name(dev));
586 goto bail;
587 }
588 memcpy(addr, in_addr, na * 4);
589
590 pr_debug("bus is %s (na=%d, ns=%d) on %s\n",
591 bus->name, na, ns, of_node_full_name(parent));
592 of_dump_addr("translating address:", addr, na);
593
594 /* Translate */
595 for (;;) {
596 struct libio_range *iorange;
597
598 /* Switch to parent bus */
599 of_node_put(dev);
600 dev = parent;
601 parent = of_get_parent(dev);
602
603 /* If root, we have finished */
604 if (parent == NULL) {
605 pr_debug("reached root node\n");
606 result = of_read_number(addr, na);
607 break;
608 }
609
610 /*
611 * For an indirectIO device which has no ranges property, get
612 * the address from reg directly.
613 */
614 iorange = find_io_range_from_fwnode(&dev->fwnode);
615 if (iorange && !(iorange->flags & IO_CPU_MMIO)) {
616 result = of_read_number(addr + 1, na - 1);
617 pr_debug("indirectIO matched(%s) 0x%llx\n",
618 of_node_full_name(dev), result);
619 *host = of_node_get(dev);
620 break;
621 }
622
623 /* Get new parent bus and counts */
624 pbus = of_match_bus(parent);
625 pbus->count_cells(dev, &pna, &pns);
626 if (!OF_CHECK_COUNTS(pna, pns)) {
627 pr_err("Bad cell count for %s\n",
628 of_node_full_name(dev));
629 break;
630 }
631
632 pr_debug("parent bus is %s (na=%d, ns=%d) on %s\n",
633 pbus->name, pna, pns, of_node_full_name(parent));
634
635 /* Apply bus translation */
636 if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
637 break;
638
639 /* Complete the move up one level */
640 na = pna;
641 ns = pns;
642 bus = pbus;
643
644 of_dump_addr("one level translation:", addr, na);
645 }
646 bail:
647 of_node_put(parent);
648 of_node_put(dev);
649
650 return result;
651 }
652
653 u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
654 {
655 struct device_node *host;
656 u64 ret;
657
658 ret = __of_translate_address(dev, in_addr, "ranges", &host);
659 if (host) {
660 of_node_put(host);
661 return OF_BAD_ADDR;
662 }
663
664 return ret;
665 }
666 EXPORT_SYMBOL(of_translate_address);
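/*
 * Typical use together with of_get_address() (defined later in this
 * file); "dev" and index 0 are placeholders for the example:
 *
 *	u64 size, phys = OF_BAD_ADDR;
 *	unsigned int flags;
 *	const __be32 *reg = of_get_address(dev, 0, &size, &flags);
 *
 *	if (reg)
 *		phys = of_translate_address(dev, reg);
 *
 * phys stays OF_BAD_ADDR when the address cannot be translated.
 */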
667
668 u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
669 {
670 struct device_node *host;
671 u64 ret;
672
673 ret = __of_translate_address(dev, in_addr, "dma-ranges", &host);
674
675 if (host) {
676 of_node_put(host);
677 return OF_BAD_ADDR;
678 }
679
680 return ret;
681 }
682 EXPORT_SYMBOL(of_translate_dma_address);
683
684 const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
685 unsigned int *flags)
686 {
687 const __be32 *prop;
688 unsigned int psize;
689 struct device_node *parent;
690 struct of_bus *bus;
691 int onesize, i, na, ns;
692
693 /* Get parent & match bus type */
694 parent = of_get_parent(dev);
695 if (parent == NULL)
696 return NULL;
697 bus = of_match_bus(parent);
698 bus->count_cells(dev, &na, &ns);
699 of_node_put(parent);
700 if (!OF_CHECK_ADDR_COUNT(na))
701 return NULL;
702
703 /* Get "reg" or "assigned-addresses" property */
704 prop = of_get_property(dev, bus->addresses, &psize);
705 if (prop == NULL)
706 return NULL;
707 psize /= 4;
708
709 onesize = na + ns;
710 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
711 if (i == index) {
712 if (size)
713 *size = of_read_number(prop + na, ns);
714 if (flags)
715 *flags = bus->get_flags(prop);
716 return prop;
717 }
718 return NULL;
719 }
720 EXPORT_SYMBOL(of_get_address);
721
722 static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr)
723 {
724 u64 taddr;
725 unsigned long port;
726 struct device_node *host;
727
728 taddr = __of_translate_address(dev, in_addr, "ranges", &host);
729 if (host) {
730 /* host specific port access */
731 port = libio_translate_hwaddr(&host->fwnode, taddr);
732 of_node_put(host);
733 } else {
734 /* memory mapped I/O range */
735 port = pci_address_to_pio(taddr);
736 }
737
738 if (port == (unsigned long)-1)
739 return OF_BAD_ADDR;
740
741 return port;
742 }
743
744 static int __of_address_to_resource(struct device_node *dev,
745 const __be32 *addrp, u64 size, unsigned int flags,
746 const char *name, struct resource *r)
747 {
748 u64 taddr;
749
750 if (flags & IORESOURCE_MEM)
751 taddr = of_translate_address(dev, addrp);
752 else if (flags & IORESOURCE_IO)
753 taddr = of_translate_ioport(dev, addrp);
754 else
755 return -EINVAL;
756
757 if (taddr == OF_BAD_ADDR)
758 return -EINVAL;
759 memset(r, 0, sizeof(struct resource));
760
761 r->start = taddr;
762 r->end = taddr + size - 1;
763 r->flags = flags;
764 r->name = name ? name : dev->full_name;
765
766 return 0;
767 }
768
769 /**
770 * of_address_to_resource - Translate device tree address and return as resource
771 *
772 * Note that if your address is a PIO address, the conversion will fail if
773 * the physical address can't be internally converted to an IO token with
774 * pci_address_to_pio(); that happens when it is called too early or when the
775 * address can't be matched to any host bridge IO space.
776 */
777 int of_address_to_resource(struct device_node *dev, int index,
778 struct resource *r)
779 {
780 const __be32 *addrp;
781 u64 size;
782 unsigned int flags;
783 const char *name = NULL;
784
785 addrp = of_get_address(dev, index, &size, &flags);
786 if (addrp == NULL)
787 return -EINVAL;
788
789 /* Get optional "reg-names" property to add a name to a resource */
790 of_property_read_string_index(dev, "reg-names", index, &name);
791
792 return __of_address_to_resource(dev, addrp, size, flags, name, r);
793 }
794 EXPORT_SYMBOL_GPL(of_address_to_resource);
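/*
 * A minimal driver-side sketch; "np", "dev" and index 0 are assumptions
 * made for the example:
 *
 *	struct resource res;
 *	void __iomem *base;
 *
 *	if (of_address_to_resource(np, 0, &res))
 *		return -ENODEV;
 *	base = devm_ioremap_resource(dev, &res);
 *
 * In practice many platform drivers obtain the same resource through
 * platform_get_resource() instead of calling this helper directly.
 */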
795
796 struct device_node *of_find_matching_node_by_address(struct device_node *from,
797 const struct of_device_id *matches,
798 u64 base_address)
799 {
800 struct device_node *dn = of_find_matching_node(from, matches);
801 struct resource res;
802
803 while (dn) {
804 if (!of_address_to_resource(dn, 0, &res) &&
805 res.start == base_address)
806 return dn;
807
808 dn = of_find_matching_node(dn, matches);
809 }
810
811 return NULL;
812 }
813
814
815 /**
816 * of_iomap - Maps the memory mapped IO for a given device_node
817 * @np: the device whose io range will be mapped
818 * @index: index of the io range
819 *
820 * Returns a pointer to the mapped memory
821 */
822 void __iomem *of_iomap(struct device_node *np, int index)
823 {
824 struct resource res;
825
826 if (of_address_to_resource(np, index, &res))
827 return NULL;
828
829 return ioremap(res.start, resource_size(&res));
830 }
831 EXPORT_SYMBOL(of_iomap);
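/*
 * Example (index 0 assumed):
 *
 *	void __iomem *regs = of_iomap(np, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */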
832
833 /*
834 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
835 * for a given device_node
836 * @np: the device whose io range will be mapped
837 * @index: index of the io range
838 * @name: name of the resource
839 *
840 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
841 * error code on failure. Usage example:
842 *
843 * base = of_io_request_and_map(node, 0, "foo");
844 * if (IS_ERR(base))
845 * return PTR_ERR(base);
846 */
847 void __iomem *of_io_request_and_map(struct device_node *np, int index,
848 const char *name)
849 {
850 struct resource res;
851 void __iomem *mem;
852
853 if (of_address_to_resource(np, index, &res))
854 return IOMEM_ERR_PTR(-EINVAL);
855
856 if (!request_mem_region(res.start, resource_size(&res), name))
857 return IOMEM_ERR_PTR(-EBUSY);
858
859 mem = ioremap(res.start, resource_size(&res));
860 if (!mem) {
861 release_mem_region(res.start, resource_size(&res));
862 return IOMEM_ERR_PTR(-ENOMEM);
863 }
864
865 return mem;
866 }
867 EXPORT_SYMBOL(of_io_request_and_map);
868
869 /**
870 * of_dma_get_range - Get DMA range info
871 * @np: device node to get DMA range info
872 * @dma_addr: pointer to store initial DMA address of DMA range
873 * @paddr: pointer to store initial CPU address of DMA range
874 * @size: pointer to store size of DMA range
875 *
876 * Look in bottom up direction for the first "dma-ranges" property
877 * and parse it.
878 * dma-ranges format:
879 * DMA addr (dma_addr) : naddr cells
880 * CPU addr (phys_addr_t) : pna cells
881 * size : nsize cells
882 *
883 * It returns -ENODEV if no "dma-ranges" property was found
884 * for this device in the DT.
885 */
886 int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
887 {
888 struct device_node *node = of_node_get(np);
889 const __be32 *ranges = NULL;
890 int len, naddr, nsize, pna;
891 int ret = 0;
892 u64 dmaaddr;
893
894 if (!node)
895 return -EINVAL;
896
897 while (1) {
898 naddr = of_n_addr_cells(node);
899 nsize = of_n_size_cells(node);
900 node = of_get_next_parent(node);
901 if (!node)
902 break;
903
904 ranges = of_get_property(node, "dma-ranges", &len);
905
906 /* Ignore empty ranges; they imply no translation is required */
907 if (ranges && len > 0)
908 break;
909
910 /*
911 * At least an empty "dma-ranges" property has to be defined for the
912 * parent node if DMA is supported.
913 */
914 if (!ranges)
915 break;
916 }
917
918 if (!ranges) {
919 pr_debug("no dma-ranges found for node(%s)\n", np->full_name);
920 ret = -ENODEV;
921 goto out;
922 }
923
924 len /= sizeof(u32);
925
926 pna = of_n_addr_cells(node);
927
928 /* dma-ranges format:
929 * DMA addr : naddr cells
930 * CPU addr : pna cells
931 * size : nsize cells
932 */
933 dmaaddr = of_read_number(ranges, naddr);
934 *paddr = of_translate_dma_address(np, ranges);
935 if (*paddr == OF_BAD_ADDR) {
936 pr_err("translation of DMA address(%pad) to CPU address failed node(%s)\n",
937 dma_addr, np->full_name);
938 ret = -EINVAL;
939 goto out;
940 }
941 *dma_addr = dmaaddr;
942
943 *size = of_read_number(ranges + naddr + pna, nsize);
944
945 pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
946 *dma_addr, *paddr, *size);
947
948 out:
949 of_node_put(node);
950
951 return ret;
952 }
953 EXPORT_SYMBOL_GPL(of_dma_get_range);
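/*
 * For illustration (addresses are made up), with one address cell and
 * one size cell at both levels, a parent bus node might carry
 *
 *	dma-ranges = <0x00000000 0x40000000 0x10000000>;
 *
 * for which of_dma_get_range() reports dma_addr 0x0, paddr 0x40000000
 * and size 0x10000000, i.e. the device's DMA address 0x0 corresponds to
 * CPU address 0x40000000 within a 256 MiB window.
 */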
954
955 /**
956 * of_dma_is_coherent - Check if device is coherent
957 * @np: device node
958 *
959 * It returns true if a "dma-coherent" property was found
960 * for this device in the DT.
961 */
962 bool of_dma_is_coherent(struct device_node *np)
963 {
964 struct device_node *node = of_node_get(np);
965
966 while (node) {
967 if (of_property_read_bool(node, "dma-coherent")) {
968 of_node_put(node);
969 return true;
970 }
971 node = of_get_next_parent(node);
972 }
973 of_node_put(node);
974 return false;
975 }
976 EXPORT_SYMBOL_GPL(of_dma_is_coherent);