/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}
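
/*
 * For illustration only (the addresses below are made up, not produced
 * by this file): r_show() renders each node as an indented, zero-padded
 * range followed by its name, so reading /proc/iomem yields lines like:
 *
 *	00000000-0009fbff : System RAM
 *	000a0000-000bffff : Video RAM area
 *	e0000000-e7ffffff : PCI Bus #01
 *	  e0000000-e0ffffff : 0000:01:00.0
 */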

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
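
/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * the register-window address is made up): a driver that owns a fixed
 * MMIO range pins it with request_resource() and undoes that with
 * release_resource():
 *
 *	static struct resource foo_regs = {
 *		.name	= "foo registers",
 *		.start	= 0xfe000000,
 *		.end	= 0xfe000fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_regs))
 *		return -EBUSY;
 *	...
 *	release_resource(&foo_regs);
 */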

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end and res->flags.
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, len;
	u64 orig_end;
	int ret = -1;
	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif
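
/*
 * Sketch of a walk_memory_resource() caller (hypothetical, for
 * illustration only). Each chunk of System RAM (anything marked
 * IORESOURCE_MEM | IORESOURCE_BUSY) is handed to the callback as a
 * (start pfn, page count) pair; returning non-zero stops the walk:
 *
 *	static int count_pages(unsigned long pfn, unsigned long nr, void *arg)
 *	{
 *		*(unsigned long *)arg += nr;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_memory_resource(start_pfn, nr_pages, &total, count_pages);
 */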

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size, resource_size_t min,
			 resource_size_t max, resource_size_t align,
			 void (*alignf)(void *, struct resource *,
					resource_size_t, resource_size_t),
			 void *alignf_data)
{
	struct resource *this = root->child;

	new->start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to new->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		new->start = this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			new->end = this->start - 1;
		else
			new->end = root->end;
		if (new->start < min)
			new->start = min;
		if (new->end > max)
			new->end = max;
		new->start = ALIGN(new->start, align);
		if (alignf)
			alignf(alignf_data, new, size, align);
		if (new->start < new->end && new->end - new->start >= size - 1) {
			new->end = new->start + size - 1;
			return 0;
		}
		if (!this)
			break;
		new->start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum start address for the allocated region
 * @max: maximum end address for the allocated region
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      void (*alignf)(void *, struct resource *,
				     resource_size_t, resource_size_t),
		      void *alignf_data)
{
	int err;

	write_lock(&resource_lock);
	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
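
/*
 * Illustrative call (all values hypothetical): ask for a free 4 KiB
 * window, 4 KiB aligned, anywhere in 32-bit MMIO space, and let
 * find_resource() choose new->start and new->end:
 *
 *	struct resource *new = kzalloc(sizeof(*new), GFP_KERNEL);
 *
 *	if (!new)
 *		return -ENOMEM;
 *	new->name = "foo window";
 *	new->flags = IORESOURCE_MEM;
 *	if (allocate_resource(&iomem_resource, new, 0x1000,
 *			      0, 0xffffffff, 0x1000, NULL, NULL))
 *		kfree(new);
 *
 * A non-zero (negative) return means no suitable gap was found.
 */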

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is equivalent to request_resource when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}
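
/*
 * Typical use (hypothetical firmware-table code; the port range is made
 * up): insert a spanning region even though parts of it were already
 * requested, letting the existing entries become children of the new
 * node:
 *
 *	static struct resource pnp_span = {
 *		.name	= "pnp area",
 *		.start	= 0x1000,
 *		.end	= 0x1fff,
 *		.flags	= IORESOURCE_IO,
 *	};
 *
 *	if (insert_resource(&ioport_resource, &pnp_span))
 *		printk(KERN_WARNING "pnp area partially overlaps "
 *			"an existing region\n");
 */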

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}
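
/*
 * Example adjustment (hypothetical addresses): grow a 1 MiB window to
 * 2 MiB in place. The call fails with -EBUSY instead of disturbing the
 * parent, a sibling, or any child that would no longer fit:
 *
 *	(res currently spans 0xd0000000-0xd00fffff)
 *	if (adjust_resource(res, 0xd0000000, 0x200000))
 *		printk(KERN_ERR "cannot grow window\n");
 */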

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	conflict = __request_resource(parent, res);
	if (!conflict)
		return;

	/* failed, split and try again */
	kfree(res);

	/* conflict covered whole area */
	if (conflict->start <= start && conflict->end >= end)
		return;

	if (conflict->start > start)
		__reserve_region_with_split(root, start, conflict->start-1, name);
	if (conflict->end < end)
		__reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

EXPORT_SYMBOL(adjust_resource);

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}
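
/*
 * For example, an IORESOURCE_SIZEALIGN resource spanning
 * 0xd0000000-0xd0000fff reports an alignment of 0x1000 (its size),
 * an IORESOURCE_STARTALIGN resource starting at 0x3f8 reports 0x3f8,
 * and a resource with neither flag reports 0.
 */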

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}

		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);
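
/*
 * Drivers normally reach __request_region() through the request_region()
 * and request_mem_region() wrappers in <linux/ioport.h>, which supply
 * &ioport_resource or &iomem_resource as the parent. A short sketch
 * (the UART port range is only an example):
 *
 *	if (!request_region(0x3f8, 8, "foo_uart"))
 *		return -EBUSY;
 *	...
 *	release_region(0x3f8, 8);
 */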

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it. It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
		   resource_size_t n)
{
	struct resource *res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
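
/*
 * With the managed variants a driver does not pair the request with an
 * explicit release: the devres core invokes devm_region_release() when
 * the device is unbound. Sketch of a hypothetical probe routine (the
 * port range is only an example):
 *
 *	if (!devm_request_region(&pdev->dev, 0x3f8, 8, "foo_uart"))
 *		return -EBUSY;
 *	(nothing to undo on the error and remove paths)
 */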

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		int io_start, io_num;
		int x = reserved;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
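
/*
 * For example, booting with "reserve=0x320,32" marks I/O ports
 * 0x320-0x33f busy so that no driver can claim them; a start value at
 * or above 0x10000 is reserved in iomem_resource instead. Pairs may be
 * chained, e.g. "reserve=0x320,32,0x340,16".
 */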

/*
 * Check whether the requested [addr, addr + size) range spans more than
 * any single slot in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is reserved in the iomem resource tree.
 * Returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);
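
/*
 * For example, booting with "iomem=relaxed" turns the exclusivity check
 * in iomem_is_exclusive() off, while "iomem=strict" turns it on even if
 * CONFIG_STRICT_DEVMEM is not set.
 */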