/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/resource_ext.h>
struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);
/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

/*
 * For memory hotplug, there is no way to free resource entries allocated
 * by boot mem after the system is up. So for reusing the resource entry
 * we need to remember the resource.
 */
static struct resource *bootmem_resource_free;
static DEFINE_SPINLOCK(bootmem_resource_lock);
static struct resource *next_resource(struct resource *p, bool sibling_only)
{
	/* Caller wants to traverse through siblings only */
	if (sibling_only)
		return p->sibling;

	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;
	return (void *)next_resource(p, false);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };
static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;

	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}
static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}
static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}
static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};
static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);

	if (!res) {
		struct seq_file *m = file->private_data;

		m->private = &ioport_resource;
	}
	return res;
}
static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);

	if (!res) {
		struct seq_file *m = file->private_data;

		m->private = &iomem_resource;
	}
	return res;
}
static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */
static void free_resource(struct resource *res)
{
	if (!res)
		return;

	if (!PageSlab(virt_to_head_page(res))) {
		spin_lock(&bootmem_resource_lock);
		res->sibling = bootmem_resource_free;
		bootmem_resource_free = res;
		spin_unlock(&bootmem_resource_lock);
	} else {
		kfree(res);
	}
}
static struct resource *alloc_resource(gfp_t flags)
{
	struct resource *res = NULL;

	spin_lock(&bootmem_resource_lock);
	if (bootmem_resource_free) {
		res = bootmem_resource_free;
		bootmem_resource_free = res->sibling;
	}
	spin_unlock(&bootmem_resource_lock);

	if (res)
		memset(res, 0, sizeof(struct resource));
	else
		res = kzalloc(sizeof(struct resource), flags);

	return res;
}
/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}
static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	if (!old->parent) {
		WARN(old->sibling, "sibling but no parent");
		return -EINVAL;
	}

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}
static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}
void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}
/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}
/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL(request_resource);
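
/*
 * Illustrative sketch only, not part of the original file: a driver that owns
 * a fixed MMIO window would typically reserve it under iomem_resource as
 * shown below. The structure contents (name, address, size) are made-up
 * example values, not a real device.
 */
static struct resource example_mmio_window = {
	.name	= "example-device",	/* hypothetical name */
	.start	= 0xfed40000,		/* hypothetical base address */
	.end	= 0xfed40fff,		/* inclusive end of a 4 KiB window */
	.flags	= IORESOURCE_MEM,
};

static int example_reserve_mmio(void)
{
	/* 0 on success, -EBUSY if some other owner already claimed the range */
	return request_resource(&iomem_resource, &example_mmio_window);
}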
/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL(release_resource);
/*
 * Finds the lowest iomem resource existing within [res->start..res->end).
 * The caller must specify res->start, res->end, res->flags, and optionally
 * desc. If found, returns 0, res is overwritten, if not found, returns -1.
 * This function walks the whole tree and not just first level children until
 * and unless first_level_children_only is true.
 */
static int find_next_iomem_res(struct resource *res, unsigned long desc,
			       bool first_level_children_only)
{
	resource_size_t start, end;
	struct resource *p;
	bool sibling_only = false;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	if (first_level_children_only)
		sibling_only = true;

	read_lock(&resource_lock);

	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
		if ((p->flags & res->flags) != res->flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}

	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	res->flags = p->flags;
	return 0;
}
static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
				 bool first_level_children_only,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	u64 orig_end = res->end;
	int ret = -1;

	while ((res->start < res->end) &&
	       !find_next_iomem_res(res, desc, first_level_children_only)) {
		ret = (*func)(res, arg);
		if (ret)
			break;

		res->start = res->end + 1;
		res->end = orig_end;
	}

	return ret;
}
/**
 * Walks through iomem resources and calls func() with matching resource
 * ranges. This walks through whole tree and not just first level children.
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(struct resource *, void *))
{
	struct resource res;

	res.start = start;
	res.end = end;
	res.flags = flags;

	return __walk_iomem_res_desc(&res, desc, false, arg, func);
}
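
/*
 * Illustrative sketch only, not part of the original file: a caller-supplied
 * callback for walk_iomem_res_desc(). The callback is invoked once per
 * matching range, with res->start/res->end clipped to the requested window;
 * returning non-zero stops the walk. The helper names and the choice of
 * flags/range below are example values only.
 */
static int example_count_range(struct resource *res, void *arg)
{
	unsigned int *count = arg;

	(*count)++;		/* just count each matching range */
	return 0;		/* 0 means: keep walking */
}

static unsigned int example_count_busy_ram(u64 start, u64 end)
{
	unsigned int count = 0;

	walk_iomem_res_desc(IORES_DESC_NONE,
			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
			    start, end, &count, example_count_range);
	return count;
}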
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	struct resource res;

	res.start = start;
	res.end = end;
	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
				     arg, func);
}
/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	struct resource res;

	res.start = start;
	res.end = end;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
				     arg, func);
}
#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif
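
/*
 * Illustrative sketch only, not part of the original file: unlike the
 * resource-based walkers above, walk_system_ram_range() hands its callback
 * PFN ranges. The helper below simply sums the number of System RAM pages in
 * a window; all names are hypothetical.
 */
static int example_add_pages(unsigned long pfn, unsigned long nr_pages,
			     void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;			/* non-zero would abort the walk */
}

static unsigned long example_ram_pages(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total, example_add_pages);
	return total;
}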
static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);
/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	resource_size_t end = start + size - 1;
	int type = 0; int other = 0;
	struct resource *p;

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (start >= p->start && start <= p->end)
			is_type ? type++ : other++;
		if (end >= p->start && end <= p->end)
			is_type ? type++ : other++;
		if (p->start >= start && p->end <= end)
			is_type ? type++ : other++;
	}
	read_unlock(&resource_lock);

	if (other == 0)
		return type ? REGION_INTERSECTS : REGION_DISJOINT;

	if (type)
		return REGION_MIXED;

	return REGION_DISJOINT;
}
EXPORT_SYMBOL_GPL(region_intersects);
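
/*
 * Illustrative sketch only, not part of the original file: a typical caller
 * checks whether a candidate window touches System RAM before remapping it.
 * The helper name is hypothetical.
 */
static bool example_overlaps_ram(resource_size_t start, size_t size)
{
	return region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) != REGION_DISJOINT;
}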
void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}
static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}
/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			 struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}
/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}
/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			resource_size_t newsize,
			struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}
/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}
EXPORT_SYMBOL(allocate_resource);
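
/*
 * Illustrative sketch only, not part of the original file: ask for any free,
 * 4 KiB aligned, 4 KiB sized window inside a parent bridge resource. Passing
 * a NULL @alignf falls back to simple_align_resource() above. The helper
 * name and the constraint values are example-only.
 */
static int example_alloc_window(struct resource *parent, struct resource *new)
{
	new->name = "example-window";	/* hypothetical name */
	new->flags = IORESOURCE_MEM;

	return allocate_resource(parent, new, 0x1000,
				 parent->start, parent->end,
				 0x1000, NULL, NULL);
}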
/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}
/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}
/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, conflict resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}
/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
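
/*
 * Illustrative sketch only, not part of the original file: firmware or bus
 * code uses insert_resource() when a new, larger window may legitimately
 * contain resources that were registered earlier; the existing entries become
 * children of the new one instead of causing -EBUSY. The helper name is
 * hypothetical.
 */
static int example_insert_host_window(struct resource *host_window)
{
	int ret;

	ret = insert_resource(&iomem_resource, host_window);
	if (ret)
		pr_warn("example: could not insert %pR (%d)\n",
			host_window, ret);
	return ret;
}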
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before.  insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);
static int __adjust_resource(struct resource *res, resource_size_t start,
				resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

 out:
	return result;
}
/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
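
/*
 * Illustrative sketch only, not part of the original file: grow a previously
 * requested region in place. adjust_resource() returns -EBUSY if the new
 * extent would leave the parent or collide with a sibling. The helper name
 * is hypothetical.
 */
static int example_grow_region(struct resource *res, resource_size_t new_size)
{
	return adjust_resource(res, res->start, new_size);
}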
static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);

		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}
void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}
/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}
/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = alloc_resource(GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		free_resource(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);
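
/*
 * Illustrative sketch only, not part of the original file: most drivers do
 * not call __request_region() directly but use the request_mem_region()/
 * release_mem_region() wrappers from <linux/ioport.h>, which pass
 * &iomem_resource as the parent. The addresses are example values, and the
 * sketch assumes ioremap() from <linux/io.h> is available.
 */
static void __iomem *example_claim_and_map(resource_size_t base,
					   resource_size_t len)
{
	void __iomem *regs;

	if (!request_mem_region(base, len, "example-device"))
		return NULL;		/* somebody else owns the range */

	regs = ioremap(base, len);
	if (!regs)
		release_mem_region(base, len);
	return regs;
}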
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);
#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete.  The requested region
 * is released from a currently busy memory resource.  The requested region
 * must either match exactly or fit into a single busy resource entry.  In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity.  Enhance this logic when necessary.
 */
int release_mem_region_adjustable(struct resource *parent,
			resource_size_t start, resource_size_t size)
{
	struct resource **p;
	struct resource *res;
	struct resource *new_res;
	resource_size_t end;
	int ret = -EINVAL;

	end = start + size - 1;
	if ((start < parent->start) || (end > parent->end))
		return ret;

	/* The alloc_resource() result gets checked later */
	new_res = alloc_resource(GFP_KERNEL);

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
			ret = 0;
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			ret = __adjust_resource(res, end + 1,
						res->end - end);
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			ret = __adjust_resource(res, res->start,
						start - res->start);
		} else {
			/* split into two entries */
			if (!new_res) {
				ret = -ENOMEM;
				break;
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			ret = __adjust_resource(res, res->start,
						start - res->start);
			if (ret)
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
	return ret;
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */
/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}
/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
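
/*
 * Illustrative sketch only, not part of the original file: from a driver's
 * probe path, tying the reservation to the device means no explicit release
 * is needed on unbind. The function and parameter names are hypothetical.
 */
static int example_probe_reserve(struct device *dev,
				 struct resource *chip_window)
{
	int ret;

	ret = devm_request_resource(dev, &iomem_resource, chip_window);
	if (ret)
		return ret;	/* the conflict was already logged above */

	/* ... program the device using chip_window->start..chip_window->end ... */
	return 0;
}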
static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}
/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};
static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}
static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}
struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);
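
/*
 * Illustrative sketch only, not part of the original file: drivers normally
 * reach the function above through the devm_request_region()/
 * devm_request_mem_region() macros in <linux/ioport.h>, which supply the
 * appropriate parent resource. The helper name and values are example-only.
 */
static int example_claim_ports(struct device *dev, resource_size_t base,
			       resource_size_t len)
{
	if (!devm_request_region(dev, base, len, dev_name(dev)))
		return -EBUSY;	/* released automatically on driver unbind */
	return 0;
}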
void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);
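
/*
 * Illustrative sketch only, not part of the original file: the "reserve="
 * boot parameter parsed above takes start,size pairs. Assuming the example
 * range below is free on the target machine, a command line such as
 *
 *	reserve=0x300,8
 *
 * would mark I/O ports 0x300-0x307 busy so no driver claims them, while a
 * start value at or above 0x10000 would be reserved from iomem_resource
 * instead of ioport_resource, as the code above shows.
 */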
/*
 * Check if the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}
#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif
/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if ((p->flags & IORESOURCE_BUSY) == 0)
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
				|| p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}
struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);
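
/*
 * Illustrative sketch only, not part of the original file: building up a list
 * of windows with the resource_entry helpers; resource_list_add_tail() comes
 * from <linux/resource_ext.h>, and the whole list can later be torn down with
 * resource_list_free() below. The helper name is hypothetical.
 */
static int example_collect_window(struct list_head *windows,
				  struct resource *res)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry)
		return -ENOMEM;

	resource_list_add_tail(entry, windows);
	return 0;
}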
void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);