// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 1999  Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
struct resource ioport_resource = {
        .name   = "PCI IO",
        .start  = 0,
        .end    = IO_SPACE_LIMIT,
        .flags  = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
        .name   = "PCI mem",
        .start  = 0,
        .end    = -1,
        .flags  = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);
/* constraints to be met while allocating resources */
struct resource_constraint {
        resource_size_t min, max, align;
        resource_size_t (*alignf)(void *, const struct resource *,
                                  resource_size_t, resource_size_t);
        void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);
static struct resource *next_resource(struct resource *p)
{
        if (p->child)
                return p->child;
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct resource *p = v;

        (*pos)++;
        return (void *)next_resource(p);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };
static void *r_start(struct seq_file *m, loff_t *pos)
        __acquires(resource_lock)
{
        struct resource *p = PDE_DATA(file_inode(m->file));
        loff_t l = 0;

        read_lock(&resource_lock);
        for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
                break;
        return p;
}

static void r_stop(struct seq_file *m, void *v)
        __releases(resource_lock)
{
        read_unlock(&resource_lock);
}
static int r_show(struct seq_file *m, void *v)
{
        struct resource *root = PDE_DATA(file_inode(m->file));
        struct resource *r = v, *p;
        unsigned long long start, end;
        int width = root->end < 0x10000 ? 4 : 8;
        int depth;

        for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
                if (p->parent == root)
                        break;

        if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
                start = r->start;
                end = r->end;
        } else {
                start = end = 0;
        }

        seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
                        depth * 2, "",
                        width, start,
                        width, end,
                        r->name ? r->name : "<BAD>");
        return 0;
}
static const struct seq_operations resource_op = {
        .start  = r_start,
        .next   = r_next,
        .stop   = r_stop,
        .show   = r_show,
};

static int __init ioresources_init(void)
{
        proc_create_seq_data("ioports", 0, NULL, &resource_op,
                             &ioport_resource);
        proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
        return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */
static void free_resource(struct resource *res)
{
        /*
         * If the resource was allocated using memblock early during boot
         * we'll leak it here: we can only return full pages back to the
         * buddy and trying to be smart and reusing them eventually in
         * alloc_resource() overcomplicates resource handling.
         */
        if (res && PageSlab(virt_to_head_page(res)))
                kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
        return kzalloc(sizeof(struct resource), flags);
}
/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
        resource_size_t start = new->start;
        resource_size_t end = new->end;
        struct resource *tmp, **p;

        if (end < start)
                return root;
        if (start < root->start)
                return root;
        if (end > root->end)
                return root;
        p = &root->child;
        for (;;) {
                tmp = *p;
                if (!tmp || tmp->start > end) {
                        new->sibling = tmp;
                        *p = new;
                        new->parent = root;
                        return NULL;
                }
                p = &tmp->sibling;
                if (tmp->end < start)
                        continue;
                return tmp;
        }
}
static int __release_resource(struct resource *old, bool release_child)
{
        struct resource *tmp, **p, *chd;

        p = &old->parent->child;
        for (;;) {
                tmp = *p;
                if (!tmp)
                        break;
                if (tmp == old) {
                        if (release_child || !(tmp->child)) {
                                *p = tmp->sibling;
                        } else {
                                for (chd = tmp->child;; chd = chd->sibling) {
                                        chd->parent = tmp->parent;
                                        if (!(chd->sibling))
                                                break;
                                }
                                *p = tmp->child;
                                chd->sibling = tmp->sibling;
                        }
                        old->parent = NULL;
                        return 0;
                }
                p = &tmp->sibling;
        }
        return -EINVAL;
}
static void __release_child_resources(struct resource *r)
{
        struct resource *tmp, *p;
        resource_size_t size;

        p = r->child;
        r->child = NULL;
        while (p) {
                tmp = p;
                p = p->sibling;

                tmp->parent = NULL;
                tmp->sibling = NULL;
                __release_child_resources(tmp);

                printk(KERN_DEBUG "release child resource %pR\n", tmp);
                /* need to restore size, and keep flags */
                size = resource_size(tmp);
                tmp->start = 0;
                tmp->end = size - 1;
        }
}

void release_child_resources(struct resource *r)
{
        write_lock(&resource_lock);
        __release_child_resources(r);
        write_unlock(&resource_lock);
}
/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL on success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __request_resource(root, new);
        write_unlock(&resource_lock);
        return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        conflict = request_resource_conflict(root, new);
        return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL(request_resource);
/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
        int retval;

        write_lock(&resource_lock);
        retval = __release_resource(old, true);
        write_unlock(&resource_lock);
        return retval;
}
EXPORT_SYMBOL(release_resource);
/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *                       [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * @start: start address of the resource searched for
 * @end: end address of same resource
 * @flags: flags which the resource must have
 * @desc: descriptor the resource must have
 * @res: return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
                               unsigned long flags, unsigned long desc,
                               struct resource *res)
{
        struct resource *p;

        if (!res)
                return -EINVAL;

        if (start >= end)
                return -EINVAL;

        read_lock(&resource_lock);

        for (p = iomem_resource.child; p; p = next_resource(p)) {
                /* If we passed the resource we are looking for, stop */
                if (p->start > end) {
                        p = NULL;
                        break;
                }

                /* Skip until we find a range that matches what we look for */
                if (p->end < start)
                        continue;

                if ((p->flags & flags) != flags)
                        continue;
                if ((desc != IORES_DESC_NONE) && (desc != p->desc))
                        continue;

                /* Found a match, break */
                break;
        }

        if (p) {
                /* copy data */
                *res = (struct resource) {
                        .start = max(start, p->start),
                        .end = min(end, p->end),
                        .flags = p->flags,
                        .desc = p->desc,
                        .parent = p->parent,
                };
        }

        read_unlock(&resource_lock);
        return p ? 0 : -ENODEV;
}
static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
                                 unsigned long flags, unsigned long desc,
                                 void *arg,
                                 int (*func)(struct resource *, void *))
{
        struct resource res;
        int ret = -EINVAL;

        while (start < end &&
               !find_next_iomem_res(start, end, flags, desc, &res)) {
                ret = (*func)(&res, arg);
                if (ret)
                        break;

                start = res.end + 1;
        }

        return ret;
}
/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *                       with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
                        u64 end, void *arg, int (*func)(struct resource *, void *))
{
        return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
                        int (*func)(struct resource *, void *))
{
        unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

        return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
                                     func);
}
/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
                 int (*func)(struct resource *, void *))
{
        unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
                                     func);
}
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                          void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        resource_size_t start, end;
        unsigned long flags;
        struct resource res;
        unsigned long pfn, end_pfn;
        int ret = -EINVAL;

        start = (u64) start_pfn << PAGE_SHIFT;
        end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
        flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        while (start < end &&
               !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
                pfn = PFN_UP(res.start);
                end_pfn = PFN_DOWN(res.end + 1);
                if (end_pfn > pfn)
                        ret = (*func)(pfn, end_pfn - pfn, arg);
                if (ret)
                        break;
                start = res.end + 1;
        }

        return ret;
}
static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
        return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);
static int __region_intersects(resource_size_t start, size_t size,
                               unsigned long flags, unsigned long desc)
{
        struct resource res;
        int type = 0; int other = 0;
        struct resource *p;

        res.start = start;
        res.end = start + size - 1;

        for (p = iomem_resource.child; p ; p = p->sibling) {
                bool is_type = (((p->flags & flags) == flags) &&
                                ((desc == IORES_DESC_NONE) ||
                                 (desc == p->desc)));

                if (resource_overlaps(p, &res))
                        is_type ? type++ : other++;
        }

        if (type == 0)
                return REGION_DISJOINT;

        if (other == 0)
                return REGION_INTERSECTS;

        return REGION_MIXED;
}
/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
                      unsigned long desc)
{
        int ret;

        read_lock(&resource_lock);
        ret = __region_intersects(start, size, flags, desc);
        read_unlock(&resource_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
                                             const struct resource *avail,
                                             resource_size_t size,
                                             resource_size_t align)
{
        return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
                          resource_size_t max)
{
        if (res->start < min)
                res->start = min;
        if (res->end > max)
                res->end = max;
}
/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
                           struct resource *new,
                           resource_size_t size,
                           struct resource_constraint *constraint)
{
        struct resource *this = root->child;
        struct resource tmp = *new, avail, alloc;

        tmp.start = root->start;
        /*
         * Skip past an allocated resource that starts at 0, since the assignment
         * of this->start - 1 to tmp->end below would cause an underflow.
         */
        if (this && this->start == root->start) {
                tmp.start = (this == old) ? old->start : this->end + 1;
                this = this->sibling;
        }
        for (;;) {
                if (this)
                        tmp.end = (this == old) ? this->end : this->start - 1;
                else
                        tmp.end = root->end;

                if (tmp.end < tmp.start)
                        goto next;

                resource_clip(&tmp, constraint->min, constraint->max);
                arch_remove_reservations(&tmp);

                /* Check for overflow after ALIGN() */
                avail.start = ALIGN(tmp.start, constraint->align);
                avail.end = tmp.end;
                avail.flags = new->flags & ~IORESOURCE_UNSET;
                if (avail.start >= tmp.start) {
                        alloc.flags = avail.flags;
                        alloc.start = constraint->alignf(constraint->alignf_data, &avail,
                                        size, constraint->align);
                        alloc.end = alloc.start + size - 1;
                        if (alloc.start <= alloc.end &&
                            resource_contains(&avail, &alloc)) {
                                new->start = alloc.start;
                                new->end = alloc.end;
                                return 0;
                        }
                }

next:           if (!this || this->end == root->end)
                        break;

                if (this != old)
                        tmp.start = this->end + 1;
                this = this->sibling;
        }
        return -EBUSY;
}
/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
                         resource_size_t size,
                         struct resource_constraint *constraint)
{
        return __find_resource(root, NULL, new, size, constraint);
}
/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *      The resource will be relocated if the new size cannot be reallocated in the
 *      current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
                               resource_size_t newsize,
                               struct resource_constraint *constraint)
{
        int err = 0;
        struct resource new = *old;
        struct resource *conflict;

        write_lock(&resource_lock);

        if ((err = __find_resource(root, old, &new, newsize, constraint)))
                goto out;

        if (resource_contains(&new, old)) {
                old->start = new.start;
                old->end = new.end;
                goto out;
        }

        if (old->child) {
                err = -EBUSY;
                goto out;
        }

        if (resource_contains(old, &new)) {
                old->start = new.start;
                old->end = new.end;
        } else {
                __release_resource(old, true);
                *old = new;
                conflict = __request_resource(root, old);
                BUG_ON(conflict);
        }
out:
        write_unlock(&resource_lock);
        return err;
}
/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *      The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
                      resource_size_t size, resource_size_t min,
                      resource_size_t max, resource_size_t align,
                      resource_size_t (*alignf)(void *,
                                                const struct resource *,
                                                resource_size_t,
                                                resource_size_t),
                      void *alignf_data)
{
        int err;
        struct resource_constraint constraint;

        if (!alignf)
                alignf = simple_align_resource;

        constraint.min = min;
        constraint.max = max;
        constraint.align = align;
        constraint.alignf = alignf;
        constraint.alignf_data = alignf_data;

        if (new->parent) {
                /* resource is already allocated, try reallocating with
                   the new constraints */
                return reallocate_resource(root, new, size, &constraint);
        }

        write_lock(&resource_lock);
        err = find_resource(root, new, size, &constraint);
        if (err >= 0 && __request_resource(root, new))
                err = -EBUSY;
        write_unlock(&resource_lock);
        return err;
}
EXPORT_SYMBOL(allocate_resource);
/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
        struct resource *res;

        read_lock(&resource_lock);
        for (res = root->child; res; res = res->sibling) {
                if (res->start == start)
                        break;
        }
        read_unlock(&resource_lock);

        return res;
}
/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *first, *next;

        for (;; parent = first) {
                first = __request_resource(parent, new);
                if (!first)
                        return first;

                if (first == parent)
                        return first;
                if (WARN_ON(first == new))      /* duplicated insertion */
                        return first;

                if ((first->start > new->start) || (first->end < new->end))
                        break;
                if ((first->start == new->start) && (first->end == new->end))
                        break;
        }

        for (next = first; ; next = next->sibling) {
                /* Partial overlap? Bad, and unfixable */
                if (next->start < new->start || next->end > new->end)
                        return next;
                if (!next->sibling)
                        break;
                if (next->sibling->start > new->end)
                        break;
        }

        new->parent = parent;
        new->sibling = next->sibling;
        new->child = first;

        next->sibling = NULL;
        for (next = first; next; next = next->sibling)
                next->parent = new;

        if (parent->child == first) {
                parent->child = new;
        } else {
                next = parent->child;
                while (next->sibling != first)
                        next = next->sibling;
                next->sibling = new;
        }
        return NULL;
}
/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __insert_resource(parent, new);
        write_unlock(&resource_lock);
        return conflict;
}
/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        conflict = insert_resource_conflict(parent, new);
        return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
        if (new->parent)
                return;

        write_lock(&resource_lock);
        for (;;) {
                struct resource *conflict;

                conflict = __insert_resource(root, new);
                if (!conflict)
                        break;
                if (conflict == root)
                        break;

                /* Ok, expand resource to cover the conflict, then try again .. */
                if (conflict->start < new->start)
                        new->start = conflict->start;
                if (conflict->end > new->end)
                        new->end = conflict->end;

                printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
        }
        write_unlock(&resource_lock);
}
/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
        int retval;

        write_lock(&resource_lock);
        retval = __release_resource(old, false);
        write_unlock(&resource_lock);
        return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);
static int __adjust_resource(struct resource *res, resource_size_t start,
                             resource_size_t size)
{
        struct resource *tmp, *parent = res->parent;
        resource_size_t end = start + size - 1;
        int result = -EBUSY;

        if (!parent)
                goto skip;

        if ((start < parent->start) || (end > parent->end))
                goto out;

        if (res->sibling && (res->sibling->start <= end))
                goto out;

        tmp = parent->child;
        if (tmp != res) {
                while (tmp->sibling != res)
                        tmp = tmp->sibling;
                if (start <= tmp->end)
                        goto out;
        }

skip:
        for (tmp = res->child; tmp; tmp = tmp->sibling)
                if ((tmp->start < start) || (tmp->end > end))
                        goto out;

        res->start = start;
        res->end = end;
        result = 0;

out:
        return result;
}
/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
                    resource_size_t size)
{
        int result;

        write_lock(&resource_lock);
        result = __adjust_resource(res, start, size);
        write_unlock(&resource_lock);
        return result;
}
EXPORT_SYMBOL(adjust_resource);
static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
                            resource_size_t end, const char *name)
{
        struct resource *parent = root;
        struct resource *conflict;
        struct resource *res = alloc_resource(GFP_ATOMIC);
        struct resource *next_res = NULL;
        int type = resource_type(root);

        if (!res)
                return;

        res->name = name;
        res->start = start;
        res->end = end;
        res->flags = type | IORESOURCE_BUSY;
        res->desc = IORES_DESC_NONE;

        while (1) {

                conflict = __request_resource(parent, res);
                if (!conflict) {
                        if (!next_res)
                                break;
                        res = next_res;
                        next_res = NULL;
                        continue;
                }

                /* conflict covered whole area */
                if (conflict->start <= res->start &&
                                conflict->end >= res->end) {
                        free_resource(res);
                        WARN_ON(next_res);
                        break;
                }

                /* failed, split and try again */
                if (conflict->start > res->start) {
                        end = res->end;
                        res->end = conflict->start - 1;
                        if (conflict->end < end) {
                                next_res = alloc_resource(GFP_ATOMIC);
                                if (!next_res) {
                                        free_resource(res);
                                        break;
                                }
                                next_res->name = name;
                                next_res->start = conflict->end + 1;
                                next_res->end = end;
                                next_res->flags = type | IORESOURCE_BUSY;
                                next_res->desc = IORES_DESC_NONE;
                        }
                } else {
                        res->start = conflict->end + 1;
                }
        }

}
void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
                          resource_size_t end, const char *name)
{
        int abort = 0;

        write_lock(&resource_lock);
        if (root->start > start || root->end < end) {
                pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
                       (unsigned long long)start, (unsigned long long)end,
                       root);
                if (start > root->end || end < root->start)
                        abort = 1;
                else {
                        if (end > root->end)
                                end = root->end;
                        if (start < root->start)
                                start = root->start;
                        pr_err("fixing request to [0x%llx-0x%llx]\n",
                               (unsigned long long)start,
                               (unsigned long long)end);
                }
                dump_stack();
        }
        if (!abort)
                __reserve_region_with_split(root, start, end, name);
        write_unlock(&resource_lock);
}
/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
        switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
        case IORESOURCE_SIZEALIGN:
                return resource_size(res);
        case IORESOURCE_STARTALIGN:
                return res->start;
        default:
                return 0;
        }
}
/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;
#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
        /* pairs with smp_store_release() in iomem_init_inode() */
        struct inode *inode = smp_load_acquire(&iomem_inode);

        /*
         * Check that the initialization has completed. Losing the race
         * is ok because it means drivers are claiming resources before
         * the fs_initcall level of init and prevent iomem_get_mapping users
         * from establishing mappings.
         */
        if (!inode)
                return;

        /*
         * The expectation is that the driver has successfully marked
         * the resource busy by this point, so devmem_is_allowed()
         * should start returning false, however for performance this
         * does not iterate the entire resource range.
         */
        if (devmem_is_allowed(PHYS_PFN(res->start)) &&
            devmem_is_allowed(PHYS_PFN(res->end))) {
                /*
                 * *cringe* iomem=relaxed says "go ahead, what's the
                 * worst that can happen?"
                 */
                return;
        }

        unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif
struct address_space *iomem_get_mapping(void)
{
        /*
         * This function is only called from file open paths, hence guaranteed
         * that fs_initcalls have completed and no need to check for NULL. But
         * since revoke_iomem can be called before the initcall we still need
         * the barrier to appease checkers.
         */
        return smp_load_acquire(&iomem_inode)->i_mapping;
}
static int __request_region_locked(struct resource *res, struct resource *parent,
                                   resource_size_t start, resource_size_t n,
                                   const char *name, int flags)
{
        DECLARE_WAITQUEUE(wait, current);

        res->name = name;
        res->start = start;
        res->end = start + n - 1;

        for (;;) {
                struct resource *conflict;

                res->flags = resource_type(parent) | resource_ext_type(parent);
                res->flags |= IORESOURCE_BUSY | flags;
                res->desc = parent->desc;

                conflict = __request_resource(parent, res);
                if (!conflict)
                        break;
                /*
                 * mm/hmm.c reserves physical addresses which then
                 * become unavailable to other users. Conflicts are
                 * not expected. Warn to aid debugging if encountered.
                 */
                if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
                        pr_warn("Unaddressable device %s %pR conflicts with %pR",
                                conflict->name, conflict, res);
                }
                if (conflict != parent) {
                        if (!(conflict->flags & IORESOURCE_BUSY)) {
                                parent = conflict;
                                continue;
                        }
                }
                if (conflict->flags & flags & IORESOURCE_MUXED) {
                        add_wait_queue(&muxed_resource_wait, &wait);
                        write_unlock(&resource_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule();
                        remove_wait_queue(&muxed_resource_wait, &wait);
                        write_lock(&resource_lock);
                        continue;
                }
                /* Uhhuh, that didn't work out.. */
                return -EBUSY;
        }

        return 0;
}
/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
                                  resource_size_t start, resource_size_t n,
                                  const char *name, int flags)
{
        struct resource *res = alloc_resource(GFP_KERNEL);
        int ret;

        if (!res)
                return NULL;

        write_lock(&resource_lock);
        ret = __request_region_locked(res, parent, start, n, name, flags);
        write_unlock(&resource_lock);

        if (ret) {
                free_resource(res);
                return NULL;
        }

        if (parent == &iomem_resource)
                revoke_iomem(res);

        return res;
}
EXPORT_SYMBOL(__request_region);
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
                      resource_size_t n)
{
        struct resource **p;
        resource_size_t end;

        p = &parent->child;
        end = start + n - 1;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *res = *p;

                if (!res)
                        break;
                if (res->start <= start && res->end >= end) {
                        if (!(res->flags & IORESOURCE_BUSY)) {
                                p = &res->child;
                                continue;
                        }
                        if (res->start != start || res->end != end)
                                break;
                        *p = res->sibling;
                        write_unlock(&resource_lock);
                        if (res->flags & IORESOURCE_MUXED)
                                wake_up(&muxed_resource_wait);
                        free_resource(res);
                        return;
                }
                p = &res->sibling;
        }

        write_unlock(&resource_lock);

        printk(KERN_WARNING "Trying to free nonexistent resource "
                "<%016llx-%016llx>\n", (unsigned long long)start,
                (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);
#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
        struct resource *parent = &iomem_resource;
        struct resource *new_res = NULL;
        bool alloc_nofail = false;
        struct resource **p;
        struct resource *res;
        resource_size_t end;

        end = start + size - 1;
        if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
                return;

        /*
         * We free up quite a lot of memory on memory hotunplug (esp. the
         * memmap), just before releasing the region. This is highly unlikely
         * to fail - let's play safe and make it never fail as the caller
         * cannot perform any error handling (e.g., trying to re-add memory
         * will fail similarly).
         */
retry:
        new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

        p = &parent->child;
        write_lock(&resource_lock);

        while ((res = *p)) {
                if (res->start >= end)
                        break;

                /* look for the next resource if it does not fit into */
                if (res->start > start || res->end < end) {
                        p = &res->sibling;
                        continue;
                }

                /*
                 * All memory regions added from memory-hotplug path have the
                 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
                 * this flag, we know that we are dealing with a resource coming
                 * from HMM/devm. HMM/devm use another mechanism to add/release
                 * a resource. This goes via devm_request_mem_region and
                 * devm_release_mem_region.
                 * HMM/devm take care to release their resources when they want,
                 * so if we are dealing with them, let us just back off here.
                 */
                if (!(res->flags & IORESOURCE_SYSRAM)) {
                        break;
                }

                if (!(res->flags & IORESOURCE_MEM))
                        break;

                if (!(res->flags & IORESOURCE_BUSY)) {
                        p = &res->child;
                        continue;
                }

                /* found the target resource; let's adjust accordingly */
                if (res->start == start && res->end == end) {
                        /* free the whole entry */
                        *p = res->sibling;
                        free_resource(res);
                } else if (res->start == start && res->end != end) {
                        /* adjust the start */
                        WARN_ON_ONCE(__adjust_resource(res, end + 1,
                                                       res->end - end));
                } else if (res->start != start && res->end == end) {
                        /* adjust the end */
                        WARN_ON_ONCE(__adjust_resource(res, res->start,
                                                       start - res->start));
                } else {
                        /* split into two entries - we need a new resource */
                        if (!new_res) {
                                new_res = alloc_resource(GFP_ATOMIC);
                                if (!new_res) {
                                        alloc_nofail = true;
                                        write_unlock(&resource_lock);
                                        goto retry;
                                }
                        }
                        new_res->name = res->name;
                        new_res->start = end + 1;
                        new_res->end = res->end;
                        new_res->flags = res->flags;
                        new_res->desc = res->desc;
                        new_res->parent = res->parent;
                        new_res->sibling = res->sibling;
                        new_res->child = NULL;

                        if (WARN_ON_ONCE(__adjust_resource(res, res->start,
                                                           start - res->start)))
                                break;
                        res->sibling = new_res;
                        new_res = NULL;
                }

                break;
        }

        write_unlock(&resource_lock);
        free_resource(new_res);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
                                           struct resource *r2)
{
        /* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
        return r1->flags == r2->flags && r1->end + 1 == r2->start &&
               r1->name == r2->name && r1->desc == r2->desc &&
               !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *      merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
        const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        struct resource *cur;

        if (WARN_ON_ONCE((res->flags & flags) != flags))
                return;

        write_lock(&resource_lock);
        res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

        /* Try to merge with next item in the list. */
        cur = res->sibling;
        if (cur && system_ram_resources_mergeable(res, cur)) {
                res->end = cur->end;
                res->sibling = cur->sibling;
                free_resource(cur);
        }

        /* Try to merge with previous item in the list. */
        cur = res->parent->child;
        while (cur && cur->sibling != res)
                cur = cur->sibling;
        if (cur && system_ram_resources_mergeable(cur, res)) {
                cur->end = res->end;
                cur->sibling = res->sibling;
                free_resource(res);
        }

        write_unlock(&resource_lock);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
        struct resource **r = ptr;

        release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
                          struct resource *new)
{
        struct resource *conflict, **ptr;

        ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        *ptr = new;

        conflict = request_resource_conflict(root, new);
        if (conflict) {
                dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
                        new, conflict->name, conflict);
                devres_free(ptr);
                return -EBUSY;
        }

        devres_add(dev, ptr);
        return 0;
}
EXPORT_SYMBOL(devm_request_resource);
static int devm_resource_match(struct device *dev, void *res, void *data)
{
        struct resource **ptr = res;

        return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
        WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
                               new));
}
EXPORT_SYMBOL(devm_release_resource);
struct region_devres {
        struct resource *parent;
        resource_size_t start;
        resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
        struct region_devres *this = res;

        __release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
        struct region_devres *this = res, *match = match_data;

        return this->parent == match->parent &&
                this->start == match->start && this->n == match->n;
}
struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
                      resource_size_t start, resource_size_t n, const char *name)
{
        struct region_devres *dr = NULL;
        struct resource *res;

        dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
                          GFP_KERNEL);
        if (!dr)
                return NULL;

        dr->parent = parent;
        dr->start = start;
        dr->n = n;

        res = __request_region(parent, start, n, name, 0);
        if (res)
                devres_add(dev, dr);
        else
                devres_free(dr);

        return res;
}
EXPORT_SYMBOL(__devm_request_region);
void __devm_release_region(struct device *dev, struct resource *parent,
                           resource_size_t start, resource_size_t n)
{
        struct region_devres match_data = { parent, start, n };

        __release_region(parent, start, n);
        WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
                               &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
        static int reserved;
        static struct resource reserve[MAXRESERVE];

        for (;;) {
                unsigned int io_start, io_num;
                int x = reserved;
                struct resource *parent;

                if (get_option(&str, &io_start) != 2)
                        break;
                if (get_option(&str, &io_num) == 0)
                        break;
                if (x < MAXRESERVE) {
                        struct resource *res = reserve + x;

                        /*
                         * If the region starts below 0x10000, we assume it's
                         * I/O port space; otherwise assume it's memory.
                         */
                        if (io_start < 0x10000) {
                                res->flags = IORESOURCE_IO;
                                parent = &ioport_resource;
                        } else {
                                res->flags = IORESOURCE_MEM;
                                parent = &iomem_resource;
                        }
                        res->name = "reserved";
                        res->start = io_start;
                        res->end = io_start + io_num - 1;
                        res->flags |= IORESOURCE_BUSY;
                        res->desc = IORES_DESC_NONE;
                        res->child = NULL;
                        if (request_resource(parent, res) == 0)
                                reserved = x+1;
                }
        }
        return 1;
}
__setup("reserve=", reserve_setup);
/*
 * Check if the requested addr and size spans more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
        struct resource *p = &iomem_resource;
        int err = 0;
        loff_t l;

        read_lock(&resource_lock);
        for (p = p->child; p ; p = r_next(NULL, p, &l)) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
                if (p->start >= addr + size)
                        continue;
                if (p->end < addr)
                        continue;
                if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
                    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
                        continue;
                /*
                 * if a resource is "BUSY", it's not a hardware resource
                 * but a driver mapping of such a resource; we don't want
                 * to warn for those; some drivers legitimately map only
                 * partial hardware resources. (example: vesafb)
                 */
                if (p->flags & IORESOURCE_BUSY)
                        continue;

                printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
                       (unsigned long long)addr,
                       (unsigned long long)(addr + size - 1),
                       p->name, p);
                err = -1;
                break;
        }
        read_unlock(&resource_lock);

        return err;
}
#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * check if an address is reserved in the iomem resource tree
 * returns true if reserved, false if not reserved.
 */
bool iomem_is_exclusive(u64 addr)
{
        struct resource *p = &iomem_resource;
        bool err = false;
        loff_t l;
        int size = PAGE_SIZE;

        if (!strict_iomem_checks)
                return false;

        addr = addr & PAGE_MASK;

        read_lock(&resource_lock);
        for (p = p->child; p ; p = r_next(NULL, p, &l)) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
                if (p->start >= addr + size)
                        break;
                if (p->end < addr)
                        continue;
                /*
                 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
                 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
                 * resource is busy.
                 */
                if ((p->flags & IORESOURCE_BUSY) == 0)
                        continue;
                if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
                    || p->flags & IORESOURCE_EXCLUSIVE) {
                        err = true;
                        break;
                }
        }
        read_unlock(&resource_lock);

        return err;
}
struct resource_entry *resource_list_create_entry(struct resource *res,
                                                  size_t extra_size)
{
        struct resource_entry *entry;

        entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
        if (entry) {
                INIT_LIST_HEAD(&entry->node);
                entry->res = res ? res : &entry->__res;
        }

        return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
        struct resource_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, node)
                resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
#ifdef CONFIG_DEVICE_PRIVATE
static struct resource *__request_free_mem_region(struct device *dev,
                struct resource *base, unsigned long size, const char *name)
{
        resource_size_t end, addr;
        struct resource *res;
        struct region_devres *dr = NULL;

        size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
        end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
        addr = end - size + 1UL;

        res = alloc_resource(GFP_KERNEL);
        if (!res)
                return ERR_PTR(-ENOMEM);

        if (dev) {
                dr = devres_alloc(devm_region_release,
                                  sizeof(struct region_devres), GFP_KERNEL);
                if (!dr) {
                        free_resource(res);
                        return ERR_PTR(-ENOMEM);
                }
        }

        write_lock(&resource_lock);
        for (; addr > size && addr >= base->start; addr -= size) {
                if (__region_intersects(addr, size, 0, IORES_DESC_NONE) !=
                                REGION_DISJOINT)
                        continue;

                if (__request_region_locked(res, &iomem_resource, addr, size,
                                            name, 0))
                        break;

                if (dev) {
                        dr->parent = &iomem_resource;
                        dr->start = addr;
                        dr->n = size;
                        devres_add(dev, dr);
                }

                res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
                write_unlock(&resource_lock);

                /*
                 * A driver is claiming this region so revoke any mappings.
                 */
                revoke_iomem(res);
                return res;
        }

        write_unlock(&resource_lock);

        free_resource(res);
        if (dr)
                devres_free(dr);

        return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address big enough to
 * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
 * memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
                struct resource *base, unsigned long size)
{
        return __request_free_mem_region(dev, base, size, dev_name(dev));
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
struct resource *request_free_mem_region(struct resource *base,
                unsigned long size, const char *name)
{
        return __request_free_mem_region(NULL, base, size, name);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

#endif /* CONFIG_DEVICE_PRIVATE */
static int __init strict_iomem(char *str)
{
        if (strstr(str, "relaxed"))
                strict_iomem_checks = 0;
        if (strstr(str, "strict"))
                strict_iomem_checks = 1;
        return 1;
}
static int iomem_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
        .name            = "iomem",
        .owner           = THIS_MODULE,
        .init_fs_context = iomem_fs_init_fs_context,
        .kill_sb         = kill_anon_super,
};
static int __init iomem_init_inode(void)
{
        static struct vfsmount *iomem_vfs_mount;
        static int iomem_fs_cnt;
        struct inode *inode;
        int rc;

        rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
        if (rc < 0) {
                pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
                return rc;
        }

        inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
        if (IS_ERR(inode)) {
                rc = PTR_ERR(inode);
                pr_err("Cannot allocate inode for iomem: %d\n", rc);
                simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
                return rc;
        }

        /*
         * Publish iomem revocation inode initialized.
         * Pairs with smp_load_acquire() in revoke_iomem().
         */
        smp_store_release(&iomem_inode, inode);

        return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);