1 /*
2 * linux/kernel/resource.c
3 *
4 * Copyright (C) 1999 Linus Torvalds
5 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
6 *
7 * Arbitrary resource management.
8 */
9
10 #include <linux/module.h>
11 #include <linux/errno.h>
12 #include <linux/ioport.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/fs.h>
17 #include <linux/proc_fs.h>
18 #include <linux/sched.h>
19 #include <linux/seq_file.h>
20 #include <linux/device.h>
21 #include <linux/pfn.h>
22 #include <asm/io.h>
23
24
25 struct resource ioport_resource = {
26 .name = "PCI IO",
27 .start = 0,
28 .end = IO_SPACE_LIMIT,
29 .flags = IORESOURCE_IO,
30 };
31 EXPORT_SYMBOL(ioport_resource);
32
33 struct resource iomem_resource = {
34 .name = "PCI mem",
35 .start = 0,
36 .end = -1,
37 .flags = IORESOURCE_MEM,
38 };
39 EXPORT_SYMBOL(iomem_resource);
40
41 /* constraints to be met while allocating resources */
42 struct resource_constraint {
43 resource_size_t min, max, align;
44 resource_size_t (*alignf)(void *, const struct resource *,
45 resource_size_t, resource_size_t);
46 void *alignf_data;
47 };
48
49 static DEFINE_RWLOCK(resource_lock);
50
51 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
52 {
53 struct resource *p = v;
54 (*pos)++;
55 if (p->child)
56 return p->child;
57 while (!p->sibling && p->parent)
58 p = p->parent;
59 return p->sibling;
60 }
61
62 #ifdef CONFIG_PROC_FS
63
64 enum { MAX_IORES_LEVEL = 5 };
65
66 static void *r_start(struct seq_file *m, loff_t *pos)
67 __acquires(resource_lock)
68 {
69 struct resource *p = m->private;
70 loff_t l = 0;
71 read_lock(&resource_lock);
72 for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
73 ;
74 return p;
75 }
76
77 static void r_stop(struct seq_file *m, void *v)
78 __releases(resource_lock)
79 {
80 read_unlock(&resource_lock);
81 }
82
83 static int r_show(struct seq_file *m, void *v)
84 {
85 struct resource *root = m->private;
86 struct resource *r = v, *p;
87 int width = root->end < 0x10000 ? 4 : 8;
88 int depth;
89
90 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
91 if (p->parent == root)
92 break;
93 seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
94 depth * 2, "",
95 width, (unsigned long long) r->start,
96 width, (unsigned long long) r->end,
97 r->name ? r->name : "<BAD>");
98 return 0;
99 }
100
101 static const struct seq_operations resource_op = {
102 .start = r_start,
103 .next = r_next,
104 .stop = r_stop,
105 .show = r_show,
106 };
107
108 static int ioports_open(struct inode *inode, struct file *file)
109 {
110 int res = seq_open(file, &resource_op);
111 if (!res) {
112 struct seq_file *m = file->private_data;
113 m->private = &ioport_resource;
114 }
115 return res;
116 }
117
118 static int iomem_open(struct inode *inode, struct file *file)
119 {
120 int res = seq_open(file, &resource_op);
121 if (!res) {
122 struct seq_file *m = file->private_data;
123 m->private = &iomem_resource;
124 }
125 return res;
126 }
127
128 static const struct file_operations proc_ioports_operations = {
129 .open = ioports_open,
130 .read = seq_read,
131 .llseek = seq_lseek,
132 .release = seq_release,
133 };
134
135 static const struct file_operations proc_iomem_operations = {
136 .open = iomem_open,
137 .read = seq_read,
138 .llseek = seq_lseek,
139 .release = seq_release,
140 };
141
142 static int __init ioresources_init(void)
143 {
144 proc_create("ioports", 0, NULL, &proc_ioports_operations);
145 proc_create("iomem", 0, NULL, &proc_iomem_operations);
146 return 0;
147 }
148 __initcall(ioresources_init);
149
150 #endif /* CONFIG_PROC_FS */
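/*
 * Illustrative note (not part of the original file): with the seq_file hooks
 * above, /proc/ioports and /proc/iomem render the resource trees as nested,
 * indented address ranges.  A made-up /proc/iomem excerpt, formatted the way
 * r_show() emits it (two spaces per nesting level, 8 hex digits for iomem):
 *
 *	00000000-0009ffff : System RAM
 *	000a0000-000bffff : PCI Bus 0000:00
 *	00100000-1fffffff : System RAM
 *	  01000000-015fffff : Kernel code
 *	  01600000-0175ffff : Kernel data
 */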
151
152 /* Return the conflict entry if you can't request it */
153 static struct resource * __request_resource(struct resource *root, struct resource *new)
154 {
155 resource_size_t start = new->start;
156 resource_size_t end = new->end;
157 struct resource *tmp, **p;
158
159 if (end < start)
160 return root;
161 if (start < root->start)
162 return root;
163 if (end > root->end)
164 return root;
165 p = &root->child;
166 for (;;) {
167 tmp = *p;
168 if (!tmp || tmp->start > end) {
169 new->sibling = tmp;
170 *p = new;
171 new->parent = root;
172 return NULL;
173 }
174 p = &tmp->sibling;
175 if (tmp->end < start)
176 continue;
177 return tmp;
178 }
179 }
180
181 static int __release_resource(struct resource *old)
182 {
183 struct resource *tmp, **p;
184
185 p = &old->parent->child;
186 for (;;) {
187 tmp = *p;
188 if (!tmp)
189 break;
190 if (tmp == old) {
191 *p = tmp->sibling;
192 old->parent = NULL;
193 return 0;
194 }
195 p = &tmp->sibling;
196 }
197 return -EINVAL;
198 }
199
200 static void __release_child_resources(struct resource *r)
201 {
202 struct resource *tmp, *p;
203 resource_size_t size;
204
205 p = r->child;
206 r->child = NULL;
207 while (p) {
208 tmp = p;
209 p = p->sibling;
210
211 tmp->parent = NULL;
212 tmp->sibling = NULL;
213 __release_child_resources(tmp);
214
215 printk(KERN_DEBUG "release child resource %pR\n", tmp);
216 /* need to restore size, and keep flags */
217 size = resource_size(tmp);
218 tmp->start = 0;
219 tmp->end = size - 1;
220 }
221 }
222
223 void release_child_resources(struct resource *r)
224 {
225 write_lock(&resource_lock);
226 __release_child_resources(r);
227 write_unlock(&resource_lock);
228 }
229
230 /**
231 * request_resource_conflict - request and reserve an I/O or memory resource
232 * @root: root resource descriptor
233 * @new: resource descriptor desired by caller
234 *
235  * Returns NULL on success, the conflicting resource on error.
236 */
237 struct resource *request_resource_conflict(struct resource *root, struct resource *new)
238 {
239 struct resource *conflict;
240
241 write_lock(&resource_lock);
242 conflict = __request_resource(root, new);
243 write_unlock(&resource_lock);
244 return conflict;
245 }
246
247 /**
248 * request_resource - request and reserve an I/O or memory resource
249 * @root: root resource descriptor
250 * @new: resource descriptor desired by caller
251 *
252 * Returns 0 for success, negative error code on error.
253 */
254 int request_resource(struct resource *root, struct resource *new)
255 {
256 struct resource *conflict;
257
258 conflict = request_resource_conflict(root, new);
259 return conflict ? -EBUSY : 0;
260 }
261
262 EXPORT_SYMBOL(request_resource);
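/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * owns a fixed MMIO window could claim it under iomem_resource as below.
 * The "foo-ctrl" name and the address range are invented for the example;
 * request_resource() returns -EBUSY if the range is already claimed.
 *
 *	static struct resource foo_ctrl_res = {
 *		.name  = "foo-ctrl",
 *		.start = 0xfed40000,
 *		.end   = 0xfed40fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_ctrl_res))
 *		return -EBUSY;
 */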
263
264 /**
265 * release_resource - release a previously reserved resource
266 * @old: resource pointer
267 */
268 int release_resource(struct resource *old)
269 {
270 int retval;
271
272 write_lock(&resource_lock);
273 retval = __release_resource(old);
274 write_unlock(&resource_lock);
275 return retval;
276 }
277
278 EXPORT_SYMBOL(release_resource);
279
280 #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
281 /*
282  * Finds the lowest memory resource that exists within [res->start, res->end).
283  * The caller must specify res->start, res->end, res->flags and "name".
284  * If found, returns 0 and overwrites res; if not found, returns -1.
285 */
286 static int find_next_system_ram(struct resource *res, char *name)
287 {
288 resource_size_t start, end;
289 struct resource *p;
290
291 BUG_ON(!res);
292
293 start = res->start;
294 end = res->end;
295 BUG_ON(start >= end);
296
297 read_lock(&resource_lock);
298 for (p = iomem_resource.child; p ; p = p->sibling) {
299 /* system ram is just marked as IORESOURCE_MEM */
300 if (p->flags != res->flags)
301 continue;
302 if (name && strcmp(p->name, name))
303 continue;
304 if (p->start > end) {
305 p = NULL;
306 break;
307 }
308 if ((p->end >= start) && (p->start < end))
309 break;
310 }
311 read_unlock(&resource_lock);
312 if (!p)
313 return -1;
314 /* copy data */
315 if (res->start < p->start)
316 res->start = p->start;
317 if (res->end > p->end)
318 res->end = p->end;
319 return 0;
320 }
321
322 /*
323  * This function calls the callback for each memory range of "System RAM",
324  * i.e. ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
325  * For now, this function only handles "System RAM".
326 */
327 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
328 void *arg, int (*func)(unsigned long, unsigned long, void *))
329 {
330 struct resource res;
331 unsigned long pfn, end_pfn;
332 u64 orig_end;
333 int ret = -1;
334
335 res.start = (u64) start_pfn << PAGE_SHIFT;
336 res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
337 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
338 orig_end = res.end;
339 while ((res.start < res.end) &&
340 (find_next_system_ram(&res, "System RAM") >= 0)) {
341 pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
342 end_pfn = (res.end + 1) >> PAGE_SHIFT;
343 if (end_pfn > pfn)
344 ret = (*func)(pfn, end_pfn - pfn, arg);
345 if (ret)
346 break;
347 res.start = res.end + 1;
348 res.end = orig_end;
349 }
350 return ret;
351 }
352
353 #endif
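/*
 * Usage sketch (illustrative, not part of the original file):
 * walk_system_ram_range() invokes the callback once per contiguous run of
 * "System RAM" pages inside [start_pfn, start_pfn + nr_pages).  A caller
 * counting those pages might look like this; count_ram_pages() is a made-up
 * helper and max_pfn is assumed to be available from the architecture.
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr_pages,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long nr_ram = 0;
 *	walk_system_ram_range(0, max_pfn, &nr_ram, count_ram_pages);
 */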
354
355 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
356 {
357 return 1;
358 }
359 /*
360  * This generic page_is_ram() returns true if the specified address is
361  * registered as "System RAM" in the iomem_resource list.
362 */
363 int __weak page_is_ram(unsigned long pfn)
364 {
365 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
366 }
367
368 void __weak arch_remove_reservations(struct resource *avail)
369 {
370 }
371
372 static resource_size_t simple_align_resource(void *data,
373 const struct resource *avail,
374 resource_size_t size,
375 resource_size_t align)
376 {
377 return avail->start;
378 }
379
380 static void resource_clip(struct resource *res, resource_size_t min,
381 resource_size_t max)
382 {
383 if (res->start < min)
384 res->start = min;
385 if (res->end > max)
386 res->end = max;
387 }
388
389 static bool resource_contains(struct resource *res1, struct resource *res2)
390 {
391 return res1->start <= res2->start && res1->end >= res2->end;
392 }
393
394 /*
395 * Find empty slot in the resource tree with the given range and
396 * alignment constraints
397 */
398 static int __find_resource(struct resource *root, struct resource *old,
399 struct resource *new,
400 resource_size_t size,
401 struct resource_constraint *constraint)
402 {
403 struct resource *this = root->child;
404 struct resource tmp = *new, avail, alloc;
405
406 tmp.flags = new->flags;
407 tmp.start = root->start;
408 /*
409 * Skip past an allocated resource that starts at 0, since the assignment
410 * of this->start - 1 to tmp->end below would cause an underflow.
411 */
412 if (this && this->start == root->start) {
413 tmp.start = (this == old) ? old->start : this->end + 1;
414 this = this->sibling;
415 }
416 for(;;) {
417 if (this)
418 tmp.end = (this == old) ? this->end : this->start - 1;
419 else
420 tmp.end = root->end;
421
422 resource_clip(&tmp, constraint->min, constraint->max);
423 arch_remove_reservations(&tmp);
424
425 /* Check for overflow after ALIGN() */
426 avail = *new;
427 avail.start = ALIGN(tmp.start, constraint->align);
428 avail.end = tmp.end;
429 if (avail.start >= tmp.start) {
430 alloc.start = constraint->alignf(constraint->alignf_data, &avail,
431 size, constraint->align);
432 alloc.end = alloc.start + size - 1;
433 if (resource_contains(&avail, &alloc)) {
434 new->start = alloc.start;
435 new->end = alloc.end;
436 return 0;
437 }
438 }
439 if (!this)
440 break;
441 if (this != old)
442 tmp.start = this->end + 1;
443 this = this->sibling;
444 }
445 return -EBUSY;
446 }
447
448 /*
449 * Find empty slot in the resource tree given range and alignment.
450 */
451 static int find_resource(struct resource *root, struct resource *new,
452 resource_size_t size,
453 struct resource_constraint *constraint)
454 {
455 return __find_resource(root, NULL, new, size, constraint);
456 }
457
458 /**
459  * reallocate_resource - resize a slot in the resource tree within the given range & alignment.
460  * The resource will be relocated if the new size cannot be accommodated at the
461  * current location.
462 *
463 * @root: root resource descriptor
464 * @old: resource descriptor desired by caller
465 * @newsize: new size of the resource descriptor
466 * @constraint: the size and alignment constraints to be met.
467 */
468 int reallocate_resource(struct resource *root, struct resource *old,
469 resource_size_t newsize,
470 struct resource_constraint *constraint)
471 {
472 int err=0;
473 struct resource new = *old;
474 struct resource *conflict;
475
476 write_lock(&resource_lock);
477
478 if ((err = __find_resource(root, old, &new, newsize, constraint)))
479 goto out;
480
481 if (resource_contains(&new, old)) {
482 old->start = new.start;
483 old->end = new.end;
484 goto out;
485 }
486
487 if (old->child) {
488 err = -EBUSY;
489 goto out;
490 }
491
492 if (resource_contains(old, &new)) {
493 old->start = new.start;
494 old->end = new.end;
495 } else {
496 __release_resource(old);
497 *old = new;
498 conflict = __request_resource(root, old);
499 BUG_ON(conflict);
500 }
501 out:
502 write_unlock(&resource_lock);
503 return err;
504 }
505
506
507 /**
508 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
509 * The resource will be reallocated with a new size if it was already allocated
510 * @root: root resource descriptor
511 * @new: resource descriptor desired by caller
512 * @size: requested resource region size
513  * @min: minimum acceptable start address
514  * @max: maximum acceptable end address
515 * @align: alignment requested, in bytes
516 * @alignf: alignment function, optional, called if not NULL
517 * @alignf_data: arbitrary data to pass to the @alignf function
518 */
519 int allocate_resource(struct resource *root, struct resource *new,
520 resource_size_t size, resource_size_t min,
521 resource_size_t max, resource_size_t align,
522 resource_size_t (*alignf)(void *,
523 const struct resource *,
524 resource_size_t,
525 resource_size_t),
526 void *alignf_data)
527 {
528 int err;
529 struct resource_constraint constraint;
530
531 if (!alignf)
532 alignf = simple_align_resource;
533
534 constraint.min = min;
535 constraint.max = max;
536 constraint.align = align;
537 constraint.alignf = alignf;
538 constraint.alignf_data = alignf_data;
539
540 if ( new->parent ) {
541 /* resource is already allocated, try reallocating with
542 the new constraints */
543 return reallocate_resource(root, new, size, &constraint);
544 }
545
546 write_lock(&resource_lock);
547 err = find_resource(root, new, size, &constraint);
548 if (err >= 0 && __request_resource(root, new))
549 err = -EBUSY;
550 write_unlock(&resource_lock);
551 return err;
552 }
553
554 EXPORT_SYMBOL(allocate_resource);
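/*
 * Usage sketch (illustrative, not part of the original file):
 * allocate_resource() finds a free, suitably aligned window inside @root and
 * claims it.  The size, alignment and "foo-buf" name below are invented;
 * passing NULL for @alignf falls back to simple_align_resource(), i.e.
 * first-fit at the start of each free gap.
 *
 *	static struct resource foo_buf = {
 *		.name  = "foo-buf",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &foo_buf,
 *				0x10000, 0, (resource_size_t)-1,
 *				0x10000, NULL, NULL);
 *
 * i.e. a 64 KiB region, anywhere in the address space, aligned to 64 KiB.
 */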
555
556 /**
557 * lookup_resource - find an existing resource by a resource start address
558 * @root: root resource descriptor
559 * @start: resource start address
560 *
561 * Returns a pointer to the resource if found, NULL otherwise
562 */
563 struct resource *lookup_resource(struct resource *root, resource_size_t start)
564 {
565 struct resource *res;
566
567 read_lock(&resource_lock);
568 for (res = root->child; res; res = res->sibling) {
569 if (res->start == start)
570 break;
571 }
572 read_unlock(&resource_lock);
573
574 return res;
575 }
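/*
 * Usage sketch (illustrative, not part of the original file):
 * lookup_resource() only matches the exact start address of a direct child
 * of @root, e.g.
 *
 *	struct resource *res = lookup_resource(&ioport_resource, 0x60);
 *
 * returns the child starting at I/O port 0x60, or NULL.  The pointer is
 * returned after resource_lock is dropped, so the caller must otherwise
 * guarantee the resource is not released concurrently.
 */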
576
577 /*
578 * Insert a resource into the resource tree. If successful, return NULL,
579 * otherwise return the conflicting resource (compare to __request_resource())
580 */
581 static struct resource * __insert_resource(struct resource *parent, struct resource *new)
582 {
583 struct resource *first, *next;
584
585 for (;; parent = first) {
586 first = __request_resource(parent, new);
587 if (!first)
588 return first;
589
590 if (first == parent)
591 return first;
592 if (WARN_ON(first == new)) /* duplicated insertion */
593 return first;
594
595 if ((first->start > new->start) || (first->end < new->end))
596 break;
597 if ((first->start == new->start) && (first->end == new->end))
598 break;
599 }
600
601 for (next = first; ; next = next->sibling) {
602 /* Partial overlap? Bad, and unfixable */
603 if (next->start < new->start || next->end > new->end)
604 return next;
605 if (!next->sibling)
606 break;
607 if (next->sibling->start > new->end)
608 break;
609 }
610
611 new->parent = parent;
612 new->sibling = next->sibling;
613 new->child = first;
614
615 next->sibling = NULL;
616 for (next = first; next; next = next->sibling)
617 next->parent = new;
618
619 if (parent->child == first) {
620 parent->child = new;
621 } else {
622 next = parent->child;
623 while (next->sibling != first)
624 next = next->sibling;
625 next->sibling = new;
626 }
627 return NULL;
628 }
629
630 /**
631 * insert_resource_conflict - Inserts resource in the resource tree
632 * @parent: parent of the new resource
633 * @new: new resource to insert
634 *
635  * Returns NULL on success, the conflicting resource if the resource can't be inserted.
636 *
637 * This function is equivalent to request_resource_conflict when no conflict
638 * happens. If a conflict happens, and the conflicting resources
639 * entirely fit within the range of the new resource, then the new
640 * resource is inserted and the conflicting resources become children of
641 * the new resource.
642 */
643 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
644 {
645 struct resource *conflict;
646
647 write_lock(&resource_lock);
648 conflict = __insert_resource(parent, new);
649 write_unlock(&resource_lock);
650 return conflict;
651 }
652
653 /**
654 * insert_resource - Inserts a resource in the resource tree
655 * @parent: parent of the new resource
656 * @new: new resource to insert
657 *
658 * Returns 0 on success, -EBUSY if the resource can't be inserted.
659 */
660 int insert_resource(struct resource *parent, struct resource *new)
661 {
662 struct resource *conflict;
663
664 conflict = insert_resource_conflict(parent, new);
665 return conflict ? -EBUSY : 0;
666 }
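/*
 * Usage sketch (illustrative, not part of the original file): unlike
 * request_resource(), insert_resource() tolerates conflicts that fit entirely
 * inside the new range by re-parenting them.  The "foo PMC" window below is
 * invented:
 *
 *	static struct resource foo_pmc = {
 *		.name  = "foo PMC",
 *		.start = 0xfe000000,
 *		.end   = 0xfe00ffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &foo_pmc))
 *		return -EBUSY;
 *
 * Existing resources lying wholly inside 0xfe000000-0xfe00ffff become
 * children of "foo PMC" instead of causing the insertion to fail.
 */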
667
668 /**
669 * insert_resource_expand_to_fit - Insert a resource into the resource tree
670 * @root: root resource descriptor
671 * @new: new resource to insert
672 *
673 * Insert a resource into the resource tree, possibly expanding it in order
674 * to make it encompass any conflicting resources.
675 */
676 void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
677 {
678 if (new->parent)
679 return;
680
681 write_lock(&resource_lock);
682 for (;;) {
683 struct resource *conflict;
684
685 conflict = __insert_resource(root, new);
686 if (!conflict)
687 break;
688 if (conflict == root)
689 break;
690
691 /* Ok, expand resource to cover the conflict, then try again .. */
692 if (conflict->start < new->start)
693 new->start = conflict->start;
694 if (conflict->end > new->end)
695 new->end = conflict->end;
696
697 printk(KERN_INFO "Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
698 }
699 write_unlock(&resource_lock);
700 }
701
702 /**
703 * adjust_resource - modify a resource's start and size
704 * @res: resource to modify
705 * @start: new start value
706 * @size: new size
707 *
708 * Given an existing resource, change its start and size to match the
709 * arguments. Returns 0 on success, -EBUSY if it can't fit.
710 * Existing children of the resource are assumed to be immutable.
711 */
712 int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
713 {
714 struct resource *tmp, *parent = res->parent;
715 resource_size_t end = start + size - 1;
716 int result = -EBUSY;
717
718 write_lock(&resource_lock);
719
720 if ((start < parent->start) || (end > parent->end))
721 goto out;
722
723 for (tmp = res->child; tmp; tmp = tmp->sibling) {
724 if ((tmp->start < start) || (tmp->end > end))
725 goto out;
726 }
727
728 if (res->sibling && (res->sibling->start <= end))
729 goto out;
730
731 tmp = parent->child;
732 if (tmp != res) {
733 while (tmp->sibling != res)
734 tmp = tmp->sibling;
735 if (start <= tmp->end)
736 goto out;
737 }
738
739 res->start = start;
740 res->end = end;
741 result = 0;
742
743 out:
744 write_unlock(&resource_lock);
745 return result;
746 }
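/*
 * Usage sketch (illustrative, not part of the original file): growing a
 * previously requested region (the made-up foo_ctrl_res from the
 * request_resource() sketch above) from 4 KiB to 8 KiB in place:
 *
 *	err = adjust_resource(&foo_ctrl_res, foo_ctrl_res.start, 0x2000);
 *
 * The call fails with -EBUSY if the new range would leave the parent or
 * collide with a sibling or one of its own children.
 */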
747
748 static void __init __reserve_region_with_split(struct resource *root,
749 resource_size_t start, resource_size_t end,
750 const char *name)
751 {
752 struct resource *parent = root;
753 struct resource *conflict;
754 struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
755
756 if (!res)
757 return;
758
759 res->name = name;
760 res->start = start;
761 res->end = end;
762 res->flags = IORESOURCE_BUSY;
763
764 conflict = __request_resource(parent, res);
765 if (!conflict)
766 return;
767
768 /* failed, split and try again */
769 kfree(res);
770
771 /* conflict covered whole area */
772 if (conflict->start <= start && conflict->end >= end)
773 return;
774
775 if (conflict->start > start)
776 __reserve_region_with_split(root, start, conflict->start-1, name);
777 if (conflict->end < end)
778 __reserve_region_with_split(root, conflict->end+1, end, name);
779 }
780
781 void __init reserve_region_with_split(struct resource *root,
782 resource_size_t start, resource_size_t end,
783 const char *name)
784 {
785 write_lock(&resource_lock);
786 __reserve_region_with_split(root, start, end, name);
787 write_unlock(&resource_lock);
788 }
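/*
 * Usage sketch (illustrative, not part of the original file): early platform
 * code can use reserve_region_with_split() to mark a firmware-owned range
 * busy even when parts of it were already claimed; only the still-free gaps
 * get new "firmware reserved" entries.  The range below is invented.
 *
 *	reserve_region_with_split(&iomem_resource, 0xa0000000, 0xa00fffff,
 *				  "firmware reserved");
 */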
789
790 EXPORT_SYMBOL(adjust_resource);
791
792 /**
793 * resource_alignment - calculate resource's alignment
794 * @res: resource pointer
795 *
796 * Returns alignment on success, 0 (invalid alignment) on failure.
797 */
798 resource_size_t resource_alignment(struct resource *res)
799 {
800 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
801 case IORESOURCE_SIZEALIGN:
802 return resource_size(res);
803 case IORESOURCE_STARTALIGN:
804 return res->start;
805 default:
806 return 0;
807 }
808 }
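/*
 * Illustrative example (not part of the original file): a resource flagged
 * IORESOURCE_SIZEALIGN with start = 0xe0000000 and end = 0xe0ffffff has
 * resource_size() == 0x1000000, so resource_alignment() returns 16 MiB;
 * with IORESOURCE_STARTALIGN it would instead return the start address,
 * 0xe0000000.
 */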
809
810 /*
811 * This is compatibility stuff for IO resources.
812 *
813 * Note how this, unlike the above, knows about
814 * the IO flag meanings (busy etc).
815 *
816 * request_region creates a new busy region.
817 *
818 * check_region returns non-zero if the area is already busy.
819 *
820 * release_region releases a matching busy region.
821 */
822
823 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
824
825 /**
826 * __request_region - create a new busy resource region
827 * @parent: parent resource descriptor
828 * @start: resource start address
829 * @n: resource region size
830 * @name: reserving caller's ID string
831 * @flags: IO resource flags
832 */
833 struct resource * __request_region(struct resource *parent,
834 resource_size_t start, resource_size_t n,
835 const char *name, int flags)
836 {
837 DECLARE_WAITQUEUE(wait, current);
838 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
839
840 if (!res)
841 return NULL;
842
843 res->name = name;
844 res->start = start;
845 res->end = start + n - 1;
846 res->flags = IORESOURCE_BUSY;
847 res->flags |= flags;
848
849 write_lock(&resource_lock);
850
851 for (;;) {
852 struct resource *conflict;
853
854 conflict = __request_resource(parent, res);
855 if (!conflict)
856 break;
857 if (conflict != parent) {
858 parent = conflict;
859 if (!(conflict->flags & IORESOURCE_BUSY))
860 continue;
861 }
862 if (conflict->flags & flags & IORESOURCE_MUXED) {
863 add_wait_queue(&muxed_resource_wait, &wait);
864 write_unlock(&resource_lock);
865 set_current_state(TASK_UNINTERRUPTIBLE);
866 schedule();
867 remove_wait_queue(&muxed_resource_wait, &wait);
868 write_lock(&resource_lock);
869 continue;
870 }
871 /* Uhhuh, that didn't work out.. */
872 kfree(res);
873 res = NULL;
874 break;
875 }
876 write_unlock(&resource_lock);
877 return res;
878 }
879 EXPORT_SYMBOL(__request_region);
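/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * normally reach __request_region() through the request_region() /
 * request_mem_region() wrappers in <linux/ioport.h>.  A probe path for an
 * invented device:
 *
 *	if (!request_mem_region(0xfebf0000, 0x1000, "foo-nic"))
 *		return -EBUSY;
 *	...
 *	release_mem_region(0xfebf0000, 0x1000);
 *
 * The busy region created here is what shows up in /proc/iomem under the
 * driver's name.
 */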
880
881 /**
882 * __check_region - check if a resource region is busy or free
883 * @parent: parent resource descriptor
884 * @start: resource start address
885 * @n: resource region size
886 *
887 * Returns 0 if the region is free at the moment it is checked,
888 * returns %-EBUSY if the region is busy.
889 *
890 * NOTE:
891 * This function is deprecated because its use is racy.
892 * Even if it returns 0, a subsequent call to request_region()
893 * may fail because another driver etc. just allocated the region.
894 * Do NOT use it. It will be removed from the kernel.
895 */
896 int __check_region(struct resource *parent, resource_size_t start,
897 resource_size_t n)
898 {
899 struct resource * res;
900
901 res = __request_region(parent, start, n, "check-region", 0);
902 if (!res)
903 return -EBUSY;
904
905 release_resource(res);
906 kfree(res);
907 return 0;
908 }
909 EXPORT_SYMBOL(__check_region);
910
911 /**
912 * __release_region - release a previously reserved resource region
913 * @parent: parent resource descriptor
914 * @start: resource start address
915 * @n: resource region size
916 *
917 * The described resource region must match a currently busy region.
918 */
919 void __release_region(struct resource *parent, resource_size_t start,
920 resource_size_t n)
921 {
922 struct resource **p;
923 resource_size_t end;
924
925 p = &parent->child;
926 end = start + n - 1;
927
928 write_lock(&resource_lock);
929
930 for (;;) {
931 struct resource *res = *p;
932
933 if (!res)
934 break;
935 if (res->start <= start && res->end >= end) {
936 if (!(res->flags & IORESOURCE_BUSY)) {
937 p = &res->child;
938 continue;
939 }
940 if (res->start != start || res->end != end)
941 break;
942 *p = res->sibling;
943 write_unlock(&resource_lock);
944 if (res->flags & IORESOURCE_MUXED)
945 wake_up(&muxed_resource_wait);
946 kfree(res);
947 return;
948 }
949 p = &res->sibling;
950 }
951
952 write_unlock(&resource_lock);
953
954 printk(KERN_WARNING "Trying to free nonexistent resource "
955 "<%016llx-%016llx>\n", (unsigned long long)start,
956 (unsigned long long)end);
957 }
958 EXPORT_SYMBOL(__release_region);
959
960 /*
961 * Managed region resource
962 */
963 struct region_devres {
964 struct resource *parent;
965 resource_size_t start;
966 resource_size_t n;
967 };
968
969 static void devm_region_release(struct device *dev, void *res)
970 {
971 struct region_devres *this = res;
972
973 __release_region(this->parent, this->start, this->n);
974 }
975
976 static int devm_region_match(struct device *dev, void *res, void *match_data)
977 {
978 struct region_devres *this = res, *match = match_data;
979
980 return this->parent == match->parent &&
981 this->start == match->start && this->n == match->n;
982 }
983
984 struct resource * __devm_request_region(struct device *dev,
985 struct resource *parent, resource_size_t start,
986 resource_size_t n, const char *name)
987 {
988 struct region_devres *dr = NULL;
989 struct resource *res;
990
991 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
992 GFP_KERNEL);
993 if (!dr)
994 return NULL;
995
996 dr->parent = parent;
997 dr->start = start;
998 dr->n = n;
999
1000 res = __request_region(parent, start, n, name, 0);
1001 if (res)
1002 devres_add(dev, dr);
1003 else
1004 devres_free(dr);
1005
1006 return res;
1007 }
1008 EXPORT_SYMBOL(__devm_request_region);
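/*
 * Usage sketch (illustrative, not part of the original file): the managed
 * variant is normally used via devm_request_region() /
 * devm_request_mem_region() from <linux/ioport.h>; the region is released
 * automatically when the device is unbound.  pdev and res are the usual
 * (assumed) platform-driver locals:
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res), "foo-nic"))
 *		return -EBUSY;
 */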
1009
1010 void __devm_release_region(struct device *dev, struct resource *parent,
1011 resource_size_t start, resource_size_t n)
1012 {
1013 struct region_devres match_data = { parent, start, n };
1014
1015 __release_region(parent, start, n);
1016 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1017 &match_data));
1018 }
1019 EXPORT_SYMBOL(__devm_release_region);
1020
1021 /*
1022 * Called from init/main.c to reserve IO ports.
1023 */
1024 #define MAXRESERVE 4
1025 static int __init reserve_setup(char *str)
1026 {
1027 static int reserved;
1028 static struct resource reserve[MAXRESERVE];
1029
1030 for (;;) {
1031 unsigned int io_start, io_num;
1032 int x = reserved;
1033
1034 if (get_option (&str, &io_start) != 2)
1035 break;
1036 if (get_option (&str, &io_num) == 0)
1037 break;
1038 if (x < MAXRESERVE) {
1039 struct resource *res = reserve + x;
1040 res->name = "reserved";
1041 res->start = io_start;
1042 res->end = io_start + io_num - 1;
1043 res->flags = IORESOURCE_BUSY;
1044 res->child = NULL;
1045 if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
1046 reserved = x+1;
1047 }
1048 }
1049 return 1;
1050 }
1051
1052 __setup("reserve=", reserve_setup);
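/*
 * Illustrative boot-command-line usage (values invented):
 *
 *	reserve=0x320,32
 *
 * marks I/O ports 0x320-0x33f busy before any driver can claim them.  As the
 * code above shows, a start address of 0x10000 or above is treated as a
 * memory address and is reserved under iomem_resource instead.
 */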
1053
1054 /*
1055  * Check whether the requested addr and size span more than any single slot
1056  * in the iomem resource tree.
1057 */
1058 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1059 {
1060 struct resource *p = &iomem_resource;
1061 int err = 0;
1062 loff_t l;
1063
1064 read_lock(&resource_lock);
1065 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1066 /*
1067 * We can probably skip the resources without
1068 * IORESOURCE_IO attribute?
1069 */
1070 if (p->start >= addr + size)
1071 continue;
1072 if (p->end < addr)
1073 continue;
1074 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1075 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
1076 continue;
1077 /*
1078 * if a resource is "BUSY", it's not a hardware resource
1079 * but a driver mapping of such a resource; we don't want
1080 * to warn for those; some drivers legitimately map only
1081 * partial hardware resources. (example: vesafb)
1082 */
1083 if (p->flags & IORESOURCE_BUSY)
1084 continue;
1085
1086 printk(KERN_WARNING "resource map sanity check conflict: "
1087 "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
1088 (unsigned long long)addr,
1089 (unsigned long long)(addr + size - 1),
1090 (unsigned long long)p->start,
1091 (unsigned long long)p->end,
1092 p->name);
1093 err = -1;
1094 break;
1095 }
1096 read_unlock(&resource_lock);
1097
1098 return err;
1099 }
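/*
 * Illustrative example (not part of the original file): if
 * 0xfed00000-0xfed00fff and 0xfed01000-0xfed01fff are registered as two
 * separate, non-busy resources, then iomem_map_sanity_check(0xfed00800,
 * 0x1000) warns and returns -1, because the requested window straddles both
 * of them instead of fitting inside a single one.  The addresses are
 * invented.
 */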
1100
1101 #ifdef CONFIG_STRICT_DEVMEM
1102 static int strict_iomem_checks = 1;
1103 #else
1104 static int strict_iomem_checks;
1105 #endif
1106
1107 /*
1108  * Check if an address is exclusively reserved in the iomem resource tree.
1109  * Returns 1 if reserved, 0 if not reserved.
1110 */
1111 int iomem_is_exclusive(u64 addr)
1112 {
1113 struct resource *p = &iomem_resource;
1114 int err = 0;
1115 loff_t l;
1116 int size = PAGE_SIZE;
1117
1118 if (!strict_iomem_checks)
1119 return 0;
1120
1121 addr = addr & PAGE_MASK;
1122
1123 read_lock(&resource_lock);
1124 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1125 /*
1126 * We can probably skip the resources without
1127 * IORESOURCE_IO attribute?
1128 */
1129 if (p->start >= addr + size)
1130 break;
1131 if (p->end < addr)
1132 continue;
1133 if (p->flags & IORESOURCE_BUSY &&
1134 p->flags & IORESOURCE_EXCLUSIVE) {
1135 err = 1;
1136 break;
1137 }
1138 }
1139 read_unlock(&resource_lock);
1140
1141 return err;
1142 }
1143
1144 static int __init strict_iomem(char *str)
1145 {
1146 if (strstr(str, "relaxed"))
1147 strict_iomem_checks = 0;
1148 if (strstr(str, "strict"))
1149 strict_iomem_checks = 1;
1150 return 1;
1151 }
1152
1153 __setup("iomem=", strict_iomem);
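/*
 * Illustrative boot-command-line usage: "iomem=relaxed" turns the exclusive
 * checks off, while "iomem=strict" turns them on (the default when
 * CONFIG_STRICT_DEVMEM is set).  With strict checks enabled,
 * iomem_is_exclusive() reports busy IORESOURCE_EXCLUSIVE ranges, which
 * callers such as /dev/mem typically use to refuse access to them.
 */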