/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

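/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver claims a fixed MMIO window and releases it on teardown. The name
 * and addresses below are made up; request_resource() returns -EBUSY when
 * the range conflicts with an entry already in the tree.
 *
 *	static struct resource foo_mmio = {
 *		.name	= "foo-mmio",
 *		.start	= 0xfeb00000,
 *		.end	= 0xfeb00fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_mmio))
 *		return -EBUSY;
 *	...
 *	release_resource(&foo_mmio);
 */
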
#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (name && strcmp(p->name, name))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * This function calls the callback against all memory ranges of "System RAM"
 * that are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * For now, this function only handles "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, len;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_system_ram(&res, "System RAM") >= 0)) {
		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

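/*
 * Usage sketch (illustrative only): counting the "System RAM" pages inside
 * a pfn window. The callback is invoked once per matching range, and a
 * nonzero return value stops the walk early.
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_pages);
 */
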
#endif

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size, resource_size_t min,
			 resource_size_t max, resource_size_t align,
			 resource_size_t (*alignf)(void *,
						   const struct resource *,
						   resource_size_t,
						   resource_size_t),
			 void *alignf_data)
{
	struct resource *this = root->child;
	struct resource tmp = *new;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		tmp.start = this->end + 1;
		this = this->sibling;
	}
	for(;;) {
		if (this)
			tmp.end = this->start - 1;
		else
			tmp.end = root->end;
		if (tmp.start < min)
			tmp.start = min;
		if (tmp.end > max)
			tmp.end = max;
		tmp.start = ALIGN(tmp.start, align);
		if (alignf)
			tmp.start = alignf(alignf_data, &tmp, size, align);
		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
			new->start = tmp.start;
			new->end = tmp.start + size - 1;
			return 0;
		}
		if (!this)
			break;
		tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: lowest acceptable address for the region
 * @max: highest acceptable address for the region
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;

	write_lock(&resource_lock);
	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);

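/*
 * Usage sketch (illustrative only): letting the allocator pick a free,
 * page-aligned 4KiB slot anywhere below 4GiB. The resource name is made
 * up; passing a NULL @alignf skips the extra alignment callback.
 *
 *	static struct resource foo_win = { .name = "foo-window" };
 *
 *	if (allocate_resource(&iomem_resource, &foo_win, 0x1000,
 *			      0, 0xffffffff, 0x1000, NULL, NULL))
 *		return -EBUSY;
 */
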
/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is equivalent to request_resource when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}

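/*
 * Usage sketch (illustrative only): registering a firmware-described window
 * that may already contain registered children. Conflicting entries that fit
 * entirely inside the new range are re-parented beneath it, while a partial
 * overlap makes the call fail with -EBUSY. The name and range are made up.
 *
 *	static struct resource fw_region = {
 *		.name	= "firmware region",
 *		.start	= 0xd0000000,
 *		.end	= 0xd7ffffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	err = insert_resource(&iomem_resource, &fw_region);
 */
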
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}

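/*
 * Usage sketch (illustrative only): growing an already-registered resource
 * in place by one page. The call fails with -EBUSY if the new window would
 * leave the parent, uncover a child, or run into a sibling.
 *
 *	err = adjust_resource(res, res->start,
 *			      resource_size(res) + PAGE_SIZE);
 */
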
static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	conflict = __request_resource(parent, res);
	if (!conflict)
		return;

	/* failed, split and try again */
	kfree(res);

	/* conflict covered whole area */
	if (conflict->start <= start && conflict->end >= end)
		return;

	if (conflict->start > start)
		__reserve_region_with_split(root, start, conflict->start-1, name);
	if (conflict->end < end)
		__reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

EXPORT_SYMBOL(adjust_resource);

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

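/*
 * The two conventions handled above, side by side (illustrative only):
 * a size-aligned resource, such as a PCI BAR, aligns to its own length,
 * while a start-aligned one aligns to wherever it was placed.
 *
 *	res->flags |= IORESOURCE_SIZEALIGN;	alignment = resource_size(res)
 *	res->flags |= IORESOURCE_STARTALIGN;	alignment = res->start
 */
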
/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}

		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);

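/*
 * Usage sketch (illustrative only): most callers use the wrapper macros
 * from <linux/ioport.h> rather than calling __request_region() directly.
 * request_region() is rooted at ioport_resource, request_mem_region() at
 * iomem_resource. The port range below is made up.
 *
 *	if (!request_region(0x330, 8, "foo"))
 *		return -EBUSY;
 *	...
 *	release_region(0x330, 8);
 */
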
/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it. It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
		   resource_size_t n)
{
	struct resource * res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

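/*
 * Usage sketch (illustrative only): in a probe() path, the devm_* wrappers
 * from <linux/ioport.h> tie the reservation to the device, so the region is
 * released automatically when the driver is unbound.
 *
 *	if (!devm_request_region(&pdev->dev, io_base, io_len, "foo"))
 *		return -EBUSY;
 */
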
/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);

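/*
 * Example (illustrative): booting with "reserve=0x320,32" keeps I/O ports
 * 0x320-0x33f away from drivers; a start address at or above 0x10000 is
 * taken to be a memory address and is reserved in iomem_resource instead.
 */
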
/*
 * Check if the requested addr and size span more than any single slot in
 * the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check whether an address is exclusively reserved in the iomem resource
 * tree. Returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);
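
/*
 * Example (illustrative): booting with "iomem=relaxed" turns the
 * exclusivity check above off; "iomem=strict" turns it back on.
 */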