#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vmregion	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *  struct vmregion vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */

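/*
 * For illustration only (not part of this file): a hypothetical user of
 * this allocator would embed an arm_vmregion_head describing the virtual
 * address window it manages, then allocate and release regions roughly as
 * sketched below.  The names consistent_head, EXAMPLE_VM_START and
 * EXAMPLE_VM_END are invented for this sketch.
 *
 *	static struct arm_vmregion_head consistent_head = {
 *		.vm_lock  = __SPIN_LOCK_UNLOCKED(consistent_head.vm_lock),
 *		.vm_list  = LIST_HEAD_INIT(consistent_head.vm_list),
 *		.vm_start = EXAMPLE_VM_START,
 *		.vm_end   = EXAMPLE_VM_END,
 *	};
 *
 *	struct arm_vmregion *c;
 *
 *	c = arm_vmregion_alloc(&consistent_head, size, GFP_KERNEL);
 *	if (!c)
 *		return NULL;
 *	...	(establish the mapping for c->vm_start .. c->vm_end)
 *
 *	c = arm_vmregion_find_remove(&consistent_head, addr);
 *	if (c)
 *		arm_vmregion_free(&consistent_head, c);
 */
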
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %#zx)\n",
		       __func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&head->vm_lock, flags);

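	/*
	 * First-fit scan: the region list is kept sorted by address, so
	 * walk it looking for the first gap large enough for this request.
	 */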
	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}

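/*
 * Look up the active region starting at @addr.  The caller must hold
 * head->vm_lock.
 */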
static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

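/*
 * Find the region starting at @addr and mark it inactive, so that
 * subsequent lookups no longer return it.  The region stays on the list
 * until arm_vmregion_free() releases it.
 */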
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

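/*
 * Unlink the region from its head's list and free the bookkeeping
 * structure.
 */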
void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}