/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#ifdef CONFIG_PPC32
#include "mmu_decl.h"           /* for __max_low_memory */
#endif

#undef DEBUG

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#define LMB_ALLOC_ANYWHERE      0

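/* The single, global book-keeping structure: one sorted table of
 * available memory regions and one sorted table of reserved regions. */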
struct lmb lmb;

void lmb_dump_all(void)
{
#ifdef DEBUG
        unsigned long i;

        DBG("lmb_dump_all:\n");
        DBG("    memory.cnt    = 0x%lx\n", lmb.memory.cnt);
        DBG("    memory.size   = 0x%lx\n", lmb.memory.size);
        for (i = 0; i < lmb.memory.cnt; i++) {
                /* Note: %lx, not %x -- i is an unsigned long. */
                DBG("    memory.region[0x%lx].base = 0x%lx\n",
                    i, lmb.memory.region[i].base);
                DBG("                       .size = 0x%lx\n",
                    lmb.memory.region[i].size);
        }

        DBG("\n    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
        DBG("    reserved.size = 0x%lx\n", lmb.reserved.size);
        for (i = 0; i < lmb.reserved.cnt; i++) {
                DBG("    reserved.region[0x%lx].base = 0x%lx\n",
                    i, lmb.reserved.region[i].base);
                DBG("                         .size = 0x%lx\n",
                    lmb.reserved.region[i].size);
        }
#endif /* DEBUG */
}

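/* Return non-zero if the ranges [base1, base1+size1) and
 * [base2, base2+size2) intersect. */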
static unsigned long __init lmb_addrs_overlap(unsigned long base1,
                unsigned long size1, unsigned long base2, unsigned long size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

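/* Return 1 if region 2 immediately follows region 1, -1 if region 1
 * immediately follows region 2, and 0 if they are not adjacent. */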
static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
                unsigned long base2, unsigned long size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
{
        unsigned long base1 = rgn->region[r1].base;
        unsigned long size1 = rgn->region[r1].size;
        unsigned long base2 = rgn->region[r2].base;
        unsigned long size2 = rgn->region[r2].size;

        return lmb_addrs_adjacent(base1, size1, base2, size2);
}

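/* Delete entry r from the region table by sliding the following
 * entries down one slot. */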
static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
        unsigned long i;

        for (i = r; i < rgn->cnt - 1; i++) {
                rgn->region[i].base = rgn->region[i + 1].base;
                rgn->region[i].size = rgn->region[i + 1].size;
        }
        rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
{
        rgn->region[r1].size += rgn->region[r2].size;
        lmb_remove_region(rgn, r2);
}

/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
        /* Create a dummy zero size LMB which will get coalesced away later.
         * This simplifies the lmb_add() code below...
         */
        lmb.memory.region[0].base = 0;
        lmb.memory.region[0].size = 0;
        lmb.memory.cnt = 1;

        /* Ditto. */
        lmb.reserved.region[0].base = 0;
        lmb.reserved.region[0].size = 0;
        lmb.reserved.cnt = 1;
}

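/*
 * Illustrative only -- a sketch of the typical early-boot call
 * sequence for this allocator (the caller context is hypothetical;
 * the ordering is what the routines below expect):
 *
 *      lmb_init();
 *      lmb_add(base, size);            -- once per memory range
 *      lmb_analyze();                  -- totals up lmb.memory.size
 *      lmb_reserve(base, size);        -- once per firmware-reserved range
 *      addr = lmb_alloc(size, align);  -- early allocations
 */
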
/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
        int i;

        lmb.memory.size = 0;

        for (i = 0; i < lmb.memory.cnt; i++)
                lmb.memory.size += lmb.memory.region[i].size;
}

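/* Add [base, base+size) to a region table: first try to merge it into
 * an adjacent existing region (then merge that region with its new
 * neighbour if the two now touch), otherwise insert it in sorted order.
 * Returns the number of coalesces performed, 0 for a plain insert, or
 * -1 if the table is full. */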
/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
                unsigned long size)
{
        unsigned long coalesced = 0;
        long adjacent, i;

        /* First try and coalesce this LMB with another. */
        for (i = 0; i < rgn->cnt; i++) {
                unsigned long rgnbase = rgn->region[i].base;
                unsigned long rgnsize = rgn->region[i].size;

                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
                if (adjacent > 0) {
                        rgn->region[i].base -= size;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (adjacent < 0) {
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                }
        }

        if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
                lmb_coalesce_regions(rgn, i, i + 1);
                coalesced++;
        }

        if (coalesced)
                return coalesced;
        if (rgn->cnt >= MAX_LMB_REGIONS)
                return -1;

        /* Couldn't coalesce the LMB, so add it to the sorted table.
         * i must be signed here: with an unsigned index the i >= 0
         * test below would always be true. */
        for (i = rgn->cnt - 1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i + 1].base = rgn->region[i].base;
                        rgn->region[i + 1].size = rgn->region[i].size;
                } else {
                        rgn->region[i + 1].base = base;
                        rgn->region[i + 1].size = size;
                        break;
                }
        }
        rgn->cnt++;

        return 0;
}

/* This routine may be called with relocation disabled. */
long __init lmb_add(unsigned long base, unsigned long size)
{
        struct lmb_region *_rgn = &(lmb.memory);

        /* On pSeries LPAR systems, the first LMB is our RMO region. */
        if (base == 0)
                lmb.rmo_size = size;

        return lmb_add_region(_rgn, base, size);
}

long __init lmb_reserve(unsigned long base, unsigned long size)
{
        struct lmb_region *_rgn = &(lmb.reserved);

        BUG_ON(0 == size);

        return lmb_add_region(_rgn, base, size);
}

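/* Return the index of the first region in rgn that overlaps
 * [base, base+size), or -1 if there is no overlap. */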
long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
                unsigned long size)
{
        unsigned long i;

        for (i = 0; i < rgn->cnt; i++) {
                unsigned long rgnbase = rgn->region[i].base;
                unsigned long rgnsize = rgn->region[i].size;
                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < rgn->cnt) ? i : -1;
}

unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
{
        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
                unsigned long max_addr)
{
        unsigned long alloc;

        alloc = __lmb_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
                      size, max_addr);

        return alloc;
}

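/* Search the memory regions from the top down for a free block of at
 * least size bytes, aligned to align and wholly below max_addr,
 * stepping below any reserved region the candidate collides with.  On
 * success the block is added to the reserved table and its base is
 * returned; on failure 0 is returned. */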
unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
                unsigned long max_addr)
{
        long i, j;
        unsigned long base = 0;

        BUG_ON(0 == size);

#ifdef CONFIG_PPC32
        /* On 32-bit, make sure we allocate lowmem */
        if (max_addr == LMB_ALLOC_ANYWHERE)
                max_addr = __max_low_memory;
#endif
        for (i = lmb.memory.cnt - 1; i >= 0; i--) {
                unsigned long lmbbase = lmb.memory.region[i].base;
                unsigned long lmbsize = lmb.memory.region[i].size;

                if (max_addr == LMB_ALLOC_ANYWHERE)
                        base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
                else if (lmbbase < max_addr) {
                        base = min(lmbbase + lmbsize, max_addr);
                        base = _ALIGN_DOWN(base - size, align);
                } else
                        continue;

                /* Keep moving the candidate down past any reserved
                 * region it overlaps. */
                while ((lmbbase <= base) &&
                       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
                        base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
                                           align);

                if ((base != 0) && (lmbbase <= base))
                        break;
        }

        if (i < 0)
                return 0;

        lmb_add_region(&lmb.reserved, base, size);

        return base;
}

/* You must call lmb_analyze() before this. */
unsigned long __init lmb_phys_mem_size(void)
{
        return lmb.memory.size;
}

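/* Return the first address past the highest memory region. */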
unsigned long __init lmb_end_of_DRAM(void)
{
        int idx = lmb.memory.cnt - 1;

        return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* Trim the available memory down to at most memory_limit bytes, then
 * clamp or drop any reserved regions that now extend past the limit.
 * You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
        unsigned long i, limit;
        struct lmb_property *p;

        if (!memory_limit)
                return;

        /* Truncate the lmb regions to satisfy the memory limit. */
        limit = memory_limit;
        for (i = 0; i < lmb.memory.cnt; i++) {
                if (limit > lmb.memory.region[i].size) {
                        limit -= lmb.memory.region[i].size;
                        continue;
                }

                lmb.memory.region[i].size = limit;
                lmb.memory.cnt = i + 1;
                break;
        }

        lmb.rmo_size = lmb.memory.region[0].size;

        /* And truncate any reserved regions above the limit as well. */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                p = &lmb.reserved.region[i];

                if (p->base > memory_limit)
                        p->size = 0;
                else if ((p->base + p->size) > memory_limit)
                        p->size = memory_limit - p->base;

                if (p->size == 0) {
                        lmb_remove_region(&lmb.reserved, i);
                        i--;
                }
        }
}