/* NOTE(review): git-blame/table-export residue removed here (commit f5df8e26). */
1 | /* |
2 | * Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies | |
3 | * | |
4 | */ | |
5 | ||
6 | #include <linux/mm.h> | |
7 | #include <linux/swap.h> | |
8 | #include <linux/init.h> | |
9 | #include <linux/bootmem.h> | |
10 | #include <linux/pagemap.h> | |
11 | #include <linux/percpu.h> | |
12 | #include <linux/memblock.h> | |
13 | #include <linux/initrd.h> | |
14 | #include <linux/of_fdt.h> | |
15 | ||
16 | #include <asm/setup.h> | |
17 | #include <asm/page.h> | |
18 | #include <asm/pgalloc.h> | |
19 | #include <asm/mmu.h> | |
20 | #include <asm/mmu_context.h> | |
21 | #include <asm/sections.h> | |
22 | #include <asm/tlb.h> | |
23 | #include <asm/user_gateway.h> | |
24 | #include <asm/mmzone.h> | |
25 | #include <asm/fixmap.h> | |
26 | ||
27 | unsigned long pfn_base; | |
28 | ||
29 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data; | |
30 | ||
31 | unsigned long empty_zero_page; | |
32 | ||
33 | extern char __user_gateway_start; | |
34 | extern char __user_gateway_end; | |
35 | ||
36 | void *gateway_page; | |
37 | ||
/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* The upper levels must already be present for this slot. */
	BUG_ON(!pgd_present(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	/* Allocate a pte table from bootmem if this pmd slot is empty. */
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
		pte = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	}

	/* Map the shared gateway page read-only at 'address'. */
	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}
62 | ||
/* Alloc and map a page in a known location accessible to userspace. */
static void __init user_gateway_init(void)
{
	unsigned long address = USER_GATEWAY_PAGE;
	int offset = pgd_index(address);
	pgd_t *pgd;

	/* Backing page for the gateway code, allocated from bootmem. */
	gateway_page = alloc_bootmem_pages(PAGE_SIZE);

	/* Map it at USER_GATEWAY_PAGE in the reference page table. */
	pgd = swapper_pg_dir + offset;
	insert_gateway_page(pgd, address);

#ifdef CONFIG_METAG_META12
	/*
	 * Insert the gateway page into our current page tables even
	 * though we've already inserted it into our reference page
	 * table (swapper_pg_dir). This is because with a META1 mmu we
	 * copy just the user address range and not the gateway page
	 * entry on context switch, see switch_mmu().
	 */
	pgd = (pgd_t *)mmu_get_base() + offset;
	insert_gateway_page(pgd, address);
#endif /* CONFIG_METAG_META12 */

	/* The gateway code must fit within a single page. */
	BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);

	/* Honour any sub-page offset of USER_GATEWAY_PAGE in the copy. */
	gateway_page += (address & ~PAGE_MASK);

	memcpy(gateway_page, &__user_gateway_start,
	       &__user_gateway_end - &__user_gateway_start);

	/*
	 * We don't need to flush the TLB here, there should be no mapping
	 * present at boot for this address and only valid mappings are in
	 * the TLB (apart from on Meta 1.x, but those cached invalid
	 * mappings should be impossible to hit here).
	 *
	 * We don't flush the code cache here even though we have written
	 * code through the data cache and they may not be coherent. At
	 * this point we assume there is no stale data in the code cache
	 * for this address so there is no need to flush.
	 */
}
106 | ||
/*
 * Initialise the pglist_data for node @nid, recording the PFN range
 * the node spans. In the NUMA case the pglist_data itself is also
 * allocated here, from the node's own memory when possible.
 */
static void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	/* Try to place the pgdat below the node's own end first. */
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
136 | ||
/*
 * Set up the bootmem allocator for node @nid: allocate its bootmap,
 * register the node's free ranges, and re-reserve regions memblock
 * already considers in use so bootmem doesn't hand them out.
 */
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;
#ifdef CONFIG_HIGHMEM
	/* Bootmem only manages low memory; clip the node at max_low_pfn. */
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;
#endif

	/* Pages needed for the bootmem bitmap covering this node. */
	total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
			/* ...but not highmem */
			if (PFN_DOWN(reg->base) >= highstart_pfn)
				continue;

			/* Clip regions straddling the highmem boundary. */
			if (PFN_UP(reg->base + size) > highstart_pfn)
				size = (highstart_pfn - PFN_DOWN(reg->base))
				       << PAGE_SHIFT;
#endif

			reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}
193 | ||
194 | static void __init do_init_bootmem(void) | |
195 | { | |
196 | struct memblock_region *reg; | |
197 | int i; | |
198 | ||
199 | /* Add active regions with valid PFNs. */ | |
200 | for_each_memblock(memory, reg) { | |
201 | unsigned long start_pfn, end_pfn; | |
202 | start_pfn = memblock_region_memory_base_pfn(reg); | |
203 | end_pfn = memblock_region_memory_end_pfn(reg); | |
204 | memblock_set_node(PFN_PHYS(start_pfn), | |
205 | PFN_PHYS(end_pfn - start_pfn), 0); | |
206 | } | |
207 | ||
208 | /* All of system RAM sits in node 0 for the non-NUMA case */ | |
209 | allocate_pgdat(0); | |
210 | node_set_online(0); | |
211 | ||
212 | soc_mem_setup(); | |
213 | ||
214 | for_each_online_node(i) | |
215 | bootmem_init_one_node(i); | |
216 | ||
217 | sparse_init(); | |
218 | } | |
219 | ||
220 | extern char _heap_start[]; | |
221 | ||
222 | static void __init init_and_reserve_mem(void) | |
223 | { | |
224 | unsigned long start_pfn, heap_start; | |
225 | u64 base = min_low_pfn << PAGE_SHIFT; | |
226 | u64 size = (max_low_pfn << PAGE_SHIFT) - base; | |
227 | ||
228 | heap_start = (unsigned long) &_heap_start; | |
229 | ||
230 | memblock_add(base, size); | |
231 | ||
232 | /* | |
233 | * Partially used pages are not usable - thus | |
234 | * we are rounding upwards: | |
235 | */ | |
236 | start_pfn = PFN_UP(__pa(heap_start)); | |
237 | ||
238 | /* | |
239 | * Reserve the kernel text. | |
240 | */ | |
241 | memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base); | |
242 | ||
243 | #ifdef CONFIG_HIGHMEM | |
244 | /* | |
245 | * Add & reserve highmem, so page structures are initialised. | |
246 | */ | |
247 | base = highstart_pfn << PAGE_SHIFT; | |
248 | size = (highend_pfn << PAGE_SHIFT) - base; | |
249 | if (size) { | |
250 | memblock_add(base, size); | |
251 | memblock_reserve(base, size); | |
252 | } | |
253 | #endif | |
254 | } | |
255 | ||
256 | #ifdef CONFIG_HIGHMEM | |
/*
 * Ensure we have allocated page tables in swapper_pg_dir for the
 * fixed mappings range from 'start' to 'end'.
 */
static void __init allocate_pgtables(unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	/* i/j index the pgd/pmd slots that cover 'start'. */
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = swapper_pg_dir + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		/*
		 * NOTE(review): the pmd level appears folded into the pgd
		 * (pmd pointer aliases the pgd entry) — confirm against
		 * this arch's pgtable layout.
		 */
		pmd = (pmd_t *)pgd;
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			vaddr += PMD_SIZE;

			/* Skip slots that already have a page table. */
			if (!pmd_none(*pmd))
				continue;

			pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		/* Subsequent pgd entries start from pmd slot 0. */
		j = 0;
	}
}
288 | ||
/*
 * Set up page tables for the fixmap region and the permanent kmap
 * area, and record the pte table backing the pkmap range.
 */
static void __init fixedrange_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	allocate_pgtables(vaddr, end);

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);

	/* Cache the pte table for PKMAP_BASE for the highmem kmap code. */
	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
316 | #endif /* CONFIG_HIGHMEM */ | |
317 | ||
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/metag/kernel/setup.c.
 */
void __init paging_init(unsigned long mem_end)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	init_and_reserve_mem();

	memblock_allow_resize();

	memblock_dump_all();

	nodes_clear(node_online_map);

	init_new_context(&init_task, &init_mm);

	/* Start from an empty reference page table. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	do_init_bootmem();
	mmu_init(mem_end);

#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	kmap_init();
#endif

	/* Initialize the zero page to a bootmem page, already zeroed. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);

	user_gateway_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	/* Track the highest low-memory PFN across all online nodes. */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

#ifdef CONFIG_HIGHMEM
		max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
		pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
			nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}
373 | ||
374 | void __init mem_init(void) | |
375 | { | |
376 | int nid; | |
377 | ||
378 | #ifdef CONFIG_HIGHMEM | |
379 | unsigned long tmp; | |
380 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { | |
381 | struct page *page = pfn_to_page(tmp); | |
382 | ClearPageReserved(page); | |
383 | init_page_count(page); | |
384 | __free_page(page); | |
385 | totalhigh_pages++; | |
386 | } | |
387 | totalram_pages += totalhigh_pages; | |
388 | num_physpages += totalhigh_pages; | |
389 | #endif /* CONFIG_HIGHMEM */ | |
390 | ||
391 | for_each_online_node(nid) { | |
392 | pg_data_t *pgdat = NODE_DATA(nid); | |
393 | unsigned long node_pages = 0; | |
394 | ||
395 | num_physpages += pgdat->node_present_pages; | |
396 | ||
397 | if (pgdat->node_spanned_pages) | |
398 | node_pages = free_all_bootmem_node(pgdat); | |
399 | ||
400 | totalram_pages += node_pages; | |
401 | } | |
402 | ||
403 | pr_info("Memory: %luk/%luk available\n", | |
404 | (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), | |
405 | num_physpages << (PAGE_SHIFT - 10)); | |
406 | ||
407 | show_mem(0); | |
408 | ||
409 | return; | |
410 | } | |
411 | ||
412 | static void free_init_pages(char *what, unsigned long begin, unsigned long end) | |
413 | { | |
414 | unsigned long addr; | |
415 | ||
416 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | |
417 | ClearPageReserved(virt_to_page(addr)); | |
418 | init_page_count(virt_to_page(addr)); | |
419 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | |
420 | free_page(addr); | |
421 | totalram_pages++; | |
422 | } | |
423 | pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10); | |
424 | } | |
425 | ||
426 | void free_initmem(void) | |
427 | { | |
428 | free_init_pages("unused kernel memory", | |
429 | (unsigned long)(&__init_begin), | |
430 | (unsigned long)(&__init_end)); | |
431 | } | |
432 | ||
#ifdef CONFIG_BLK_DEV_INITRD
/* Hand the initrd image's pages back once it is no longer needed. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	/* Only whole pages can be freed; round the end down. */
	free_init_pages("initrd memory", start, end & PAGE_MASK);
}
#endif
440 | ||
#ifdef CONFIG_OF_FLATTREE
/*
 * Called by the flattened-device-tree code when it finds an initrd.
 * NOTE(review): this only logs the range; it does not record
 * initrd_start/initrd_end, so a DT-supplied initrd looks ignored —
 * confirm whether that is intentional on this architecture.
 */
void __init early_init_dt_setup_initrd_arch(unsigned long start,
					    unsigned long end)
{
	pr_err("%s(%lx, %lx)\n",
	       __func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */