/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
10 | #include <linux/mm.h> |
11 | #include <linux/swap.h> | |
1da177e4 | 12 | #include <linux/init.h> |
1da177e4 | 13 | #include <linux/bootmem.h> |
2cb7ce3b | 14 | #include <linux/proc_fs.h> |
27641dee | 15 | #include <linux/pagemap.h> |
01066625 PM |
16 | #include <linux/percpu.h> |
17 | #include <linux/io.h> | |
1da177e4 | 18 | #include <asm/mmu_context.h> |
1da177e4 LT |
19 | #include <asm/tlb.h> |
20 | #include <asm/cacheflush.h> | |
21 | #include <asm/cache.h> | |
22 | ||
/* Per-CPU batching state for the generic mmu_gather TLB-teardown code. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Kernel page directory; cleared and installed as the TTB in paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Indirect entry points for page copy/clear.  mem_init() installs the
 * baseline (slow / nommu) implementations; later boot code may override
 * them with a better method if one is available.
 */
void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);
28 | ||
/*
 * Dump a memory-usage summary to the console: free-area info, then a
 * per-page classification (reserved / swap-cached / slab / free / shared)
 * accumulated over every online node, plus swap and page-table-cache totals.
 */
void show_mem(void)
{
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		unsigned long flags, i;

		/* Walk this node's spanned pages under its resize lock. */
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				/* References beyond the first count as shared. */
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
}
69 | ||
11cbb70e | 70 | #ifdef CONFIG_MMU |
1da177e4 LT |
71 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) |
72 | { | |
73 | pgd_t *pgd; | |
26ff6c11 | 74 | pud_t *pud; |
1da177e4 LT |
75 | pmd_t *pmd; |
76 | pte_t *pte; | |
77 | ||
99a596f9 | 78 | pgd = pgd_offset_k(addr); |
1da177e4 LT |
79 | if (pgd_none(*pgd)) { |
80 | pgd_ERROR(*pgd); | |
81 | return; | |
82 | } | |
83 | ||
99a596f9 SM |
84 | pud = pud_alloc(NULL, pgd, addr); |
85 | if (unlikely(!pud)) { | |
86 | pud_ERROR(*pud); | |
87 | return; | |
26ff6c11 PM |
88 | } |
89 | ||
99a596f9 SM |
90 | pmd = pmd_alloc(NULL, pud, addr); |
91 | if (unlikely(!pmd)) { | |
92 | pmd_ERROR(*pmd); | |
93 | return; | |
1da177e4 LT |
94 | } |
95 | ||
96 | pte = pte_offset_kernel(pmd, addr); | |
97 | if (!pte_none(*pte)) { | |
98 | pte_ERROR(*pte); | |
99 | return; | |
100 | } | |
101 | ||
102 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | |
103 | ||
ea9af694 | 104 | flush_tlb_one(get_asid(), addr); |
1da177e4 LT |
105 | } |
106 | ||
107 | /* | |
108 | * As a performance optimization, other platforms preserve the fixmap mapping | |
109 | * across a context switch, we don't presently do this, but this could be done | |
110 | * in a similar fashion as to the wired TLB interface that sh64 uses (by way | |
e868d612 | 111 | * of the memory mapped UTLB configuration) -- this unfortunately forces us to |
1da177e4 LT |
112 | * give up a TLB entry for each mapping we want to preserve. While this may be |
113 | * viable for a small number of fixmaps, it's not particularly useful for | |
114 | * everything and needs to be carefully evaluated. (ie, we may want this for | |
115 | * the vsyscall page). | |
116 | * | |
117 | * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass | |
118 | * in at __set_fixmap() time to determine the appropriate behavior to follow. | |
119 | * | |
120 | * -- PFM. | |
121 | */ | |
122 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | |
123 | { | |
124 | unsigned long address = __fix_to_virt(idx); | |
125 | ||
126 | if (idx >= __end_of_fixed_addresses) { | |
127 | BUG(); | |
128 | return; | |
129 | } | |
130 | ||
131 | set_pte_phys(address, phys, prot); | |
132 | } | |
11cbb70e | 133 | #endif /* CONFIG_MMU */ |
1da177e4 LT |
134 | |
/*
 * References to section boundaries.  These symbols are provided by the
 * linker script; only their *addresses* are meaningful (they delimit the
 * kernel's text/data/bss/init sections) -- never read their contents.
 */
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
139 | ||
/*
 * paging_init() sets up the page tables: clears the kernel pgd, points
 * the MMU's TTB at it, then sizes the zones from each online node's
 * bootmem data and hands the result to free_area_init_nodes().
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		/* Everything lands in ZONE_NORMAL; track the highest PFN. */
		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}
175 | ||
2cb7ce3b PM |
176 | static struct kcore_list kcore_mem, kcore_vmalloc; |
177 | ||
1da177e4 LT |
178 | void __init mem_init(void) |
179 | { | |
dfbb9042 | 180 | int codesize, datasize, initsize; |
01066625 | 181 | int nid; |
1da177e4 | 182 | |
2de212eb PM |
183 | num_physpages = 0; |
184 | high_memory = NULL; | |
185 | ||
01066625 PM |
186 | for_each_online_node(nid) { |
187 | pg_data_t *pgdat = NODE_DATA(nid); | |
188 | unsigned long node_pages = 0; | |
189 | void *node_high_memory; | |
01066625 PM |
190 | |
191 | num_physpages += pgdat->node_present_pages; | |
192 | ||
193 | if (pgdat->node_spanned_pages) | |
194 | node_pages = free_all_bootmem_node(pgdat); | |
195 | ||
196 | totalram_pages += node_pages; | |
1da177e4 | 197 | |
2de212eb PM |
198 | node_high_memory = (void *)__va((pgdat->node_start_pfn + |
199 | pgdat->node_spanned_pages) << | |
200 | PAGE_SHIFT); | |
01066625 PM |
201 | if (node_high_memory > high_memory) |
202 | high_memory = node_high_memory; | |
203 | } | |
1da177e4 LT |
204 | |
205 | /* clear the zero-page */ | |
206 | memset(empty_zero_page, 0, PAGE_SIZE); | |
207 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | |
208 | ||
65463b73 | 209 | /* |
1da177e4 LT |
210 | * Setup wrappers for copy/clear_page(), these will get overridden |
211 | * later in the boot process if a better method is available. | |
212 | */ | |
e96636cc | 213 | #ifdef CONFIG_MMU |
1da177e4 LT |
214 | copy_page = copy_page_slow; |
215 | clear_page = clear_page_slow; | |
e96636cc YS |
216 | #else |
217 | copy_page = copy_page_nommu; | |
218 | clear_page = clear_page_nommu; | |
219 | #endif | |
1da177e4 | 220 | |
1da177e4 LT |
221 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
222 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | |
223 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | |
224 | ||
2cb7ce3b PM |
225 | kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); |
226 | kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, | |
227 | VMALLOC_END - VMALLOC_START); | |
228 | ||
229 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " | |
dfbb9042 | 230 | "%dk data, %dk init)\n", |
1da177e4 | 231 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
2de212eb | 232 | num_physpages << (PAGE_SHIFT-10), |
1da177e4 | 233 | codesize >> 10, |
1da177e4 LT |
234 | datasize >> 10, |
235 | initsize >> 10); | |
236 | ||
237 | p3_cache_init(); | |
19f9a34f PM |
238 | |
239 | /* Initialize the vDSO */ | |
240 | vsyscall_init(); | |
1da177e4 LT |
241 | } |
242 | ||
243 | void free_initmem(void) | |
244 | { | |
245 | unsigned long addr; | |
65463b73 | 246 | |
1da177e4 LT |
247 | addr = (unsigned long)(&__init_begin); |
248 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | |
249 | ClearPageReserved(virt_to_page(addr)); | |
7835e98b | 250 | init_page_count(virt_to_page(addr)); |
1da177e4 LT |
251 | free_page(addr); |
252 | totalram_pages++; | |
253 | } | |
2de212eb PM |
254 | printk("Freeing unused kernel memory: %dk freed\n", |
255 | (&__init_end - &__init_begin) >> 10); | |
1da177e4 LT |
256 | } |
257 | ||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages occupied by the initrd image, [start, end), to the
 * page allocator once the initrd is no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long vaddr = start;

	while (vaddr < end) {
		struct page *page = virt_to_page(vaddr);

		/* Unreserve, reset the refcount, then hand the page back. */
		ClearPageReserved(page);
		init_page_count(page);
		free_page(vaddr);
		totalram_pages++;
		vaddr += PAGE_SIZE;
	}

	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif