/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
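/*
 * With SMTC, all thread contexts on the core share one TLB, so masking
 * local interrupts is not enough to make the TLB updates below atomic:
 * dvpe()/evpe() additionally stop and restart the other virtual
 * processors around the critical section.
 */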
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

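/*
 * Map a user page at a fixmap virtual address whose cache colour matches
 * the user-space address 'addr', so the kernel can access it without
 * creating a dcache alias.  The mapping lives in a temporary TLB entry
 * (wired, or preloaded on SMTC) and must be torn down again with
 * kunmap_coherent(); preemption stays disabled until then.
 */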
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *) vaddr;
}

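/*
 * UNIQUE_ENTRYHI(idx) yields an EntryHi value in unmapped CKSEG0 space,
 * distinct for every TLB index, so the dummy entry written below never
 * matches a real translation and no two dummy entries conflict.
 */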
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

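/*
 * Undo kmap_coherent(): retire the wired TLB entry by overwriting it with
 * a non-matching dummy entry, then re-enable preemption.  On SMTC the
 * entry was not wired, so only the preempt count is unwound.
 */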
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

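/*
 * Copy a user highpage.  When dcache aliasing is possible and the source
 * page is mapped into user space without being flagged dcache-dirty, read
 * it through a coherent (same-colour) mapping so data still sitting in
 * user-coloured cache lines is picked up; otherwise a plain kmap_atomic()
 * copy is sufficient.  The destination is written back afterwards unless
 * the icache fills straight from the dcache and no alias is possible.
 */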
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

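/*
 * copy_to_user_page() and copy_from_user_page() follow the same rule as
 * above: go through a coherent mapping while the page is user-mapped and
 * clean in the dcache; otherwise use the kernel mapping and, when aliases
 * are possible, flag the page dcache-dirty so the kernel alias is written
 * back before user space relies on the page.
 */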
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

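/*
 * Pre-allocate the page-table pages covering the virtual range
 * [start, end), i.e. the fixmap/kmap window, so that fixmap and kmap code
 * can later install PTEs there without having to allocate memory.
 */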
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

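/*
 * Give the pages between two *physical* addresses back to the page
 * allocator, poisoning their contents first; callers pass __pa_symbol()
 * or virt_to_phys() values for 'begin' and 'end'.
 */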
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants. So we use the variants from asm-offset.h until that gcc
 * will officially be retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);