// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-noncoherent.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;      /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

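/*
 * Flush the i-cache for the page mapped by @pte unless PG_arch_1 already
 * says it is coherent with the d-cache; set the bit once the flush is done.
 */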
void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

#ifdef CONFIG_SWIOTLB
/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        unsigned long pfn = PHYS_PFN(paddr);

        do {
                set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
        } while (++pfn <= PHYS_PFN(paddr + size - 1));
}
#endif

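/*
 * Compute where the register backing store (which grows upward) should
 * start, based on the maximum stack size allowed by RLIMIT_STACK and
 * capped at MAX_USER_STACK_SIZE.
 */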
inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = vm_area_alloc(current->mm);
        if (vma) {
                vma_set_anonymous(vma);
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        vm_area_free(vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = vm_area_alloc(current->mm);
                if (vma) {
                        vma_set_anonymous(vma);
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                vm_area_free(vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
                           -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *      - align up the beginning of initrd
         *      - align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once execute-only to enable
         * privilege-promotion via "epc":
         */
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
                return 1;
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

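        /*
         * For example, assuming 16KB pages (PAGE_SHIFT == 14):
         * mapped_space_bits = 3*(14-3) + 14 = 47, and if impl_va_bits is 51,
         * vmlpt_bits = 51 - 14 + 3 = 40, i.e. a 2^40-byte VMLPT placed at the
         * top of each region.
         */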
        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
         * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
         * the test makes sure that our mapped space doesn't overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");


        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
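/*
 * Return the offset, relative to the node's first pfn, of the next pfn
 * (at or after index i) whose vmem_map entry is actually mapped in the
 * kernel page tables.
 */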
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);
        stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }
                /* Found next valid vmem_map page */
                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}

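/*
 * Allocate (from memblock) and populate any missing kernel page-table levels
 * needed to map the portion of the virtual mem_map that covers the physical
 * range [start, end).
 */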
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd)) {
                        pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
                        if (!pud)
                                goto err_alloc;
                        pgd_populate(&init_mm, pgd, pud);
                }
                pud = pud_offset(pgd, address);

                if (pud_none(*pud)) {
                        pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
                        if (!pmd)
                                goto err_alloc;
                        pud_populate(&init_mm, pud, pmd);
                }
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd)) {
                        pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
                        if (!pte)
                                goto err_alloc;
                        pmd_populate_kernel(&init_mm, pmd, pte);
                }
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte)) {
                        void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
                                                         node);
                        if (!page)
                                goto err_alloc;
                        set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
                }
        }
        return 0;

err_alloc:
        panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
              __func__, PAGE_SIZE, PAGE_SIZE, node);
        return -ENOMEM;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

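/*
 * efi_memmap_walk() callback: initialize the struct pages of the virtual
 * mem_map that correspond to the physical range [start, end), clamped to
 * the window described by the callback data.
 */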
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY, NULL);
        return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map) {
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
                                 NULL);
        } else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

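/*
 * With a virtual mem_map, a pfn is only valid if the struct page behind it
 * is actually mapped: probe the first byte of the entry (and the last byte,
 * when the entry straddles a page boundary) with __get_user().
 */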
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return (__get_user(byte, (char __user *) pg) == 0)
               && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                   || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

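/*
 * Register a usable physical memory range with memblock for the given node,
 * trimming off any overlap with the crash kernel reservation when
 * CONFIG_KEXEC is enabled.
 */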
int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}

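/*
 * Widen the global min_low_pfn/max_low_pfn bounds to cover the page frames
 * backing the physical range [start, end).
 */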
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
        int i;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        set_max_mapnr(max_low_pfn);
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        memblock_free_all();
        mem_init_print_info(NULL);

        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

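/*
 * Memory hotplug: translate the hot-added (or removed) byte range into page
 * frames and let the generic __add_pages()/__remove_pages() helpers do the
 * actual work.
 */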
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_restrictions *restrictions)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__, ret);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;

        zone = page_zone(pfn_to_page(start_pfn));
        __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif