/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

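/* Initial top-level page table for the kernel address space (init_mm). */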
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

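/* Page of zeroes backing the shared read-only zero page (ZERO_PAGE). */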
char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
EXPORT_SYMBOL(empty_zero_page);

/*
 * paging_init() sets up the kernel page tables and enables
 * dynamic address translation (DAT) for kernel mode.
 */
void __init paging_init(void)
{
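	/*
	 * PSW system-mask value with the DAT bit set; the SSM issued
	 * below switches the CPU to virtual addressing.
	 */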
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

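/*
 * mem_init() releases all bootmem pages to the buddy allocator and
 * reports how memory is used by the freshly booted kernel.
 */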
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
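/*
 * With CONFIG_DEBUG_PAGEALLOC the page allocator calls this to unmap
 * pages as they are freed (so stray accesses fault immediately) and to
 * re-establish a writable mapping when they are allocated again.
 */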
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
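		/*
		 * Disable: invalidate the pte so any access to the page
		 * faults.  Enable: re-create a writable kernel mapping.
		 */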
		if (!enable) {
			ptep_invalidate(&init_mm, address, pte);
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
#endif

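/*
 * Hand a range of init memory back to the page allocator: poison each
 * page, reset its reference count and free it.
 */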
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (begin >= end)
		return;
	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
		       PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)&__init_begin,
			(unsigned long)&__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
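/*
 * Memory hotplug: create the kernel mapping for the new range with
 * vmem_add_mapping() and register its pages with the movable zone;
 * tear the mapping down again if __add_pages() fails.
 */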
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */