/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
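/*
 * Largest hole in the physical memory map, measured by paging_init();
 * compared against LARGE_GAP to decide whether to use a virtual mem_map.
 */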
static unsigned long max_gap;
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
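			/*
			 * pfns without a memmap (holes) are skipped in bulk:
			 * vmemmap_find_next_valid_pfn() returns the next
			 * mapped pfn, and the "- 1" offsets the loop's i++.
			 */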
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
				if (max_gap < LARGE_GAP)
					continue;
#endif
				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
					i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}


/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: pointer to the number of bytes needed for the bootmap
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

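	/*
	 * rsvd_region[] is sorted by ascending start address; walk the
	 * free gaps between consecutive reserved regions (clipped to
	 * [start, end)) until one is large enough for the bootmap.
	 */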
	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue;	/* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void * __cpuinit
per_cpu_init (void)
{
	int cpu;
	static int first_time=1;

	/*
	 * get_free_pages() cannot be used before cpu_init() done.  BSP
	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
	 * get_zeroed_page().
	 */
	if (first_time) {
		void *cpu0_data = __cpu0_per_cpu;

		first_time=0;

		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];

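		/*
		 * Each remaining CPU gets a PERCPU_PAGE_SIZE copy of the
		 * initial per-cpu section; __per_cpu_offset[cpu] turns a
		 * canonical per-cpu address into that CPU's copy.
		 */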
		for (cpu = 1; cpu < NR_CPUS; cpu++) {
			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
		}
	}
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

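/*
 * cpu 0 runs on __cpu0_per_cpu inside the kernel image, so only CPUs
 * 1..NR_CPUS-1 draw their areas from this bootmem block.  (Note the
 * size expression parses as PERCPU_PAGE_SIZE * NR_CPUS - 1 bytes,
 * which is more than the NR_CPUS - 1 pages actually used.)
 */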
static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

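	/*
	 * On contiguous-memory machines all of RAM is described by the
	 * single node 0.
	 */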
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}

static int count_pages(u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

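/*
 * If the largest hole in the physical memory map is small, a flat
 * contiguous mem_map[] wastes little; otherwise the memmap is built
 * in virtually mapped space so the hole costs no real memory.
 */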
#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

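		/*
		 * Align the span to MAX_ORDER_NR_PAGES so the memmap
		 * covers whole maximum-order buddy blocks.
		 */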
		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		vmalloc_end -= map_size;
		vmem_map = (struct page *) vmalloc_end;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	add_active_range(0, 0, max_low_pfn);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}