arch/ia64/mm/contig.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/sizes.h>

#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>
/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
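/*
 * Note: cpu_data is the bootstrap backing store for every possible
 * CPU's per-cpu area: one PERCPU_PAGE_SIZE unit per CPU, allocated
 * back to back by alloc_per_cpu_data() below and parceled out by
 * per_cpu_init().
 */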
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;

	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() done.
	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * to avoid that AP calls get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas which is important for the congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
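/*
 * Note: the bootstrap per-cpu backing store is reserved early from
 * memblock, one PERCPU_PAGE_SIZE unit per possible CPU, aligned to
 * PERCPU_PAGE_SIZE and placed above MAX_DMA_ADDRESS so DMA-capable
 * memory is left untouched.
 */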
static inline void
alloc_per_cpu_data(void)
{
	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!cpu_data)
		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
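/*
 * Note: each PERCPU_PAGE_SIZE unit is carved up as
 *
 *	[ static | reserved | dynamic ]
 *
 * static:   the .data..percpu image (__per_cpu_end - __per_cpu_start)
 * reserved: PERCPU_MODULE_RESERVE, for modules' static per-cpu variables
 * dynamic:  whatever remains, handed to the dynamic percpu allocator
 */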
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */
/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;

	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);

	find_initrd();

	alloc_per_cpu_data();
}
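/*
 * Note: callback for efi_memmap_walk() that tracks the largest gap
 * between usable ranges.  last_end is static so it persists from one
 * invocation to the next across a single walk.
 */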
static int __init
find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
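/*
 * Note: with FLATMEM the mem_map spans every pfn from 0 to max_low_pfn,
 * so struct pages backing a large hole are pure overhead; a hole of 1G
 * or more is rejected below.
 */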
static void __init
verify_gap_absence(void)
{
	unsigned long max_gap = 0;

	/* Forbid FLATMEM if hole is > than 1G */
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap >= SZ_1G)
		panic("Cannot use FLATMEM with %ldMB hole\n"
		      "Please switch over to SPARSEMEM\n",
		      (max_gap >> 20));
}
/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	verify_gap_absence();

	free_area_init(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}