/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

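/*
 * Allocate pages for the page table structures below. Before the slab
 * allocator is available this falls back to the bootmem allocator.
 */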
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

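/*
 * Allocate and initialize a pud (region third) table: four pages, cleared
 * to empty region entries.
 */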
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

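/*
 * Allocate and initialize a pmd (segment) table: four pages, cleared to
 * empty segment entries.
 */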
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

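/*
 * Allocate and initialize a page table, either from the kernel page table
 * allocator or, during early boot, from bootmem. All entries start out
 * invalid.
 */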
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		/* Use a 2GB large pud entry if EDAT2 is available and the
		 * address and remaining size allow it. */
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		/* Use a 1MB large pmd entry if EDAT1 is available and the
		 * address and remaining size allow it. */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would also need page tables, since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

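/*
 * vmemmap_free is a no-op here: the vmemmap backing for removed sections
 * is not reclaimed.
 */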
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

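/*
 * Remove a segment from the list and tear down its 1:1 mapping. Both
 * callers hold vmem_mutex.
 */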
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

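/*
 * Remove a previously added memory segment and its 1:1 mapping. The range
 * must exactly match a segment created by vmem_add_mapping().
 */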
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

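/*
 * Create a memory segment for [start, start + size) and add it to the 1:1
 * mapping. On failure the segment is removed again and an error code is
 * returned.
 */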
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

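/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * hypothetical memory hotplug path would add the identity mapping for a
 * new range before making it available, and tear it down again on error:
 *
 *	int rc = vmem_add_mapping(addr, size);
 *	if (rc)
 *		return rc;
 *	rc = make_memory_available(addr, size);	// hypothetical helper
 *	if (rc)
 *		vmem_remove_mapping(addr, size);
 *	return rc;
 */
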
/*
 * Map whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size - 1;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);