/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
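/*
 * Sketch of how these arrays are consumed (this mirrors __nr_to_section()
 * in include/linux/mmzone.h; shown here only for orientation):
 *
 *	struct mem_section *__nr_to_section(unsigned long nr)
 *	{
 *		if (!mem_section[SECTION_NR_TO_ROOT(nr)])
 *			return NULL;
 *		return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 *	}
 *
 * With SPARSEMEM_EXTREME only the root pointer array is static and each
 * root is allocated on demand, so a mostly-empty physical address space
 * pays one pointer per root instead of SECTIONS_PER_ROOT mem_section
 * structs.
 */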
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
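/*
 * Sizing note: one table entry per possible section, so with
 * MAX_NUMNODES <= 256 this costs NR_MEM_SECTIONS bytes and twice that
 * otherwise; page_to_nid() above then reduces to a single table lookup.
 */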
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
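/*
 * Note: sparse_index_alloc() runs in two worlds.  At early boot only the
 * bootmem allocator exists; at memory hotplug time slab is up, hence the
 * slab_is_available() switch.  Both branches allocate node-locally so a
 * root lands on the node whose sections it describes.
 */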
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
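/*
 * Note on the locking above: the allocation may sleep, so it cannot be
 * done under the spinlock; instead we allocate first and re-test
 * mem_section[root] under the lock, dropping our copy if another CPU won
 * the race.  (The loser's allocation is not freed in this version, a
 * one-off cost at boot or hotplug time.)
 */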
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
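/*
 * Worked example (hypothetical numbers): with SECTIONS_PER_ROOT == 64, a
 * mem_section pointer landing in root 1 at offset 3 gives
 * root_nr * SECTIONS_PER_ROOT + (ms - root) = 64 + 3 = 67, i.e. the scan
 * inverts __nr_to_section() by pure pointer arithmetic.
 */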
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
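/*
 * Example of the early encoding (values hypothetical): memory_present()
 * below stores sparse_encode_early_nid(2) | SECTION_MARKED_PRESENT for a
 * section on node 2, and sparse_early_nid() shifts the node back out,
 * until sparse_init_one_section() replaces the field with the real
 * mem_map encoding.
 */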
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
	unsigned long pfn;

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (start >= max_arch_pfn)
		return;
	if (end >= max_arch_pfn)
		end = max_arch_pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
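/*
 * Usage sketch (architecture boot code; exact call sites vary by arch,
 * and the iterator below is hypothetical):
 *
 *	for_each_ram_range(nid, start_pfn, end_pfn)
 *		memory_present(nid, start_pfn, end_pfn);
 *
 * This only marks sections present and records their node; the mem_map
 * itself is allocated later by sparse_init().
 */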
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
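/*
 * Arithmetic example (assuming 4K pages, a 64-byte struct page and 32768
 * pages per section): each present section contributes
 * 32768 * 64 bytes = 2M to the node's mem_map size estimate.
 */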
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
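/*
 * Worked example (hypothetical addresses): if section pnum begins at
 * pfn 0x8000 and its mem_map array sits at map, the encoded value is
 * map - 0x8000.  Decoding adds section_nr_to_pfn(pnum) back, so for any
 * pfn in the section coded_mem_map + pfn == &map[pfn - 0x8000], making
 * pfn_to_page() a single addition.
 */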
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}
static unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
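/*
 * Arithmetic example (using the "24 bytes" figure quoted in sparse_init()
 * below): 64 pageblocks per section times 3 pageblock flag bits is
 * 192 bits; roundup(192, 8) / 8 = 24 bytes, which is already a multiple
 * of sizeof(unsigned long) on 64-bit.
 */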
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
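/*
 * Note: alloc_remap() lets an architecture hand back mem_map space from a
 * pre-reserved remapped region (the i386 NUMA remap allocator is the main
 * user); when it returns NULL, plain node-local bootmem is used instead.
 */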
struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * The mem_map allocations use big pages (2M on 64-bit x86), while a
	 * usemap needs much less than a page (around 24 bytes).  Allocating
	 * 2M (2M-aligned) and 24 bytes alternately would push each following
	 * 2M allocation another 2M along, leaving the memory of a big system
	 * full of holes.  Try to allocate the 2M chunks contiguously instead.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate the usemap_map array first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}
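/*
 * Note on the two-pass shape of sparse_init(): all of the small usemaps
 * are allocated in the first loop, before any of the large mem_map
 * allocations in the second, so the big aligned blocks are never
 * interleaved with 24-byte allocations; that is precisely the hole
 * problem the comment above describes.
 */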
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
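/*
 * Note: __kmalloc_section_memmap() above tries physically contiguous
 * pages first (quietly, via __GFP_NOWARN, since get_order() rounds the
 * memmap up to a power of two) and falls back to vmalloc().
 * __kfree_section_memmap() below tells the two cases apart with
 * is_vmalloc_addr().
 */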
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	if (!usemap)
		return;

	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(virt_to_page(usemap))) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * TODO: Allocations came from bootmem - how do I free up ?
	 */
	printk(KERN_WARNING "Not freeing up allocations from bootmem "
			"- leaking memory\n");
}
/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * no locking for this, because it does its own
	 * plus, it does a kmalloc
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		/* the map was not consumed: free both allocations */
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
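/*
 * Usage sketch (memory hotplug path; the exact caller shape varies by
 * kernel version, cf. __add_section() in mm/memory_hotplug.c):
 *
 *	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 *	if (ret < 0)
 *		return ret;
 *
 * The hotplug code tolerates -EEXIST for sections that are already
 * present.
 */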
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */