/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
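/*
 * Illustrative sketch of the SPARSEMEM_EXTREME layout (the concrete values
 * below are assumptions, not defined in this file): a section number is
 * split into a root index and an offset within that root,
 *
 *      root    = SECTION_NR_TO_ROOT(nr);       (i.e. nr / SECTIONS_PER_ROOT)
 *      section = &mem_section[root][nr & SECTION_ROOT_MASK];
 *
 * so with a 4 KiB page and a 16-byte struct mem_section each root covers
 * PAGE_SIZE / sizeof(struct mem_section) = 256 sections, and only roots
 * that contain present sections ever need to be allocated.
 */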
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif
int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);
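/*
 * Usage note (an illustrative sketch of the surrounding assumptions, not
 * taken from this file): when the node id does not fit into page->flags,
 * the flags carry the section number instead, so
 *
 *      nid = page_to_nid(page);
 *
 * resolves through section_to_node_table[page_to_section(page)], and
 * set_section_nid() below fills that table as sections are registered in
 * memory_present().
 */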
static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kzalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kzalloc(array_size, GFP_KERNEL);
        } else {
                section = memblock_virt_alloc_node(array_size, nid);
        }

        return section;
}
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root = NULL;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
        return (int)(ms - mem_section[0]);
}
#endif
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
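/*
 * Worked example (illustrative only; SECTION_NID_SHIFT is defined in
 * mmzone.h, not here): for a section registered on node 2,
 *
 *      section_mem_map = sparse_encode_early_nid(2) | SECTION_MARKED_PRESENT
 *
 * keeps the node id in the upper bits while the low bits remain free for
 * the PRESENT/ONLINE flags, and sparse_early_nid() shifts it back out.
 * Once the real mem_map is installed by sparse_init_one_section() this
 * encoding is overwritten, so the node must then come from page_to_nid().
 */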
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                *end_pfn = max_sparsemem_pfn;
        }
}
/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
        int section_nr = __section_nr(ms);

        if (section_nr > __highest_present_section_nr)
                __highest_present_section_nr = section_nr;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;
}
static inline int next_present_section_nr(int section_nr)
{
        do {
                section_nr++;
                if (present_section_nr(section_nr))
                        return section_nr;
        } while ((section_nr < NR_MEM_SECTIONS) &&
                 (section_nr <= __highest_present_section_nr));

        return -1;
}
#define for_each_present_section_nr(start, section_nr)          \
        for (section_nr = next_present_section_nr(start-1);     \
             ((section_nr >= 0) &&                              \
              (section_nr < NR_MEM_SECTIONS) &&                 \
              (section_nr <= __highest_present_section_nr));    \
             section_nr = next_present_section_nr(section_nr))
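/*
 * Usage sketch (illustrative): the iterator visits only the sections that
 * section_mark_present() has recorded and stops once it passes
 * __highest_present_section_nr, e.g.
 *
 *      int pnum;
 *
 *      for_each_present_section_nr(0, pnum)
 *              do_something(__nr_to_section(pnum));    (do_something is
 *                                                       hypothetical)
 *
 * alloc_usemap_and_memmap() and sparse_init() below use it this way.
 */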
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
        if (unlikely(!mem_section)) {
                unsigned long size, align;

                size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
                mem_section = memblock_virt_alloc(size, align);
        }
#endif

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map) {
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        section_mark_present(ms);
                }
        }
}
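/*
 * Call-site sketch (illustrative, not part of this file): architectures
 * register their populated pfn ranges per node during early boot, roughly
 *
 *      for_each_memblock(memory, reg)
 *              memory_present(memblock_get_region_node(reg),
 *                             memblock_region_memory_base_pfn(reg),
 *                             memblock_region_memory_end_pfn(reg));
 *
 * (or via sparse_memory_present_with_active_regions(nid)), so that every
 * RAM-backed section is marked present before sparse_init() runs.
 */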
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
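/*
 * Worked example (illustrative): let start_pfn = section_nr_to_pfn(pnum)
 * be the first pfn of section pnum and mem_map its struct page array.
 *
 *      coded = sparse_encode_mem_map(mem_map, pnum)
 *            = (unsigned long)(mem_map - start_pfn);   (pointer arithmetic
 *                                                       in struct page units)
 *
 * Decoding adds start_pfn back, so the round trip returns mem_map, and for
 * any pfn inside the section
 *
 *      (struct page *)coded + pfn == &mem_map[pfn - start_pfn]
 *
 * which is the "pfn - section_mem_map" identity that __pfn_to_page()
 * relies on.
 */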
static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}
unsigned long usemap_size(void)
{
        return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}
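/*
 * Size sketch (the concrete numbers are assumptions, not dictated by this
 * file): with 128 MiB sections, 2 MiB pageblocks and NR_PAGEBLOCK_BITS = 4,
 * a section holds 64 pageblocks and SECTION_BLOCKFLAGS_BITS = 64 * 4 = 256
 * bits, so usemap_size() is 4 unsigned longs = 32 bytes on a 64-bit build.
 * The exact value depends on SECTION_SIZE_BITS and pageblock_order.
 */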
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
        p = memblock_virt_alloc_try_nid_nopanic(size,
                                                SMP_CACHE_BYTES, goal, limit,
                                                nid);
        return p;
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr;
        static unsigned long old_pgdat_snr;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        if (!old_usemap_snr) {
                old_usemap_snr = NR_MEM_SECTIONS;
                old_pgdat_snr = NR_MEM_SECTIONS;
        }

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable section because they will just
         * gather other removable sections for dynamic partitioning.
         * Just notify un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
static void __init sparse_early_usemaps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        unsigned long **usemap_map = (unsigned long **)data;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
        if (!usemap) {
                pr_warn("%s: allocation failed\n", __func__);
                return;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = usemap;
                usemap += size;
                check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;
        unsigned long size;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = memblock_virt_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
        return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = memblock_virt_alloc_try_nid(size * map_count,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback: allocate each section's mem_map individually */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
                       __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        struct page **map_map = (struct page **)data;
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                         map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
               __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif
void __weak __meminit vmemmap_populate_print_last(void)
{
}
/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @map: usemap_map for pageblock flags or mmap_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
                                (void *, unsigned long, unsigned long,
                                unsigned long, int), void *data)
{
        unsigned long pnum;
        unsigned long map_count;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;

        for_each_present_section_nr(0, pnum) {
                struct mem_section *ms;

                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for_each_present_section_nr(pnum_begin + 1, pnum) {
                struct mem_section *ms;
                int nodeid;

                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of sections pnum_begin to pnum - 1 */
                alloc_func(data, pnum_begin, pnum,
                                        map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
                                        map_count, nodeid_begin);
}
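/*
 * Behaviour sketch (illustrative): the helper batches present sections by
 * their early node id. If, say, sections 0-2 were registered for node 0
 * and sections 3-4 for node 1, alloc_func() is invoked as
 *
 *      alloc_func(data, 0, 3, 3, 0);
 *      alloc_func(data, 3, NR_MEM_SECTIONS, 2, 1);
 *
 * i.e. once per node with the count of present sections in that run, so a
 * node's usemaps (or mem_maps) can be carved out of one big allocation.
 */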
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        int size2;
        struct page **map_map;
#endif

        /* see include/linux/mmzone.h 'struct mem_section' definition */
        BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        /*
         * The mem_map is allocated with big pages (2M on 64-bit x86) while a
         * usemap is much smaller than a page (around 24 bytes), so allocating
         * them alternately (2M, 24 bytes, 2M, ...) makes each following 2M
         * block slip by another 2M and leaves a lot of holes on big systems.
         * Try to allocate the 2M mem_map chunks contiguously instead.
         *
         * powerpc needs to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                                                        (void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("can not allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
                                                        (void *)map_map);
#endif

        for_each_present_section_nr(0, pnum) {
                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        memblock_free_early(__pa(map_map), size2);
#endif
        memblock_free_early(__pa(usemap_map), size);
}
#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /* onlining code should never touch invalid ranges */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map |= SECTION_IS_ONLINE;
        }
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /*
                 * TODO this needs some double checking. Offlining code makes
                 * sure to check pfn_valid but those checks might be just bogus
                 */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map &= ~SECTION_IS_ONLINE;
        }
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->freelist;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page_private(page);

                /*
                 * When this function is called, the removing section is in a
                 * logically offlined state, meaning all of its pages are
                 * isolated from the page allocator. If the removing section's
                 * memmap is placed on that same section, it must not be freed:
                 * if it were freed, the page allocator could hand it out even
                 * though it will be removed physically soon.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * no locking for this, because it does its own
         * plus, it does a kmalloc
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

        section_mark_present(ms);

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap);
        }
        return ret;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap);
                return;
        }

        /*
         * The usemap came from bootmem. This is packed with other usemaps
         * on the section which has pgdat at boot time. Just keep it as is now.
         */

        if (memmap)
                free_map_bootmem(memmap);
}
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
        struct pglist_data *pgdat = zone->zone_pgdat;

        pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
        pgdat_resize_unlock(pgdat, &flags);

        clear_hwpoisoned_pages(memmap + map_offset,
                               PAGES_PER_SECTION - map_offset);
        free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */