/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_maxaligned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_maxaligned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
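
/*
 * For reference, a sketch (not part of this file) of how the table above is
 * consumed: the real helper lives in <linux/mmzone.h>, but under
 * CONFIG_SPARSEMEM_EXTREME a section lookup is roughly
 *
 *	struct mem_section *__nr_to_section(unsigned long nr)
 *	{
 *		if (!mem_section[SECTION_NR_TO_ROOT(nr)])
 *			return NULL;
 *		return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 *	}
 *
 * i.e. one extra dereference per lookup, in exchange for allocating the
 * page-sized second-level arrays only for roots that actually contain memory.
 */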
static void sparse_alloc_root(unsigned long root, int nid)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
        mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
#endif
}
static void sparse_index_init(unsigned long section, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section);

        if (mem_section[root])
                return;

        sparse_alloc_root(root, nid);

        if (mem_section[root])
                memset(mem_section[root], 0, PAGE_SIZE);
        else
                panic("memory_present: NO MEMORY\n");
}
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = SECTION_MARKED_PRESENT;
        }
}
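
/*
 * Illustrative call site (architecture setup code, not part of this file):
 * each node records its pfn range before sparse_init() runs, e.g. something
 * along the lines of
 *
 *	memory_present(nid, node_start_pfn, node_start_pfn + node_spanned_pages);
 *
 * so that every section in the range is flagged SECTION_MARKED_PRESENT and
 * sparse_init() later knows it needs a mem_map.
 */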
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}
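
/*
 * Worked example (assumed parameters, purely illustrative): with 4K pages
 * and 64MB (2^26 byte) sections, PAGES_PER_SECTION is 2^14 = 16384.  A node
 * spanning four such sections therefore reports
 * 4 * 16384 * sizeof(struct page) bytes, i.e. 2MB of mem_map if struct page
 * is 32 bytes.
 */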
/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
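
/*
 * To make the encode/decode pair concrete (derivation only, no new code):
 * let first_pfn = section_nr_to_pfn(pnum) and let mem_map be the array
 * allocated for the section, so the page for a given pfn is
 * mem_map + (pfn - first_pfn).  The encoded value is mem_map - first_pfn
 * (pointer arithmetic in units of struct page), so once the low flag bits
 * are masked off,
 *
 *	page - (struct page *)coded_mem_map
 *		= (mem_map + (pfn - first_pfn)) - (mem_map - first_pfn)
 *		= pfn
 *
 * which is the identity the comment above sparse_encode_mem_map() relies on;
 * sparse_decode_mem_map() simply adds first_pfn back to recover mem_map.
 */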
static int sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map)
{
        if (!valid_section(ms))
                return -EINVAL;

        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

        return 1;
}
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
        struct mem_section *ms = __nr_to_section(pnum);

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_node(NODE_DATA(nid),
                        sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
        ms->section_mem_map = 0;
        return NULL;
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
        unsigned long pnum;
        struct page *map;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!valid_section_nr(pnum))
                        continue;

                map = sparse_early_mem_map_alloc(pnum);
                if (!map)
                        continue;
                sparse_init_one_section(__nr_to_section(pnum), pnum, map);
        }
}
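
/*
 * Rough boot-time ordering (illustrative only; the exact call sites live in
 * architecture setup code and vary):
 *
 *	for_each_online_node(nid)
 *		memory_present(nid, node_start_pfn, node_end_pfn);
 *	...
 *	sparse_init();
 *
 * memory_present() must run first; sparse_init() only allocates and hooks up
 * mem_maps for sections that have already been marked present.
 */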
/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
{
        struct mem_section *ms = __pfn_to_section(start_pfn);

        if (ms->section_mem_map & SECTION_MARKED_PRESENT)
                return -EEXIST;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
}
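
/*
 * Illustrative caller (hotplug-style, not part of this file): because a
 * return value <= 0 means the map was not consumed, ownership stays with the
 * caller on failure.  Sketch, assuming a hypothetical free_map() helper for
 * releasing the pre-allocated mem_map:
 *
 *	ret = sparse_add_one_section(start_pfn, nr_pages, map);
 *	if (ret <= 0)
 *		free_map(map);
 */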