mm/sparse.c
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
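
/*
 * With SPARSEMEM_EXTREME the table above holds only root pointers, and
 * each root's array of SECTIONS_PER_ROOT mem_section entries is
 * allocated on demand (see sparse_index_alloc() below); without it the
 * whole two-dimensional table is sized statically at build time.
 */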

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available())
                section = kmalloc_node(array_size, GFP_KERNEL, nid);
        else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        /*
         * This lock keeps two different sections from
         * reallocating for the same index
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
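
/*
 * Note that if another CPU installed this root between the unlocked
 * check and the locked one, the array allocated by sparse_index_alloc()
 * above is never freed; the -EEXIST path simply leaks it.
 */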
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
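
/*
 * __section_nr() is the inverse of __nr_to_section(): it recovers a
 * section number from a mem_section pointer by scanning every root for
 * the array that contains the pointer, then adding the offset within
 * that root.  This is a linear scan over NR_SECTION_ROOTS rather than
 * a direct computation.
 */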

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
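
/*
 * For example, node 3 is encoded early as 3 << SECTION_NID_SHIFT; the
 * bits below SECTION_NID_SHIFT stay free for flags such as
 * SECTION_MARKED_PRESENT, which memory_present() ORs in below.
 */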

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}
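
/*
 * Architecture boot code typically calls memory_present() once per
 * node (or once per discovered memory range) with PFN bounds, e.g.
 * memory_present(nid, start_pfn, end_pfn), before sparse_init() runs.
 */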

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps. While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
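
/*
 * The encode/decode pair is an identity: if map is the mem_map for
 * section pnum, then decode(encode(map, pnum), pnum) == map, and for
 * any pfn in the section, (struct page *)coded_mem_map + pfn lands on
 * the right entry because coded_mem_map already has the section's
 * start pfn subtracted.
 */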

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}
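
/*
 * Note the asymmetric return convention: 1 on success, -EINVAL on
 * failure.  sparse_add_one_section() below relies on this when it
 * frees the memmap for any ret <= 0.
 */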

static unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}
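
/*
 * The usemap holds SECTION_BLOCKFLAGS_BITS pageblock flag bits per
 * section.  The arithmetic converts bits to bytes (rounding up to a
 * whole byte) and then pads to a whole number of unsigned longs, e.g.
 * 500 bits -> 63 bytes -> 64 bytes on a 64-bit machine.
 */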

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
{
        unsigned long *usemap;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
        if (usemap)
                return usemap;

        /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
        nid = 0;

        printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
        return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_node(NODE_DATA(nid),
                        sizeof(struct page) * PAGES_PER_SECTION);
        return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
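
/*
 * With CONFIG_SPARSEMEM_VMEMMAP, sparse_mem_map_populate() is provided
 * elsewhere (mm/sparse-vmemmap.c in this era of the tree) and maps the
 * struct pages into a virtually contiguous vmemmap region instead of
 * using bootmem directly.
 */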

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                        "some memory will not be available.\n", __FUNCTION__);
        ms->section_mem_map = 0;
        return NULL;
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                map = sparse_early_mem_map_alloc(pnum);
                if (!map)
                        continue;

                usemap = sparse_early_usemap_alloc(pnum);
                if (!usemap)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                        usemap);
        }
}
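
/*
 * Boot-time flow: memory_present() marks each section present and
 * stashes its node id, then sparse_init() walks every marked section,
 * allocates its mem_map and usemap, and wires both into the
 * mem_section entry via sparse_init_one_section().
 */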

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        return; /* XXX: Not implemented yet */
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}
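
/*
 * Allocation strategy: try the page allocator first for a physically
 * contiguous memmap (__GFP_NOWARN keeps an expected higher-order
 * failure quiet), and fall back to vmalloc() when contiguous pages of
 * that order are not available.  Either way the memmap is zeroed
 * before use.
 */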

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        return __kmalloc_section_memmap(nr_pages);
}

static int vaddr_in_vmalloc_area(void *addr)
{
        if (addr >= (void *)VMALLOC_START &&
            addr < (void *)VMALLOC_END)
                return 1;
        return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (vaddr_in_vmalloc_area(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the passed-in map was not consumed and must be
 * freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking needed here: sparse_index_init() does its own
         * locking, and it may sleep (it can kmalloc), so it must run
         * before we take the resize lock below.
         */
        sparse_index_init(section_nr, pgdat->node_id);
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
        usemap = __kmalloc_section_usemap();

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        if (!usemap) {
                ret = -ENOMEM;
                goto out;
        }
        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0)
                __kfree_section_memmap(memmap, nr_pages);
        return ret;
}
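
/*
 * Note: on the -EEXIST path above, a usemap that was successfully
 * allocated by __kmalloc_section_usemap() is not freed; only the
 * memmap is released via the ret <= 0 check.
 */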
#endif /* CONFIG_MEMORY_HOTPLUG */