arch/x86/mm/srat_64.c
/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>

int acpi_numa __initdata;

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];
static int found_add_area __initdata;
int hotadd_percent __initdata = 0;

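/*
 * nodes[] holds the memory ranges parsed from the SRAT for each node;
 * nodes_add[] remembers hot-pluggable ranges kept around for memory hotadd.
 * found_add_area is set once a hotadd area has been accepted.
 * hotadd_percent is presumably set from the numa=hotadd= boot option as a
 * cap on hotadd memory; it is not referenced in this file beyond its
 * definition, since hotadd_enough_memory() is stubbed out below.
 */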
/* Nodes that are too small confuse the VM badly. Usually they result
   from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)

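/* Map an ACPI proximity domain (PXM) to a Linux NUMA node id. */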
static __init int setup_node(int pxm)
{
        return acpi_map_pxm_to_node(pxm);
}

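/*
 * Check whether [start, end) overlaps any node range parsed so far.
 * Returns the conflicting node id, or -1 if there is no overlap.
 */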
static __init int conflicting_nodes(unsigned long start, unsigned long end)
{
        int i;
        for_each_node_mask(i, nodes_parsed) {
                struct bootnode *nd = &nodes[i];
                if (nd->start == nd->end)
                        continue;
                if (nd->end > start && nd->start < end)
                        return i;
                if (nd->end == end && nd->start == start)
                        return i;
        }
        return -1;
}

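/*
 * Clamp node i to the [start, end) range. Skipped once a hotadd area has
 * been found, since hot-add ranges may extend past the current end of
 * memory and must not be trimmed.
 */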
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
        struct bootnode *nd = &nodes[i];

        if (found_add_area)
                return;

        if (nd->start < start) {
                nd->start = start;
                if (nd->end < nd->start)
                        nd->start = nd->end;
        }
        if (nd->end > end) {
                nd->end = end;
                if (nd->start > nd->end)
                        nd->start = nd->end;
        }
}

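/* Give up on the SRAT: reset everything parsed from it so far. */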
static __init void bad_srat(void)
{
        int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
        found_add_area = 0;
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                apicid_to_node[i] = NUMA_NO_NODE;
        for (i = 0; i < MAX_NUMNODES; i++)
                nodes_add[i].start = nodes[i].end = 0;
        remove_all_active_ranges();
}

static __init inline int srat_disabled(void)
{
        return numa_off || acpi_numa < 0;
}

/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
 * up the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static __init int slit_valid(struct acpi_table_slit *slit)
{
        int i, j;
        int d = slit->locality_count;
        for (i = 0; i < d; i++) {
                for (j = 0; j < d; j++) {
                        u8 val = slit->entry[d*i + j];
                        if (i == j) {
                                if (val != LOCAL_DISTANCE)
                                        return 0;
                        } else if (val <= LOCAL_DISTANCE)
                                return 0;
                }
        }
        return 1;
}

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
        if (!slit_valid(slit)) {
                printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
                return;
        }
        acpi_slit = slit;
}

/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
        int pxm, node;
        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain_lo;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }
        apicid_to_node[pa->apic_id] = node;
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
               pxm, pa->apic_id, node);
}

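/*
 * Memory hotadd helpers. update_end_of_memory() and hotadd_enough_memory()
 * are only stubs here; save_add_info() tells whether hot-pluggable ranges
 * should be recorded at all (only with CONFIG_MEMORY_HOTPLUG_SPARSE).
 */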
int update_end_of_memory(unsigned long end) {return -1;}
static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) {return 1;}
#else
static inline int save_add_info(void) {return 0;}
#endif
/*
 * Update nodes_add and decide whether to include the hot-add area in the zone.
 * Both SPARSE and RESERVE need nodes_add information.
 * This code supports one contiguous hot add area per node.
 */
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
        unsigned long s_pfn = start >> PAGE_SHIFT;
        unsigned long e_pfn = end >> PAGE_SHIFT;
        int ret = 0, changed = 0;
        struct bootnode *nd = &nodes_add[node];

        /* I had some trouble with strange memory hotadd regions breaking
           the boot. Be very strict here and reject anything unexpected.
           If you want working memory hotadd write correct SRATs.

           The node size check is a basic sanity check to guard against
           mistakes */
        if ((signed long)(end - start) < NODE_MIN_SIZE) {
                printk(KERN_ERR "SRAT: Hotplug area too small\n");
                return -1;
        }

        /* This check might be a bit too strict, but I'm keeping it for now. */
        if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
                printk(KERN_ERR
                       "SRAT: Hotplug area %lu -> %lu has existing memory\n",
                       s_pfn, e_pfn);
                return -1;
        }

        if (!hotadd_enough_memory(&nodes_add[node])) {
                printk(KERN_ERR "SRAT: Hotplug area too large\n");
                return -1;
        }

        /* Looks good */

        if (nd->start == nd->end) {
                nd->start = start;
                nd->end = end;
                changed = 1;
        } else {
                if (nd->start == end) {
                        nd->start = start;
                        changed = 1;
                }
                if (nd->end == start) {
                        nd->end = end;
                        changed = 1;
                }
                if (!changed)
                        printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
        }

        ret = update_end_of_memory(nd->end);

        if (changed)
                printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
        return ret;
}

/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
        struct bootnode *nd, oldnode;
        unsigned long start, end;
        int node, pxm;
        int i;

        if (srat_disabled())
                return;
        if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
                bad_srat();
                return;
        }
        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                return;

        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
                return;
        start = ma->base_address;
        end = start + ma->length;
        pxm = ma->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                bad_srat();
                return;
        }
        i = conflicting_nodes(start, end);
        if (i == node) {
                printk(KERN_WARNING
                       "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
                       pxm, start, end, nodes[i].start, nodes[i].end);
        } else if (i >= 0) {
                printk(KERN_ERR
                       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
                       pxm, start, end, node_to_pxm(i),
                       nodes[i].start, nodes[i].end);
                bad_srat();
                return;
        }
        nd = &nodes[node];
        oldnode = *nd;
        if (!node_test_and_set(node, nodes_parsed)) {
                nd->start = start;
                nd->end = end;
        } else {
                if (start < nd->start)
                        nd->start = start;
                if (nd->end < end)
                        nd->end = end;
        }

        printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
               nd->start, nd->end);
        e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
                                     nd->end >> PAGE_SHIFT);
        push_node_boundaries(node, nd->start >> PAGE_SHIFT,
                             nd->end >> PAGE_SHIFT);

        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
            (reserve_hotadd(node, start, end) < 0)) {
                /* Ignore hotadd region. Undo damage */
                printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
                *nd = oldnode;
                if ((nd->start | nd->end) == 0)
                        node_clear(node, nodes_parsed);
        }
}

/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
        int i;
        unsigned long pxmram, e820ram;

        pxmram = 0;
        for_each_node_mask(i, nodes_parsed) {
                unsigned long s = nodes[i].start >> PAGE_SHIFT;
                unsigned long e = nodes[i].end >> PAGE_SHIFT;
                pxmram += e - s;
                pxmram -= absent_pages_in_range(s, e);
                if ((long)pxmram < 0)
                        pxmram = 0;
        }

        e820ram = end_pfn - absent_pages_in_range(0, end_pfn);
        /* We seem to lose 3 pages somewhere. Allow a bit of slack. */
        if ((long)(e820ram - pxmram) >= 1*1024*1024) {
                printk(KERN_ERR
                       "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
                       (pxmram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return 0;
        }
        return 1;
}

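/*
 * Forget a node again: drop it from nodes_parsed and detach any CPUs
 * (apicid_to_node entries) that pointed at it.
 */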
static void unparse_node(int node)
{
        int i;
        node_clear(node, nodes_parsed);
        for (i = 0; i < MAX_LOCAL_APIC; i++) {
                if (apicid_to_node[i] == node)
                        apicid_to_node[i] = NUMA_NO_NODE;
        }
}

void __init acpi_numa_arch_fixup(void) {}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
        int i;

        if (acpi_numa <= 0)
                return -1;

        /* First clean up the node list */
        for (i = 0; i < MAX_NUMNODES; i++) {
                cutoff_node(i, start, end);
                if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
                        unparse_node(i);
                        node_set_offline(i);
                }
        }

        if (!nodes_cover_memory(nodes)) {
                bad_srat();
                return -1;
        }

        memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
        if (memnode_shift < 0) {
                printk(KERN_ERR
                       "SRAT: No NUMA node hash function found. Contact maintainer\n");
                bad_srat();
                return -1;
        }

        node_possible_map = nodes_parsed;

        /* Finally register nodes */
        for_each_node_mask(i, node_possible_map)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        /* Try again in case setup_node_bootmem missed one due
           to missing bootmem */
        for_each_node_mask(i, node_possible_map)
                if (!node_online(i))
                        setup_node_bootmem(i, nodes[i].start, nodes[i].end);

        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node(i) == NUMA_NO_NODE)
                        continue;
                if (!node_isset(cpu_to_node(i), node_possible_map))
                        numa_set_node(i, NUMA_NO_NODE);
        }
        numa_init_array();
        return 0;
}

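/*
 * NUMA emulation (numa=fake=...) splits the machine into fake nodes; the
 * helpers below map those fake nodes back onto the real SRAT topology.
 */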
#ifdef CONFIG_NUMA_EMU
static int __init find_node_by_addr(unsigned long addr)
{
        int ret = NUMA_NO_NODE;
        int i;

        for_each_node_mask(i, nodes_parsed) {
                /*
                 * Find the real node that this emulated node appears on. For
                 * the sake of simplicity, we only use a real node's starting
                 * address to determine which emulated node it appears on.
                 */
                if (addr >= nodes[i].start && addr < nodes[i].end) {
                        ret = i;
                        break;
                }
        }
        return ret;
}

/*
 * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
 * mappings that respect the real ACPI topology but reflect our emulated
 * environment. For each emulated node, we find which real node it appears on
 * and create PXM to NID mappings for those fake nodes which mirror that
 * locality. SLIT will now represent the correct distances between emulated
 * nodes as a result of the real topology.
 */
void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
        int i, j;
        int fake_node_to_pxm_map[MAX_NUMNODES] = {
                [0 ... MAX_NUMNODES-1] = PXM_INVAL
        };
        unsigned char fake_apicid_to_node[MAX_LOCAL_APIC] = {
                [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
        };

        printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
                         "topology.\n");
        for (i = 0; i < num_nodes; i++) {
                int nid, pxm;

                nid = find_node_by_addr(fake_nodes[i].start);
                if (nid == NUMA_NO_NODE)
                        continue;
                pxm = node_to_pxm(nid);
                if (pxm == PXM_INVAL)
                        continue;
                fake_node_to_pxm_map[i] = pxm;
                /*
                 * For each apicid_to_node mapping that exists for this real
                 * node, it must now point to the fake node ID.
                 */
                for (j = 0; j < MAX_LOCAL_APIC; j++)
                        if (apicid_to_node[j] == nid)
                                fake_apicid_to_node[j] = i;
        }
        for (i = 0; i < num_nodes; i++)
                __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
        memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));

        nodes_clear(nodes_parsed);
        for (i = 0; i < num_nodes; i++)
                if (fake_nodes[i].start != fake_nodes[i].end)
                        node_set(i, nodes_parsed);
        WARN_ON(!nodes_cover_memory(fake_nodes));
}

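/*
 * Node comparison used when no SLIT is available: under NUMA emulation two
 * nodes count as local if they map to the same real proximity domain,
 * otherwise a node is only local to itself.
 */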
static int null_slit_node_compare(int a, int b)
{
        return node_to_pxm(a) == node_to_pxm(b);
}
#else
static int null_slit_node_compare(int a, int b)
{
        return a == b;
}
#endif /* CONFIG_NUMA_EMU */

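/*
 * Reserve the hot-add range recorded in nodes_add[] in the node's bootmem
 * allocator so it is not used for early allocations; the printk below
 * estimates the struct page overhead this pre-allocation implies.
 */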
void __init srat_reserve_add_area(int nodeid)
{
        if (found_add_area && nodes_add[nodeid].end) {
                u64 total_mb;

                printk(KERN_INFO "SRAT: Reserving hot-add memory space "
                                 "for node %d at %Lx-%Lx\n",
                       nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end);
                total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start)
                                >> PAGE_SHIFT;
                total_mb *= sizeof(struct page);
                total_mb >>= 20;
                printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
                                 "pre-allocated memory.\n",
                       (unsigned long long)total_mb);
                reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
                                     nodes_add[nodeid].end - nodes_add[nodeid].start);
        }
}

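/*
 * Node distance as reported by the SLIT, indexed by proximity domain:
 * entry[locality_count * pxm_a + pxm_b]. Without a SLIT, fall back to
 * LOCAL_DISTANCE for "same" nodes and REMOTE_DISTANCE for everything else.
 */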
int __node_distance(int a, int b)
{
        int index;

        if (!acpi_slit)
                return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
                                                      REMOTE_DISTANCE;
        index = acpi_slit->locality_count * node_to_pxm(a);
        return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);

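/*
 * For memory hotadd: map a physical address to the node whose recorded
 * hot-add range contains it (defaults to node 0 if none matches).
 */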
int memory_add_physaddr_to_nid(u64 start)
{
        int i, ret = 0;

        for_each_node(i)
                if (nodes_add[i].start <= start && nodes_add[i].end > start)
                        ret = i;

        return ret;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);