/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;
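
/*
 * Parse the "numa=" early boot parameter: "off" disables NUMA entirely,
 * "fake=..." is handed to the NUMA emulation code, and "noacpi" disables
 * SRAT-based configuration.
 */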
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
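
/* Look up the node parsed for @cpu via its APIC ID; NUMA_NO_NODE if unknown. */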
int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
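
/*
 * Record the cpu -> node mapping. The early map is used until the
 * per-cpu areas are set up; afterwards the per-cpu variable is written.
 */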
void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
	       nid, start, end - 1);

	/*
	 * Allocate node data. Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node\n", nd_size);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]\n",
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks. Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks. Whine
			 * about but allow overlaps of the same nid. They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed. The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}
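
/*
 * Allocate the distance table, sized by the highest node seen so far,
 * and fill it with the default LOCAL_DISTANCE/REMOTE_DISTANCE values.
 */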
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set NUMA distance from one NUMA to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance. If distance table
 * doesn't exist, one which is large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
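
/* Table lookup behind node_distance(); out-of-range nodes get the defaults. */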
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common). Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
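
/*
 * Feed the sanitized memblks into memblock, sanity-check the resulting
 * configuration and set up NODE_DATA for every node that has memory.
 */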
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
	}

	/*
	 * If sections array is gonna be used for pfn -> nid mapping, check
	 * whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start < end)
			setup_node_data(nid, start, end);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
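
/*
 * Common NUMA init path: reset node masks and the distance table, run
 * @init_func to collect memblks, then clean them up, apply emulation and
 * register the result, clearing CPUs that ended up on offline nodes.
 */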
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	/*
	 * Do not clear numa_nodes_parsed or zero numa_meminfo here, because
	 * SRAT was parsed earlier in early_parse_srat().
	 */
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;
	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_X86_NUMAQ
		if (!numa_init(numaq_numa_init))
			return;
#endif
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}
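
/* Return the online node with the smallest node_distance() to @node. */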
static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		if (!node_online(node))
			node = find_near_online_node(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
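
/*
 * Debug versions of the cpu/node mapping helpers below, with extra
 * sanity checks and diagnostics.
 */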
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
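
/* Set or clear @cpu in @node's cpumask and log the resulting mask. */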
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;
	char buf[64];

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
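/*
 * Map a physical address being hot-added to the node whose memblk
 * contains it; defaults to the first memblk's node.
 */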
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif