numa: make "nr_node_ids" unsigned int
author     Alexey Dobriyan <adobriyan@gmail.com>
           Tue, 5 Mar 2019 23:48:26 +0000 (15:48 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 6 Mar 2019 05:07:19 +0000 (21:07 -0800)
Number of NUMA nodes can't be negative.

This saves a few bytes on x86_64:

add/remove: 0/0 grow/shrink: 4/21 up/down: 27/-265 (-238)
Function                                     old     new   delta
hv_synic_alloc.cold                           88     110     +22
prealloc_shrinker                            260     262      +2
bootstrap                                    249     251      +2
sched_init_numa                             1566    1567      +1
show_slab_objects                            778     777      -1
s_show                                      1201    1200      -1
kmem_cache_init                              346     345      -1
__alloc_workqueue_key                       1146    1145      -1
mem_cgroup_css_alloc                        1614    1612      -2
__do_sys_swapon                             4702    4699      -3
__list_lru_init                              655     651      -4
nic_probe                                   2379    2374      -5
store_user_store                             118     111      -7
red_zone_store                               106      99      -7
poison_store                                 106      99      -7
wq_numa_init                                 348     338     -10
__kmem_cache_empty                            75      65     -10
task_numa_free                               186     173     -13
merge_across_nodes_store                     351     336     -15
irq_create_affinity_masks                   1261    1246     -15
do_numa_crng_init                            343     321     -22
task_numa_fault                             4760    4737     -23
swapfile_init                                179     156     -23
hv_synic_alloc                               536     492     -44
apply_wqattrs_prepare                        746     695     -51
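
The savings are mostly a code-generation effect rather than a behaviour
change: on x86_64, mixing a signed int with 64-bit sizes or indices forces an
explicit sign-extension (movslq), whereas an unsigned int is zero-extended for
free by an ordinary 32-bit load.  A minimal sketch of the pattern (illustration
only, not taken from the patch; the helper name is made up):

    extern unsigned int nr_node_ids;	/* declaration after this patch */

    /* Hypothetical helper shaped like the per-node allocations touched below. */
    static inline unsigned long per_node_bytes(unsigned long entry_size)
    {
    	return entry_size * nr_node_ids;	/* plain 32-bit load, no movslq */
    }

The kzalloc() -> kcalloc() conversions in mm/list_lru.c and mm/slab.c fall out
of the same cleanup: passing nr_node_ids as the count argument removes the
local size temporaries and picks up kcalloc()'s overflow check on the
count * size multiplication.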

Link: http://lkml.kernel.org/r/20190201223029.GA15820@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
12 files changed:
arch/arm64/mm/numa.c
arch/powerpc/mm/numa.c
arch/x86/kernel/setup_percpu.c
arch/x86/mm/numa.c
include/linux/nodemask.h
mm/list_lru.c
mm/memcontrol.c
mm/page_alloc.c
mm/slab.c
mm/slub.c
mm/swapfile.c
mm/vmscan.c

index ae34e3a1cef1c87eaedf28081bbdcd85bfceb86f..7a0a555b366af0aa8c56a801cf9f5ecdbd070112 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -120,7 +120,7 @@ static void __init setup_node_to_cpumask_map(void)
        }
 
        /* cpumask_of_node() will now work */
-       pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+       pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
 /*
index 270cefb75cca8031b50fdec8585100a7d6b90f02..df1e11ebbabbfb9647a7f81bfd41d98259212f03 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -84,7 +84,7 @@ static void __init setup_node_to_cpumask_map(void)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
 
        /* cpumask_of_node() will now work */
-       dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+       dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
 static int __init fake_numa_create_new_node(unsigned long end_pfn,
index e8796fcd7e5a5ec2be8608a7c43ba78fc34b4e6f..13af08827eefc61e4252f7ffc6cc014f661e1f27 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -171,7 +171,7 @@ void __init setup_per_cpu_areas(void)
        unsigned long delta;
        int rc;
 
-       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
+       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
        /*
index 1308f5408bf74881f89c5b323a465e23ac83177f..12c1b7a83ed7b03145ed487e1fbe54a8f60c78cd 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -123,7 +123,7 @@ void __init setup_node_to_cpumask_map(void)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
 
        /* cpumask_of_node() will now work */
-       pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+       pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
 static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
@@ -866,7 +866,7 @@ const struct cpumask *cpumask_of_node(int node)
 {
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
-                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+                       "cpumask_of_node(%d): node > nr_node_ids(%u)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
index 5a30ad594ccc11d40879ec8438b587f519b5c791..962c5e783d50bfc8cafaffdac896231d211a74c8 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -444,7 +444,7 @@ static inline int next_memory_node(int nid)
        return next_node(nid, node_states[N_MEMORY]);
 }
 
-extern int nr_node_ids;
+extern unsigned int nr_node_ids;
 extern int nr_online_nodes;
 
 static inline void node_set_online(int nid)
@@ -485,7 +485,7 @@ static inline int num_node_state(enum node_states state)
 #define first_online_node      0
 #define first_memory_node      0
 #define next_online_node(nid)  (MAX_NUMNODES)
-#define nr_node_ids            1
+#define nr_node_ids            1U
 #define nr_online_nodes                1
 
 #define node_set_online(node)     node_set_state((node), N_ONLINE)
index 5b30625fd3651a51deab140c3412897a51624f4f..0730bf8ff39f639b19e0b36ed5498386ab9b6d7f 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -601,7 +601,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key, struct shrinker *shrinker)
 {
        int i;
-       size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -612,7 +611,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 #endif
        memcg_get_cache_ids();
 
-       lru->node = kzalloc(size, GFP_KERNEL);
+       lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
        if (!lru->node)
                goto out;
 
index 30bda8d7fb5c3593827007323c9ecc8df639f6e3..45cd1f84268a43021b23a99a84eb1fe18e9f53d7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4429,7 +4429,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
        struct mem_cgroup *memcg;
-       size_t size;
+       unsigned int size;
        int node;
 
        size = sizeof(struct mem_cgroup);
index 11a5f50efd97e799c39b3c545cf74b5da98795c1..8df43caf2eb731f39c6716ce299f69fcbb4de013 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL(movable_zone);
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
+unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
 int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
index 757e646baa5d7b90714e345098c4f043e15903af..7510a1b489df733d0f62d6b55e62c5dee899ae34 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -677,12 +677,11 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
        struct alien_cache **alc_ptr;
-       size_t memsize = sizeof(void *) * nr_node_ids;
        int i;
 
        if (limit > 1)
                limit = 12;
-       alc_ptr = kzalloc_node(memsize, gfp, node);
+       alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
        if (!alc_ptr)
                return NULL;
 
index 017a2ce5ba23150dc0d52c77ba671c8dc358d89e..1b08fbcb7e61fbcc5fa84738dc09e88050d2bd2b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4262,7 +4262,7 @@ void __init kmem_cache_init(void)
        cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
                                  slub_cpu_dead);
 
-       pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
+       pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
                cache_line_size(),
                slub_min_order, slub_max_order, slub_min_objects,
                nr_cpu_ids, nr_node_ids);
index 57e9b1b31d5506988eeb36a600bef15104aecb10..a14257ac0476c6b1eb3df753c89e14a2bf781e87 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2713,7 +2713,7 @@ static struct swap_info_struct *alloc_swap_info(void)
        struct swap_info_struct *p;
        unsigned int type;
        int i;
-       int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
+       unsigned int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
 
        p = kvzalloc(size, GFP_KERNEL);
        if (!p)
index 209c2c78a087dc2dffaa05eddffffe821aafb9a9..e1f7ccdc0a904bec102796968c4449150b9dab8f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -374,7 +374,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
  */
 int prealloc_shrinker(struct shrinker *shrinker)
 {
-       size_t size = sizeof(*shrinker->nr_deferred);
+       unsigned int size = sizeof(*shrinker->nr_deferred);
 
        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;