git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
hns_enet: use cpumask_var_t for on-stack mask
author: Arnd Bergmann <arnd@arndb.de>
Thu, 2 Feb 2017 14:49:24 +0000 (15:49 +0100)
committer: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Wed, 28 Jun 2017 14:24:07 +0000 (11:24 -0300)
BugLink: https://bugs.launchpad.net/bugs/1696031
On large SMP builds, we can run into a build warning:

drivers/net/ethernet/hisilicon/hns/hns_enet.c: In function 'hns_set_irq_affinity.isra.27':
drivers/net/ethernet/hisilicon/hns/hns_enet.c:1242:1: warning: the frame size of 1032 bytes is larger than 1024 bytes [-Wframe-larger-than=]

The solution here is to use cpumask_var_t, which can use dynamic
allocation when CONFIG_CPUMASK_OFFSTACK is enabled.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit ff3edc9b8efc8200c25f3a5adfb1c1de0a882dc5)
Signed-off-by: dann frazier <dann.frazier@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Seth Forshee <seth.forshee@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
drivers/net/ethernet/hisilicon/hns/hns_enet.c

index 8aed72860e7c0eece690c97ae38b1fbedfa58557..f90dc811e8efed5a11dfba76e3154ca64dbf733c 100644 (file)
@@ -1203,43 +1203,48 @@ static void hns_set_irq_affinity(struct hns_nic_priv *priv)
        struct hns_nic_ring_data *rd;
        int i;
        int cpu;
-       cpumask_t mask;
+       cpumask_var_t mask;
+
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return;
 
        /*diffrent irq banlance for 16core and 32core*/
        if (h->q_num == num_possible_cpus()) {
                for (i = 0; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index)) {
-                               cpumask_clear(&mask);
+                               cpumask_clear(mask);
                                cpu = rd->queue_index;
-                               cpumask_set_cpu(cpu, &mask);
+                               cpumask_set_cpu(cpu, mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           &mask);
+                                                           mask);
                        }
                }
        } else {
                for (i = 0; i < h->q_num; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2)) {
-                               cpumask_clear(&mask);
+                               cpumask_clear(mask);
                                cpu = rd->queue_index * 2;
-                               cpumask_set_cpu(cpu, &mask);
+                               cpumask_set_cpu(cpu, mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           &mask);
+                                                           mask);
                        }
                }
 
                for (i = h->q_num; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2 + 1)) {
-                               cpumask_clear(&mask);
+                               cpumask_clear(mask);
                                cpu = rd->queue_index * 2 + 1;
-                               cpumask_set_cpu(cpu, &mask);
+                               cpumask_set_cpu(cpu, mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           &mask);
+                                                           mask);
                        }
                }
        }
+
+       free_cpumask_var(mask);
 }
 
 static int hns_nic_init_irq(struct hns_nic_priv *priv)