cpumask: fix incorrect cpumask scanning result checks
author		Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 6 Mar 2023 20:15:13 +0000 (12:15 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 6 Mar 2023 20:15:13 +0000 (12:15 -0800)
It turns out that commit 596ff4a09b89 ("cpumask: re-introduce
constant-sized cpumask optimizations") exposed a number of cases of
drivers not checking the result of "cpumask_next()" and friends
correctly.

The documented correct check for "no more cpus in the cpumask" is to
check for the result being equal to or larger than the number of
possible CPU ids, exactly _because_ we've always done those
constant-sized cpumask scans using a widened type before.  So the
return value of a cpumask scan should be checked with

if (cpu >= nr_cpu_ids)
...

because the cpumask scan did not necessarily stop exactly *at* that
maximum CPU id.
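
As a minimal sketch (not part of this patch; the helper name and the
"mask" argument are made up for illustration), an open-coded scan using
the plain cpumask_next()/nr_cpu_ids interfaces would terminate like
this, which is essentially what the for_each_cpu() iterator already
does for you:

   static void example_visit_each_cpu(const struct cpumask *mask)
   {
           unsigned int cpu;

           /* stop as soon as the scan result is past the last valid CPU id */
           for (cpu = cpumask_first(mask); cpu < nr_cpu_ids;
                cpu = cpumask_next(cpu, mask)) {
                   /* ... per-CPU work on "cpu" goes here ... */
           }
   }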

But a few cases ended up instead using checks like

if (cpu == nr_cpumask_bits)
...

which used that internal "widened" number of bits.  And that used to
work pretty much by accident (ok, in this case "by accident" is simply
because it matched the historical internal implementation of the
cpumask scanning, so it was more of an "intentionally using
implementation details" case than an accident).

But the extended constant-sized optimizations then did that internal
implementation differently, and the code that did things wrong but
happened to match the old implementation now no longer works at all.

Which then causes subsequent odd problems due to using what ends up
being an invalid CPU ID.

Most of these cases require either unusual hardware or special uses to
hit, but the random.c one triggers quite easily.

All you really need is to have a sufficiently small CONFIG_NR_CPUS value
for the bit scanning optimization to be triggered, but not enough CPUs
to then actually fill that widened cpumask.  At that point, the cpumask
scanning will return the NR_CPUS constant, which is _not_ the same as
nr_cpumask_bits.
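
As a rough illustration (the numbers here are assumed, and this snippet
is not taken from the patch itself, it just mirrors the
drivers/char/random.c case below): on a CONFIG_NR_CPUS=8 build booted
with only 2 possible CPUs, nr_cpu_ids is 2, but a scan that finds no
further bit set returns the NR_CPUS constant 8:

   cpu = cpumask_next(cpu, &timer_cpus);  /* no later bit set: returns 8 (NR_CPUS) */

   if (cpu == nr_cpumask_bits)     /* false: 8 is not nr_cpumask_bits, the old check misses */
           cpu = cpumask_first(&timer_cpus);
   /* ... and "cpu" ends up being used as the invalid CPU id 8 ... */

   if (cpu >= nr_cpu_ids)          /* true: 8 >= 2, the fixed check wraps around as intended */
           cpu = cpumask_first(&timer_cpus);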

This just does the mindless fix with

   sed -i 's/== nr_cpumask_bits/>= nr_cpu_ids/'

to fix the incorrect uses.

The ones in the SCSI lpfc driver in particular could probably be fixed
more cleanly by just removing that repeated pattern entirely, but I am
not emotionally invested enough in that driver to care.

Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
Link: https://lore.kernel.org/lkml/481b19b5-83a0-4793-b4fd-194ad7b978c3@roeck-us.net/
Reported-and-tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Link: https://lore.kernel.org/lkml/CAMuHMdUKo_Sf7TjKzcNDa8Ve+6QrK+P8nSQrSQ=6LTRmcBKNww@mail.gmail.com/
Reported-by: Vernon Yang <vernon2gm@gmail.com>
Link: https://lore.kernel.org/lkml/20230306160651.2016767-1-vernon2gm@gmail.com/
Cc: Yury Norov <yury.norov@gmail.com>
Cc: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/xmon/xmon.c
drivers/char/random.c
drivers/net/wireguard/queueing.h
drivers/scsi/lpfc/lpfc_init.c

diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 73c620c2a3a166b2b5c6b4d460847b01edf27541..e753a6bd48881026339abd56fe92c3944a0eb124 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1275,7 +1275,7 @@ static int xmon_batch_next_cpu(void)
        while (!cpumask_empty(&xmon_batch_cpus)) {
                cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
                                        xmon_batch_start_cpu, true);
-               if (cpu == nr_cpumask_bits)
+               if (cpu >= nr_cpu_ids)
                        break;
                if (xmon_batch_start_cpu == -1)
                        xmon_batch_start_cpu = cpu;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ce3ccd172cc868d9f89ea2c5489a600ea08e2a0d..253f2ddb891308591db05ada2de72aa8703043da 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1311,7 +1311,7 @@ static void __cold try_to_generate_entropy(void)
                        /* Basic CPU round-robin, which avoids the current CPU. */
                        do {
                                cpu = cpumask_next(cpu, &timer_cpus);
-                               if (cpu == nr_cpumask_bits)
+                               if (cpu >= nr_cpu_ids)
                                        cpu = cpumask_first(&timer_cpus);
                        } while (cpu == smp_processor_id() && num_cpus > 1);
 
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 583adb37ee1e31e6f03858f6893b631e0d26d147..125284b346a7765f264c9e8ebaaeb0885c4bad91 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -106,7 +106,7 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
 {
        unsigned int cpu = *stored_cpu, cpu_index, i;
 
-       if (unlikely(cpu == nr_cpumask_bits ||
+       if (unlikely(cpu >= nr_cpu_ids ||
                     !cpumask_test_cpu(cpu, cpu_online_mask))) {
                cpu_index = id % cpumask_weight(cpu_online_mask);
                cpu = cpumask_first(cpu_online_mask);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 61958a24a43d9f765bef60d9f2af17bc2e20634b..73b544bfbb2e64376cc0b0bd9ac90ac3b0c2a540 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -12563,7 +12563,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
                                        goto found_same;
                                new_cpu = cpumask_next(
                                        new_cpu, cpu_present_mask);
-                               if (new_cpu == nr_cpumask_bits)
+                               if (new_cpu >= nr_cpu_ids)
                                        new_cpu = first_cpu;
                        }
                        /* At this point, we leave the CPU as unassigned */
@@ -12577,7 +12577,7 @@ found_same:
                         * selecting the same IRQ.
                         */
                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (start_cpu == nr_cpumask_bits)
+                       if (start_cpu >= nr_cpu_ids)
                                start_cpu = first_cpu;
 
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12613,7 +12613,7 @@ found_same:
                                        goto found_any;
                                new_cpu = cpumask_next(
                                        new_cpu, cpu_present_mask);
-                               if (new_cpu == nr_cpumask_bits)
+                               if (new_cpu >= nr_cpu_ids)
                                        new_cpu = first_cpu;
                        }
                        /* We should never leave an entry unassigned */
@@ -12631,7 +12631,7 @@ found_any:
                         * selecting the same IRQ.
                         */
                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (start_cpu == nr_cpumask_bits)
+                       if (start_cpu >= nr_cpu_ids)
                                start_cpu = first_cpu;
 
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12704,7 +12704,7 @@ found_any:
                                goto found_hdwq;
                        }
                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (new_cpu == nr_cpumask_bits)
+                       if (new_cpu >= nr_cpu_ids)
                                new_cpu = first_cpu;
                }
 
@@ -12719,7 +12719,7 @@ found_any:
                                goto found_hdwq;
 
                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (new_cpu == nr_cpumask_bits)
+                       if (new_cpu >= nr_cpu_ids)
                                new_cpu = first_cpu;
                }
 
@@ -12730,7 +12730,7 @@ found_any:
  found_hdwq:
                /* We found an available entry, copy the IRQ info */
                start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-               if (start_cpu == nr_cpumask_bits)
+               if (start_cpu >= nr_cpu_ids)
                        start_cpu = first_cpu;
                cpup->hdwq = new_cpup->hdwq;
  logit: