wireguard: queueing: use saner cpu selection wrapping
author    Jason A. Donenfeld <Jason@zx2c4.com>
          Mon, 3 Jul 2023 01:27:04 +0000 (03:27 +0200)
committer David S. Miller <davem@davemloft.net>
          Mon, 3 Jul 2023 08:17:52 +0000 (09:17 +0100)
Using `% nr_cpumask_bits` is slow and complicated, and not totally
robust toward dynamic changes to CPU topologies. Rather than storing the
next CPU in the round-robin, just store the last one, and also return
that value. This simplifies the loop drastically into a much more common
pattern.
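
[Editor's note, not part of the patch: the snippet below is a minimal userspace sketch of the "much more common pattern" referred to above, for readers who want to see the wrapping logic in isolation. Here online[], next_set_bit(), first_set_bit(), and NR_CPUS are illustrative stand-ins for cpu_online_mask, cpumask_next(), cpumask_first(), and nr_cpu_ids; they are not kernel APIs.]

	#include <stdio.h>
	#include <stdbool.h>

	#define NR_CPUS 8

	/* Stand-in for cpu_online_mask: CPUs 0, 2, 3 and 5 are "online". */
	static const bool online[NR_CPUS] = { true, false, true, true, false, true, false, false };

	/* Like cpumask_next(): first set bit strictly after 'prev', or NR_CPUS if none. */
	static int next_set_bit(int prev)
	{
		for (int cpu = prev + 1; cpu < NR_CPUS; ++cpu)
			if (online[cpu])
				return cpu;
		return NR_CPUS;
	}

	/* Like cpumask_first(): first set bit in the mask, or NR_CPUS if the mask is empty. */
	static int first_set_bit(void)
	{
		return next_set_bit(-1);
	}

	/* Mirrors the new wg_cpumask_next_online(): advance past the last used
	 * CPU and wrap back to the first online CPU when we run off the end.
	 * Assumes at least one CPU is online, as cpu_online_mask always does.
	 */
	static int next_online(int *last_cpu)
	{
		int cpu = next_set_bit(*last_cpu);

		if (cpu >= NR_CPUS)
			cpu = first_set_bit();
		*last_cpu = cpu;
		return cpu;
	}

	int main(void)
	{
		int last_cpu = -1; /* matches queue->last_cpu = -1 in wg_packet_queue_init() */

		for (int i = 0; i < 10; ++i)
			printf("packet %d -> cpu %d\n", i, next_online(&last_cpu));
		return 0;
	}

Run as-is, this cycles 0, 2, 3, 5, 0, 2, 3, 5, ..., which is the round-robin behavior the patch achieves without the `% nr_cpumask_bits` trick.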

Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
Cc: stable@vger.kernel.org
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Manuel Leiner <manuel.leiner@gmx.de>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/wireguard/queueing.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/receive.c
drivers/net/wireguard/send.c

diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 8084e7408c0ae9065f57bc463921cb985fd68c5e..26d235d152352f8b0b28a61b45d83f72db4f0b5d 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
        int ret;
 
        memset(queue, 0, sizeof(*queue));
+       queue->last_cpu = -1;
        ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
        if (ret)
                return ret;
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 125284b346a7765f264c9e8ebaaeb0885c4bad91..1ea4f874e367ee4185efd0cce9f2358c0d3d5e01 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -117,20 +117,17 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
        return cpu;
 }
 
-/* This function is racy, in the sense that next is unlocked, so it could return
- * the same CPU twice. A race-free version of this would be to instead store an
- * atomic sequence number, do an increment-and-return, and then iterate through
- * every possible CPU until we get to that index -- choose_cpu. However that's
- * a bit slower, and it doesn't seem like this potential race actually
- * introduces any performance loss, so we live with it.
+/* This function is racy, in the sense that it's called while last_cpu is
+ * unlocked, so it could return the same CPU twice. Adding locking or using
+ * atomic sequence numbers is slower though, and the consequences of racing are
+ * harmless, so live with it.
  */
-static inline int wg_cpumask_next_online(int *next)
+static inline int wg_cpumask_next_online(int *last_cpu)
 {
-       int cpu = *next;
-
-       while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
-               cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
-       *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+       int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+       if (cpu >= nr_cpu_ids)
+               cpu = cpumask_first(cpu_online_mask);
+       *last_cpu = cpu;
        return cpu;
 }
 
@@ -159,7 +156,7 @@ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
 
 static inline int wg_queue_enqueue_per_device_and_peer(
        struct crypt_queue *device_queue, struct prev_queue *peer_queue,
-       struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+       struct sk_buff *skb, struct workqueue_struct *wq)
 {
        int cpu;
 
@@ -173,7 +170,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
        /* Then we queue it up in the device queue, which consumes the
         * packet as soon as it can.
         */
-       cpu = wg_cpumask_next_online(next_cpu);
+       cpu = wg_cpumask_next_online(&device_queue->last_cpu);
        if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
                return -EPIPE;
        queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 7135d51d2d872edb66c856379ce2923b214289e9..0b3f0c843550957ee1fe3bed7185a7d990246c2b 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -524,7 +524,7 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
                goto err;
 
        ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
-                                                  wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
+                                                  wg->packet_crypt_wq);
        if (unlikely(ret == -EPIPE))
                wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
        if (likely(!ret || ret == -EPIPE)) {
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 5368f7c35b4bf21706ecc5801ebd1ae68a58d43b..95c853b59e1dae1df8b4e5cbf4e3541e35806b82 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
                goto err;
 
        ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
-                                                  wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+                                                  wg->packet_crypt_wq);
        if (unlikely(ret == -EPIPE))
                wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
 err: