xsk: Propagate napi_id to XDP socket Rx path
author	Björn Töpel <bjorn.topel@intel.com>
	Mon, 30 Nov 2020 18:52:01 +0000 (19:52 +0100)
committer	Daniel Borkmann <daniel@iogearbox.net>
	Mon, 30 Nov 2020 23:09:25 +0000 (00:09 +0100)
Add napi_id to the xdp_rxq_info structure, and make sure the XDP
socket picks up the napi_id in the Rx path. The napi_id is used to find
the corresponding NAPI structure for socket busy polling.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/bpf/20201130185205.196029-7-bjorn.topel@gmail.com
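For reference, the driver-side pattern this change introduces looks roughly like the
sketch below. It is illustrative only: the ring and q_vector field names are modelled
on the i40e hunk further down, and example_ring / example_reg_xdp_rxq are made-up
names, not any driver's actual code.

/* Sketch: registering an XDP Rx queue together with its NAPI id so that
 * AF_XDP sockets bound to this queue can be busy polled.  The struct and
 * function names here are hypothetical; the xdp_rxq_info_reg() signature
 * matches the include/net/xdp.h hunk below.
 */
static int example_reg_xdp_rxq(struct example_ring *rx_ring)
{
	/* New fourth argument: the queue's NAPI id.  Drivers without a
	 * NAPI context at this point pass 0, which keeps the old
	 * behaviour (no busy-poll association for this queue).
	 */
	return xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				rx_ring->queue_index,
				rx_ring->q_vector->napi.napi_id);
}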
29 files changed:
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/ti/cpsw_priv.c
drivers/net/hyperv/netvsc.c
drivers/net/tun.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/xen-netfront.c
include/net/busy_poll.h
include/net/xdp.h
net/core/dev.c
net/core/xdp.c
net/xdp/xsk.c

diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e8131dadc22c3155fb565793b2e24e8a01617500..6ad59f0068f6a2ee87477e61fd365308d09908ff 100644
@@ -416,7 +416,7 @@ static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
 {
        int rc;
 
-       rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);
+       rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
 
        if (rc) {
                netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7975f59735d61f3da9bb018e6635a4a7a241d7a1..725d929eddb156cb8cfa7744885b20db6d36056b 100644
@@ -2884,7 +2884,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
                if (rc)
                        return rc;
 
-               rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+               rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
                if (rc < 0)
                        return rc;
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 7a141ce32e86c74d7a1d08d6cba77f8a9c5ce297..f782e6af45e93d7b5d419d7b619b493f7beece56 100644
@@ -770,7 +770,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
        rq->caching = 1;
 
        /* Driver have no proper error path for failed XDP RX-queue info reg */
-       WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+       WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx, 0) < 0);
 
        /* Send a mailbox msg to PF to config RQ */
        mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cf9400a9886d7a6a80fcd597467eca6ea3d2f6ef..40953980e84684470a22242416e772ef744d69b8 100644
@@ -3334,7 +3334,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
                return 0;
 
        err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
-                              fq->flowid);
+                              fq->flowid, 0);
        if (err) {
                dev_err(dev, "xdp_rxq_info_reg failed\n");
                return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c21548c71bb1ec912725e510ea248c8f5c12a866..9f73cd7aee09b17ee0abe70313c8884b9718a50a 100644
@@ -1447,7 +1447,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        /* XDP RX-queue info only needed for RX rings exposed to XDP */
        if (rx_ring->vsi->type == I40E_VSI_MAIN) {
                err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                      rx_ring->queue_index);
+                                      rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
                if (err < 0)
                        return err;
        }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index fe4320e2d1f2f73c272fcdc0046f8225b0b4406f..3124a3bf519a826b6f59e1f1fdd12ae599307e1b 100644
@@ -306,7 +306,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
                if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
                        /* coverity[check_return] */
                        xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-                                        ring->q_index);
+                                        ring->q_index, ring->q_vector->napi.napi_id);
 
                ring->xsk_pool = ice_xsk_pool(ring);
                if (ring->xsk_pool) {
@@ -333,7 +333,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
                                /* coverity[check_return] */
                                xdp_rxq_info_reg(&ring->xdp_rxq,
                                                 ring->netdev,
-                                                ring->q_index);
+                                                ring->q_index, ring->q_vector->napi.napi_id);
 
                        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index eae75260fe20b0a2832916aa4dcde288fde97cb4..77d5eae6b4c2dd543f6b272c37ec59910726384b 100644
@@ -483,7 +483,7 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
        if (rx_ring->vsi->type == ICE_VSI_PF &&
            !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
                if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                    rx_ring->q_index))
+                                    rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
                        goto err;
        return 0;
 
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5fc2c381da55de1a505468c6bc6cc7772e2cbfdd..6a4ef4934fcf958dcb30328e892f4f9b5578e632 100644
@@ -4352,7 +4352,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 
        /* XDP RX-queue info */
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                            rx_ring->queue_index) < 0)
+                            rx_ring->queue_index, 0) < 0)
                goto err;
 
        return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 45ae33e15303849030c6c7894a87ea157679b848..50e6b8b6ba7bada9bd131cb945b5b7181a7161b2 100644
@@ -6577,7 +6577,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
        /* XDP RX-queue info */
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
-                            rx_ring->queue_index) < 0)
+                            rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
                goto err;
 
        rx_ring->xdp_prog = adapter->xdp_prog;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 82fce27f682bb63c7b7d3bfda82054eea9778e33..4061cd7db5dd759715dc71e4b038bd1109777bc3 100644
@@ -3493,7 +3493,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 
        /* XDP RX-queue info */
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
-                            rx_ring->queue_index) < 0)
+                            rx_ring->queue_index, 0) < 0)
                goto err;
 
        rx_ring->xdp_prog = adapter->xdp_prog;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 183530ed4d1db78a4b3eeadd5043472b5dcb5f2f..ba6dcb19bb1d37efdd3d08f016498863e3612d4b 100644
@@ -3227,7 +3227,7 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
                return err;
        }
 
-       err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+       err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
        if (err < 0)
                goto err_free_pp;
 
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 3069e192d773bfc1c983bbb5ae4980ab4b5cc2c6..5504cbc24970c258062643bd5370b80b8af1a7c3 100644
@@ -2614,11 +2614,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
 
        if (priv->percpu_pools) {
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
                if (err < 0)
                        goto err_free_dma;
 
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
                if (err < 0)
                        goto err_unregister_rxq_short;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b0f79a5151cfa711bee4436cd552341151d7012d..40775cb8fb2a416fe21de67da04c9dc7bad215f7 100644
@@ -283,7 +283,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
        ring->log_stride = ffs(ring->stride) - 1;
        ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
-       if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+       if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
                goto err_ring;
 
        tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 527c5f12c5af67145afde14e2c2b5d495e979960..427fc376fe1ad948643eff02bc4165a4e06f94cc 100644
@@ -434,7 +434,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        rq_xdp_ix = rq->ix;
        if (xsk)
                rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
-       err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
+       err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
        if (err < 0)
                goto err_rq_xdp_prog;
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b150da43adb2d1703039c43ad06ea2496d30833a..b4acf2f41e84b341f741cc14c08eb8a8ba53772f 100644
@@ -2533,7 +2533,7 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 
        if (dp->netdev) {
                err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
-                                      rx_ring->idx);
+                                      rx_ring->idx, rx_ring->r_vec->napi.napi_id);
                if (err < 0)
                        return err;
        }
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 05e3a3b60269e49ddc6d28ccaffa5e4c7e9becf1..9cf960a6d0078820ac83f5dfdd8d24d82c0a5939 100644
@@ -1762,7 +1762,7 @@ static void qede_init_fp(struct qede_dev *edev)
 
                        /* Driver have no error path from here */
                        WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
-                                                fp->rxq->rxq_id) < 0);
+                                                fp->rxq->rxq_id, 0) < 0);
 
                        if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
                                                       MEM_TYPE_PAGE_ORDER0,
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 19cf7cac1e6e9e8005cc12cc95ddb024f5f770f1..68fc7d317693b5532f647f36ef2d62ee66e8765f 100644
@@ -262,7 +262,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 
        /* Initialise XDP queue information */
        rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
-                             rx_queue->core_index);
+                             rx_queue->core_index, 0);
 
        if (rc) {
                netif_err(efx, rx_err, efx->net_dev,
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 1503cc9ec6e2d479e09c0cb7c2afcd887be2bb7b..27d3c9d9210e5f69039c7095abff54e2512e8cc7 100644
@@ -1304,7 +1304,7 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
                goto err_out;
        }
 
-       err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
+       err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id);
        if (err)
                goto err_out;
 
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 31c5e36ff7069f545f0ce9a4e05a72a3db64e428..6dd73bd0f458ea20e94dcc331d69e91a3e9060ff 100644
@@ -1186,7 +1186,7 @@ static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
        pool = cpsw->page_pool[ch];
        rxq = &priv->xdp_rxq[ch];
 
-       ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
+       ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
        if (ret)
                return ret;
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0c3de94b51787b852b4d3e86b2a222d3051ce139..fa8341f8359ac4a999059754bff5ac3f89d7c7dc 100644
@@ -1499,7 +1499,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
                u64_stats_init(&nvchan->tx_stats.syncp);
                u64_stats_init(&nvchan->rx_stats.syncp);
 
-               ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
+               ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
 
                if (ret) {
                        netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3d45d56172cb803bc1a0837f6e2c4d3d7409e1cf..8867d39db6ac1c318a78c52c167f3400d62e2bf4 100644
@@ -780,7 +780,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
        } else {
                /* Setup XDP RX-queue info, for new tfile getting attached */
                err = xdp_rxq_info_reg(&tfile->xdp_rxq,
-                                      tun->dev, tfile->queue_index);
+                                      tun->dev, tfile->queue_index, 0);
                if (err < 0)
                        goto out;
                err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8c737668008a0d0edc23b18601f3a4a597d1fd86..9bd37c7151f8c74b0471cffd2c57df3da93777ec 100644
@@ -884,7 +884,6 @@ static int veth_napi_add(struct net_device *dev)
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];
 
-               netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
                napi_enable(&rq->xdp_napi);
        }
 
@@ -926,7 +925,8 @@ static int veth_enable_xdp(struct net_device *dev)
                for (i = 0; i < dev->real_num_rx_queues; i++) {
                        struct veth_rq *rq = &priv->rq[i];
 
-                       err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
+                       netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+                       err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
                        if (err < 0)
                                goto err_rxq_reg;
 
@@ -952,8 +952,12 @@ static int veth_enable_xdp(struct net_device *dev)
 err_reg_mem:
        xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
 err_rxq_reg:
-       for (i--; i >= 0; i--)
-               xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+       for (i--; i >= 0; i--) {
+               struct veth_rq *rq = &priv->rq[i];
+
+               xdp_rxq_info_unreg(&rq->xdp_rxq);
+               netif_napi_del(&rq->xdp_napi);
+       }
 
        return err;
 }
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 21b71148c53241beb26b0e812f4038854c35a479..052975ea0af4cd6a6cb69c16496b09d9f7e884a0 100644
@@ -1485,7 +1485,7 @@ static int virtnet_open(struct net_device *dev)
                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                schedule_delayed_work(&vi->refill, 0);
 
-               err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+               err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
                if (err < 0)
                        return err;
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 920cac4385bf7ef415d5e07223ddf0e130d6927e..b01848ef46493f9bc19a853b784896429cd5923b 100644
@@ -2014,7 +2014,7 @@ static int xennet_create_page_pool(struct netfront_queue *queue)
        }
 
        err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
-                              queue->id);
+                              queue->id, 0);
        if (err) {
                netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
                goto err_free_pp;
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 2f8f51807b83ac8b2dafc44a8ee94911c4f83a48..45b3e04b99d3e8a61a2ca8945d1ca77f03ceabe9 100644
@@ -135,14 +135,25 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
        sk_rx_queue_set(sk, skb);
 }
 
-/* variant used for unconnected sockets */
-static inline void sk_mark_napi_id_once(struct sock *sk,
-                                       const struct sk_buff *skb)
+static inline void __sk_mark_napi_id_once_xdp(struct sock *sk, unsigned int napi_id)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
        if (!READ_ONCE(sk->sk_napi_id))
-               WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+               WRITE_ONCE(sk->sk_napi_id, napi_id);
 #endif
 }
 
+/* variant used for unconnected sockets */
+static inline void sk_mark_napi_id_once(struct sock *sk,
+                                       const struct sk_buff *skb)
+{
+       __sk_mark_napi_id_once_xdp(sk, skb->napi_id);
+}
+
+static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
+                                           const struct xdp_buff *xdp)
+{
+       __sk_mark_napi_id_once_xdp(sk, xdp->rxq->napi_id);
+}
+
 #endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 7d48b2ae217a559310c18d06947b09bdf6f6a625..700ad5db7f5d49585a62f5cd3ec47bf3855d1839 100644
@@ -59,6 +59,7 @@ struct xdp_rxq_info {
        u32 queue_index;
        u32 reg_state;
        struct xdp_mem_info mem;
+       unsigned int napi_id;
 } ____cacheline_aligned; /* perf critical, avoid false-sharing */
 
 struct xdp_txq_info {
@@ -226,7 +227,7 @@ static inline void xdp_release_frame(struct xdp_frame *xdpf)
 }
 
 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
-                    struct net_device *dev, u32 queue_index);
+                    struct net_device *dev, u32 queue_index, unsigned int napi_id);
 void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
 void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
 bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
diff --git a/net/core/dev.c b/net/core/dev.c
index 7a1e5936c67fdc55072c70e6cdef9cfe9b9a341c..3b6b0e175fe7fda602a9d4174d3b0a9681b70d18 100644
@@ -9810,7 +9810,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
                rx[i].dev = dev;
 
                /* XDP RX-queue setup */
-               err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
+               err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
                if (err < 0)
                        goto err_rxq_info;
        }
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 3d330ebda893dff93a225f1a29c448d79dacb175..17ffd33c6b18ce6c7d8b9fb2559c084647c05fc2 100644
@@ -158,7 +158,7 @@ static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
 
 /* Returns 0 on success, negative on failure */
 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
-                    struct net_device *dev, u32 queue_index)
+                    struct net_device *dev, u32 queue_index, unsigned int napi_id)
 {
        if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
                WARN(1, "Driver promised not to register this");
@@ -179,6 +179,7 @@ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
        xdp_rxq_info_init(xdp_rxq);
        xdp_rxq->dev = dev;
        xdp_rxq->queue_index = queue_index;
+       xdp_rxq->napi_id = napi_id;
 
        xdp_rxq->reg_state = REG_STATE_REGISTERED;
        return 0;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a8501a5477cff63d5d4c11151ed74f0dde1c28a9..7588e599a048d565a590b31aedc21cbd76153f5e 100644
@@ -233,6 +233,7 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;
 
+       sk_mark_napi_id_once_xdp(&xs->sk, xdp);
        len = xdp->data_end - xdp->data;
 
        return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?