git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
ice: Fix ASSERT_RTNL() warning during certain scenarios
author: Amritha Nambiar <amritha.nambiar@intel.com>
Tue, 13 Feb 2024 19:48:50 +0000 (11:48 -0800)
committer: Tony Nguyen <anthony.l.nguyen@intel.com>
Tue, 20 Feb 2024 21:30:26 +0000 (13:30 -0800)
Commit 91fdbce7e8d6 ("ice: Add support in the driver for associating
queue with napi") invoked the netif_queue_set_napi() call. This
kernel function requires to be called with rtnl_lock taken,
otherwise ASSERT_RTNL() warning will be triggered. ice_vsi_rebuild()
initiating this call is under rtnl_lock when the rebuild is in
response to configuration changes from external interfaces (such as
tc, ethtool etc., which hold the lock). But the VSI rebuild
generated from service tasks and resets (PFR/CORER/GLOBR) is not
under rtnl lock protection. Handle these cases as well by taking the
rtnl lock before the kernel call (by setting the 'locked' boolean to
false, so that the helper acquires the lock itself).

netif_queue_set_napi() is also used to clear previously set napi
in the q_vector unroll flow. Handle this for locked/lockless execution
paths.

Fixes: 91fdbce7e8d6 ("ice: Add support in the driver for associating queue with napi")
Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_lib.h
drivers/net/ethernet/intel/ice/ice_main.c

index 7ac847718882e29b38071ca6b8adb47ca063f1d7..c979192e44d108b370ad132ec900c19d8452db32 100644 (file)
@@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
        q_vector = vsi->q_vectors[v_idx];
 
        ice_for_each_tx_ring(tx_ring, q_vector->tx) {
-               if (vsi->netdev)
-                       netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
-                                            NETDEV_QUEUE_TYPE_TX, NULL);
+               ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+                                  NULL);
                tx_ring->q_vector = NULL;
        }
        ice_for_each_rx_ring(rx_ring, q_vector->rx) {
-               if (vsi->netdev)
-                       netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
-                                            NETDEV_QUEUE_TYPE_RX, NULL);
+               ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+                                  NULL);
                rx_ring->q_vector = NULL;
        }
 
index 9be724291ef82ac7e05c198d9febe029b4946a5e..097bf8fd6bf0edc5599edbf3fe9f3cf2e05e212a 100644 (file)
@@ -2426,7 +2426,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                ice_vsi_map_rings_to_vectors(vsi);
 
                /* Associate q_vector rings to napi */
-               ice_vsi_set_napi_queues(vsi, true);
+               ice_vsi_set_napi_queues(vsi);
 
                vsi->stat_offsets_loaded = false;
 
@@ -2904,19 +2904,19 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
 }
 
 /**
- * ice_queue_set_napi - Set the napi instance for the queue
+ * __ice_queue_set_napi - Set the napi instance for the queue
  * @dev: device to which NAPI and queue belong
  * @queue_index: Index of queue
  * @type: queue type as RX or TX
  * @napi: NAPI context
  * @locked: is the rtnl_lock already held
  *
- * Set the napi instance for the queue
+ * Set the napi instance for the queue. Caller indicates the lock status.
  */
 static void
-ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
-                  enum netdev_queue_type type, struct napi_struct *napi,
-                  bool locked)
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+                    enum netdev_queue_type type, struct napi_struct *napi,
+                    bool locked)
 {
        if (!locked)
                rtnl_lock();
@@ -2926,26 +2926,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
 }
 
 /**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+                  enum netdev_queue_type type, struct napi_struct *napi)
+{
+       struct ice_pf *pf = vsi->back;
+
+       if (!vsi->netdev)
+               return;
+
+       if (current_work() == &pf->serv_task ||
+           test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+           test_bit(ICE_DOWN, pf->state) ||
+           test_bit(ICE_SUSPENDED, pf->state))
+               __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+                                    false);
+       else
+               __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+                                    true);
+}
+
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
  * @q_vector: q_vector pointer
  * @locked: is the rtnl_lock already held
  *
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+       struct ice_rx_ring *rx_ring;
+       struct ice_tx_ring *tx_ring;
+
+       ice_for_each_rx_ring(rx_ring, q_vector->rx)
+               __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+                                    NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+                                    locked);
+
+       ice_for_each_tx_ring(tx_ring, q_vector->tx)
+               __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+                                    NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+                                    locked);
+       /* Also set the interrupt number for the NAPI */
+       netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
  * Associate the q_vector napi with all the queue[s] on the vector
  */
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
 {
        struct ice_rx_ring *rx_ring;
        struct ice_tx_ring *tx_ring;
 
        ice_for_each_rx_ring(rx_ring, q_vector->rx)
-               ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
-                                  NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
-                                  locked);
+               ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
 
        ice_for_each_tx_ring(tx_ring, q_vector->tx)
-               ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
-                                  NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
-                                  locked);
+               ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
        /* Also set the interrupt number for the NAPI */
        netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
 }
@@ -2953,11 +3006,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
 /**
  * ice_vsi_set_napi_queues
  * @vsi: VSI pointer
- * @locked: is the rtnl_lock already held
  *
  * Associate queue[s] with napi for all vectors
  */
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
 {
        int i;
 
@@ -2965,7 +3017,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
                return;
 
        ice_for_each_q_vector(vsi, i)
-               ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+               ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
 }
 
 /**
index 71bd27244941d549d9253af900629ccb36278072..bfcfc582a4c04ff143390e394d0b65a1d0970391 100644 (file)
@@ -91,9 +91,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
 
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+                  enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
 
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
 
 int ice_vsi_release(struct ice_vsi *vsi);
 
index dd4a9bc0dfdc661b2d2f3c48a2df5b773e4f75bb..59c7e37f175fe80f060b4aff05df572529543f5d 100644 (file)
@@ -3495,7 +3495,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
        ice_for_each_q_vector(vsi, v_idx) {
                netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
                               ice_napi_poll);
-               ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+               __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
        }
 }
 
@@ -5447,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
                if (ret)
                        goto err_reinit;
                ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+               ice_vsi_set_napi_queues(pf->vsi[v]);
        }
 
        ret = ice_req_irq_msix_misc(pf);