Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next...
author David S. Miller <davem@davemloft.net>
Fri, 21 Feb 2020 00:00:14 +0000 (16:00 -0800)
committer David S. Miller <davem@davemloft.net>
Fri, 21 Feb 2020 00:00:14 +0000 (16:00 -0800)
Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2020-02-19

This series contains updates to e1000e and igc drivers.

Ben Dooks adds a missing cpu_to_le64() in the e1000e transmit ring flush
function.
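
The hardware reads descriptor fields in little-endian byte order, so the
CPU-native DMA address must be converted before it is handed to the NIC.
A minimal sketch of the pattern (field and variable names are
illustrative, not necessarily the exact e1000e code):

        /* Descriptors are read by the device as little-endian; without
         * cpu_to_le64() a big-endian host would hand the NIC a
         * byte-swapped buffer address.
         */
        tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);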

Jia-Ju Bai replaces a couple of udelay() calls with usleep_range() in
e1000e, in paths where sleeping is permissible (usleep_range() must
never be called while holding a spinlock).
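
For background, udelay() busy-waits and is safe in atomic context,
while usleep_range() sleeps and may only be used where sleeping is
allowed. A short illustration of the two (not the patch itself):

        /* Busy-wait for 10 microseconds: safe under a spinlock or in
         * interrupt context, but burns CPU the whole time.
         */
        udelay(10);

        /* Sleep for 10-20 microseconds: valid only in process context
         * with no spinlocks held; the range lets the scheduler coalesce
         * nearby timer expirations.
         */
        usleep_range(10, 20);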

Chen Zhou makes two functions static in igc.

Sasha finishes the legacy power management support in igc by adding
resume and schedule-suspend requests. He also adds register dump
functionality to the igc driver, and adds device ID support in e1000e
for the next generation of i219 devices. He fixes a typo in the igc
driver that referenced a device the driver does not support, and adds
the missing PTP handling on suspend now that igc has legacy power
management support. He also adds PCIe error detection, slot reset and
resume capability to igc, as well as WoL support. Lastly, he adds a
code comment to distinguish between interrupt and flag definitions.
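
For reference, PCIe error recovery of this kind hooks into the PCI core
through struct pci_error_handlers. A sketch of the usual shape, assuming
hypothetical igc callback names:

        static const struct pci_error_handlers igc_err_handler = {
                .error_detected = igc_io_error_detected, /* bus error reported */
                .slot_reset     = igc_io_slot_reset,     /* after the link is reset */
                .resume         = igc_io_resume,         /* traffic may restart */
        };

        static struct pci_driver igc_driver = {
                .name        = "igc",
                .id_table    = igc_pci_tbl,
                .probe       = igc_probe,
                .remove      = igc_remove,
                .err_handler = &igc_err_handler,
        };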

Vitaly adds device ID support in e1000e for the Tiger Lake platform,
which carries another next-generation i219 device.
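
Adding support for a new platform like this usually amounts to defining
the new PCI device ID and appending a match-table entry, the same
pattern visible in the ice_main.c hunk further down. A sketch with
placeholder values (the real Tiger Lake IDs and board type live in the
e1000e patch):

        #define E1000_DEV_ID_PCH_TGP_I219_LM    0x15F9  /* placeholder value */

        static const struct pci_device_id e1000_pci_tbl[] = {
                /* ... existing entries ... */
                { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM), board_pch_cnp },
                { 0, 0 }        /* required last entry */
        };
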
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
70 files changed:
drivers/net/bonding/bond_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
drivers/net/ethernet/intel/ice/ice_dcb_lib.h
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_devids.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/phy/broadcom.c
drivers/net/phy/phy-c45.c
drivers/net/phy/phy_device.c
drivers/net/usb/r8152.c
drivers/net/vmxnet3/vmxnet3_drv.c
include/net/ip6_checksum.h
include/net/page_pool.h
net/core/neighbour.c
net/core/page_pool.c
net/core/rtnetlink.c
net/core/xdp.c
net/ipv4/fib_trie.c
net/ipv4/sysctl_net_ipv4.c
net/wireless/nl80211.c
net/wireless/util.c
tools/testing/selftests/tc-testing/tc-tests/filters/basic.json

diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 48d5ec770b94242fddb71a02effab6fc1ee988e8..c3c524f77fcde7c021f9bad700381ea566fd2bcc 100644
@@ -1265,7 +1265,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
        skb->dev = bond->dev;
 
        if (BOND_MODE(bond) == BOND_MODE_ALB &&
-           bond->dev->priv_flags & IFF_BRIDGE_PORT &&
+           netif_is_bridge_port(bond->dev) &&
            skb->pkt_type == PACKET_HOST) {
 
                if (unlikely(skb_cow_head(skb,
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 1dcbc486eca90deb14967fc8606e0cfac67d01df..b9b4edb913c1317ddeca47354786f85964bab979 100644
@@ -1416,10 +1416,7 @@ static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
                                                         0, IPPROTO_TCP, 0);
                first->word1 |= 1 << TPD_IPV4_SHIFT;
        } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                      &ipv6_hdr(skb)->daddr,
-                                                      0, IPPROTO_TCP, 0);
+               tcp_v6_gso_csum_prep(skb);
                /* LSOv2: the first TPD only provides the packet length */
                first->adrl.l.pkt_len = skb->len;
                first->word1 |= 1 << TPD_LSO_V2_SHIFT;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4c0b1f8551dd89acc12c6958eb867a9a83579ad5..0d67b951c0b2dd7d9c1c0238f1090b7dff716247 100644
@@ -2025,10 +2025,8 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
                                                "IPV6 tso with zero data??\n");
                                goto check_sum;
                        } else
-                               tcp_hdr(skb)->check = ~csum_ipv6_magic(
-                                               &ipv6_hdr(skb)->saddr,
-                                               &ipv6_hdr(skb)->daddr,
-                                               0, IPPROTO_TCP, 0);
+                               tcp_v6_gso_csum_prep(skb);
+
                        etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
                        etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
                        etpd->pkt_len = cpu_to_le32(skb->len);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 01a50a4b21135fd2c347c6d80aadc3ca874bb5b6..d6588502a050af749d051998609bb9ece1dad089 100644
@@ -2504,12 +2504,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
                                           IPPROTO_TCP, 0);
                BNAD_UPDATE_CTR(bnad, tso4);
        } else {
-               struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
-               ipv6h->payload_len = 0;
-               tcp_hdr(skb)->check =
-                       ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
-                                        IPPROTO_TCP, 0);
+               tcp_v6_gso_csum_prep(skb);
                BNAD_UPDATE_CTR(bnad, tso6);
        }
 
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ddf60dc9ad167d913f9d2c454aacc2da1779e947..3fc858b2c87b10b72d74ff9ebbece213a6c5d0cc 100644
@@ -696,8 +696,7 @@ static void enic_preload_tcp_csum(struct sk_buff *skb)
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                        ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
-               tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                       &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+               tcp_v6_gso_csum_prep(skb);
        }
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 1d4ffc5f408ac35b17b6b199832b2f98cce80a3b..e1d88095a77e82fe4f1b701c9d543aa8094c7c75 100644
@@ -260,6 +260,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
        dev_info(&h->pdev->dev, "dump m7 info\n");
        dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
        dev_info(&h->pdev->dev, "dump mac tnl status\n");
+       dev_info(&h->pdev->dev, "dump loopback\n");
+       dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
 
        memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
        strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c03856e63320234b2d0659cd85b6a624673c2cfe..3f59a1924390f7636f3545d24006acbd2c0fd4d5 100644
@@ -736,7 +736,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
        if (ops->get_media_type)
                ops->get_media_type(handle, &media_type, &module_type);
 
-       if (cmd->base.duplex != DUPLEX_FULL &&
+       if (cmd->base.duplex == DUPLEX_HALF &&
            media_type != HNAE3_MEDIA_TYPE_COPPER) {
                netdev_err(netdev,
                           "only copper port supports half duplex!");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 67fad80035d3a76f6f5f74c95400ad05db81814e..6295cf93c3501be61013551cfc596bae6cb9f95a 100644
@@ -310,8 +310,9 @@ static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
                                  char *false_buf)
 {
        if (flag)
-               dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
-                        true_buf);
+               dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n",
+                        title_buf, index, true_buf,
+                        hdev->tm_info.pg_info[0].tc_dwrr[index]);
        else
                dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
                         false_buf);
@@ -339,7 +340,8 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
 
        ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
 
-       dev_info(&hdev->pdev->dev, "dump tc\n");
+       dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
+                hdev->tm_info.num_tc);
        dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
                 ets_weight->weight_offset);
 
@@ -1169,6 +1171,57 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
        }
 }
 
+static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
+                                   const char *cmd_buf)
+{
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+       struct hclge_config_mac_mode_cmd *req_app;
+       struct hclge_serdes_lb_cmd *req_serdes;
+       struct hclge_desc desc;
+       u8 loopback_en;
+       int ret;
+
+       req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
+       req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;
+
+       dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to dump app loopback status, ret = %d\n", ret);
+               return;
+       }
+
+       loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
+                                   HCLGE_MAC_APP_LP_B);
+       dev_info(&hdev->pdev->dev, "app loopback: %s\n",
+                loopback_en ? "on" : "off");
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to dump serdes loopback status, ret = %d\n",
+                       ret);
+               return;
+       }
+
+       loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+       dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
+                loopback_en ? "on" : "off");
+
+       loopback_en = req_serdes->enable &
+                       HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+       dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
+                loopback_en ? "on" : "off");
+
+       if (phydev)
+               dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
+                        phydev->loopback_enabled ? "on" : "off");
+}
+
 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
  * @hdev: pointer to struct hclge_dev
  */
@@ -1269,6 +1322,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 {
 #define DUMP_REG       "dump reg"
 #define DUMP_TM_MAP    "dump tm map"
+#define DUMP_LOOPBACK  "dump loopback"
 
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
@@ -1302,6 +1356,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
                                          &cmd_buf[sizeof("dump ncl_config")]);
        } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
                hclge_dbg_dump_mac_tnl_status(hdev);
+       } else if (strncmp(cmd_buf, DUMP_LOOPBACK,
+                  strlen(DUMP_LOOPBACK)) == 0) {
+               hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
        } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
                hclge_dbg_dump_qs_shaper(hdev,
                                         &cmd_buf[sizeof("dump qs shaper")]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 492bc944646372bea3db8212884e9d3914dd2a5e..51399dbed77a6702b5a33bf408f545b0c5e73778 100644
@@ -824,6 +824,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
 static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
 {
+#define HCLGE_MAC_ID_MASK      0xF
+
        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;
 
@@ -833,6 +835,7 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;
 
+       hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
        return 0;
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index f78cbb4cc85eaee14320c6db8d5b4808c3237e3c..71df23d5f1b4d15036c27ddabd7ee07fa1388c00 100644
@@ -249,6 +249,7 @@ enum HCLGE_MAC_DUPLEX {
 #define QUERY_ACTIVE_SPEED     1
 
 struct hclge_mac {
+       u8 mac_id;
        u8 phy_addr;
        u8 flag;
        u8 media_type;  /* port media type, e.g. fibre/copper/backplane */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 2bced34c19ba731b928ba9bc571b2ef09e13893f..f7103356ef56288efb05b58b7671bb8f23d465ca 100644
@@ -2715,11 +2715,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb_transport_offset(skb) - 1;
                } else if (skb_is_gso_v6(skb)) {
-                       ipv6_hdr(skb)->payload_len = 0;
-                       tcp_hdr(skb)->check =
-                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                &ipv6_hdr(skb)->daddr,
-                                                0, IPPROTO_TCP, 0);
+                       tcp_v6_gso_csum_prep(skb);
                        ipcse = 0;
                }
                ipcss = skb_network_offset(skb);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fb92002cabee6a72c1458cf4634e15275774da14..1e1625122596ba985c0dfbade94d78559073f2c5 100644
@@ -5464,10 +5464,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
                cmd_length = E1000_TXD_CMD_IP;
                ipcse = skb_transport_offset(skb) - 1;
        } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                      &ipv6_hdr(skb)->daddr,
-                                                      0, IPPROTO_TCP, 0);
+               tcp_v6_gso_csum_prep(skb);
                ipcse = 0;
        }
        ipcss = skb_network_offset(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cb10abb14e11c7749127ca7e1356fe3e719992d6..2d51ceaa2c8c0846f9cdfcbcae595dde27e876da 100644
@@ -212,6 +212,7 @@ enum ice_state {
        __ICE_SERVICE_SCHED,
        __ICE_SERVICE_DIS,
        __ICE_OICR_INTR_DIS,            /* Global OICR interrupt disabled */
+       __ICE_MDD_VF_PRINT_PENDING,     /* set when MDD event handle */
        __ICE_STATE_NBITS               /* must be last */
 };
 
@@ -340,6 +341,7 @@ enum ice_pf_flags {
        ICE_FLAG_FW_LLDP_AGENT,
        ICE_FLAG_ETHTOOL_CTXT,          /* set when ethtool holds RTNL lock */
        ICE_FLAG_LEGACY_RX,
+       ICE_FLAG_MDD_AUTO_RESET_VF,
        ICE_PF_FLAGS_NBITS              /* must be last */
 };
 
@@ -363,6 +365,8 @@ struct ice_pf {
        u16 num_vfs_supported;          /* num VFs supported for this PF */
        u16 num_vf_qps;                 /* num queue pairs per VF */
        u16 num_vf_msix;                /* num vectors per VF */
+       /* used to ratelimit the MDD event logging */
+       unsigned long last_printed_mdd_jiffies;
        DECLARE_BITMAP(state, __ICE_STATE_NBITS);
        DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
        unsigned long *avail_txqs;      /* bitmap to track PF Tx queue usage */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 75cc5a366b269e5cf2a1f4555d1b7dd31835cdec..a19cd6f5436bc10b0abb4de493cfe1c94b13a048 100644
@@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
  */
 static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
 {
-       WARN_ONCE(ice_ring_is_xdp(ring) && tc,
-                 "XDP ring can't belong to TC other than 0");
+       WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
 
        /* Idea here for calculation is that we subtract the number of queue
         * count from TC that ring belongs to from it's absolute queue index
@@ -386,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
         /* Enable Flexible Descriptors in the queue context which
          * allows this driver to select a specific receive descriptor format
          */
+       regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
        if (vsi->type != ICE_VSI_VF) {
-               regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
                regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
                        QRXFLXP_CNTXT_RXDID_IDX_M;
 
@@ -398,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
                regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
                        QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-               wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+       } else {
+               regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+                           QRXFLXP_CNTXT_RXDID_PRIO_M |
+                           QRXFLXP_CNTXT_TS_M);
        }
+       wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
 
        /* Absolute queue number out of 2K needs to be passed */
        err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 5587e9eb4cd0c1358316ad5b960ad2784e2a8480..1fe54f08f1622a3ef833d8dafc2c2abd51ae2b56 100644
@@ -6,7 +6,7 @@
 #include "ice_adminq_cmd.h"
 #include "ice_flow.h"
 
-#define ICE_PF_RESET_WAIT_COUNT        200
+#define ICE_PF_RESET_WAIT_COUNT        300
 
 /**
  * ice_set_mac_type - Sets MAC type
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 7108fb41b604296b24d7314467ff9147122324be..16656b6c3d09d9fc46e4ff535a1c0d4808f10356 100644
@@ -62,6 +62,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
        return ena_tc;
 }
 
+/**
+ * ice_dcb_get_mode - gets the DCB mode
+ * @port_info: pointer to port info structure
+ * @host: if set it's HOST if not it's MANAGED
+ */
+static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
+{
+       u8 mode;
+
+       if (host)
+               mode = DCB_CAP_DCBX_HOST;
+       else
+               mode = DCB_CAP_DCBX_LLD_MANAGED;
+
+       if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+               return (mode | DCB_CAP_DCBX_VER_CEE);
+       else
+               return (mode | DCB_CAP_DCBX_VER_IEEE);
+}
+
 /**
  * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
  * @dcbcfg: config to retrieve number of TCs from
@@ -148,6 +168,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
        }
 }
 
+/**
+ * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct
+ * @pf: pointer to the PF struct
+ * @dcbcfg: pointer to DCB config structure
+ */
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
+{
+       struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
+       u8 num_tc, total_bw = 0;
+       int i;
+
+       /* returns number of contigous TCs and 1 TC for non-contigous TCs,
+        * since at least 1 TC has to be configured
+        */
+       num_tc = ice_dcb_get_num_tc(dcbcfg);
+
+       /* no bandwidth checks required if there's only one TC, so assign
+        * all bandwidth to TC0 and return
+        */
+       if (num_tc == 1) {
+               etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+               return 0;
+       }
+
+       for (i = 0; i < num_tc; i++)
+               total_bw += etscfg->tcbwtable[i];
+
+       if (!total_bw) {
+               etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+       } else if (total_bw != ICE_TC_MAX_BW) {
+               dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /**
  * ice_pf_dcb_cfg - Apply new DCB configuration
  * @pf: pointer to the PF struct
@@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
                return ret;
        }
 
+       if (ice_dcb_bwchk(pf, new_cfg))
+               return -EINVAL;
+
        /* Store old config in case FW config fails */
        old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
        if (!old_cfg)
@@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 
                ice_cfg_sw_lldp(pf_vsi, false, true);
 
-               pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+               pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
                return 0;
        }
 
        set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
 
-       /* DCBX in FW and LLDP enabled in FW */
-       pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
+       /* DCBX/LLDP enabled in FW, set DCBNL mode advertisement */
+       pf->dcbx_cap = ice_dcb_get_mode(port_info, false);
 
        err = ice_dcb_init_cfg(pf, locked);
        if (err)
@@ -772,6 +832,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
        /* No change detected in DCBX configs */
        if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
                dev_dbg(dev, "No change detected in DCBX configuration.\n");
+               pf->dcbx_cap = ice_dcb_get_mode(pi, false);
                goto out;
        }
 
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index f15e5776f287b91c24c4e873754ceda24a3afa6c..37680e815b0287a5720e34bf66c212fd8bc0fa53 100644
@@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
 u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
 int
 ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
 void ice_pf_dcb_recfg(struct ice_pf *pf);
 void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
 int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index b61aba428adbfa44ed2fc53f445da2a2bf4355fa..c4c12414083a914454ddf445c4d004ececc618c2 100644
@@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
                new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
        }
 
-       /* max_tc is a 1-8 value count of number of TC's, not a 0-7 value
-        * for the TC's index number.  Add one to value if not zero, and
-        * for zero set it to the FW's default value
-        */
-       if (max_tc)
-               max_tc++;
-       else
-               max_tc = IEEE_8021QAZ_MAX_TCS;
+       if (ice_dcb_bwchk(pf, new_cfg)) {
+               err = -EINVAL;
+               goto ets_out;
+       }
+
+       max_tc = pf->hw.func_caps.common_cap.maxtc;
 
        new_cfg->etscfg.maxtcs = max_tc;
 
@@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
        if (err == ICE_DCB_NO_HW_CHG)
                err = ICE_DCB_HW_CHG_RST;
 
+ets_out:
        mutex_unlock(&pf->tc_mutex);
        return err;
 }
@@ -534,6 +533,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
        *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
 }
 
+/**
+ * ice_dcbnl_set_pg_tc_cfg_rx
+ * @netdev: relevant netdev struct
+ * @prio: corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: BW percentage for corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
+                          int __always_unused prio,
+                          u8 __always_unused prio_type,
+                          u8 __always_unused pgid,
+                          u8 __always_unused bw_pct,
+                          u8 __always_unused up_map)
+{
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+       dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
+}
+
 /**
  * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
  * @netdev: pointer to netdev struct
@@ -553,6 +576,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
        *bw_pct = 0;
 }
 
+/**
+ * ice_dcbnl_set_pg_bwg_cfg_rx
+ * @netdev: the corresponding netdev
+ * @pgid: corresponding TC
+ * @bw_pct: BW percentage for given TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
+                           u8 __always_unused bw_pct)
+{
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+       dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
+}
+
 /**
  * ice_dcbnl_get_cap - Get DCBX capabilities of adapter
  * @netdev: pointer to netdev struct
@@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
        .getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
        .setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
        .setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
+       .setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
+       .setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
        .getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
        .getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
        .getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index ce63017c56c7c7bd2e8f4ab146e406da1c3e10ad..9d8194671f6a6abee4d8682dd314ab6d3346208d 100644
@@ -5,12 +5,34 @@
 #define _ICE_DEVIDS_H_
 
 /* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE     0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP           0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T    0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE          0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP          0x151D
 /* Intel(R) Ethernet Controller E810-C for backplane */
 #define ICE_DEV_ID_E810C_BACKPLANE     0x1591
 /* Intel(R) Ethernet Controller E810-C for QSFP */
 #define ICE_DEV_ID_E810C_QSFP          0x1592
 /* Intel(R) Ethernet Controller E810-C for SFP */
 #define ICE_DEV_ID_E810C_SFP           0x1593
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP                0x159B
+/* Intel(R) Ethernet Connection E823-C for backplane */
+#define ICE_DEV_ID_E823C_BACKPLANE     0x188A
+/* Intel(R) Ethernet Connection E823-C for QSFP */
+#define ICE_DEV_ID_E823C_QSFP          0x188B
+/* Intel(R) Ethernet Connection E823-C for SFP */
+#define ICE_DEV_ID_E823C_SFP           0x188C
+/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823C_10G_BASE_T    0x188D
+/* Intel(R) Ethernet Connection E823-C 1GbE */
+#define ICE_DEV_ID_E823C_SGMII         0x188E
 /* Intel(R) Ethernet Connection E822-C for backplane */
 #define ICE_DEV_ID_E822C_BACKPLANE     0x1890
 /* Intel(R) Ethernet Connection E822-C for QSFP */
@@ -21,8 +43,8 @@
 #define ICE_DEV_ID_E822C_10G_BASE_T    0x1893
 /* Intel(R) Ethernet Connection E822-C 1GbE */
 #define ICE_DEV_ID_E822C_SGMII         0x1894
-/* Intel(R) Ethernet Connection E822-X for backplane */
-#define ICE_DEV_ID_E822X_BACKPLANE     0x1897
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE     0x1897
 /* Intel(R) Ethernet Connection E822-L for SFP */
 #define ICE_DEV_ID_E822L_SFP           0x1898
 /* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 583e07fffd5f6e8ebac64b607589c3fd09c7b7aa..4f625c8dfdb5be4c40236451dfc7dd30d4b87817 100644
@@ -157,6 +157,7 @@ struct ice_priv_flag {
 static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
        ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
        ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+       ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
        ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
 };
 
@@ -672,7 +673,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
 
        test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
        if (!test_vsi) {
-               netdev_err(netdev, "Failed to create a VSI for the loopback test");
+               netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
                return 1;
        }
 
@@ -731,7 +732,7 @@ lbtest_free_frame:
        devm_kfree(dev, tx_frame);
 remove_mac_filters:
        if (ice_remove_mac(&pf->hw, &tmp_list))
-               netdev_err(netdev, "Could not remove MAC filter for the test VSI");
+               netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
 free_mac_list:
        ice_free_fltr_list(dev, &tmp_list);
 lbtest_mac_dis:
@@ -744,7 +745,7 @@ lbtest_rings_dis:
 lbtest_vsi_close:
        test_vsi->netdev = NULL;
        if (ice_vsi_release(test_vsi))
-               netdev_err(netdev, "Failed to remove the test VSI");
+               netdev_err(netdev, "Failed to remove the test VSI\n");
 
        return ret;
 }
@@ -834,7 +835,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
                        int status = ice_open(netdev);
 
                        if (status) {
-                               dev_err(dev, "Could not open device %s, err %d",
+                               dev_err(dev, "Could not open device %s, err %d\n",
                                        pf->int_name, status);
                        }
                }
index 99208946224c0f52287bded9ae82e57b74eb345b..42bac3ec55263da6304777245c0c21d8fa319e88 100644 (file)
@@ -3470,6 +3470,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
        return 0;
 }
 
+/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
+{
+       struct ice_chs_chg *pos, *tmp;
+
+       list_for_each_entry_safe(tmp, pos, chg, list_entry)
+               if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+                       list_del(&tmp->list_entry);
+                       devm_kfree(ice_hw_to_dev(hw), tmp);
+               }
+}
+
 /**
  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
  * @hw: pointer to the HW struct
@@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
        enum ice_status status;
        struct ice_chs_chg *p;
 
-       /* Default: enable means change the low flag bit to don't care */
-       u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+       u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+       u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
        u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
-       u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
 
        /* if disabling, free the TCAM */
        if (!enable) {
-               status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+               status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+               /* if we have already created a change for this TCAM entry, then
+                * we need to remove that entry, in order to prevent writing to
+                * a TCAM entry we no longer will have ownership of.
+                */
+               ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
                tcam->tcam_idx = 0;
                tcam->in_use = 0;
                return status;
@@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
  * @blk: hardware block
  * @vsig: the VSIG to which this profile is to be added
  * @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
  * @chg: the change list
  */
 static enum ice_status
 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
-                    struct list_head *chg)
+                    bool rev, struct list_head *chg)
 {
        /* Masks that ignore flags */
        u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
        struct ice_prof_map *map;
        struct ice_vsig_prof *t;
        struct ice_chs_chg *p;
-       u16 i;
+       u16 vsig_idx, i;
 
        /* Get the details on the profile specified by the handle ID */
        map = ice_search_prof_id(hw, blk, hdl);
@@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
        }
 
        /* add profile to VSIG */
-       list_add(&t->list,
-                &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+       vsig_idx = vsig & ICE_VSIG_IDX_M;
+       if (rev)
+               list_add_tail(&t->list,
+                             &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+       else
+               list_add(&t->list,
+                        &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
 
        return 0;
 
@@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
        if (status)
                goto err_ice_create_prof_id_vsig;
 
-       status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+       status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
        if (status)
                goto err_ice_create_prof_id_vsig;
 
@@ -3753,11 +3782,13 @@ err_ice_create_prof_id_vsig:
  * @blk: hardware block
  * @vsi: the initial VSI that will be in VSIG
  * @lst: the list of profile that will be added to the VSIG
+ * @new_vsig: return of new VSIG
  * @chg: the change list
  */
 static enum ice_status
 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
-                        struct list_head *lst, struct list_head *chg)
+                        struct list_head *lst, u16 *new_vsig,
+                        struct list_head *chg)
 {
        struct ice_vsig_prof *t;
        enum ice_status status;
@@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
                return status;
 
        list_for_each_entry(t, lst, list) {
+               /* Reverse the order here since we are copying the list */
                status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
-                                             chg);
+                                             true, chg);
                if (status)
                        return status;
        }
 
+       *new_vsig = vsig;
+
        return 0;
 }
 
@@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                         * not sharing entries and we can simply add the new
                         * profile to the VSIG.
                         */
-                       status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+                       status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+                                                     &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
 
@@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                } else {
                        /* No match, so we need a new VSIG */
                        status = ice_create_vsig_from_lst(hw, blk, vsi,
-                                                         &union_lst, &chg);
+                                                         &union_lst, &vsig,
+                                                         &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
 
@@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                                 * new VSIG and TCAM entries
                                 */
                                status = ice_create_vsig_from_lst(hw, blk, vsi,
-                                                                 &copy, &chg);
+                                                                 &copy, &vsig,
+                                                                 &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;
 
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index b99ebfefe06beaf528a04fb8b11335b49421976f..1d37a9f02c1ca429efcb33bf73e2d92315b3aedc 100644
@@ -85,6 +85,7 @@
 #define QRXFLXP_CNTXT_RXDID_IDX_M              ICE_M(0x3F, 0)
 #define QRXFLXP_CNTXT_RXDID_PRIO_S             8
 #define QRXFLXP_CNTXT_RXDID_PRIO_M             ICE_M(0x7, 8)
+#define QRXFLXP_CNTXT_TS_M                     BIT(11)
 #define GLGEN_RSTAT                            0x000B8188
 #define GLGEN_RSTAT_DEVSTATE_M                 ICE_M(0x3, 0)
 #define GLGEN_RSTCTL                           0x000B8180
 #define VPLAN_TX_QBASE_VFNUMQ_M                        ICE_M(0xFF, 16)
 #define VPLAN_TXQ_MAPENA(_VF)                  (0x00073800 + ((_VF) * 4))
 #define VPLAN_TXQ_MAPENA_TX_ENA_M              BIT(0)
+#define GL_MDCK_TX_TDPU                                0x00049348
+#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
 #define GL_MDET_RX                             0x00294C00
 #define GL_MDET_RX_QNUM_S                      0
 #define GL_MDET_RX_QNUM_M                      ICE_M(0x7FFF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 255317e4b1f31538d755c5e2e93f9d2810063638..3aa3fc37c70ebf4e4381584aafe92643f00edf6d 100644
@@ -1187,20 +1187,28 @@ static void ice_service_timer(struct timer_list *t)
  * ice_handle_mdd_event - handle malicious driver detect event
  * @pf: pointer to the PF structure
  *
- * Called from service task. OICR interrupt handler indicates MDD event
+ * Called from service task. OICR interrupt handler indicates MDD event.
+ * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
+ * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
+ * disable the queue, the PF can be configured to reset the VF using ethtool
+ * private flag mdd-auto-reset-vf.
  */
 static void ice_handle_mdd_event(struct ice_pf *pf)
 {
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
-       bool mdd_detected = false;
        u32 reg;
        int i;
 
-       if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+       if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
+               /* Since the VF MDD event logging is rate limited, check if
+                * there are pending MDD events.
+                */
+               ice_print_vfs_mdd_events(pf);
                return;
+       }
 
-       /* find what triggered the MDD event */
+       /* find what triggered an MDD event */
        reg = rd32(hw, GL_MDET_TX_PQM);
        if (reg & GL_MDET_TX_PQM_VALID_M) {
                u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
@@ -1216,7 +1224,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
                        dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
                                 event, queue, pf_num, vf_num);
                wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
-               mdd_detected = true;
        }
 
        reg = rd32(hw, GL_MDET_TX_TCLAN);
@@ -1234,7 +1241,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
                        dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
                                 event, queue, pf_num, vf_num);
                wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
-               mdd_detected = true;
        }
 
        reg = rd32(hw, GL_MDET_RX);
@@ -1252,85 +1258,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
                        dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
                                 event, queue, pf_num, vf_num);
                wr32(hw, GL_MDET_RX, 0xffffffff);
-               mdd_detected = true;
        }
 
-       if (mdd_detected) {
-               bool pf_mdd_detected = false;
-
-               reg = rd32(hw, PF_MDET_TX_PQM);
-               if (reg & PF_MDET_TX_PQM_VALID_M) {
-                       wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
-                       dev_info(dev, "TX driver issue detected, PF reset issued\n");
-                       pf_mdd_detected = true;
-               }
+       /* check to see if this PF caused an MDD event */
+       reg = rd32(hw, PF_MDET_TX_PQM);
+       if (reg & PF_MDET_TX_PQM_VALID_M) {
+               wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+               if (netif_msg_tx_err(pf))
+                       dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
+       }
 
-               reg = rd32(hw, PF_MDET_TX_TCLAN);
-               if (reg & PF_MDET_TX_TCLAN_VALID_M) {
-                       wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
-                       dev_info(dev, "TX driver issue detected, PF reset issued\n");
-                       pf_mdd_detected = true;
-               }
+       reg = rd32(hw, PF_MDET_TX_TCLAN);
+       if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+               wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+               if (netif_msg_tx_err(pf))
+                       dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
+       }
 
-               reg = rd32(hw, PF_MDET_RX);
-               if (reg & PF_MDET_RX_VALID_M) {
-                       wr32(hw, PF_MDET_RX, 0xFFFF);
-                       dev_info(dev, "RX driver issue detected, PF reset issued\n");
-                       pf_mdd_detected = true;
-               }
-               /* Queue belongs to the PF initiate a reset */
-               if (pf_mdd_detected) {
-                       set_bit(__ICE_NEEDS_RESTART, pf->state);
-                       ice_service_task_schedule(pf);
-               }
+       reg = rd32(hw, PF_MDET_RX);
+       if (reg & PF_MDET_RX_VALID_M) {
+               wr32(hw, PF_MDET_RX, 0xFFFF);
+               if (netif_msg_rx_err(pf))
+                       dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
        }
 
-       /* check to see if one of the VFs caused the MDD */
+       /* Check to see if one of the VFs caused an MDD event, and then
+        * increment counters and set print pending
+        */
        ice_for_each_vf(pf, i) {
                struct ice_vf *vf = &pf->vf[i];
 
-               bool vf_mdd_detected = false;
-
                reg = rd32(hw, VP_MDET_TX_PQM(i));
                if (reg & VP_MDET_TX_PQM_VALID_M) {
                        wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
-                       vf_mdd_detected = true;
-                       dev_info(dev, "TX driver issue detected on VF %d\n",
-                                i);
+                       vf->mdd_tx_events.count++;
+                       set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+                       if (netif_msg_tx_err(pf))
+                               dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
+                                        i);
                }
 
                reg = rd32(hw, VP_MDET_TX_TCLAN(i));
                if (reg & VP_MDET_TX_TCLAN_VALID_M) {
                        wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
-                       vf_mdd_detected = true;
-                       dev_info(dev, "TX driver issue detected on VF %d\n",
-                                i);
+                       vf->mdd_tx_events.count++;
+                       set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+                       if (netif_msg_tx_err(pf))
+                               dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
+                                        i);
                }
 
                reg = rd32(hw, VP_MDET_TX_TDPU(i));
                if (reg & VP_MDET_TX_TDPU_VALID_M) {
                        wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
-                       vf_mdd_detected = true;
-                       dev_info(dev, "TX driver issue detected on VF %d\n",
-                                i);
+                       vf->mdd_tx_events.count++;
+                       set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+                       if (netif_msg_tx_err(pf))
+                               dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
+                                        i);
                }
 
                reg = rd32(hw, VP_MDET_RX(i));
                if (reg & VP_MDET_RX_VALID_M) {
                        wr32(hw, VP_MDET_RX(i), 0xFFFF);
-                       vf_mdd_detected = true;
-                       dev_info(dev, "RX driver issue detected on VF %d\n",
-                                i);
-               }
-
-               if (vf_mdd_detected) {
-                       vf->num_mdd_events++;
-                       if (vf->num_mdd_events &&
-                           vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
-                               dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
-                                        i, vf->num_mdd_events);
+                       vf->mdd_rx_events.count++;
+                       set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+                       if (netif_msg_rx_err(pf))
+                               dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
+                                        i);
+
+                       /* Since the queue is disabled on VF Rx MDD events, the
+                        * PF can be configured to reset the VF through ethtool
+                        * private flag mdd-auto-reset-vf.
+                        */
+                       if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+                               ice_reset_vf(&pf->vf[i], false);
                }
        }
+
+       ice_print_vfs_mdd_events(pf);
 }
 
 /**
@@ -1918,8 +1924,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
        if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
                ret = ice_down(vsi);
                if (ret) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Preparing device for XDP attach failed");
+                       NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
                        return ret;
                }
        }
@@ -1928,13 +1933,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                vsi->num_xdp_txq = vsi->alloc_txq;
                xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
                if (xdp_ring_err)
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Setting up XDP Tx resources failed");
+                       NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
        } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
                xdp_ring_err = ice_destroy_xdp_rings(vsi);
                if (xdp_ring_err)
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Freeing XDP Tx resources failed");
+                       NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
        } else {
                ice_vsi_assign_bpf_prog(vsi, prog);
        }
@@ -1967,8 +1970,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        struct ice_vsi *vsi = np->vsi;
 
        if (vsi->type != ICE_VSI_PF) {
-               NL_SET_ERR_MSG_MOD(xdp->extack,
-                                  "XDP can be loaded only on PF VSI");
+               NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
                return -EINVAL;
        }
 
@@ -1995,6 +1997,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
        struct ice_hw *hw = &pf->hw;
        u32 val;
 
+       /* Disable anti-spoof detection interrupt to prevent spurious event
+        * interrupts during a function reset. Anti-spoof functionally is
+        * still supported.
+        */
+       val = rd32(hw, GL_MDCK_TX_TDPU);
+       val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
+       wr32(hw, GL_MDCK_TX_TDPU, val);
+
        /* clear things first */
        wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
        rd32(hw, PFINT_OICR);           /* read to clear */
@@ -3542,15 +3552,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
        /* required last entry */
        { 0, }
 };
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 7525ac50742e08fc94cd888262e46481b2a01b14..f6e25db22c235d836b87791774366e1902af2acc 100644
@@ -289,17 +289,31 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
 
        nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
 
+       switch (hw->device_id) {
        /* the following devices do not have boot_cfg_tlv yet */
-       if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
-           hw->device_id == ICE_DEV_ID_E822C_QSFP ||
-           hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
-           hw->device_id == ICE_DEV_ID_E822C_SGMII ||
-           hw->device_id == ICE_DEV_ID_E822C_SFP ||
-           hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
-           hw->device_id == ICE_DEV_ID_E822L_SFP ||
-           hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
-           hw->device_id == ICE_DEV_ID_E822L_SGMII)
+       case ICE_DEV_ID_E823C_BACKPLANE:
+       case ICE_DEV_ID_E823C_QSFP:
+       case ICE_DEV_ID_E823C_SFP:
+       case ICE_DEV_ID_E823C_10G_BASE_T:
+       case ICE_DEV_ID_E823C_SGMII:
+       case ICE_DEV_ID_E822C_BACKPLANE:
+       case ICE_DEV_ID_E822C_QSFP:
+       case ICE_DEV_ID_E822C_10G_BASE_T:
+       case ICE_DEV_ID_E822C_SGMII:
+       case ICE_DEV_ID_E822C_SFP:
+       case ICE_DEV_ID_E822L_BACKPLANE:
+       case ICE_DEV_ID_E822L_SFP:
+       case ICE_DEV_ID_E822L_10G_BASE_T:
+       case ICE_DEV_ID_E822L_SGMII:
+       case ICE_DEV_ID_E823L_BACKPLANE:
+       case ICE_DEV_ID_E823L_SFP:
+       case ICE_DEV_ID_E823L_10G_BASE_T:
+       case ICE_DEV_ID_E823L_1GBE:
+       case ICE_DEV_ID_E823L_QSFP:
                return status;
+       default:
+               break;
+       }
 
        status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
                                        ICE_SR_BOOT_CFG_PTR);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a21f9d2edbbb970f05de605be349f9cd975ad16e..de6317e9172465ca5466301b1eb4438d2cbb09a5 100644
@@ -171,6 +171,11 @@ static void ice_free_vf_res(struct ice_vf *vf)
        }
 
        last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
+
+       /* clear VF MDD event information */
+       memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+       memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
        /* Disable interrupts so that VF starts in a known state */
        for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
                wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
@@ -1175,7 +1180,7 @@ static bool ice_is_vf_disabled(struct ice_vf *vf)
  *
  * Returns true if the VF is reset, false otherwise.
  */
-static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
 {
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
@@ -2013,7 +2018,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
 
        status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
        if (status) {
-               dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+               dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
                        ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
                ret = -EIO;
                goto out;
@@ -3529,3 +3534,52 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
 
        return 0;
 }
+
+/**
+ * ice_print_vfs_mdd_event - print VFs malicious driver detect event
+ * @pf: pointer to the PF structure
+ *
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
+ */
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
+{
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_hw *hw = &pf->hw;
+       int i;
+
+       /* check that there are pending MDD events to print */
+       if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
+               return;
+
+       /* VF MDD event logs are rate limited to one second intervals */
+       if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
+               return;
+
+       pf->last_printed_mdd_jiffies = jiffies;
+
+       ice_for_each_vf(pf, i) {
+               struct ice_vf *vf = &pf->vf[i];
+
+               /* only print Rx MDD event message if there are new events */
+               if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+                       vf->mdd_rx_events.last_printed =
+                                                       vf->mdd_rx_events.count;
+
+                       dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+                                vf->mdd_rx_events.count, hw->pf_id, i,
+                                vf->dflt_lan_addr.addr,
+                                test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+                                         ? "on" : "off");
+               }
+
+               /* only print Tx MDD event message if there are new events */
+               if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+                       vf->mdd_tx_events.last_printed =
+                                                       vf->mdd_tx_events.count;
+
+                       dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+                                vf->mdd_tx_events.count, hw->pf_id, i,
+                                vf->dflt_lan_addr.addr);
+               }
+       }
+}
index 474b2613f09c8d5ff482a2024dd472ef237053bc..656f1909b38f27a9e306583bb9037a5ffd9100c4 100644
@@ -55,6 +55,13 @@ enum ice_virtchnl_cap {
        ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
 };
 
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+       u16 count;                      /* total count of Rx|Tx events */
+       u16 last_printed;               /* count value at the last print */
+};
+
 /* VF information structure */
 struct ice_vf {
        struct ice_pf *pf;
@@ -83,13 +90,14 @@ struct ice_vf {
        unsigned int tx_rate;           /* Tx bandwidth limit in Mbps */
        DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
 
-       u64 num_mdd_events;             /* number of MDD events detected */
        u64 num_inval_msgs;             /* number of continuous invalid msgs */
        u64 num_valid_msgs;             /* number of valid msgs detected */
        unsigned long vf_caps;          /* VF's adv. capabilities */
        u8 num_req_qs;                  /* num of queue pairs requested by VF */
        u16 num_mac;
        u16 num_vf_qs;                  /* num of queue configured per VF */
+       struct ice_mdd_vf_events mdd_rx_events;
+       struct ice_mdd_vf_events mdd_tx_events;
 };
 
 #ifdef CONFIG_PCI_IOV
@@ -104,6 +112,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
 void ice_vc_notify_link_state(struct ice_pf *pf);
 void ice_vc_notify_reset(struct ice_pf *pf);
 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
 
 int
 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -123,7 +132,7 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id,
                 struct ifla_vf_stats *vf_stats);
 void
 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
-
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
 #else /* CONFIG_PCI_IOV */
 #define ice_process_vflr_event(pf) do {} while (0)
 #define ice_free_vfs(pf) do {} while (0)
@@ -132,6 +141,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
 #define ice_vc_notify_reset(pf) do {} while (0)
 #define ice_set_vf_state_qs_dis(vf) do {} while (0)
 #define ice_vf_lan_overflow_event(pf, event) do {} while (0)
+#define ice_print_vfs_mdd_events(pf) do {} while (0)
 
 static inline bool
 ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -140,6 +150,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf,
        return true;
 }
 
+static inline bool
+ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
+{
+       return true;
+}
+
 static inline int
 ice_sriov_configure(struct pci_dev __always_unused *pdev,
                    int __always_unused num_vfs)
index 55d994f2d71e0f8a301a2cd411dd8b59fe2e156e..8279db15e8707e99064da6f129a7884db84e3c9e 100644
@@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
        if (if_running) {
                ret = ice_qp_dis(vsi, qid);
                if (ret) {
-                       netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+                       netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
                        goto xsk_umem_if_up;
                }
        }
@@ -471,11 +471,11 @@ xsk_umem_if_up:
                if (!ret && umem_present)
                        napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
                else if (ret)
-                       netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+                       netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }
 
        if (umem_failure) {
-               netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+               netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
                           umem_present ? "en" : "dis", umem_failure);
                return umem_failure;
        }
@@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
        ice_finalize_xdp_rx(rx_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
 
+       if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+               if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+                       xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+               else
+                       xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+
+               return (int)total_rx_packets;
+       }
+
        return failure ? budget : (int)total_rx_packets;
 }
 
@@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
        if (tx_desc) {
                ice_xdp_ring_update_tail(xdp_ring);
                xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+               if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+                       xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
        }
 
        return budget > 0 && work_done;
@@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
        if (xsk_frames)
                xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
 
+       if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
+               if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
+                       xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+               else
+                       xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
+       }
+
        ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
        xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
 
index 2e4975572e9f4656388595e14b573b3ae109b4c6..de3c7ce9353c37c0ed324c407a8b2c1e8318d8ca 100644
@@ -2077,12 +2077,7 @@ jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
                                                                IPPROTO_TCP,
                                                                0);
                } else {
-                       struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
-                       tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
-                                                               &ip6h->daddr, 0,
-                                                               IPPROTO_TCP,
-                                                               0);
+                       tcp_v6_gso_csum_prep(skb);
                }
 
                return 0;
index 8e1feb678cea7f108ffe18180234c3c324db595e..1c391f63a26fd639de71dd1c2306afe70146b79c 100644
@@ -1956,7 +1956,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
                if (!data || !(rx_desc->buf_phys_addr))
                        continue;
 
-               page_pool_put_page(rxq->page_pool, data, false);
+               page_pool_put_full_page(rxq->page_pool, data, false);
        }
        if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
                xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2154,9 +2154,9 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                err = xdp_do_redirect(pp->dev, xdp, prog);
                if (err) {
                        ret = MVNETA_XDP_DROPPED;
-                       __page_pool_put_page(rxq->page_pool,
-                                            virt_to_head_page(xdp->data),
-                                            len, true);
+                       page_pool_put_page(rxq->page_pool,
+                                          virt_to_head_page(xdp->data), len,
+                                          true);
                } else {
                        ret = MVNETA_XDP_REDIR;
                        stats->xdp_redirect++;
@@ -2166,9 +2166,9 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
        case XDP_TX:
                ret = mvneta_xdp_xmit_back(pp, xdp);
                if (ret != MVNETA_XDP_TX)
-                       __page_pool_put_page(rxq->page_pool,
-                                            virt_to_head_page(xdp->data),
-                                            len, true);
+                       page_pool_put_page(rxq->page_pool,
+                                          virt_to_head_page(xdp->data), len,
+                                          true);
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2177,9 +2177,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                trace_xdp_exception(pp->dev, prog, act);
                /* fall through */
        case XDP_DROP:
-               __page_pool_put_page(rxq->page_pool,
-                                    virt_to_head_page(xdp->data),
-                                    len, true);
+               page_pool_put_page(rxq->page_pool,
+                                  virt_to_head_page(xdp->data), len, true);
                ret = MVNETA_XDP_DROPPED;
                stats->xdp_drop++;
                break;
index e9f791c43f2020d790eb00e23059e2d7ea517205..0d630096a28cb7882cbd17484de38f7897eb9a15 100644
@@ -82,7 +82,7 @@ struct mlxsw_core {
        struct mlxsw_core_port *ports;
        unsigned int max_ports;
        bool fw_flash_in_progress;
-       unsigned long driver_priv[0];
+       unsigned long driver_priv[];
        /* driver_priv has to be always the last item */
 };
 
index feb4672a5ac04d8f498ed897c6370b144909a1d2..bd2207f6072266c8e4436849ab9c33e5e2920b58 100644
@@ -72,7 +72,7 @@ struct mlxsw_afk_key_info {
                                                      * is index inside "blocks"
                                                      */
        struct mlxsw_afk_element_usage elusage;
-       const struct mlxsw_afk_block *blocks[0];
+       const struct mlxsw_afk_block *blocks[];
 };
 
 static bool
index 7358b5bc7eb61ccc07cac43bd334058370a9c7f6..d78e790ba94ab47aa08d465a03d96dae047df291 100644
@@ -6316,7 +6316,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                        return -EINVAL;
                }
                if (netif_is_macvlan(upper_dev) &&
-                   !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+                   !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
@@ -6472,7 +6472,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                        return -EINVAL;
                }
                if (netif_is_macvlan(upper_dev) &&
-                   !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+                   !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
@@ -6549,7 +6549,7 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
                if (!info->linking)
                        break;
                if (netif_is_macvlan(upper_dev) &&
-                   !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+                   !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
@@ -6609,7 +6609,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
                if (!info->linking)
                        break;
                if (netif_is_macvlan(upper_dev) &&
-                   !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+                   !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
index 4c3d39223a46aa39e9f2690c8a00d95c2627aa17..bed86f4825a88fef11cdf5720a4b3faf989e7646 100644
@@ -168,12 +168,8 @@ struct mlxsw_sp {
        struct notifier_block netdevice_nb;
        struct mlxsw_sp_ptp_clock *clock;
        struct mlxsw_sp_ptp_state *ptp_state;
-
        struct mlxsw_sp_counter_pool *counter_pool;
-       struct {
-               struct mlxsw_sp_span_entry *entries;
-               int entries_count;
-       } span;
+       struct mlxsw_sp_span *span;
        const struct mlxsw_fw_rev *req_rev;
        const char *fw_filename;
        const struct mlxsw_sp_kvdl_ops *kvdl_ops;
@@ -567,10 +563,10 @@ void
 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
                                 struct net_device *dev);
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
-                                             const struct net_device *dev);
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+                        const struct net_device *dev);
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
                                      enum mlxsw_sp_l3proto ul_proto,
                                      const union mlxsw_sp_l3addr *ul_sip,
index 09ee0a80774746c08f84bc32012c471b245c4c08..a9fff8adc75e720fdf7fe1dac14a653a175372a3 100644
@@ -60,7 +60,7 @@ static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
 
 struct mlxsw_sp1_kvdl_part {
        struct mlxsw_sp1_kvdl_part_info info;
-       unsigned long usage[0]; /* Entries */
+       unsigned long usage[];  /* Entries */
 };
 
 struct mlxsw_sp1_kvdl {
index 8d14770766b475a633b42ae395fbd19e45f28e42..3a73d654017fe50a8106aeedada52bf394785efa 100644
@@ -45,7 +45,7 @@ struct mlxsw_sp2_kvdl_part {
        unsigned int usage_bit_count;
        unsigned int indexes_per_usage_bit;
        unsigned int last_allocated_bit;
-       unsigned long usage[0]; /* Usage bits */
+       unsigned long usage[];  /* Usage bits */
 };
 
 struct mlxsw_sp2_kvdl {
index 3d3cca5961163aec45a66bd96d3435ff1e598d7a..9368b93dab3878010ddf1b59a3c4d3561b63d687 100644
@@ -58,7 +58,7 @@ struct mlxsw_sp_acl_ruleset {
        struct mlxsw_sp_acl_ruleset_ht_key ht_key;
        struct rhashtable rule_ht;
        unsigned int ref_count;
-       unsigned long priv[0];
+       unsigned long priv[];
        /* priv has to be always the last item */
 };
 
@@ -71,7 +71,7 @@ struct mlxsw_sp_acl_rule {
        u64 last_used;
        u64 last_packets;
        u64 last_bytes;
-       unsigned long priv[0];
+       unsigned long priv[];
        /* priv has to be always the last item */
 };
 
index 3a2de13fcb68a1807ff725c593166f16cb16906b..dbd3bebf11eca2466682d15a780af3bd4fad66de 100644
@@ -13,7 +13,7 @@
 struct mlxsw_sp_acl_bf {
        struct mutex lock; /* Protects Bloom Filter updates. */
        unsigned int bank_size;
-       refcount_t refcnt[0];
+       refcount_t refcnt[];
 };
 
 /* Bloom filter uses a crc-16 hash over chunks of data which contain 4 key
index e993159e8e4cd7f8c16633ebcdc06a87a3a67be9..430da69003d838f7978384c71d3205d40c4276ee 100644
@@ -224,7 +224,7 @@ struct mlxsw_sp_acl_tcam_vchunk;
 struct mlxsw_sp_acl_tcam_chunk {
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_tcam_region *region;
-       unsigned long priv[0];
+       unsigned long priv[];
        /* priv has to be always the last item */
 };
 
@@ -243,7 +243,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
 struct mlxsw_sp_acl_tcam_entry {
        struct mlxsw_sp_acl_tcam_ventry *ventry;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
-       unsigned long priv[0];
+       unsigned long priv[];
        /* priv has to be always the last item */
 };
 
index 5965913565a5dbca813417781fee61f6a0d45c13..96437992b102a5627cbc6589809d545f225c7fd5 100644
@@ -20,7 +20,7 @@ struct mlxsw_sp_acl_tcam {
        struct mutex lock; /* guards vregion list */
        struct list_head vregion_list;
        u32 vregion_rehash_intrvl;   /* ms */
-       unsigned long priv[0];
+       unsigned long priv[];
        /* priv has to be always the last item */
 };
 
@@ -86,7 +86,7 @@ struct mlxsw_sp_acl_tcam_region {
        char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
        struct mlxsw_afk_key_info *key_info;
        struct mlxsw_sp *mlxsw_sp;
-       unsigned long priv[0];
+       unsigned long priv[];
        /* priv has to be always the last item */
 };
 
index 83c2e1e5f216afcdd63cb4e720a080b62766e737..6a02ef9ec00edd21d92fc07d71cea6f568af3a22 100644
@@ -3,6 +3,7 @@
 
 #include <linux/kernel.h>
 #include <linux/bitops.h>
+#include <linux/spinlock.h>
 
 #include "spectrum_cnt.h"
 
@@ -18,6 +19,7 @@ struct mlxsw_sp_counter_sub_pool {
 struct mlxsw_sp_counter_pool {
        unsigned int pool_size;
        unsigned long *usage; /* Usage bitmap */
+       spinlock_t counter_pool_lock; /* Protects counter pool allocations */
        struct mlxsw_sp_counter_sub_pool *sub_pools;
 };
 
@@ -87,6 +89,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return -ENOMEM;
+       spin_lock_init(&pool->counter_pool_lock);
 
        pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
        map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
@@ -139,25 +142,35 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_counter_sub_pool *sub_pool;
        unsigned int entry_index;
        unsigned int stop_index;
-       int i;
+       int i, err;
 
        sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
        stop_index = sub_pool->base_index + sub_pool->size;
        entry_index = sub_pool->base_index;
 
+       spin_lock(&pool->counter_pool_lock);
        entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
-       if (entry_index == stop_index)
-               return -ENOBUFS;
+       if (entry_index == stop_index) {
+               err = -ENOBUFS;
+               goto err_alloc;
+       }
        /* The sub-pools can contain non-integer number of entries
         * so we must check for overflow
         */
-       if (entry_index + sub_pool->entry_size > stop_index)
-               return -ENOBUFS;
+       if (entry_index + sub_pool->entry_size > stop_index) {
+               err = -ENOBUFS;
+               goto err_alloc;
+       }
        for (i = 0; i < sub_pool->entry_size; i++)
                __set_bit(entry_index + i, pool->usage);
+       spin_unlock(&pool->counter_pool_lock);
 
        *p_counter_index = entry_index;
        return 0;
+
+err_alloc:
+       spin_unlock(&pool->counter_pool_lock);
+       return err;
 }
 
 void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
@@ -171,6 +184,8 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
        if (WARN_ON(counter_index >= pool->pool_size))
                return;
        sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+       spin_lock(&pool->counter_pool_lock);
        for (i = 0; i < sub_pool->entry_size; i++)
                __clear_bit(counter_index + i, pool->usage);
+       spin_unlock(&pool->counter_pool_lock);
 }
index 1e4cdee7bcd7d2e311cb020886f936a52ab90a71..20d72f1c0ceed2aa467ad77daaac686c2cf4c092 100644
@@ -2,13 +2,15 @@
 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
 
 #include <linux/kernel.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 
 #include "spectrum.h"
 
 struct mlxsw_sp_kvdl {
        const struct mlxsw_sp_kvdl_ops *kvdl_ops;
-       unsigned long priv[0];
+       struct mutex kvdl_lock; /* Protects kvdl allocations */
+       unsigned long priv[];
        /* priv has to be always the last item */
 };
 
@@ -22,6 +24,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
                       GFP_KERNEL);
        if (!kvdl)
                return -ENOMEM;
+       mutex_init(&kvdl->kvdl_lock);
        kvdl->kvdl_ops = kvdl_ops;
        mlxsw_sp->kvdl = kvdl;
 
@@ -31,6 +34,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
        return 0;
 
 err_init:
+       mutex_destroy(&kvdl->kvdl_lock);
        kfree(kvdl);
        return err;
 }
@@ -40,6 +44,7 @@ void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
        struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
 
        kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+       mutex_destroy(&kvdl->kvdl_lock);
        kfree(kvdl);
 }
 
@@ -48,9 +53,14 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
                        unsigned int entry_count, u32 *p_entry_index)
 {
        struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+       int err;
+
+       mutex_lock(&kvdl->kvdl_lock);
+       err = kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+                                   entry_count, p_entry_index);
+       mutex_unlock(&kvdl->kvdl_lock);
 
-       return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
-                                    entry_count, p_entry_index);
+       return err;
 }
 
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
@@ -59,8 +69,10 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
 
+       mutex_lock(&kvdl->kvdl_lock);
        kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
                             entry_count, entry_index);
+       mutex_unlock(&kvdl->kvdl_lock);
 }
 
 int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
index 54275624718baca0111d2dfd3bf168fd063e06cf..423eedebcd22a3bc646d84eaff1c1964b6e45731 100644
@@ -68,7 +68,7 @@ struct mlxsw_sp_mr_table {
        struct list_head route_list;
        struct rhashtable route_ht;
        const struct mlxsw_sp_mr_table_ops *ops;
-       char catchall_route_priv[0];
+       char catchall_route_priv[];
        /* catchall_route_priv has to be always the last item */
 };
 
index 2153bcc4b5853ab1bc873bf2bbbd614d30089541..eced553fd4effa9bc27ccc294aa2a0f412d339a3 100644
@@ -67,7 +67,7 @@ struct mlxsw_sp_nve_mc_record {
        struct mlxsw_sp_nve_mc_list *mc_list;
        const struct mlxsw_sp_nve_mc_record_ops *ops;
        u32 kvdl_index;
-       struct mlxsw_sp_nve_mc_entry entries[0];
+       struct mlxsw_sp_nve_mc_entry entries[];
 };
 
 struct mlxsw_sp_nve_mc_list {
@@ -744,6 +744,8 @@ static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
        if (nve->num_nve_tunnels++ != 0)
                return 0;
 
+       nve->config = *config;
+
        err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
                                  &nve->tunnel_index);
        if (err)
@@ -760,6 +762,7 @@ err_ops_init:
        mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
                           nve->tunnel_index);
 err_kvdl_alloc:
+       memset(&nve->config, 0, sizeof(nve->config));
        nve->num_nve_tunnels--;
        return err;
 }
@@ -840,8 +843,6 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
                goto err_fid_vni_set;
        }
 
-       nve->config = config;
-
        err = ops->fdb_replay(params->dev, params->vni, extack);
        if (err)
                goto err_fdb_replay;
index def75d7fcd069360acba5718916b0f8d0dfd3a15..634a9a949777d7a0b98a88aef3a47665a84d8e81 100644
@@ -145,6 +145,9 @@ struct mlxsw_sp_rif_ops {
        void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
 };
 
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+                        const struct net_device *dev);
 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
@@ -988,17 +991,23 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
        struct ip_tunnel *tun = netdev_priv(ol_dev);
        struct net *net = dev_net(ol_dev);
 
-       return __dev_get_by_index(net, tun->parms.link);
+       return dev_get_by_index_rcu(net, tun->parms.link);
 }
 
 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
 {
-       struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+       struct net_device *d;
+       u32 tb_id;
 
+       rcu_read_lock();
+       d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
        if (d)
-               return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
+               tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
        else
-               return RT_TABLE_MAIN;
+               tb_id = RT_TABLE_MAIN;
+       rcu_read_unlock();
+
+       return tb_id;
 }
 
 static struct mlxsw_sp_rif *
@@ -1355,8 +1364,12 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
                                        ipip_list_node);
        list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
                                     ipip_list_node) {
-               struct net_device *ipip_ul_dev =
-                       __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+               struct net_device *ol_dev = ipip_entry->ol_dev;
+               struct net_device *ipip_ul_dev;
+
+               rcu_read_lock();
+               ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+               rcu_read_unlock();
 
                if (ipip_ul_dev == ul_dev)
                        return ipip_entry;
@@ -1722,9 +1735,12 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
 
        list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
                                 ipip_list_node) {
-               struct net_device *ipip_ul_dev =
-                       __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+               struct net_device *ol_dev = ipip_entry->ol_dev;
+               struct net_device *ipip_ul_dev;
 
+               rcu_read_lock();
+               ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+               rcu_read_unlock();
                if (ipip_ul_dev == ul_dev)
                        mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
        }
@@ -3711,9 +3727,15 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
 
 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
 {
-       struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+       struct net_device *ul_dev;
+       bool is_up;
+
+       rcu_read_lock();
+       ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+       is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
+       rcu_read_unlock();
 
-       return ul_dev ? (ul_dev->flags & IFF_UP) : true;
+       return is_up;
 }
 
 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
@@ -3840,10 +3862,14 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
        if (!dev)
                return 0;
 
-       in_dev = __in_dev_get_rtnl(dev);
+       rcu_read_lock();
+       in_dev = __in_dev_get_rcu(dev);
        if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
-           fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
+           fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
+               rcu_read_unlock();
                return 0;
+       }
+       rcu_read_unlock();
 
        err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
        if (err)
@@ -6233,7 +6259,7 @@ err_fib_event:
        return NOTIFY_BAD;
 }
 
-struct mlxsw_sp_rif *
+static struct mlxsw_sp_rif *
 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *dev)
 {
@@ -6247,6 +6273,33 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
        return NULL;
 }
 
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+                        const struct net_device *dev)
+{
+       return !!mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+}
+
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
+{
+       struct mlxsw_sp_rif *rif;
+       u16 vid = 0;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       if (!rif)
+               goto out;
+
+       /* We only return the VID for VLAN RIFs. Otherwise we return an
+        * invalid value (0).
+        */
+       if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
+               goto out;
+
+       vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+out:
+       return vid;
+}
+
 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
 {
        char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -6281,7 +6334,8 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
        case NETDEV_UP:
                return rif == NULL;
        case NETDEV_DOWN:
-               idev = __in_dev_get_rtnl(dev);
+               rcu_read_lock();
+               idev = __in_dev_get_rcu(dev);
                if (idev && idev->ifa_list)
                        addr_list_empty = false;
 
@@ -6289,6 +6343,7 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
                if (addr_list_empty && inet6_dev &&
                    !list_empty(&inet6_dev->addr_list))
                        addr_list_empty = false;
+               rcu_read_unlock();
 
                /* macvlans do not have a RIF, but rather piggy back on the
                 * RIF of their lower device.
@@ -6411,11 +6466,6 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
        return rif->dev;
 }
 
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
-{
-       return rif->fid;
-}
-
 static struct mlxsw_sp_rif *
 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
                    const struct mlxsw_sp_rif_params *params,
@@ -6631,8 +6681,8 @@ err_fid_port_vid_map:
        return err;
 }
 
-void
-mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+static void
+__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
        struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
@@ -6650,6 +6700,12 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
        mlxsw_sp_rif_subport_put(rif);
 }
 
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+       __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+}
+
 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
                                             struct net_device *port_dev,
                                             unsigned long event, u16 vid,
@@ -6667,7 +6723,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
                return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
                                                      l3_dev, extack);
        case NETDEV_DOWN:
-               mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+               __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
                break;
        }
 
@@ -6848,8 +6904,8 @@ err_rif_vrrp_add:
        return err;
 }
 
-void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
-                             const struct net_device *macvlan_dev)
+static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+                                      const struct net_device *macvlan_dev)
 {
        struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
        struct mlxsw_sp_rif *rif;
@@ -6866,6 +6922,12 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
                            mlxsw_sp_fid_index(rif->fid), false);
 }
 
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+                             const struct net_device *macvlan_dev)
+{
+       __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+}
+
 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
                                           struct net_device *macvlan_dev,
                                           unsigned long event,
@@ -6875,7 +6937,7 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
        case NETDEV_UP:
                return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
        case NETDEV_DOWN:
-               mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+               __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
                break;
        }
 
index 0cdd7954a085fb75fcc00792ac3b9fcf26b2d111..9fb2e9d93929c6ea10d969783c32f1ff491a22e2 100644
@@ -3,6 +3,8 @@
 
 #include <linux/if_bridge.h>
 #include <linux/list.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
 #include <net/arp.h>
 #include <net/gre.h>
 #include <net/lag.h>
 #include "spectrum_span.h"
 #include "spectrum_switchdev.h"
 
+struct mlxsw_sp_span {
+       struct work_struct work;
+       struct mlxsw_sp *mlxsw_sp;
+       atomic_t active_entries_count;
+       int entries_count;
+       struct mlxsw_sp_span_entry entries[];
+};
+
+static void mlxsw_sp_span_respin_work(struct work_struct *work);
+
 static u64 mlxsw_sp_span_occ_get(void *priv)
 {
        const struct mlxsw_sp *mlxsw_sp = priv;
-       u64 occ = 0;
-       int i;
 
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               if (mlxsw_sp->span.entries[i].ref_count)
-                       occ++;
-       }
-
-       return occ;
+       return atomic_read(&mlxsw_sp->span->active_entries_count);
 }
 
 int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
-       int i;
+       struct mlxsw_sp_span *span;
+       int i, entries_count;
 
        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
                return -EIO;
 
-       mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
-                                                         MAX_SPAN);
-       mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
-                                        sizeof(struct mlxsw_sp_span_entry),
-                                        GFP_KERNEL);
-       if (!mlxsw_sp->span.entries)
+       entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
+       span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
+       if (!span)
                return -ENOMEM;
+       span->entries_count = entries_count;
+       atomic_set(&span->active_entries_count, 0);
+       span->mlxsw_sp = mlxsw_sp;
+       mlxsw_sp->span = span;
 
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 
                INIT_LIST_HEAD(&curr->bound_ports_list);
                curr->id = i;
@@ -53,6 +60,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 
        devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
                                          mlxsw_sp_span_occ_get, mlxsw_sp);
+       INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
 
        return 0;
 }
@@ -62,14 +70,15 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
        struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
        int i;
 
+       cancel_work_sync(&mlxsw_sp->span->work);
        devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
 
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 
                WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
        }
-       kfree(mlxsw_sp->span.entries);
+       kfree(mlxsw_sp->span);
 }
 
 static int
@@ -645,15 +654,16 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
        int i;
 
        /* find a free entry to use */
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               if (!mlxsw_sp->span.entries[i].ref_count) {
-                       span_entry = &mlxsw_sp->span.entries[i];
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               if (!mlxsw_sp->span->entries[i].ref_count) {
+                       span_entry = &mlxsw_sp->span->entries[i];
                        break;
                }
        }
        if (!span_entry)
                return NULL;
 
+       atomic_inc(&mlxsw_sp->span->active_entries_count);
        span_entry->ops = ops;
        span_entry->ref_count = 1;
        span_entry->to_dev = to_dev;
@@ -662,9 +672,11 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
        return span_entry;
 }
 
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_span_entry *span_entry)
 {
        mlxsw_sp_span_entry_deconfigure(span_entry);
+       atomic_dec(&mlxsw_sp->span->active_entries_count);
 }
 
 struct mlxsw_sp_span_entry *
@@ -673,8 +685,8 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
 {
        int i;
 
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 
                if (curr->ref_count && curr->to_dev == to_dev)
                        return curr;
@@ -694,8 +706,8 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
 {
        int i;
 
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 
                if (curr->ref_count && curr->id == span_id)
                        return curr;
@@ -726,7 +738,7 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 {
        WARN_ON(!span_entry->ref_count);
        if (--span_entry->ref_count == 0)
-               mlxsw_sp_span_entry_destroy(span_entry);
+               mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
        return 0;
 }
 
@@ -736,8 +748,8 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
        struct mlxsw_sp_span_inspected_port *p;
        int i;
 
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 
                list_for_each_entry(p, &curr->bound_ports_list, list)
                        if (p->local_port == port->local_port &&
@@ -842,9 +854,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
         * so if a binding is requested, check for conflicts.
         */
        if (bind)
-               for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+               for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
                        struct mlxsw_sp_span_entry *curr =
-                               &mlxsw_sp->span.entries[i];
+                               &mlxsw_sp->span->entries[i];
 
                        if (mlxsw_sp_span_entry_bound_port_find(curr, type,
                                                                port, bind))
@@ -988,14 +1000,18 @@ void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
        mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
 }
 
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
 {
-       int i;
-       int err;
+       struct mlxsw_sp_span *span;
+       struct mlxsw_sp *mlxsw_sp;
+       int i, err;
 
-       ASSERT_RTNL();
-       for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+       span = container_of(work, struct mlxsw_sp_span, work);
+       mlxsw_sp = span->mlxsw_sp;
+
+       rtnl_lock();
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
                struct mlxsw_sp_span_parms sparms = {NULL};
 
                if (!curr->ref_count)
@@ -1010,4 +1026,12 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
                        mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
                }
        }
+       rtnl_unlock();
+}
+
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+       if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+               return;
+       mlxsw_core_schedule_work(&mlxsw_sp->span->work);
 }
index 6213fa43aa7b8921032d874ee4ea835870e69c2e..339c69da83b29b87babb62ab2d8e8762cc3b160a 100644
@@ -1173,16 +1173,12 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
                                const struct net_device *br_dev,
                                const struct switchdev_obj_port_vlan *vlan)
 {
-       struct mlxsw_sp_rif *rif;
-       struct mlxsw_sp_fid *fid;
        u16 pvid;
        u16 vid;
 
-       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
-       if (!rif)
+       pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
+       if (!pvid)
                return 0;
-       fid = mlxsw_sp_rif_fid(rif);
-       pvid = mlxsw_sp_fid_8021q_vid(fid);
 
        for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
                if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
@@ -1778,36 +1774,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 }
 
-struct mlxsw_sp_span_respin_work {
-       struct work_struct work;
-       struct mlxsw_sp *mlxsw_sp;
-};
-
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
-{
-       struct mlxsw_sp_span_respin_work *respin_work =
-               container_of(work, struct mlxsw_sp_span_respin_work, work);
-
-       rtnl_lock();
-       mlxsw_sp_span_respin(respin_work->mlxsw_sp);
-       rtnl_unlock();
-       kfree(respin_work);
-}
-
-static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
-{
-       struct mlxsw_sp_span_respin_work *respin_work;
-
-       respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
-       if (!respin_work)
-               return;
-
-       INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
-       respin_work->mlxsw_sp = mlxsw_sp;
-
-       mlxsw_core_schedule_work(&respin_work->work);
-}
-
 static int mlxsw_sp_port_obj_add(struct net_device *dev,
                                 const struct switchdev_obj *obj,
                                 struct switchdev_trans *trans,
@@ -1829,7 +1795,7 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
                         * call for later, so that the respin logic sees the
                         * updated bridge state.
                         */
-                       mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+                       mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
                }
                break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
@@ -1982,7 +1948,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
                break;
        }
 
-       mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+       mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
 
        return err;
 }
index d1444ba36e100b7930f3b8eee70c9d0d06bab7d2..4fe6aedca22f4f20f10e76917e17b4aa8411f750 100644
@@ -5694,7 +5694,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
                 * from the bridge.
                 */
                if ((hw->features & STP_SUPPORT) && !promiscuous &&
-                   (dev->priv_flags & IFF_BRIDGE_PORT)) {
+                   netif_is_bridge_port(dev)) {
                        struct ksz_switch *sw = hw->ksz_switch;
                        int port = priv->port.first_port;
 
index e452f4242ba0cc592b65cd6d4a3cb5b9166e8e23..020acc300d7e013484e80d06f972f156e5eea104 100644
@@ -632,10 +632,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
                                           ip_hdr(skb)->daddr,
                                           0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
-               tcp_hdr(skb)->check =
-                       ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                        &ipv6_hdr(skb)->daddr,
-                                        0, IPPROTO_TCP, 0);
+               tcp_v6_gso_csum_prep(skb);
        }
 
        return 0;
index bebe38d74d6682790f009208bb4106027798054c..251d4ac4af02d348250935e4b1b4296933dc9ad7 100644
@@ -1288,11 +1288,8 @@ static int emac_tso_csum(struct emac_adapter *adpt,
                        memset(tpd, 0, sizeof(*tpd));
                        memset(&extra_tpd, 0, sizeof(extra_tpd));
 
-                       ipv6_hdr(skb)->payload_len = 0;
-                       tcp_hdr(skb)->check =
-                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                &ipv6_hdr(skb)->daddr,
-                                                0, IPPROTO_TCP, 0);
+                       tcp_v6_gso_csum_prep(skb);
+
                        TPD_PKT_LEN_SET(&extra_tpd, skb->len);
                        TPD_LSO_SET(&extra_tpd, 1);
                        TPD_LSOV_SET(&extra_tpd, 1);
index ad4bb5ac686e692872705f54cf0966adb6c63f17..267b7ae05e2307a444ffb7038cfc8f4d5a0b66c2 100644
@@ -4108,29 +4108,6 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
        return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
 }
 
-/* msdn_giant_send_check()
- * According to the document of microsoft, the TCP Pseudo Header excludes the
- * packet length for IPv6 TCP large packets.
- */
-static int msdn_giant_send_check(struct sk_buff *skb)
-{
-       const struct ipv6hdr *ipv6h;
-       struct tcphdr *th;
-       int ret;
-
-       ret = skb_cow_head(skb, 0);
-       if (ret)
-               return ret;
-
-       ipv6h = ipv6_hdr(skb);
-       th = tcp_hdr(skb);
-
-       th->check = 0;
-       th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
-
-       return ret;
-}
-
 static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
 {
        u32 mss = skb_shinfo(skb)->gso_size;
@@ -4163,9 +4140,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
                        break;
 
                case htons(ETH_P_IPV6):
-                       if (msdn_giant_send_check(skb))
+                       if (skb_cow_head(skb, 0))
                                return false;
 
+                       tcp_v6_gso_csum_prep(skb);
                        opts[0] |= TD1_GTSENV6;
                        break;
 
index 4481f21a1f43bf39fa12650300b7f1599539c118..256807c28ff7c596631f4d09fdf536d33ff32821 100644
@@ -113,7 +113,6 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
  *
  *************************************************************************/
 
-static const struct efx_channel_type efx_default_channel_type;
 static void efx_remove_port(struct efx_nic *efx);
 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
index e8224b543dfcf31ba96722589274ef5f0afca99d..58b9b7ce7195e719f1245c23fb0cc4987456fb7a 100644
@@ -896,9 +896,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
        case XDP_TX:
                ret = netsec_xdp_xmit_back(priv, xdp);
                if (ret != NETSEC_XDP_TX)
-                       __page_pool_put_page(dring->page_pool,
-                                            virt_to_head_page(xdp->data),
-                                            len, true);
+                       page_pool_put_page(dring->page_pool,
+                                          virt_to_head_page(xdp->data), len,
+                                          true);
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -906,9 +906,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
                        ret = NETSEC_XDP_REDIR;
                } else {
                        ret = NETSEC_XDP_CONSUMED;
-                       __page_pool_put_page(dring->page_pool,
-                                            virt_to_head_page(xdp->data),
-                                            len, true);
+                       page_pool_put_page(dring->page_pool,
+                                          virt_to_head_page(xdp->data), len,
+                                          true);
                }
                break;
        default:
@@ -919,9 +919,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
                /* fall through -- handle aborts by dropping packet */
        case XDP_DROP:
                ret = NETSEC_XDP_CONSUMED;
-               __page_pool_put_page(dring->page_pool,
-                                    virt_to_head_page(xdp->data),
-                                    len, true);
+               page_pool_put_page(dring->page_pool,
+                                  virt_to_head_page(xdp->data), len, true);
                break;
        }
 
@@ -1020,8 +1019,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                         * cache state. Since we paid the allocation cost if
                         * building an skb fails try to put the page into cache
                         */
-                       __page_pool_put_page(dring->page_pool, page,
-                                            pkt_len, true);
+                       page_pool_put_page(dring->page_pool, page, pkt_len,
+                                          true);
                        netif_err(priv, drv, priv->ndev,
                                  "rx failed to build skb\n");
                        break;
@@ -1148,11 +1147,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
                                ~tcp_v4_check(0, ip_hdr(skb)->saddr,
                                              ip_hdr(skb)->daddr, 0);
                } else {
-                       ipv6_hdr(skb)->payload_len = 0;
-                       tcp_hdr(skb)->check =
-                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                &ipv6_hdr(skb)->daddr,
-                                                0, IPPROTO_TCP, 0);
+                       tcp_v6_gso_csum_prep(skb);
                }
 
                tx_ctrl.tcp_seg_offload_flag = true;
@@ -1199,7 +1194,7 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
                if (id == NETSEC_RING_RX) {
                        struct page *page = virt_to_page(desc->addr);
 
-                       page_pool_put_page(dring->page_pool, page, false);
+                       page_pool_put_full_page(dring->page_pool, page, false);
                } else if (id == NETSEC_RING_TX) {
                        dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
                                         DMA_TO_DEVICE);
index 5836b21edd7ed7b54604acb60f0d95501c1169d3..37920b4da0919717eef8792dcdee611989a2c16f 100644
@@ -1251,11 +1251,11 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
        if (buf->page)
-               page_pool_put_page(rx_q->page_pool, buf->page, false);
+               page_pool_put_full_page(rx_q->page_pool, buf->page, false);
        buf->page = NULL;
 
        if (buf->sec_page)
-               page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+               page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
        buf->sec_page = NULL;
 }
 
index 65e12cb07f453fb0472d5bb527748d6ee8a18f92..5ee282b20ecbada2f446486d8feaded90bc86aa5 100644
@@ -638,10 +638,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
                } else {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
-                       ipv6_hdr(skb)->payload_len = 0;
-                       tcp_hdr(skb)->check =
-                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+                       tcp_v6_gso_csum_prep(skb);
                }
                lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
                lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
index 7d68b28bb8938de99fb2f979cd33df9975e82903..ab24692a92c6c114db05fb6edb8372e1a8ee51e7 100644
@@ -194,7 +194,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
        /* Abort if we are using an untested phy. */
        if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
            BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
-           BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
+           BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
+           BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810)
                return;
 
        val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -272,10 +273,7 @@ static int bcm54xx_config_init(struct phy_device *phydev)
            (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
                bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
 
-       if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
-           (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
-           (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
-               bcm54xx_adjust_rxrefclk(phydev);
+       bcm54xx_adjust_rxrefclk(phydev);
 
        if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
                err = bcm54210e_config_init(phydev);
@@ -315,6 +313,20 @@ static int bcm54xx_config_init(struct phy_device *phydev)
        return 0;
 }
 
+static int bcm54xx_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* Writes to registers other than BMCR are ignored
+        * unless we clear the PDOWN bit first
+        */
+       ret = genphy_resume(phydev);
+       if (ret < 0)
+               return ret;
+
+       return bcm54xx_config_init(phydev);
+}
+
 static int bcm5482_config_init(struct phy_device *phydev)
 {
        int err, reg;
@@ -708,6 +720,8 @@ static struct phy_driver broadcom_drivers[] = {
        .config_aneg    = bcm5481_config_aneg,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = bcm54xx_resume,
 }, {
        .phy_id         = PHY_ID_BCM5482,
        .phy_id_mask    = 0xfffffff0,
index a1caeee1223617dab21b488858b14b5d0ef2aa2a..bceb0dcdecbd61533ded8d9c539462466b7244e6 100644
@@ -239,9 +239,10 @@ int genphy_c45_read_link(struct phy_device *phydev)
 
                /* The link state is latched low so that momentary link
                 * drops can be detected. Do not double-read the status
-                * in polling mode to detect such short link drops.
+                * in polling mode to detect such short link drops except
+                * when the link was already down.
                 */
-               if (!phy_polling_mode(phydev)) {
+               if (!phy_polling_mode(phydev) || !phydev->link) {
                        val = phy_read_mmd(phydev, devad, MDIO_STAT1);
                        if (val < 0)
                                return val;
index 2a973265de8095a8095b5f3b7e9069388c7568d5..be0129231c2a1cefe5cf58e463752d86d1e2570f 100644
@@ -1930,9 +1930,10 @@ int genphy_update_link(struct phy_device *phydev)
 
        /* The link state is latched low so that momentary link
         * drops can be detected. Do not double-read the status
-        * in polling mode to detect such short link drops.
+        * in polling mode to detect such short link drops except
+        * when the link was already down.
         */
-       if (!phy_polling_mode(phydev)) {
+       if (!phy_polling_mode(phydev) || !phydev->link) {
                status = phy_read(phydev, MII_BMSR);
                if (status < 0)
                        return status;
index 78ddbaf6401b6c953971e4dd6848688483fba89f..709578f4d060cb9c62b8d5b263717a2fdea08f6f 100644
@@ -1948,29 +1948,6 @@ drop:
        }
 }
 
-/* msdn_giant_send_check()
- * According to the document of microsoft, the TCP Pseudo Header excludes the
- * packet length for IPv6 TCP large packets.
- */
-static int msdn_giant_send_check(struct sk_buff *skb)
-{
-       const struct ipv6hdr *ipv6h;
-       struct tcphdr *th;
-       int ret;
-
-       ret = skb_cow_head(skb, 0);
-       if (ret)
-               return ret;
-
-       ipv6h = ipv6_hdr(skb);
-       th = tcp_hdr(skb);
-
-       th->check = 0;
-       th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
-
-       return ret;
-}
-
 static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
 {
        if (skb_vlan_tag_present(skb)) {
@@ -2016,10 +1993,11 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
                        break;
 
                case htons(ETH_P_IPV6):
-                       if (msdn_giant_send_check(skb)) {
+                       if (skb_cow_head(skb, 0)) {
                                ret = TX_CSUM_TSO;
                                goto unavailable;
                        }
+                       tcp_v6_gso_csum_prep(skb);
                        opts1 |= GTSENDV6;
                        break;
 
index 18f152fa00683208ccc1e3b4a695357fbc9b9156..722cb054a5cd16bb0e9f619c97b3b3b5262ab20c 100644
@@ -942,10 +942,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                                 IPPROTO_TCP, 0);
        } else if (ctx->ipv6) {
-               struct ipv6hdr *iph = ipv6_hdr(skb);
-
-               tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
-                                              IPPROTO_TCP, 0);
+               tcp_v6_gso_csum_prep(skb);
        }
 }
 
index 7bec95df4f804c1e5fadff93815fe65de6a35da2..27ec612cd4a4598dc01e4984e61bc42c6ee5e77b 100644
@@ -76,6 +76,15 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
        }
 }
 
+static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
+{
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct tcphdr *th = tcp_hdr(skb);
+
+       ipv6h->payload_len = 0;
+       th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 {
index cfbed00ba7ee07ded9212ad4bfd4d70111832f2e..81d7773f96cdfd366f045e8dc9fbff97897cb384 100644
@@ -151,6 +151,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params);
 #ifdef CONFIG_PAGE_POOL
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+void page_pool_release_page(struct page_pool *pool, struct page *page);
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -160,41 +161,32 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
                                         void (*disconnect)(void *))
 {
 }
+static inline void page_pool_release_page(struct page_pool *pool,
+                                         struct page *page)
+{
+}
 #endif
 
-/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool, struct page *page,
-                         unsigned int dma_sync_size, bool allow_direct);
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+                       unsigned int dma_sync_size, bool allow_direct);
 
-static inline void page_pool_put_page(struct page_pool *pool,
-                                     struct page *page, bool allow_direct)
+/* Same as above but will try to sync the entire area pool->max_len */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+                                          struct page *page, bool allow_direct)
 {
        /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
         * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
         */
 #ifdef CONFIG_PAGE_POOL
-       __page_pool_put_page(pool, page, -1, allow_direct);
+       page_pool_put_page(pool, page, -1, allow_direct);
 #endif
 }
-/* Very limited use-cases allow recycle direct */
+
+/* Same as above, but the caller must guarantee a safe context, e.g. NAPI */
 static inline void page_pool_recycle_direct(struct page_pool *pool,
                                            struct page *page)
 {
-       __page_pool_put_page(pool, page, -1, true);
-}
-
-/* Disconnects a page (from a page_pool).  API users can have a need
- * to disconnect a page (from a page_pool), to allow it to be used as
- * a regular page (that will eventually be returned to the normal
- * page-allocator via put_page).
- */
-void page_pool_unmap_page(struct page_pool *pool, struct page *page);
-static inline void page_pool_release_page(struct page_pool *pool,
-                                         struct page *page)
-{
-#ifdef CONFIG_PAGE_POOL
-       page_pool_unmap_page(pool, page);
-#endif
+       page_pool_put_full_page(pool, page, true);
 }
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
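
With the renames above the put side of the page_pool API has three layered entry points: page_pool_put_page() takes an explicit dma_sync_size, page_pool_put_full_page() passes -1 so up to pool->max_len is synced, and page_pool_recycle_direct() additionally requires the caller to guarantee a safe context such as NAPI. A sketch of typical driver usage under those assumptions; the rxq and page variables are illustrative:

	/* Inside the NAPI poll loop: context is safe, so the page can
	 * go straight back to the lockless per-CPU cache.
	 */
	page_pool_recycle_direct(rxq->page_pool, page);

	/* Outside NAPI, e.g. on ring teardown: no direct recycling,
	 * and sync the full pool->max_len area since the device may
	 * have written anywhere in it.
	 */
	page_pool_put_full_page(rxq->page_pool, page, false);
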
index 789a73aa7bd87b3492e983f8c6c6723e1e3c6bc5..5bf8d22a47ec0d6e1c3385ce5bd26e45f7b6510f 100644
@@ -3553,9 +3553,6 @@ static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
        NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
 
-#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
-       NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
-
 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
        NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
 
index 10d2b255df5eccade8feb8f73d8fc657b604e9a4..626db912fce4d88b4e44f68624cdee90a9edfc73 100644
@@ -96,7 +96,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
 }
 EXPORT_SYMBOL(page_pool_create);
 
-static void __page_pool_return_page(struct page_pool *pool, struct page *page);
+static void page_pool_return_page(struct page_pool *pool, struct page *page);
 
 noinline
 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
@@ -136,7 +136,7 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
                         * (2) break out to fall through to alloc_pages_node.
                         * This limits stress on the page buddy allocator.
                         */
-                       __page_pool_return_page(pool, page);
+                       page_pool_return_page(pool, page);
                        page = NULL;
                        break;
                }
@@ -274,18 +274,25 @@ static s32 page_pool_inflight(struct page_pool *pool)
        return inflight;
 }
 
-/* Cleanup page_pool state from page */
-static void __page_pool_clean_page(struct page_pool *pool,
-                                  struct page *page)
+/* Disconnects a page from a page_pool.  API users may need to
+ * disconnect a page from its page_pool, to allow it to be used as
+ * a regular page (that will eventually be returned to the normal
+ * page-allocator via put_page).
+ */
+void page_pool_release_page(struct page_pool *pool, struct page *page)
 {
        dma_addr_t dma;
        int count;
 
        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+               /* Always account for inflight pages, even if we didn't
+                * map them
+                */
                goto skip_dma_unmap;
 
        dma = page->dma_addr;
-       /* DMA unmap */
+
+       /* When the page is unmapped, it cannot be returned to our pool */
        dma_unmap_page_attrs(pool->p.dev, dma,
                             PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
@@ -297,21 +304,12 @@ skip_dma_unmap:
        count = atomic_inc_return(&pool->pages_state_release_cnt);
        trace_page_pool_state_release(pool, page, count);
 }
-
-/* unmap the page and clean our state */
-void page_pool_unmap_page(struct page_pool *pool, struct page *page)
-{
-       /* When page is unmapped, this implies page will not be
-        * returned to page_pool.
-        */
-       __page_pool_clean_page(pool, page);
-}
-EXPORT_SYMBOL(page_pool_unmap_page);
+EXPORT_SYMBOL(page_pool_release_page);
 
 /* Return a page to the page allocator, cleaning up our state */
-static void __page_pool_return_page(struct page_pool *pool, struct page *page)
+static void page_pool_return_page(struct page_pool *pool, struct page *page)
 {
-       __page_pool_clean_page(pool, page);
+       page_pool_release_page(pool, page);
 
        put_page(page);
        /* An optimization would be to call __free_pages(page, pool->p.order)
@@ -320,8 +318,7 @@ static void __page_pool_return_page(struct page_pool *pool, struct page *page)
         */
 }
 
-static bool __page_pool_recycle_into_ring(struct page_pool *pool,
-                                  struct page *page)
+static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
 {
        int ret;
        /* BH protection not needed if current is serving softirq */
@@ -338,7 +335,7 @@ static bool __page_pool_recycle_into_ring(struct page_pool *pool,
  *
  * Caller must provide appropriate safe context.
  */
-static bool __page_pool_recycle_direct(struct page *page,
+static bool page_pool_recycle_in_cache(struct page *page,
                                       struct page_pool *pool)
 {
        if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
@@ -357,8 +354,14 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
        return !page_is_pfmemalloc(page);
 }
 
-void __page_pool_put_page(struct page_pool *pool, struct page *page,
-                         unsigned int dma_sync_size, bool allow_direct)
+/* If the page refcnt == 1, this will try to recycle the page.
+ * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
+ * the configured size min(dma_sync_size, pool->max_len).
+ * If the page refcnt != 1, then the page will be returned to the
+ * memory subsystem.
+ */
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+                       unsigned int dma_sync_size, bool allow_direct)
 {
        /* This allocator is optimized for the XDP mode that uses
         * one-frame-per-page, but has fallbacks that act like the
@@ -375,12 +378,12 @@ void __page_pool_put_page(struct page_pool *pool, struct page *page,
                                                      dma_sync_size);
 
                if (allow_direct && in_serving_softirq())
-                       if (__page_pool_recycle_direct(page, pool))
+                       if (page_pool_recycle_in_cache(page, pool))
                                return;
 
-               if (!__page_pool_recycle_into_ring(pool, page)) {
+               if (!page_pool_recycle_in_ring(pool, page)) {
                        /* Cache full, fallback to free pages */
-                       __page_pool_return_page(pool, page);
+                       page_pool_return_page(pool, page);
                }
                return;
        }
@@ -397,12 +400,13 @@ void __page_pool_put_page(struct page_pool *pool, struct page *page,
         * doing refcnt based recycle tricks, meaning another process
         * will be invoking put_page.
         */
-       __page_pool_clean_page(pool, page);
+       /* Do not replace this with page_pool_return_page() */
+       page_pool_release_page(pool, page);
        put_page(page);
 }
-EXPORT_SYMBOL(__page_pool_put_page);
+EXPORT_SYMBOL(page_pool_put_page);
 
-static void __page_pool_empty_ring(struct page_pool *pool)
+static void page_pool_empty_ring(struct page_pool *pool)
 {
        struct page *page;
 
@@ -413,7 +417,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
                        pr_crit("%s() page_pool refcnt %d violation\n",
                                __func__, page_ref_count(page));
 
-               __page_pool_return_page(pool, page);
+               page_pool_return_page(pool, page);
        }
 }
 
@@ -443,7 +447,7 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
         */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
-               __page_pool_return_page(pool, page);
+               page_pool_return_page(pool, page);
        }
 }
 
@@ -455,7 +459,7 @@ static void page_pool_scrub(struct page_pool *pool)
        /* No more consumers should exist, but producers could still
         * be in-flight.
         */
-       __page_pool_empty_ring(pool);
+       page_pool_empty_ring(pool);
 }
 
 static int page_pool_release(struct page_pool *pool)
@@ -529,7 +533,7 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
        /* Flush pool alloc cache, as refill will check NUMA node */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
-               __page_pool_return_page(pool, page);
+               page_pool_return_page(pool, page);
        }
 }
 EXPORT_SYMBOL(page_pool_update_nid);
index 9b4f8a254a15d47bcf86ab94aa9f3c2b8e02fd16..6e35742969e6765345aede51fecd92f24b07b247 100644
@@ -3911,7 +3911,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        /* Support fdb on master device the net/bridge default case */
        if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
-           (dev->priv_flags & IFF_BRIDGE_PORT)) {
+           netif_is_bridge_port(dev)) {
                struct net_device *br_dev = netdev_master_upper_dev_get(dev);
                const struct net_device_ops *ops = br_dev->netdev_ops;
 
@@ -4022,7 +4022,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        /* Support fdb on master device the net/bridge default case */
        if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
-           (dev->priv_flags & IFF_BRIDGE_PORT)) {
+           netif_is_bridge_port(dev)) {
                struct net_device *br_dev = netdev_master_upper_dev_get(dev);
                const struct net_device_ops *ops = br_dev->netdev_ops;
 
@@ -4248,13 +4248,13 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
 
                        if (!br_idx) { /* user did not specify a specific bridge */
-                               if (dev->priv_flags & IFF_BRIDGE_PORT) {
+                               if (netif_is_bridge_port(dev)) {
                                        br_dev = netdev_master_upper_dev_get(dev);
                                        cops = br_dev->netdev_ops;
                                }
                        } else {
                                if (dev != br_dev &&
-                                   !(dev->priv_flags & IFF_BRIDGE_PORT))
+                                   !netif_is_bridge_port(dev))
                                        continue;
 
                                if (br_dev != netdev_master_upper_dev_get(dev) &&
@@ -4266,7 +4266,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        if (idx < s_idx)
                                goto cont;
 
-                       if (dev->priv_flags & IFF_BRIDGE_PORT) {
+                       if (netif_is_bridge_port(dev)) {
                                if (cops && cops->ndo_fdb_dump) {
                                        err = cops->ndo_fdb_dump(skb, cb,
                                                                br_dev, dev,
@@ -4416,7 +4416,7 @@ static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
        if (dev) {
                if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
-                       if (!(dev->priv_flags & IFF_BRIDGE_PORT)) {
+                       if (!netif_is_bridge_port(dev)) {
                                NL_SET_ERR_MSG(extack, "Device is not a bridge port");
                                return -EINVAL;
                        }
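
All of the rtnetlink, nl80211 and cfg80211 conversions in this series swap the open-coded priv_flags test for the netif_is_bridge_port() helper; to the best of my knowledge the helper in include/linux/netdevice.h is simply the same predicate wrapped in an inline, along the lines of:

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	/* Same test the call sites above used to open-code */
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

so the change is cosmetic, not behavioral.
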
index 8310714c47fd7af0877dcf531ad46d0d2fef2903..4c7ea85486af986e1d143b461e4d405ed49b2127 100644
@@ -372,7 +372,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
                napi_direct &= !xdp_return_frame_no_direct();
-               page_pool_put_page(xa->page_pool, page, napi_direct);
+               page_pool_put_full_page(xa->page_pool, page, napi_direct);
                rcu_read_unlock();
                break;
        case MEM_TYPE_PAGE_SHARED:
index ff0c24371e3309b3068980f46d1ed743337d2a3e..f4c2ac445b3fd43399038de067df805fc44d9072 100644
@@ -35,9 +35,6 @@
  *             Paul E. McKenney <paulmck@us.ibm.com>
  *             Patrick McHardy <kaber@trash.net>
  */
-
-#define VERSION "0.409"
-
 #include <linux/cache.h>
 #include <linux/uaccess.h>
 #include <linux/bitops.h>
@@ -304,8 +301,6 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa)
        call_rcu(&fa->rcu, __alias_free_mem);
 }
 
-#define TNODE_KMALLOC_MAX \
-       ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *))
 #define TNODE_VMALLOC_MAX \
        ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
 
index 9684af02e0a590ceb05268b6c13d625174c5be50..d9531b4b33f294a547240cc013a123805ce2ee66 100644
@@ -554,18 +554,6 @@ static struct ctl_table ipv4_table[] = {
                .proc_handler   = proc_dointvec,
        },
 #endif /* CONFIG_NETLABEL */
-       {
-               .procname       = "tcp_available_congestion_control",
-               .maxlen         = TCP_CA_BUF_MAX,
-               .mode           = 0444,
-               .proc_handler   = proc_tcp_available_congestion_control,
-       },
-       {
-               .procname       = "tcp_allowed_congestion_control",
-               .maxlen         = TCP_CA_BUF_MAX,
-               .mode           = 0644,
-               .proc_handler   = proc_allowed_congestion_control,
-       },
        {
                .procname       = "tcp_available_ulp",
                .maxlen         = TCP_ULP_BUF_MAX,
@@ -885,6 +873,18 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = TCP_CA_NAME_MAX,
                .proc_handler   = proc_tcp_congestion_control,
        },
+       {
+               .procname       = "tcp_available_congestion_control",
+               .maxlen         = TCP_CA_BUF_MAX,
+               .mode           = 0444,
+               .proc_handler   = proc_tcp_available_congestion_control,
+       },
+       {
+               .procname       = "tcp_allowed_congestion_control",
+               .maxlen         = TCP_CA_BUF_MAX,
+               .mode           = 0644,
+               .proc_handler   = proc_allowed_congestion_control,
+       },
        {
                .procname       = "tcp_keepalive_time",
                .data           = &init_net.ipv4.sysctl_tcp_keepalive_time,
index f0112dabe21e5a7cee3bafa5a3d8a78696954191..8c2a246099ef5a7eea99eb86c9e44ddfe2db1296 100644
@@ -3531,7 +3531,7 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
                               enum nl80211_iftype iftype)
 {
        if (!use_4addr) {
-               if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
+               if (netdev && netif_is_bridge_port(netdev))
                        return -EBUSY;
                return 0;
        }
index 8481e9ac33da5c71652186e8110b92e025eedae4..80fb47c43bdd5f3b5c0544658f70c8c6a7e94661 100644
@@ -934,7 +934,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                return -EOPNOTSUPP;
 
        /* if it's part of a bridge, reject changing type to station/ibss */
-       if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
+       if (netif_is_bridge_port(dev) &&
            (ntype == NL80211_IFTYPE_ADHOC ||
             ntype == NL80211_IFTYPE_STATION ||
             ntype == NL80211_IFTYPE_P2P_CLIENT))
index 98a20faf319868b7fcf2af424a85e424e90fac0f..75f547a5ee48b2737bd918ff65980cb4f6f685c4 100644
         "teardown": [
             "$TC qdisc del dev $DEV1 ingress"
         ]
+    },
+    {
+        "id": "bae4",
+        "name": "Add basic filter with u32 ematch u8/zero offset and default action",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x11 0x0f at 0)' classid 1:1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(01000000/0f000000 at 0\\)",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "e6cb",
+        "name": "Add basic filter with u32 ematch u8/zero offset and invalid value >0xFF",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x1122 0x0f at 0)' classid 1:1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11220000/0f000000 at 0\\)",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "7727",
+        "name": "Add basic filter with u32 ematch u8/positive offset and default action",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0x1f at 12)' classid 1:1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(17000000/1f000000 at 12\\)",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "a429",
+        "name": "Add basic filter with u32 ematch u8/invalid mask >0xFF",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0xff00 at 12)' classid 1:1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77000000/ff000000 at 12\\)",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "8373",
+        "name": "Add basic filter with u32 ematch u8/missing offset",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0xff at)' classid 1:1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77000000 at 12\\)",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "ab8e",
+        "name": "Add basic filter with u32 ematch u8/missing AT keyword",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0xff 0)' classid 1:1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77000000 at 12\\)",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "712d",
+        "name": "Add basic filter with u32 ematch u8/missing value",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 at 12)' classid 1:1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(at 12\\)",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "350f",
+        "name": "Add basic filter with u32 ematch u8/non-numeric value",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 zero 0xff at 0)' classid 1:1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(00000000/ff000000 at 0\\)",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "e28f",
+        "name": "Add basic filter with u32 ematch u8/non-numeric mask",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x11 mask at 0)' classid 1:1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11000000/00000000 at 0\\)",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "6d5f",
+        "name": "Add basic filter with u32 ematch u8/negative offset and default action",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0xaa 0xf0 at -14)' classid 1:1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(0000a000/0000f000 at -16\\)",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "12dc",
+        "name": "Add basic filter with u32 ematch u8/nexthdr+ offset and default action",
+        "category": [
+            "filter",
+            "basic"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0xaa 0xf0 at nexthdr+0)' classid 1:1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+        "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(a0000000/f0000000 at nexthdr\\+0\\)",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
     }
 ]
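
The matchPattern strings in the new tests encode how a u8 ematch is folded into the u32 ematch's 32-bit key: the byte offset is rounded down to a 4-byte boundary and the masked value is shifted to its byte position within that word, which is why 0xaa/0xf0 at -14 dumps as 0000a000/0000f000 at -16. A standalone sketch, derived only from the expected patterns above, that reproduces the folding:

#include <stdint.h>
#include <stdio.h>

/* Fold a u8 value/mask at byte offset 'off' into the aligned 32-bit
 * key/mask pair shown by the u32 ematch dump.
 */
static void u8_fold(uint8_t val, uint8_t mask, int off,
		    uint32_t *key, uint32_t *kmask, int *koff)
{
	int aligned = off & ~3;			/* e.g. -14 -> -16 */
	int shift = (3 - (off - aligned)) * 8;	/* byte slot in the word */

	*key   = (uint32_t)(val & mask) << shift;
	*kmask = (uint32_t)mask << shift;
	*koff  = aligned;
}

int main(void)
{
	uint32_t key, mask;
	int off;

	u8_fold(0xaa, 0xf0, -14, &key, &mask, &off);
	/* Prints 0000a000/0000f000 at -16, matching test 6d5f */
	printf("%08x/%08x at %d\n", key, mask, off);
	return 0;
}
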