Update features and vlan_features with VLAN HW offload capabilities.
Add vlan_tag fields to the rx/tx ring_buff to track VLAN-related data.
Tested-by: Nikita Danilov <ndanilov@aquantia.com>
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
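For context, this is the generic ctag-offload pattern the patch plugs into. The sketch below uses only core netdev/VLAN helpers (NETIF_F_HW_VLAN_CTAG_*, __vlan_hwaccel_put_tag, skb_vlan_tag_*); the wrapper function names are hypothetical and are not taken from the aquantia driver, whose actual hunks follow.

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	/* Advertise HW VLAN tag stripping (RX) and insertion (TX). */
	static void example_setup_vlan_offload(struct net_device *ndev)
	{
		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				     NETIF_F_HW_VLAN_CTAG_TX;
		ndev->features    |= NETIF_F_HW_VLAN_CTAG_RX |
				     NETIF_F_HW_VLAN_CTAG_TX;
	}

	/* RX completion: if HW stripped a tag, hand it to the stack. */
	static void example_rx_vlan(struct sk_buff *skb, bool hw_stripped,
				    u16 vlan_tci)
	{
		if (hw_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}

	/* TX path: fetch the accelerated tag for the HW descriptor, if any. */
	static u16 example_tx_vlan_tag(struct sk_buff *skb)
	{
		return skb_vlan_tag_present(skb) ? skb_vlan_tag_get(skb) : 0;
	}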
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
cfg->features = cfg->aq_hw_caps->hw_features;
+ cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
+ cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
- NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
+ NETIF_F_RXHASH | NETIF_F_SG |
+ NETIF_F_LRO | NETIF_F_TSO;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dx_buff->len_l3 = ip_hdrlen(skb);
dx_buff->len_l4 = tcp_hdrlen(skb);
dx_buff->mss = skb_shinfo(skb)->gso_size;
- dx_buff->is_txc = 1U;
+ dx_buff->is_gso = 1U;
dx_buff->eop_index = 0xffffU;
dx_buff->is_ipv6 =
--ret, dx = aq_ring_next_dx(ring, dx)) {
dx_buff = &ring->buff_ring[dx];
- if (!dx_buff->is_txc && dx_buff->pa) {
+ if (!dx_buff->is_gso && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
dma_unmap_single(aq_nic_get_dev(self),
dx_buff->pa,
u32 flow_control;
u32 link_speed_msk;
u32 wol;
+ u8 is_vlan_rx_strip;
+ u8 is_vlan_tx_insert;
u16 is_mc_list_enabled;
u16 mc_list_count;
bool is_autoneg;
* +----------+----------+----------+-----------
* 4/8bytes|len pkt |len pkt | | skb
* +----------+----------+----------+-----------
- * 4/8bytes|is_txc |len,flags |len |len,is_eop
+ * 4/8bytes|is_gso |len,flags |len |len,is_eop
* +----------+----------+----------+-----------
*
* This aq_ring_buff_s doesn't have endianness dependency.
u8 is_hash_l4;
u8 rsvd1;
struct aq_rxpage rxdata;
+ u16 vlan_rx_tag;
};
/* EOP */
struct {
u8 is_ipv6:1;
u8 rsvd2:7;
u32 len_pkt;
+ u16 vlan_tx_tag;
};
};
union {
u32 is_cso_err:1;
u32 is_sop:1;
u32 is_eop:1;
- u32 is_txc:1;
+ u32 is_gso:1;
u32 is_mapped:1;
u32 is_cleaned:1;
u32 is_error:1;
- u32 rsvd3:6;
+ u32 is_vlan:1;
+ u32 rsvd3:5;
u16 eop_index;
u16 rsvd4;
};
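For illustration, an is_vlan/vlan_rx_tag pair like the one added above is typically consumed on the RX clean path roughly as follows; the corresponding aquantia code is not part of this excerpt, so treat this as a sketch rather than the driver's exact hunk.

	/* Propagate a HW-stripped tag to the stack when building the skb. */
	if (buff->is_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       buff->vlan_rx_tag);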
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_txc) {
+ if (buff->is_gso) {
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_A0_TXD_CTL_CMD_TCP |
NETIF_F_TSO | \
NETIF_F_LRO | \
NETIF_F_NTUPLE | \
- NETIF_F_HW_VLAN_CTAG_FILTER, \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_txc) {
+ if (buff->is_gso) {
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_B0_TXD_CTL_CMD_TCP |
{
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
+ aq_ring->idx);
/* Rx ring set mode */