// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
        .msix_irqs = 8U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL_B0_RSS_MAX,        \
        .tcs = HW_ATL_B0_TC_MAX,          \
        .rxd_alignment = 1U,              \
        .rxd_size = HW_ATL_B0_RXD_SIZE,   \
        .rxds_max = HW_ATL_B0_MAX_RXD,    \
        .rxds_min = HW_ATL_B0_MIN_RXD,    \
        .txd_alignment = 1U,              \
        .txd_size = HW_ATL_B0_TXD_SIZE,   \
        .txds_max = HW_ATL_B0_MAX_TXD,    \
        .txds_min = HW_ATL_B0_MIN_TXD,    \
        .txhwb_alignment = 4096U,         \
        .tx_rings = HW_ATL_B0_TX_RINGS,   \
        .rx_rings = HW_ATL_B0_RX_RINGS,   \
        .hw_features = NETIF_F_HW_CSUM |  \
                        NETIF_F_RXCSUM |  \
                        NETIF_F_RXHASH |  \
                        NETIF_F_SG |      \
                        NETIF_F_TSO |     \
                        NETIF_F_LRO |     \
                        NETIF_F_NTUPLE |  \
                        NETIF_F_HW_VLAN_CTAG_FILTER | \
                        NETIF_F_HW_VLAN_CTAG_RX |     \
                        NETIF_F_HW_VLAN_CTAG_TX,      \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true,             \
        .mtu = HW_ATL_B0_MTU_JUMBO,       \
        .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U
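
/* All B0-based boards below share DEFAULT_B0_BOARD_BASIC_CAPABILITIES;
 * the per-board capability structures differ only in media type and in
 * the mask of supported link speeds.
 */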
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;

        err = hw_atl_utils_soft_reset(self);
        if (err)
                return err;

        self->aq_fw_ops->set_state(self, MPI_RESET);

        err = aq_hw_err_from_flags(self);

        return err;
}

static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);

        return 0;
}

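/* Per-TC buffer sizing and flow-control thresholds. The hi/lo watermarks
 * below are set to roughly 66% and 50% of the packet buffer (buff_size is
 * in KB, hence the 1024/32 scaling); presumably XOFF is signalled above the
 * hi threshold and XON again below the lo threshold.
 */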
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;

        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        /* TPS TC credits init */
        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
        hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

        /* Tx buf size per TC */
        buff_size = HW_ATL_B0_TXBUF_MAX;

        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024 / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024 / 32U) * 50U) /
                                                   100U, tc);

        /* QoS Rx buf size per TC */
        buff_size = HW_ATL_B0_RXBUF_MAX;

        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024U / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024U / 32U) * 50U) /
                                                   100U, tc);

        hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);

        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

        return aq_hw_err_from_flags(self);
}

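/* The RSS hash key is programmed 32 bits at a time through an indirect
 * register interface: write data and address, set the write-enable bit,
 * then poll until hardware clears it to confirm the write completed.
 */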
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
                                     struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        unsigned int addr = 0U;
        unsigned int i = 0U;
        int err = 0;
        u32 val;

        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;

                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

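/* Each of the HW_ATL_B0_RSS_REDIRECTION_MAX indirection entries is a 3-bit
 * queue index (HW_ATL_B0_RSS_REDIRECTION_BITS). The first loop below packs
 * those 3-bit fields back to back into the bitary[] word array, which is
 * then written out through the same indirect write-enable/poll interface
 * as the hash key above.
 */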
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u8 *indirection_table = rss_params->indirection_table;
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        u32 i = 0U;
        int err = 0;
        u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
                   HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
        u32 val;

        memset(bitary, 0, sizeof(bitary));

        for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                         ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

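/* Checksum, LSO and LRO offload configuration. Tx checksumming is always
 * enabled in hardware; Rx checksumming follows NETIF_F_RXCSUM. For LRO,
 * val selects the maximum number of descriptors per aggregated session,
 * scaled to whatever HW_ATL_B0_LRO_RXD_MAX permits (the register encoding
 * is assumed to be a log2-style code, 0x0 = 2 descriptors up to 0x3 = 16).
 */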
static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
                                    struct aq_nic_cfg_s *aq_nic_cfg)
{
        unsigned int i;

        /* Tx checksum offloads */
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* Rx checksum offloads */
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                                          NETIF_F_RXCSUM));
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                                       NETIF_F_RXCSUM));

        /* LSO offloads */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        /* Outer VLAN tag offload */
        hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);

        /* LRO offloads */
        {
                unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
                        ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
                        ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

                for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
                        hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

                hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
                hw_atl_rpo_lro_inactive_interval_set(self, 0);
                /* the LRO timebase divider is 5 uS (0x61a),
                 * which is multiplied by 50 (0x32)
                 * to get a maximum coalescing interval of 250 uS,
                 * which is the default value
                 */
                hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);

                hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

                hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

                hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);

                hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

                hw_atl_rpo_lro_pkt_lim_set(self, 1U);

                hw_atl_rpo_lro_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
                hw_atl_itr_rsc_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

                hw_atl_itr_rsc_delay_set(self, 1U);
        }

        return aq_hw_err_from_flags(self);
}

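/* The LSO TCP flag masks below are assumed to control which TCP flags
 * survive segmentation: 0x0FF6 strips FIN/PSH from first and middle
 * segments, so only the last segment (mask 0x0F7F) can carry them.
 */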
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
        /* Tx TC/Queue number config */
        hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);

        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
                        0x00010000U : 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
                                          0xB3333333U : 0x00000000U);

        /* Multicast filters */
        for (i = HW_ATL_B0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* Vlan filters */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

        hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

        // Always accept untagged packets
        hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
        hw_atl_rpf_vlan_untagged_act_set(self, 1U);

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00005040U,
                        IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}

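/* Unicast filter slot HW_ATL_B0_MAC holds the primary station address.
 * The 6-byte MAC is split into a 16-bit MSW (bytes 0-1) and a 32-bit LSW
 * (bytes 2-5), and the filter is disabled around the update.
 */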
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
        unsigned int h = 0U;
        unsigned int l = 0U;
        int err = 0;

        if (!mac_addr) {
                err = -EINVAL;
                goto err_exit;
        }
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
            (mac_addr[4] << 8) | mac_addr[5];

        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

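/* aq_hw_atl_igcr_table_ maps the interrupt type to the global interrupt
 * control register value; the second column is used when more than one
 * vector is configured (see the vecs > 1U index below).
 */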
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
                [AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
                [AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
                [AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
        };
        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
        int err = 0;
        u32 val;

        hw_atl_b0_hw_init_tx_path(self);
        hw_atl_b0_hw_init_rx_path(self);

        hw_atl_b0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_b0_hw_qos_set(self);
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Force limit MRRS on RDM/TDM to 2K */
        val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
        aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
                        (val & ~0x707) | 0x404);

        /* TX DMA total request limit. B0 hardware is not capable to
         * handle more than (8K-MRRS) incoming DMA data.
         * Value 24 in 256byte units
         */
        aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        hw_atl_reg_irq_glb_ctl_set(self,
                                   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                   [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Interrupts */
        hw_atl_reg_gen_irq_map_set(self,
                                   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
                                   ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)),
                                   0U);

        /* Enable link interrupt */
        if (aq_nic_cfg->link_irq_vec)
                hw_atl_reg_gen_irq_map_set(self, BIT(7) |
                                           aq_nic_cfg->link_irq_vec, 3U);

        hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

        return 0;
}

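/* Tx descriptors come in two flavours: context descriptors (TYPE_TXC),
 * emitted first when GSO or VLAN insertion needs per-packet metadata, and
 * data descriptors (TYPE_TXD) that carry the DMA address and buffer
 * length. The EOP descriptor also requests write-back so the completion
 * path can advance hw_head.
 */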
static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int pkt_len = 0U;
        unsigned int frag_count = 0U;
        bool is_vlan = false;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                HW_ATL_B0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_gso) {
                        txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl |= (buff->len_l3 << 31) |
                                    (buff->len_l2 << 24);
                        txd->ctl2 |= (buff->mss << 16);
                        is_gso = true;

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                        txd->ctl2 |= (buff->len_l4 << 8) |
                                     (buff->len_l3 >> 1);
                }
                if (buff->is_vlan) {
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl |= buff->vlan_tx_tag << 4;
                        is_vlan = true;
                }
                if (!buff->is_gso && !buff->is_vlan) {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
                                     ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;

                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso || is_vlan) {
                                /* enable tx context */
                                txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
                        }
                        if (is_gso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

                        if (is_vlan)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
                                is_gso = false;
                                is_vlan = false;
                        }
                }
                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_b0_hw_tx_ring_tail_update(self, ring);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
        u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                                  aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self,
                                                  dma_desc_addr_msw,
                                                  aq_ring->idx);

        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_rdm_rx_desc_data_buff_size_set(self,
                                              AQ_CFG_RX_FRAME_MAX / 1024U,
                                              aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
                                              aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                                  aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                                  aq_ring->idx);

        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
             sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                                        HW_ATL_B0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
                rxd->hdr_addr = 0U;
        }

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
        int err = 0;

        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

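/* Parse Rx write-back descriptors: bit 0 of status is the done bit, status
 * bits 2..5 carry per-packet error/checksum state (rx_stat), and the type
 * field encodes packet type, checksum-offload validity, VLAN presence and
 * the RSS hash type. Multi-descriptor packets chain via next_desc_ptr
 * (LRO) or simply the next ring index (jumbo frames).
 */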
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                                        struct aq_ring_s *ring)
{
        for (; ring->hw_head != ring->sw_tail;
             ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;
                u8 rx_stat = 0U;

                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
                }

                buff = &ring->buff_ring[ring->hw_head];

                buff->flags = 0U;
                buff->is_hash_l4 = 0U;

                rx_stat = (0x0000003CU & rxd_wb->status) >> 2;

                is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;

                pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
                           HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;

                if (is_rx_check_sum_enabled & BIT(0) &&
                    (0x0U == (pkt_type & 0x3U)))
                        buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;

                if (is_rx_check_sum_enabled & BIT(1)) {
                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                }
                buff->is_cso_err = !!(rx_stat & 0x6);
                /* Checksum offload workaround for small packets */
                if (unlikely(rxd_wb->pkt_len <= 60)) {
                        buff->is_ip_cso = 0U;
                        buff->is_cso_err = 0U;
                }

                if (self->aq_nic_cfg->is_vlan_rx_strip &&
                    ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
                     (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
                        buff->is_vlan = true;
                        buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
                }

                if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
                        /* MAC error or DMA error */
                        buff->is_error = 1U;
                }
                if (self->aq_nic_cfg->is_rss) {
                        /* last 4 byte */
                        u16 rss_type = rxd_wb->type & 0xFU;

                        if (rss_type && rss_type < 0x8U) {
                                buff->is_hash_l4 = (rss_type == 0x4 ||
                                                    rss_type == 0x5);
                                buff->rss_hash = rxd_wb->rss_hash;
                        }
                }

                if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                        buff->len = rxd_wb->pkt_len %
                                    AQ_CFG_RX_FRAME_MAX;
                        buff->len = buff->len ?
                                    buff->len : AQ_CFG_RX_FRAME_MAX;
                        buff->next = 0U;
                        buff->is_eop = 1U;
                } else {
                        buff->len =
                                rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
                                AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;

                        if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
                            rxd_wb->status) {
                                /* LRO */
                                buff->next = rxd_wb->next_desc_ptr;
                                ++ring->stats.rx.lro_packets;
                        } else {
                                /* jumbo */
                                buff->next =
                                        aq_ring_next_dx(ring,
                                                        ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        atomic_inc(&self->dpc);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = hw_atl_itr_irq_statuslsw_get(self);

        return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
                                          unsigned int packet_filter)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        unsigned int i = 0U;

        hw_atl_rpfl2promiscuous_mode_en_set(self,
                                            IS_FILTER_ENABLED(IFF_PROMISC));

        hw_atl_rpf_vlan_prom_mode_en_set(self,
                                         IS_FILTER_ENABLED(IFF_PROMISC) ||
                                         cfg->is_vlan_force_promisc);

        hw_atl_rpfl2multicast_flr_en_set(self,
                                         IS_FILTER_ENABLED(IFF_ALLMULTI) &&
                                         IS_FILTER_ENABLED(IFF_MULTICAST), 0);

        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                               IS_FILTER_ENABLED(IFF_ALLMULTI) &&
                                               IS_FILTER_ENABLED(IFF_MULTICAST));

        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (cfg->is_mc_list_enabled &&
                                            (i <= cfg->mc_list_count)) ?
                                           1U : 0U, i);

        return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED
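
/* Multicast addresses reuse the unicast filter bank starting at
 * HW_ATL_B0_MAC_MIN; the primary MAC slot (HW_ATL_B0_MAC) is skipped,
 * which is why at most HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN entries fit.
 */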
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
                                           [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
{
        int err = 0;

        if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
        for (self->aq_nic_cfg->mc_list_count = 0U;
             self->aq_nic_cfg->mc_list_count < count;
             ++self->aq_nic_cfg->mc_list_count) {
                u32 i = self->aq_nic_cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addresslsw_set(self,
                                                        l,
                                                        HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addressmsw_set(self,
                                                        h,
                                                        HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
                                           HW_ATL_B0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

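/* Interrupt moderation: in ON mode the user-supplied tx_itr/rx_itr values
 * (microseconds) are halved into 2us hardware timer units; in AUTO mode
 * min/max timers are looked up per link speed from the tables below and
 * the resulting values are reported back through tx_itr/rx_itr.
 */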
static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_tx = 2U;
        u32 itr_rx = 2U;

        switch (self->aq_nic_cfg->itr) {
        case AQ_CFG_INTERRUPT_MODERATION_ON:
        case AQ_CFG_INTERRUPT_MODERATION_AUTO:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

                if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
                        /* HW timers are in 2us units */
                        int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
                        int tx_min_timer = tx_max_timer / 2;

                        int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
                        int rx_min_timer = rx_max_timer / 2;

                        tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
                        tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
                        rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
                        rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

                        itr_tx |= tx_min_timer << 0x8U;
                        itr_tx |= tx_max_timer << 0x10U;
                        itr_rx |= rx_min_timer << 0x8U;
                        itr_rx |= rx_max_timer << 0x10U;
                } else {
                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
                                {0xfU, 0xffU}, /* 10Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit 5GS */
                                {0xfU, 0x1ffU}, /* 2.5Gbit */
                                {0xfU, 0x1ffU}, /* 1Gbit */
                                {0xfU, 0x1ffU}, /* 100Mbit */
                        };

                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
                                {0x6U, 0x38U},/* 10Gbit */
                                {0xCU, 0x70U},/* 5Gbit */
                                {0xCU, 0x70U},/* 5Gbit 5GS */
                                {0x18U, 0xE0U},/* 2.5Gbit */
                                {0x30U, 0x80U},/* 1Gbit */
                                {0x4U, 0x50U},/* 100Mbit */
                        };

                        unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                        /* Update user visible ITR settings */
                        self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
                                                        [speed_index][1] * 2;
                        self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
                                                        [speed_index][1] * 2;

                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][0] << 0x8U;
                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][1] << 0x10U;

                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][0] << 0x8U;
                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][1] << 0x10U;
                }
                break;
        case AQ_CFG_INTERRUPT_MODERATION_OFF:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
                itr_tx = 0U;
                itr_rx = 0U;
                break;
        }

        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
                hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
                hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
        int err;
        u32 val;

        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);

        /* Invalidate Descriptor Cache to prevent writing to the cached
         * descriptors and to the data pointer of those descriptors
         */
        hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);

        err = aq_hw_err_from_flags(self);

        if (err)
                goto err_exit;

        readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
                                  self, val, val == 1, 1000U, 10000U);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
                                    struct aq_rx_filter_l3l4 *data)
{
        u8 location = data->location;

        if (!data->is_ipv6) {
                hw_atl_rpfl3l4_cmd_clear(self, location);
                hw_atl_rpf_l4_spd_set(self, 0U, location);
                hw_atl_rpf_l4_dpd_set(self, 0U, location);
                hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
                hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
        } else {
                int i;

                for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
                        hw_atl_rpfl3l4_cmd_clear(self, location + i);
                        hw_atl_rpf_l4_spd_set(self, 0U, location + i);
                        hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
                }
                hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
                hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
                                  struct aq_rx_filter_l3l4 *data)
{
        u8 location = data->location;

        hw_atl_b0_hw_fl3l4_clear(self, data);

        if (data->cmd) {
                if (!data->is_ipv6) {
                        hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
                                                          location,
                                                          data->ip_dst[0]);
                        hw_atl_rpfl3l4_ipv4_src_addr_set(self,
                                                         location,
                                                         data->ip_src[0]);
                } else {
                        hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
                                                          location,
                                                          data->ip_dst);
                        hw_atl_rpfl3l4_ipv6_src_addr_set(self,
                                                         location,
                                                         data->ip_src);
                }
        }
        hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
        hw_atl_rpf_l4_spd_set(self, data->p_src, location);
        hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
                                struct aq_rx_filter_l2 *data)
{
        hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
        hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
        hw_atl_rpf_etht_user_priority_en_set(self,
                                             !!data->user_priority_en,
                                             data->location);
        if (data->user_priority_en)
                hw_atl_rpf_etht_user_priority_set(self,
                                                  data->user_priority,
                                                  data->location);

        if (data->queue < 0) {
                hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
                hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
        } else {
                hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
                hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
                hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
                                  struct aq_rx_filter_l2 *data)
{
        hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
        hw_atl_rpf_etht_flr_set(self, 0U, data->location);
        hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);

        return aq_hw_err_from_flags(self);
}

/**
 * @brief Set VLAN filter table
 * @details Configure VLAN filter table to accept (and assign a queue to)
 *  traffic for the particular vlan ids.
 * Note: use this function under vlan promisc mode so as not to lose traffic
 *
 * @param aq_hw_s
 * @param aq_rx_filter_vlan VLAN filter configuration
 * @return 0 - OK, <0 - error
 */
static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
                                 struct aq_rx_filter_vlan *aq_vlans)
{
        int i;

        for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
                hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
                hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
                if (aq_vlans[i].enable) {
                        hw_atl_rpf_vlan_id_flr_set(self,
                                                   aq_vlans[i].vlan_id,
                                                   i);
                        hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
                        hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
                        if (aq_vlans[i].queue != 0xFF) {
                                hw_atl_rpf_vlan_rxq_flr_set(self,
                                                            aq_vlans[i].queue,
                                                            i);
                                hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
        /* set promisc in case of disabling the VLAN filter */
        hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);

        return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
        .hw_init              = hw_atl_b0_hw_init,
        .hw_reset             = hw_atl_b0_hw_reset,
        .hw_start             = hw_atl_b0_hw_start,
        .hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
        .hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
        .hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
        .hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
        .hw_stop              = hw_atl_b0_hw_stop,

        .hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
        .hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

        .hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
        .hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

        .hw_irq_enable           = hw_atl_b0_hw_irq_enable,
        .hw_irq_disable          = hw_atl_b0_hw_irq_disable,
        .hw_irq_read             = hw_atl_b0_hw_irq_read,

        .hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
        .hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
        .hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
        .hw_filter_l2_set            = hw_atl_b0_hw_fl2_set,
        .hw_filter_l2_clear          = hw_atl_b0_hw_fl2_clear,
        .hw_filter_l3l4_set          = hw_atl_b0_hw_fl3l4_set,
        .hw_filter_vlan_set          = hw_atl_b0_hw_vlan_set,
        .hw_filter_vlan_ctrl         = hw_atl_b0_hw_vlan_ctrl,
        .hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
        .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
        .hw_rss_set                  = hw_atl_b0_hw_rss_set,
        .hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
        .hw_set_offload              = hw_atl_b0_hw_offload_set,
        .hw_set_fc                   = hw_atl_b0_set_fc,
};