1 // SPDX-License-Identifier: GPL-2.0-only
3 * aQuantia Corporation Network Driver
4 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
7 /* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
10 #include "../aq_hw_utils.h"
11 #include "../aq_ring.h"
12 #include "../aq_nic.h"
13 #include "../aq_phy.h"
14 #include "hw_atl_b0.h"
15 #include "hw_atl_utils.h"
16 #include "hw_atl_llh.h"
17 #include "hw_atl_b0_internal.h"
18 #include "hw_atl_llh_internal.h"
20 #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
24 .vecs = HW_ATL_B0_RSS_MAX, \
25 .tcs = HW_ATL_B0_TC_MAX, \
26 .rxd_alignment = 1U, \
27 .rxd_size = HW_ATL_B0_RXD_SIZE, \
28 .rxds_max = HW_ATL_B0_MAX_RXD, \
29 .rxds_min = HW_ATL_B0_MIN_RXD, \
30 .txd_alignment = 1U, \
31 .txd_size = HW_ATL_B0_TXD_SIZE, \
32 .txds_max = HW_ATL_B0_MAX_TXD, \
33 .txds_min = HW_ATL_B0_MIN_TXD, \
34 .txhwb_alignment = 4096U, \
35 .tx_rings = HW_ATL_B0_TX_RINGS, \
36 .rx_rings = HW_ATL_B0_RX_RINGS, \
37 .hw_features = NETIF_F_HW_CSUM | \
44 NETIF_F_HW_VLAN_CTAG_FILTER | \
45 NETIF_F_HW_VLAN_CTAG_RX | \
46 NETIF_F_HW_VLAN_CTAG_TX | \
47 NETIF_F_GSO_UDP_L4 | \
48 NETIF_F_GSO_PARTIAL, \
49 .hw_priv_flags = IFF_UNICAST_FLT, \
50 .flow_control = true, \
51 .mtu = HW_ATL_B0_MTU_JUMBO, \
52 .mac_regs_count = 88, \
53 .hw_alive_check_addr = 0x10U
55 #define FRAC_PER_NS 0x100000000LL
57 const struct aq_hw_caps_s hw_atl_b0_caps_aqc100
= {
58 DEFAULT_B0_BOARD_BASIC_CAPABILITIES
,
59 .media_type
= AQ_HW_MEDIA_TYPE_FIBRE
,
60 .link_speed_msk
= AQ_NIC_RATE_10G
|
67 const struct aq_hw_caps_s hw_atl_b0_caps_aqc107
= {
68 DEFAULT_B0_BOARD_BASIC_CAPABILITIES
,
69 .media_type
= AQ_HW_MEDIA_TYPE_TP
,
70 .link_speed_msk
= AQ_NIC_RATE_10G
|
77 const struct aq_hw_caps_s hw_atl_b0_caps_aqc108
= {
78 DEFAULT_B0_BOARD_BASIC_CAPABILITIES
,
79 .media_type
= AQ_HW_MEDIA_TYPE_TP
,
80 .link_speed_msk
= AQ_NIC_RATE_5G
|
86 const struct aq_hw_caps_s hw_atl_b0_caps_aqc109
= {
87 DEFAULT_B0_BOARD_BASIC_CAPABILITIES
,
88 .media_type
= AQ_HW_MEDIA_TYPE_TP
,
89 .link_speed_msk
= AQ_NIC_RATE_2GS
|
94 static int hw_atl_b0_hw_reset(struct aq_hw_s
*self
)
98 err
= hw_atl_utils_soft_reset(self
);
102 self
->aq_fw_ops
->set_state(self
, MPI_RESET
);
104 err
= aq_hw_err_from_flags(self
);
109 static int hw_atl_b0_set_fc(struct aq_hw_s
*self
, u32 fc
, u32 tc
)
111 hw_atl_rpb_rx_xoff_en_per_tc_set(self
, !!(fc
& AQ_NIC_FC_RX
), tc
);
116 static int hw_atl_b0_hw_qos_set(struct aq_hw_s
*self
)
118 unsigned int i_priority
= 0U;
122 /* TPS Descriptor rate init */
123 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self
, 0x0U
);
124 hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self
, 0xA);
127 hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self
, 0U);
129 /* TPS TC credits init */
130 hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self
, 0U);
131 hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self
, 0U);
135 /* TX Packet Scheduler Data TC0 */
136 hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self
, 0xFFF, tc
);
137 hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self
, 0x64, tc
);
138 hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self
, 0x50, tc
);
139 hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self
, 0x1E, tc
);
141 /* Tx buf size TC0 */
142 buff_size
= HW_ATL_B0_TXBUF_MAX
- HW_ATL_B0_PTP_TXBUF_SIZE
;
144 hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self
, buff_size
, tc
);
145 hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self
,
147 (1024 / 32U) * 66U) /
149 hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self
,
151 (1024 / 32U) * 50U) /
153 /* Init TC2 for PTP_TX */
156 hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self
, HW_ATL_B0_PTP_TXBUF_SIZE
,
159 /* QoS Rx buf size per TC */
161 buff_size
= HW_ATL_B0_RXBUF_MAX
- HW_ATL_B0_PTP_RXBUF_SIZE
;
163 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self
, buff_size
, tc
);
164 hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self
,
166 (1024U / 32U) * 66U) /
168 hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self
,
170 (1024U / 32U) * 50U) /
173 hw_atl_b0_set_fc(self
, self
->aq_nic_cfg
->fc
.req
, tc
);
175 /* Init TC2 for PTP_RX */
178 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self
, HW_ATL_B0_PTP_RXBUF_SIZE
,
180 /* No flow control for PTP */
181 hw_atl_rpb_rx_xoff_en_per_tc_set(self
, 0U, tc
);
183 /* QoS 802.1p priority -> TC mapping */
184 for (i_priority
= 8U; i_priority
--;)
185 hw_atl_rpf_rpb_user_priority_tc_map_set(self
, i_priority
, 0U);
187 return aq_hw_err_from_flags(self
);
190 static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s
*self
,
191 struct aq_rss_parameters
*rss_params
)
193 struct aq_nic_cfg_s
*cfg
= self
->aq_nic_cfg
;
194 unsigned int addr
= 0U;
199 for (i
= 10, addr
= 0U; i
--; ++addr
) {
200 u32 key_data
= cfg
->is_rss
?
201 __swab32(rss_params
->hash_secret_key
[i
]) : 0U;
202 hw_atl_rpf_rss_key_wr_data_set(self
, key_data
);
203 hw_atl_rpf_rss_key_addr_set(self
, addr
);
204 hw_atl_rpf_rss_key_wr_en_set(self
, 1U);
205 err
= readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get
,
212 err
= aq_hw_err_from_flags(self
);
218 static int hw_atl_b0_hw_rss_set(struct aq_hw_s
*self
,
219 struct aq_rss_parameters
*rss_params
)
221 u32 num_rss_queues
= max(1U, self
->aq_nic_cfg
->num_rss_queues
);
222 u8
*indirection_table
= rss_params
->indirection_table
;
223 u16 bitary
[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX
*
224 HW_ATL_B0_RSS_REDIRECTION_BITS
/ 16U)];
229 memset(bitary
, 0, sizeof(bitary
));
231 for (i
= HW_ATL_B0_RSS_REDIRECTION_MAX
; i
--;) {
232 (*(u32
*)(bitary
+ ((i
* 3U) / 16U))) |=
233 ((indirection_table
[i
] % num_rss_queues
) <<
237 for (i
= ARRAY_SIZE(bitary
); i
--;) {
238 hw_atl_rpf_rss_redir_tbl_wr_data_set(self
, bitary
[i
]);
239 hw_atl_rpf_rss_redir_tbl_addr_set(self
, i
);
240 hw_atl_rpf_rss_redir_wr_en_set(self
, 1U);
241 err
= readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get
,
248 err
= aq_hw_err_from_flags(self
);
254 static int hw_atl_b0_hw_offload_set(struct aq_hw_s
*self
,
255 struct aq_nic_cfg_s
*aq_nic_cfg
)
259 /* TX checksums offloads*/
260 hw_atl_tpo_ipv4header_crc_offload_en_set(self
, 1);
261 hw_atl_tpo_tcp_udp_crc_offload_en_set(self
, 1);
263 /* RX checksums offloads*/
264 hw_atl_rpo_ipv4header_crc_offload_en_set(self
, !!(aq_nic_cfg
->features
&
266 hw_atl_rpo_tcp_udp_crc_offload_en_set(self
, !!(aq_nic_cfg
->features
&
270 hw_atl_tdm_large_send_offload_en_set(self
, 0xFFFFFFFFU
);
272 /* Outer VLAN tag offload */
273 hw_atl_rpo_outer_vlan_tag_mode_set(self
, 1U);
277 unsigned int val
= (8U < HW_ATL_B0_LRO_RXD_MAX
) ? 0x3U
:
278 ((4U < HW_ATL_B0_LRO_RXD_MAX
) ? 0x2U
:
279 ((2U < HW_ATL_B0_LRO_RXD_MAX
) ? 0x1U
: 0x0));
281 for (i
= 0; i
< HW_ATL_B0_RINGS_MAX
; i
++)
282 hw_atl_rpo_lro_max_num_of_descriptors_set(self
, val
, i
);
284 hw_atl_rpo_lro_time_base_divider_set(self
, 0x61AU
);
285 hw_atl_rpo_lro_inactive_interval_set(self
, 0);
286 /* the LRO timebase divider is 5 uS (0x61a),
287 * which is multiplied by 50(0x32)
288 * to get a maximum coalescing interval of 250 uS,
289 * which is the default value
291 hw_atl_rpo_lro_max_coalescing_interval_set(self
, 50);
293 hw_atl_rpo_lro_qsessions_lim_set(self
, 1U);
295 hw_atl_rpo_lro_total_desc_lim_set(self
, 2U);
297 hw_atl_rpo_lro_patch_optimization_en_set(self
, 1U);
299 hw_atl_rpo_lro_min_pay_of_first_pkt_set(self
, 10U);
301 hw_atl_rpo_lro_pkt_lim_set(self
, 1U);
303 hw_atl_rpo_lro_en_set(self
,
304 aq_nic_cfg
->is_lro
? 0xFFFFFFFFU
: 0U);
305 hw_atl_itr_rsc_en_set(self
,
306 aq_nic_cfg
->is_lro
? 0xFFFFFFFFU
: 0U);
308 hw_atl_itr_rsc_delay_set(self
, 1U);
311 return aq_hw_err_from_flags(self
);
314 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s
*self
)
316 /* Tx TC/Queue number config */
317 hw_atl_rpb_tps_tx_tc_mode_set(self
, 1U);
319 hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self
, 0x0FF6U
);
320 hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self
, 0x0FF6U
);
321 hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self
, 0x0F7FU
);
324 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self
, 1U);
327 aq_hw_write_reg(self
, 0x00007040U
, IS_CHIP_FEATURE(TPO2
) ?
328 0x00010000U
: 0x00000000U
);
329 hw_atl_tdm_tx_dca_en_set(self
, 0U);
330 hw_atl_tdm_tx_dca_mode_set(self
, 0U);
332 hw_atl_tpb_tx_path_scp_ins_en_set(self
, 1U);
334 return aq_hw_err_from_flags(self
);
337 static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s
*self
)
339 struct aq_nic_cfg_s
*cfg
= self
->aq_nic_cfg
;
342 /* Rx TC/RSS number config */
343 hw_atl_rpb_rpf_rx_traf_class_mode_set(self
, 1U);
345 /* Rx flow control */
346 hw_atl_rpb_rx_flow_ctl_mode_set(self
, 1U);
348 /* RSS Ring selection */
349 hw_atl_reg_rx_flr_rss_control1set(self
, cfg
->is_rss
?
350 0xB3333333U
: 0x00000000U
);
352 /* Multicast filters */
353 for (i
= HW_ATL_B0_MAC_MAX
; i
--;) {
354 hw_atl_rpfl2_uc_flr_en_set(self
, (i
== 0U) ? 1U : 0U, i
);
355 hw_atl_rpfl2unicast_flr_act_set(self
, 1U, i
);
358 hw_atl_reg_rx_flr_mcst_flr_msk_set(self
, 0x00000000U
);
359 hw_atl_reg_rx_flr_mcst_flr_set(self
, 0x00010FFFU
, 0U);
362 hw_atl_rpf_vlan_outer_etht_set(self
, 0x88A8U
);
363 hw_atl_rpf_vlan_inner_etht_set(self
, 0x8100U
);
365 hw_atl_rpf_vlan_prom_mode_en_set(self
, 1);
367 // Always accept untagged packets
368 hw_atl_rpf_vlan_accept_untagged_packets_set(self
, 1U);
369 hw_atl_rpf_vlan_untagged_act_set(self
, 1U);
372 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self
, 1U);
375 aq_hw_write_reg(self
, 0x00005040U
,
376 IS_CHIP_FEATURE(RPF2
) ? 0x000F0000U
: 0x00000000U
);
378 hw_atl_rpfl2broadcast_flr_act_set(self
, 1U);
379 hw_atl_rpfl2broadcast_count_threshold_set(self
, 0xFFFFU
& (~0U / 256U));
381 hw_atl_rdm_rx_dca_en_set(self
, 0U);
382 hw_atl_rdm_rx_dca_mode_set(self
, 0U);
384 return aq_hw_err_from_flags(self
);
387 static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s
*self
, u8
*mac_addr
)
397 h
= (mac_addr
[0] << 8) | (mac_addr
[1]);
398 l
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
399 (mac_addr
[4] << 8) | mac_addr
[5];
401 hw_atl_rpfl2_uc_flr_en_set(self
, 0U, HW_ATL_B0_MAC
);
402 hw_atl_rpfl2unicast_dest_addresslsw_set(self
, l
, HW_ATL_B0_MAC
);
403 hw_atl_rpfl2unicast_dest_addressmsw_set(self
, h
, HW_ATL_B0_MAC
);
404 hw_atl_rpfl2_uc_flr_en_set(self
, 1U, HW_ATL_B0_MAC
);
406 err
= aq_hw_err_from_flags(self
);
412 static int hw_atl_b0_hw_init(struct aq_hw_s
*self
, u8
*mac_addr
)
414 static u32 aq_hw_atl_igcr_table_
[4][2] = {
415 [AQ_HW_IRQ_INVALID
] = { 0x20000000U
, 0x20000000U
},
416 [AQ_HW_IRQ_LEGACY
] = { 0x20000080U
, 0x20000080U
},
417 [AQ_HW_IRQ_MSI
] = { 0x20000021U
, 0x20000025U
},
418 [AQ_HW_IRQ_MSIX
] = { 0x20000022U
, 0x20000026U
},
420 struct aq_nic_cfg_s
*aq_nic_cfg
= self
->aq_nic_cfg
;
425 hw_atl_b0_hw_init_tx_path(self
);
426 hw_atl_b0_hw_init_rx_path(self
);
428 hw_atl_b0_hw_mac_addr_set(self
, mac_addr
);
430 self
->aq_fw_ops
->set_link_speed(self
, aq_nic_cfg
->link_speed_msk
);
431 self
->aq_fw_ops
->set_state(self
, MPI_INIT
);
433 hw_atl_b0_hw_qos_set(self
);
434 hw_atl_b0_hw_rss_set(self
, &aq_nic_cfg
->aq_rss
);
435 hw_atl_b0_hw_rss_hash_set(self
, &aq_nic_cfg
->aq_rss
);
437 /* Force limit MRRS on RDM/TDM to 2K */
438 val
= aq_hw_read_reg(self
, HW_ATL_PCI_REG_CONTROL6_ADR
);
439 aq_hw_write_reg(self
, HW_ATL_PCI_REG_CONTROL6_ADR
,
440 (val
& ~0x707) | 0x404);
442 /* TX DMA total request limit. B0 hardware is not capable to
443 * handle more than (8K-MRRS) incoming DMA data.
444 * Value 24 in 256byte units
446 aq_hw_write_reg(self
, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR
, 24);
448 /* Reset link status and read out initial hardware counters */
449 self
->aq_link_status
.mbps
= 0;
450 self
->aq_fw_ops
->update_stats(self
);
452 err
= aq_hw_err_from_flags(self
);
457 hw_atl_reg_irq_glb_ctl_set(self
,
458 aq_hw_atl_igcr_table_
[aq_nic_cfg
->irq_type
]
459 [(aq_nic_cfg
->vecs
> 1U) ?
462 hw_atl_itr_irq_auto_masklsw_set(self
, aq_nic_cfg
->aq_hw_caps
->irq_mask
);
465 hw_atl_reg_gen_irq_map_set(self
,
466 ((HW_ATL_B0_ERR_INT
<< 0x18) |
468 ((HW_ATL_B0_ERR_INT
<< 0x10) |
471 /* Enable link interrupt */
472 if (aq_nic_cfg
->link_irq_vec
)
473 hw_atl_reg_gen_irq_map_set(self
, BIT(7) |
474 aq_nic_cfg
->link_irq_vec
, 3U);
476 hw_atl_b0_hw_offload_set(self
, aq_nic_cfg
);
482 static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s
*self
,
483 struct aq_ring_s
*ring
)
485 hw_atl_tdm_tx_desc_en_set(self
, 1, ring
->idx
);
487 return aq_hw_err_from_flags(self
);
490 static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s
*self
,
491 struct aq_ring_s
*ring
)
493 hw_atl_rdm_rx_desc_en_set(self
, 1, ring
->idx
);
495 return aq_hw_err_from_flags(self
);
/* Enable the global Tx and Rx packet buffers. */
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);

	return aq_hw_err_from_flags(self);
}
506 static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s
*self
,
507 struct aq_ring_s
*ring
)
509 hw_atl_reg_tx_dma_desc_tail_ptr_set(self
, ring
->sw_tail
, ring
->idx
);
514 static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s
*self
,
515 struct aq_ring_s
*ring
,
518 struct aq_ring_buff_s
*buff
= NULL
;
519 struct hw_atl_txd_s
*txd
= NULL
;
520 unsigned int buff_pa_len
= 0U;
521 unsigned int frag_count
= 0U;
522 unsigned int pkt_len
= 0U;
523 bool is_vlan
= false;
526 buff
= &ring
->buff_ring
[ring
->sw_tail
];
527 pkt_len
= (buff
->is_eop
&& buff
->is_sop
) ? buff
->len
: buff
->len_pkt
;
529 for (frag_count
= 0; frag_count
< frags
; frag_count
++) {
530 txd
= (struct hw_atl_txd_s
*)&ring
->dx_ring
[ring
->sw_tail
*
536 buff
= &ring
->buff_ring
[ring
->sw_tail
];
538 if (buff
->is_gso_tcp
|| buff
->is_gso_udp
) {
539 if (buff
->is_gso_tcp
)
540 txd
->ctl
|= HW_ATL_B0_TXD_CTL_CMD_TCP
;
541 txd
->ctl
|= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC
;
542 txd
->ctl
|= (buff
->len_l3
<< 31) |
543 (buff
->len_l2
<< 24);
544 txd
->ctl2
|= (buff
->mss
<< 16);
547 pkt_len
-= (buff
->len_l4
+
551 txd
->ctl
|= HW_ATL_B0_TXD_CTL_CMD_IPV6
;
552 txd
->ctl2
|= (buff
->len_l4
<< 8) |
556 txd
->ctl
|= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC
;
557 txd
->ctl
|= buff
->vlan_tx_tag
<< 4;
560 if (!buff
->is_gso_tcp
&& !buff
->is_gso_udp
&& !buff
->is_vlan
) {
561 buff_pa_len
= buff
->len
;
563 txd
->buf_addr
= buff
->pa
;
564 txd
->ctl
|= (HW_ATL_B0_TXD_CTL_BLEN
&
565 ((u32
)buff_pa_len
<< 4));
566 txd
->ctl
|= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD
;
569 txd
->ctl2
|= HW_ATL_B0_TXD_CTL2_LEN
& (pkt_len
<< 14);
571 if (is_gso
|| is_vlan
) {
572 /* enable tx context */
573 txd
->ctl2
|= HW_ATL_B0_TXD_CTL2_CTX_EN
;
576 txd
->ctl
|= HW_ATL_B0_TXD_CTL_CMD_LSO
;
578 /* Tx checksum offloads */
580 txd
->ctl
|= HW_ATL_B0_TXD_CTL_CMD_IPCSO
;
582 if (buff
->is_udp_cso
|| buff
->is_tcp_cso
)
583 txd
->ctl
|= HW_ATL_B0_TXD_CTL_CMD_TUCSO
;
586 txd
->ctl
|= HW_ATL_B0_TXD_CTL_CMD_VLAN
;
588 if (unlikely(buff
->is_eop
)) {
589 txd
->ctl
|= HW_ATL_B0_TXD_CTL_EOP
;
590 txd
->ctl
|= HW_ATL_B0_TXD_CTL_CMD_WB
;
595 ring
->sw_tail
= aq_ring_next_dx(ring
, ring
->sw_tail
);
598 hw_atl_b0_hw_tx_ring_tail_update(self
, ring
);
600 return aq_hw_err_from_flags(self
);
603 static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s
*self
,
604 struct aq_ring_s
*aq_ring
,
605 struct aq_ring_param_s
*aq_ring_param
)
607 u32 dma_desc_addr_msw
= (u32
)(((u64
)aq_ring
->dx_ring_pa
) >> 32);
608 u32 vlan_rx_stripping
= self
->aq_nic_cfg
->is_vlan_rx_strip
;
609 u32 dma_desc_addr_lsw
= (u32
)aq_ring
->dx_ring_pa
;
611 hw_atl_rdm_rx_desc_en_set(self
, false, aq_ring
->idx
);
613 hw_atl_rdm_rx_desc_head_splitting_set(self
, 0U, aq_ring
->idx
);
615 hw_atl_reg_rx_dma_desc_base_addresslswset(self
, dma_desc_addr_lsw
,
618 hw_atl_reg_rx_dma_desc_base_addressmswset(self
,
619 dma_desc_addr_msw
, aq_ring
->idx
);
621 hw_atl_rdm_rx_desc_len_set(self
, aq_ring
->size
/ 8U, aq_ring
->idx
);
623 hw_atl_rdm_rx_desc_data_buff_size_set(self
,
624 AQ_CFG_RX_FRAME_MAX
/ 1024U,
627 hw_atl_rdm_rx_desc_head_buff_size_set(self
, 0U, aq_ring
->idx
);
628 hw_atl_rdm_rx_desc_head_splitting_set(self
, 0U, aq_ring
->idx
);
629 hw_atl_rpo_rx_desc_vlan_stripping_set(self
, !!vlan_rx_stripping
,
632 /* Rx ring set mode */
634 /* Mapping interrupt vector */
635 hw_atl_itr_irq_map_rx_set(self
, aq_ring_param
->vec_idx
, aq_ring
->idx
);
636 hw_atl_itr_irq_map_en_rx_set(self
, true, aq_ring
->idx
);
638 hw_atl_rdm_cpu_id_set(self
, aq_ring_param
->cpu
, aq_ring
->idx
);
639 hw_atl_rdm_rx_desc_dca_en_set(self
, 0U, aq_ring
->idx
);
640 hw_atl_rdm_rx_head_dca_en_set(self
, 0U, aq_ring
->idx
);
641 hw_atl_rdm_rx_pld_dca_en_set(self
, 0U, aq_ring
->idx
);
643 return aq_hw_err_from_flags(self
);
646 static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s
*self
,
647 struct aq_ring_s
*aq_ring
,
648 struct aq_ring_param_s
*aq_ring_param
)
650 u32 dma_desc_msw_addr
= (u32
)(((u64
)aq_ring
->dx_ring_pa
) >> 32);
651 u32 dma_desc_lsw_addr
= (u32
)aq_ring
->dx_ring_pa
;
653 hw_atl_reg_tx_dma_desc_base_addresslswset(self
, dma_desc_lsw_addr
,
656 hw_atl_reg_tx_dma_desc_base_addressmswset(self
, dma_desc_msw_addr
,
659 hw_atl_tdm_tx_desc_len_set(self
, aq_ring
->size
/ 8U, aq_ring
->idx
);
661 hw_atl_b0_hw_tx_ring_tail_update(self
, aq_ring
);
663 /* Set Tx threshold */
664 hw_atl_tdm_tx_desc_wr_wb_threshold_set(self
, 0U, aq_ring
->idx
);
666 /* Mapping interrupt vector */
667 hw_atl_itr_irq_map_tx_set(self
, aq_ring_param
->vec_idx
, aq_ring
->idx
);
668 hw_atl_itr_irq_map_en_tx_set(self
, true, aq_ring
->idx
);
670 hw_atl_tdm_cpu_id_set(self
, aq_ring_param
->cpu
, aq_ring
->idx
);
671 hw_atl_tdm_tx_desc_dca_en_set(self
, 0U, aq_ring
->idx
);
673 return aq_hw_err_from_flags(self
);
676 static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s
*self
,
677 struct aq_ring_s
*ring
,
678 unsigned int sw_tail_old
)
680 for (; sw_tail_old
!= ring
->sw_tail
;
681 sw_tail_old
= aq_ring_next_dx(ring
, sw_tail_old
)) {
682 struct hw_atl_rxd_s
*rxd
=
683 (struct hw_atl_rxd_s
*)&ring
->dx_ring
[sw_tail_old
*
686 struct aq_ring_buff_s
*buff
= &ring
->buff_ring
[sw_tail_old
];
688 rxd
->buf_addr
= buff
->pa
;
692 hw_atl_reg_rx_dma_desc_tail_ptr_set(self
, sw_tail_old
, ring
->idx
);
694 return aq_hw_err_from_flags(self
);
697 static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s
*self
,
698 struct aq_ring_s
*ring
)
702 for (i
= aq_ring_avail_dx(ring
); i
--;
703 ring
->sw_tail
= aq_ring_next_dx(ring
, ring
->sw_tail
)) {
704 struct hw_atl_rxd_s
*rxd
=
705 (struct hw_atl_rxd_s
*)
706 &ring
->dx_ring
[ring
->sw_tail
* HW_ATL_B0_RXD_SIZE
];
708 rxd
->buf_addr
= ring
->dx_ring_pa
+ ring
->size
* ring
->dx_size
;
711 /* Make sure descriptors are updated before bump tail*/
714 hw_atl_reg_rx_dma_desc_tail_ptr_set(self
, ring
->sw_tail
, ring
->idx
);
716 return aq_hw_err_from_flags(self
);
719 static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s
*self
,
720 struct aq_ring_s
*ring
)
722 while (ring
->hw_head
!= ring
->sw_tail
) {
723 struct hw_atl_rxd_hwts_wb_s
*hwts_wb
=
724 (struct hw_atl_rxd_hwts_wb_s
*)
725 (ring
->dx_ring
+ (ring
->hw_head
* HW_ATL_B0_RXD_SIZE
));
727 /* RxD is not done */
728 if (!(hwts_wb
->sec_lw0
& 0x1U
))
731 ring
->hw_head
= aq_ring_next_dx(ring
, ring
->hw_head
);
734 return aq_hw_err_from_flags(self
);
737 static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s
*self
,
738 struct aq_ring_s
*ring
)
740 unsigned int hw_head_
;
743 hw_head_
= hw_atl_tdm_tx_desc_head_ptr_get(self
, ring
->idx
);
745 if (aq_utils_obj_test(&self
->flags
, AQ_HW_FLAG_ERR_UNPLUG
)) {
749 ring
->hw_head
= hw_head_
;
750 err
= aq_hw_err_from_flags(self
);
756 static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s
*self
,
757 struct aq_ring_s
*ring
)
759 for (; ring
->hw_head
!= ring
->sw_tail
;
760 ring
->hw_head
= aq_ring_next_dx(ring
, ring
->hw_head
)) {
761 struct aq_ring_buff_s
*buff
= NULL
;
762 struct hw_atl_rxd_wb_s
*rxd_wb
= (struct hw_atl_rxd_wb_s
*)
763 &ring
->dx_ring
[ring
->hw_head
* HW_ATL_B0_RXD_SIZE
];
765 unsigned int is_rx_check_sum_enabled
= 0U;
766 unsigned int pkt_type
= 0U;
769 if (!(rxd_wb
->status
& 0x1U
)) { /* RxD is not done */
773 buff
= &ring
->buff_ring
[ring
->hw_head
];
776 buff
->is_hash_l4
= 0U;
778 rx_stat
= (0x0000003CU
& rxd_wb
->status
) >> 2;
780 is_rx_check_sum_enabled
= (rxd_wb
->type
>> 19) & 0x3U
;
782 pkt_type
= (rxd_wb
->type
& HW_ATL_B0_RXD_WB_STAT_PKTTYPE
) >>
783 HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT
;
785 if (is_rx_check_sum_enabled
& BIT(0) &&
786 (0x0U
== (pkt_type
& 0x3U
)))
787 buff
->is_ip_cso
= (rx_stat
& BIT(1)) ? 0U : 1U;
789 if (is_rx_check_sum_enabled
& BIT(1)) {
790 if (0x4U
== (pkt_type
& 0x1CU
))
791 buff
->is_udp_cso
= (rx_stat
& BIT(2)) ? 0U :
792 !!(rx_stat
& BIT(3));
793 else if (0x0U
== (pkt_type
& 0x1CU
))
794 buff
->is_tcp_cso
= (rx_stat
& BIT(2)) ? 0U :
795 !!(rx_stat
& BIT(3));
797 buff
->is_cso_err
= !!(rx_stat
& 0x6);
798 /* Checksum offload workaround for small packets */
799 if (unlikely(rxd_wb
->pkt_len
<= 60)) {
800 buff
->is_ip_cso
= 0U;
801 buff
->is_cso_err
= 0U;
804 if (self
->aq_nic_cfg
->is_vlan_rx_strip
&&
805 ((pkt_type
& HW_ATL_B0_RXD_WB_PKTTYPE_VLAN
) ||
806 (pkt_type
& HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE
))) {
808 buff
->vlan_rx_tag
= le16_to_cpu(rxd_wb
->vlan
);
811 if ((rx_stat
& BIT(0)) || rxd_wb
->type
& 0x1000U
) {
812 /* MAC error or DMA error */
815 if (self
->aq_nic_cfg
->is_rss
) {
817 u16 rss_type
= rxd_wb
->type
& 0xFU
;
819 if (rss_type
&& rss_type
< 0x8U
) {
820 buff
->is_hash_l4
= (rss_type
== 0x4 ||
822 buff
->rss_hash
= rxd_wb
->rss_hash
;
826 buff
->is_lro
= !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT
&
828 if (HW_ATL_B0_RXD_WB_STAT2_EOP
& rxd_wb
->status
) {
829 buff
->len
= rxd_wb
->pkt_len
%
831 buff
->len
= buff
->len
?
832 buff
->len
: AQ_CFG_RX_FRAME_MAX
;
837 rxd_wb
->pkt_len
> AQ_CFG_RX_FRAME_MAX
?
838 AQ_CFG_RX_FRAME_MAX
: rxd_wb
->pkt_len
;
842 buff
->next
= rxd_wb
->next_desc_ptr
;
843 ++ring
->stats
.rx
.lro_packets
;
847 aq_ring_next_dx(ring
,
849 ++ring
->stats
.rx
.jumbo_packets
;
854 return aq_hw_err_from_flags(self
);
857 static int hw_atl_b0_hw_irq_enable(struct aq_hw_s
*self
, u64 mask
)
859 hw_atl_itr_irq_msk_setlsw_set(self
, LODWORD(mask
));
861 return aq_hw_err_from_flags(self
);
864 static int hw_atl_b0_hw_irq_disable(struct aq_hw_s
*self
, u64 mask
)
866 hw_atl_itr_irq_msk_clearlsw_set(self
, LODWORD(mask
));
867 hw_atl_itr_irq_status_clearlsw_set(self
, LODWORD(mask
));
869 atomic_inc(&self
->dpc
);
871 return aq_hw_err_from_flags(self
);
874 static int hw_atl_b0_hw_irq_read(struct aq_hw_s
*self
, u64
*mask
)
876 *mask
= hw_atl_itr_irq_statuslsw_get(self
);
878 return aq_hw_err_from_flags(self
);
881 #define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
883 static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s
*self
,
884 unsigned int packet_filter
)
886 struct aq_nic_cfg_s
*cfg
= self
->aq_nic_cfg
;
891 l2_promisc
= IS_FILTER_ENABLED(IFF_PROMISC
) ||
892 !!(cfg
->priv_flags
& BIT(AQ_HW_LOOPBACK_DMA_NET
));
893 vlan_promisc
= l2_promisc
|| cfg
->is_vlan_force_promisc
;
895 hw_atl_rpfl2promiscuous_mode_en_set(self
, l2_promisc
);
897 hw_atl_rpf_vlan_prom_mode_en_set(self
, vlan_promisc
);
899 hw_atl_rpfl2multicast_flr_en_set(self
,
900 IS_FILTER_ENABLED(IFF_ALLMULTI
) &&
901 IS_FILTER_ENABLED(IFF_MULTICAST
), 0);
903 hw_atl_rpfl2_accept_all_mc_packets_set(self
,
904 IS_FILTER_ENABLED(IFF_ALLMULTI
) &&
905 IS_FILTER_ENABLED(IFF_MULTICAST
));
907 hw_atl_rpfl2broadcast_en_set(self
, IS_FILTER_ENABLED(IFF_BROADCAST
));
910 for (i
= HW_ATL_B0_MAC_MIN
; i
< HW_ATL_B0_MAC_MAX
; ++i
)
911 hw_atl_rpfl2_uc_flr_en_set(self
,
912 (cfg
->is_mc_list_enabled
&&
913 (i
<= cfg
->mc_list_count
)) ?
916 return aq_hw_err_from_flags(self
);
919 #undef IS_FILTER_ENABLED
921 static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s
*self
,
923 [AQ_HW_MULTICAST_ADDRESS_MAX
]
928 struct aq_nic_cfg_s
*cfg
= self
->aq_nic_cfg
;
930 if (count
> (HW_ATL_B0_MAC_MAX
- HW_ATL_B0_MAC_MIN
)) {
934 for (cfg
->mc_list_count
= 0U;
935 cfg
->mc_list_count
< count
;
936 ++cfg
->mc_list_count
) {
937 u32 i
= cfg
->mc_list_count
;
938 u32 h
= (ar_mac
[i
][0] << 8) | (ar_mac
[i
][1]);
939 u32 l
= (ar_mac
[i
][2] << 24) | (ar_mac
[i
][3] << 16) |
940 (ar_mac
[i
][4] << 8) | ar_mac
[i
][5];
942 hw_atl_rpfl2_uc_flr_en_set(self
, 0U, HW_ATL_B0_MAC_MIN
+ i
);
944 hw_atl_rpfl2unicast_dest_addresslsw_set(self
, l
,
945 HW_ATL_B0_MAC_MIN
+ i
);
947 hw_atl_rpfl2unicast_dest_addressmsw_set(self
, h
,
948 HW_ATL_B0_MAC_MIN
+ i
);
950 hw_atl_rpfl2_uc_flr_en_set(self
,
951 (cfg
->is_mc_list_enabled
),
952 HW_ATL_B0_MAC_MIN
+ i
);
955 err
= aq_hw_err_from_flags(self
);
961 static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s
*self
)
967 switch (self
->aq_nic_cfg
->itr
) {
968 case AQ_CFG_INTERRUPT_MODERATION_ON
:
969 case AQ_CFG_INTERRUPT_MODERATION_AUTO
:
970 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self
, 0U);
971 hw_atl_tdm_tdm_intr_moder_en_set(self
, 1U);
972 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self
, 0U);
973 hw_atl_rdm_rdm_intr_moder_en_set(self
, 1U);
975 if (self
->aq_nic_cfg
->itr
== AQ_CFG_INTERRUPT_MODERATION_ON
) {
976 /* HW timers are in 2us units */
977 int tx_max_timer
= self
->aq_nic_cfg
->tx_itr
/ 2;
978 int tx_min_timer
= tx_max_timer
/ 2;
980 int rx_max_timer
= self
->aq_nic_cfg
->rx_itr
/ 2;
981 int rx_min_timer
= rx_max_timer
/ 2;
983 tx_max_timer
= min(HW_ATL_INTR_MODER_MAX
, tx_max_timer
);
984 tx_min_timer
= min(HW_ATL_INTR_MODER_MIN
, tx_min_timer
);
985 rx_max_timer
= min(HW_ATL_INTR_MODER_MAX
, rx_max_timer
);
986 rx_min_timer
= min(HW_ATL_INTR_MODER_MIN
, rx_min_timer
);
988 itr_tx
|= tx_min_timer
<< 0x8U
;
989 itr_tx
|= tx_max_timer
<< 0x10U
;
990 itr_rx
|= rx_min_timer
<< 0x8U
;
991 itr_rx
|= rx_max_timer
<< 0x10U
;
993 static unsigned int hw_atl_b0_timers_table_tx_
[][2] = {
994 {0xfU
, 0xffU
}, /* 10Gbit */
995 {0xfU
, 0x1ffU
}, /* 5Gbit */
996 {0xfU
, 0x1ffU
}, /* 5Gbit 5GS */
997 {0xfU
, 0x1ffU
}, /* 2.5Gbit */
998 {0xfU
, 0x1ffU
}, /* 1Gbit */
999 {0xfU
, 0x1ffU
}, /* 100Mbit */
1002 static unsigned int hw_atl_b0_timers_table_rx_
[][2] = {
1003 {0x6U
, 0x38U
},/* 10Gbit */
1004 {0xCU
, 0x70U
},/* 5Gbit */
1005 {0xCU
, 0x70U
},/* 5Gbit 5GS */
1006 {0x18U
, 0xE0U
},/* 2.5Gbit */
1007 {0x30U
, 0x80U
},/* 1Gbit */
1008 {0x4U
, 0x50U
},/* 100Mbit */
1011 unsigned int speed_index
=
1012 hw_atl_utils_mbps_2_speed_index(
1013 self
->aq_link_status
.mbps
);
1015 /* Update user visible ITR settings */
1016 self
->aq_nic_cfg
->tx_itr
= hw_atl_b0_timers_table_tx_
1017 [speed_index
][1] * 2;
1018 self
->aq_nic_cfg
->rx_itr
= hw_atl_b0_timers_table_rx_
1019 [speed_index
][1] * 2;
1021 itr_tx
|= hw_atl_b0_timers_table_tx_
1022 [speed_index
][0] << 0x8U
;
1023 itr_tx
|= hw_atl_b0_timers_table_tx_
1024 [speed_index
][1] << 0x10U
;
1026 itr_rx
|= hw_atl_b0_timers_table_rx_
1027 [speed_index
][0] << 0x8U
;
1028 itr_rx
|= hw_atl_b0_timers_table_rx_
1029 [speed_index
][1] << 0x10U
;
1032 case AQ_CFG_INTERRUPT_MODERATION_OFF
:
1033 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self
, 1U);
1034 hw_atl_tdm_tdm_intr_moder_en_set(self
, 0U);
1035 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self
, 1U);
1036 hw_atl_rdm_rdm_intr_moder_en_set(self
, 0U);
1042 for (i
= HW_ATL_B0_RINGS_MAX
; i
--;) {
1043 hw_atl_reg_tx_intr_moder_ctrl_set(self
, itr_tx
, i
);
1044 hw_atl_reg_rx_intr_moder_ctrl_set(self
, itr_rx
, i
);
1047 return aq_hw_err_from_flags(self
);
1050 static int hw_atl_b0_hw_stop(struct aq_hw_s
*self
)
1055 hw_atl_b0_hw_irq_disable(self
, HW_ATL_B0_INT_MASK
);
1057 /* Invalidate Descriptor Cache to prevent writing to the cached
1058 * descriptors and to the data pointer of those descriptors
1060 hw_atl_rdm_rx_dma_desc_cache_init_tgl(self
);
1062 err
= aq_hw_err_from_flags(self
);
1067 readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get
,
1068 self
, val
, val
== 1, 1000U, 10000U);
1074 static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s
*self
,
1075 struct aq_ring_s
*ring
)
1077 hw_atl_tdm_tx_desc_en_set(self
, 0U, ring
->idx
);
1079 return aq_hw_err_from_flags(self
);
1082 static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s
*self
,
1083 struct aq_ring_s
*ring
)
1085 hw_atl_rdm_rx_desc_en_set(self
, 0U, ring
->idx
);
1087 return aq_hw_err_from_flags(self
);
1090 static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s
*self
, u32
*tc_mode
)
1092 *tc_mode
= hw_atl_rpb_tps_tx_tc_mode_get(self
);
1093 return aq_hw_err_from_flags(self
);
1096 static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s
*self
, u32
*tc_mode
)
1098 *tc_mode
= hw_atl_rpb_rpf_rx_traf_class_mode_get(self
);
1099 return aq_hw_err_from_flags(self
);
1102 #define get_ptp_ts_val_u64(self, indx) \
1103 ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
1105 static void hw_atl_b0_get_ptp_ts(struct aq_hw_s
*self
, u64
*stamp
)
1109 hw_atl_pcs_ptp_clock_read_enable(self
, 1);
1110 hw_atl_pcs_ptp_clock_read_enable(self
, 0);
1111 ns
= (get_ptp_ts_val_u64(self
, 0) +
1112 (get_ptp_ts_val_u64(self
, 1) << 16)) * NSEC_PER_SEC
+
1113 (get_ptp_ts_val_u64(self
, 3) +
1114 (get_ptp_ts_val_u64(self
, 4) << 16));
1116 *stamp
= ns
+ self
->ptp_clk_offset
;
1119 static void hw_atl_b0_adj_params_get(u64 freq
, s64 adj
, u32
*ns
, u32
*fns
)
1121 /* For accuracy, the digit is extended */
1122 s64 base_ns
= ((adj
+ NSEC_PER_SEC
) * NSEC_PER_SEC
);
1126 base_ns
= div64_s64(base_ns
, freq
);
1127 nsi
= div64_u64(base_ns
, NSEC_PER_SEC
);
1129 if (base_ns
!= nsi
* NSEC_PER_SEC
) {
1130 s64 divisor
= div64_s64((s64
)NSEC_PER_SEC
* NSEC_PER_SEC
,
1131 base_ns
- nsi
* NSEC_PER_SEC
);
1132 nsi_frac
= div64_s64(FRAC_PER_NS
* NSEC_PER_SEC
, divisor
);
1136 *fns
= (u32
)nsi_frac
;
1140 hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq
*ptp_adj_freq
,
1141 u64 phyfreq
, u64 macfreq
)
1144 s64 fns_in_sec_phy
= phyfreq
* (ptp_adj_freq
->fns_phy
+
1145 FRAC_PER_NS
* ptp_adj_freq
->ns_phy
);
1146 s64 fns_in_sec_mac
= macfreq
* (ptp_adj_freq
->fns_mac
+
1147 FRAC_PER_NS
* ptp_adj_freq
->ns_mac
);
1148 s64 fault_in_sec_phy
= FRAC_PER_NS
* NSEC_PER_SEC
- fns_in_sec_phy
;
1149 s64 fault_in_sec_mac
= FRAC_PER_NS
* NSEC_PER_SEC
- fns_in_sec_mac
;
1150 /* MAC MCP counter freq is macfreq / 4 */
1151 s64 diff_in_mcp_overflow
= (fault_in_sec_mac
- fault_in_sec_phy
) *
1154 diff_in_mcp_overflow
= div64_s64(diff_in_mcp_overflow
,
1155 AQ_HW_MAC_COUNTER_HZ
);
1156 adj_fns_val
= (ptp_adj_freq
->fns_mac
+ FRAC_PER_NS
*
1157 ptp_adj_freq
->ns_mac
) + diff_in_mcp_overflow
;
1159 ptp_adj_freq
->mac_ns_adj
= div64_s64(adj_fns_val
, FRAC_PER_NS
);
1160 ptp_adj_freq
->mac_fns_adj
= adj_fns_val
- ptp_adj_freq
->mac_ns_adj
*
1164 static int hw_atl_b0_adj_sys_clock(struct aq_hw_s
*self
, s64 delta
)
1166 self
->ptp_clk_offset
+= delta
;
1168 self
->aq_fw_ops
->adjust_ptp(self
, self
->ptp_clk_offset
);
1173 static int hw_atl_b0_set_sys_clock(struct aq_hw_s
*self
, u64 time
, u64 ts
)
1175 s64 delta
= time
- (self
->ptp_clk_offset
+ ts
);
1177 return hw_atl_b0_adj_sys_clock(self
, delta
);
1180 static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s
*self
, u64 ts
, u64
*time
)
1182 *time
= self
->ptp_clk_offset
+ ts
;
1186 static int hw_atl_b0_adj_clock_freq(struct aq_hw_s
*self
, s32 ppb
)
1188 struct hw_fw_request_iface fwreq
;
1191 memset(&fwreq
, 0, sizeof(fwreq
));
1193 fwreq
.msg_id
= HW_AQ_FW_REQUEST_PTP_ADJ_FREQ
;
1194 hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ
, ppb
,
1195 &fwreq
.ptp_adj_freq
.ns_mac
,
1196 &fwreq
.ptp_adj_freq
.fns_mac
);
1197 hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ
, ppb
,
1198 &fwreq
.ptp_adj_freq
.ns_phy
,
1199 &fwreq
.ptp_adj_freq
.fns_phy
);
1200 hw_atl_b0_mac_adj_param_calc(&fwreq
.ptp_adj_freq
,
1201 AQ_HW_PHY_COUNTER_HZ
,
1202 AQ_HW_MAC_COUNTER_HZ
);
1204 size
= sizeof(fwreq
.msg_id
) + sizeof(fwreq
.ptp_adj_freq
);
1205 return self
->aq_fw_ops
->send_fw_request(self
, &fwreq
, size
);
1208 static int hw_atl_b0_gpio_pulse(struct aq_hw_s
*self
, u32 index
,
1209 u64 start
, u32 period
)
1211 struct hw_fw_request_iface fwreq
;
1214 memset(&fwreq
, 0, sizeof(fwreq
));
1216 fwreq
.msg_id
= HW_AQ_FW_REQUEST_PTP_GPIO_CTRL
;
1217 fwreq
.ptp_gpio_ctrl
.index
= index
;
1218 fwreq
.ptp_gpio_ctrl
.period
= period
;
1219 /* Apply time offset */
1220 fwreq
.ptp_gpio_ctrl
.start
= start
;
1222 size
= sizeof(fwreq
.msg_id
) + sizeof(fwreq
.ptp_gpio_ctrl
);
1223 return self
->aq_fw_ops
->send_fw_request(self
, &fwreq
, size
);
1226 static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s
*self
, u32 index
,
1229 /* Enable/disable Sync1588 GPIO Timestamping */
1230 aq_phy_write_reg(self
, MDIO_MMD_PCS
, 0xc611, enable
? 0x71 : 0);
1235 static int hw_atl_b0_get_sync_ts(struct aq_hw_s
*self
, u64
*ts
)
1245 /* PTP external GPIO clock seconds count 15:0 */
1246 sec_l
= aq_phy_read_reg(self
, MDIO_MMD_PCS
, 0xc914);
1247 /* PTP external GPIO clock seconds count 31:16 */
1248 sec_h
= aq_phy_read_reg(self
, MDIO_MMD_PCS
, 0xc915);
1249 /* PTP external GPIO clock nanoseconds count 15:0 */
1250 nsec_l
= aq_phy_read_reg(self
, MDIO_MMD_PCS
, 0xc916);
1251 /* PTP external GPIO clock nanoseconds count 31:16 */
1252 nsec_h
= aq_phy_read_reg(self
, MDIO_MMD_PCS
, 0xc917);
1254 *ts
= (nsec_h
<< 16) + nsec_l
+ ((sec_h
<< 16) + sec_l
) * NSEC_PER_SEC
;
1259 static u16
hw_atl_b0_rx_extract_ts(struct aq_hw_s
*self
, u8
*p
,
1260 unsigned int len
, u64
*timestamp
)
1262 unsigned int offset
= 14;
1268 if (len
<= offset
|| !timestamp
)
1271 /* The TIMESTAMP in the end of package has following format:
1276 * uint16_t stream_id;
1279 ptr
= p
+ (len
- offset
);
1280 memcpy(&sec
, ptr
, sizeof(sec
));
1282 memcpy(&ns
, ptr
, sizeof(ns
));
1284 *timestamp
= (be64_to_cpu(sec
) & 0xffffffffffffllu
) * NSEC_PER_SEC
+
1285 be32_to_cpu(ns
) + self
->ptp_clk_offset
;
1287 eth
= (struct ethhdr
*)p
;
1289 return (eth
->h_proto
== htons(ETH_P_1588
)) ? 12 : 14;
1292 static int hw_atl_b0_extract_hwts(struct aq_hw_s
*self
, u8
*p
, unsigned int len
,
1295 struct hw_atl_rxd_hwts_wb_s
*hwts_wb
= (struct hw_atl_rxd_hwts_wb_s
*)p
;
1299 tmp
= (hwts_wb
->sec_lw0
>> 2) & 0x3ff;
1301 tmp
= (u64
)((hwts_wb
->sec_lw1
>> 16) & 0xffff) << 10;
1303 tmp
= (u64
)(hwts_wb
->sec_hw
& 0xfff) << 26;
1305 tmp
= (u64
)((hwts_wb
->sec_hw
>> 22) & 0x3ff) << 38;
1307 ns
= sec
* NSEC_PER_SEC
+ hwts_wb
->ns
;
1309 *timestamp
= ns
+ self
->ptp_clk_offset
;
1313 static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s
*self
,
1314 struct aq_rx_filter_l3l4
*data
)
1316 u8 location
= data
->location
;
1318 if (!data
->is_ipv6
) {
1319 hw_atl_rpfl3l4_cmd_clear(self
, location
);
1320 hw_atl_rpf_l4_spd_set(self
, 0U, location
);
1321 hw_atl_rpf_l4_dpd_set(self
, 0U, location
);
1322 hw_atl_rpfl3l4_ipv4_src_addr_clear(self
, location
);
1323 hw_atl_rpfl3l4_ipv4_dest_addr_clear(self
, location
);
1327 for (i
= 0; i
< HW_ATL_RX_CNT_REG_ADDR_IPV6
; ++i
) {
1328 hw_atl_rpfl3l4_cmd_clear(self
, location
+ i
);
1329 hw_atl_rpf_l4_spd_set(self
, 0U, location
+ i
);
1330 hw_atl_rpf_l4_dpd_set(self
, 0U, location
+ i
);
1332 hw_atl_rpfl3l4_ipv6_src_addr_clear(self
, location
);
1333 hw_atl_rpfl3l4_ipv6_dest_addr_clear(self
, location
);
1336 return aq_hw_err_from_flags(self
);
1339 static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s
*self
,
1340 struct aq_rx_filter_l3l4
*data
)
1342 u8 location
= data
->location
;
1344 hw_atl_b0_hw_fl3l4_clear(self
, data
);
1346 if (data
->cmd
& (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3
|
1347 HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3
)) {
1348 if (!data
->is_ipv6
) {
1349 hw_atl_rpfl3l4_ipv4_dest_addr_set(self
,
1352 hw_atl_rpfl3l4_ipv4_src_addr_set(self
,
1356 hw_atl_rpfl3l4_ipv6_dest_addr_set(self
,
1359 hw_atl_rpfl3l4_ipv6_src_addr_set(self
,
1365 if (data
->cmd
& (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4
|
1366 HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4
)) {
1367 hw_atl_rpf_l4_dpd_set(self
, data
->p_dst
, location
);
1368 hw_atl_rpf_l4_spd_set(self
, data
->p_src
, location
);
1371 hw_atl_rpfl3l4_cmd_set(self
, location
, data
->cmd
);
1373 return aq_hw_err_from_flags(self
);
1376 static int hw_atl_b0_hw_fl2_set(struct aq_hw_s
*self
,
1377 struct aq_rx_filter_l2
*data
)
1379 hw_atl_rpf_etht_flr_en_set(self
, 1U, data
->location
);
1380 hw_atl_rpf_etht_flr_set(self
, data
->ethertype
, data
->location
);
1381 hw_atl_rpf_etht_user_priority_en_set(self
,
1382 !!data
->user_priority_en
,
1384 if (data
->user_priority_en
)
1385 hw_atl_rpf_etht_user_priority_set(self
,
1386 data
->user_priority
,
1389 if (data
->queue
< 0) {
1390 hw_atl_rpf_etht_flr_act_set(self
, 0U, data
->location
);
1391 hw_atl_rpf_etht_rx_queue_en_set(self
, 0U, data
->location
);
1393 hw_atl_rpf_etht_flr_act_set(self
, 1U, data
->location
);
1394 hw_atl_rpf_etht_rx_queue_en_set(self
, 1U, data
->location
);
1395 hw_atl_rpf_etht_rx_queue_set(self
, data
->queue
, data
->location
);
1398 return aq_hw_err_from_flags(self
);
1401 static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s
*self
,
1402 struct aq_rx_filter_l2
*data
)
1404 hw_atl_rpf_etht_flr_en_set(self
, 0U, data
->location
);
1405 hw_atl_rpf_etht_flr_set(self
, 0U, data
->location
);
1406 hw_atl_rpf_etht_user_priority_en_set(self
, 0U, data
->location
);
1408 return aq_hw_err_from_flags(self
);
1412 * @brief Set VLAN filter table
1413 * @details Configure VLAN filter table to accept (and assign the queue) traffic
1414 * for the particular vlan ids.
1415 * Note: use this function under vlan promisc mode so as not to lose the traffic
1418 * @param aq_rx_filter_vlan VLAN filter configuration
1419 * @return 0 - OK, <0 - error
1421 static int hw_atl_b0_hw_vlan_set(struct aq_hw_s
*self
,
1422 struct aq_rx_filter_vlan
*aq_vlans
)
1426 for (i
= 0; i
< AQ_VLAN_MAX_FILTERS
; i
++) {
1427 hw_atl_rpf_vlan_flr_en_set(self
, 0U, i
);
1428 hw_atl_rpf_vlan_rxq_en_flr_set(self
, 0U, i
);
1429 if (aq_vlans
[i
].enable
) {
1430 hw_atl_rpf_vlan_id_flr_set(self
,
1431 aq_vlans
[i
].vlan_id
,
1433 hw_atl_rpf_vlan_flr_act_set(self
, 1U, i
);
1434 hw_atl_rpf_vlan_flr_en_set(self
, 1U, i
);
1435 if (aq_vlans
[i
].queue
!= 0xFF) {
1436 hw_atl_rpf_vlan_rxq_flr_set(self
,
1439 hw_atl_rpf_vlan_rxq_en_flr_set(self
, 1U, i
);
1444 return aq_hw_err_from_flags(self
);
1447 static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s
*self
, bool enable
)
1449 /* set promisc in case of disabing the vland filter */
1450 hw_atl_rpf_vlan_prom_mode_en_set(self
, !enable
);
1452 return aq_hw_err_from_flags(self
);
1455 static int hw_atl_b0_set_loopback(struct aq_hw_s
*self
, u32 mode
, bool enable
)
1458 case AQ_HW_LOOPBACK_DMA_SYS
:
1459 hw_atl_tpb_tx_dma_sys_lbk_en_set(self
, enable
);
1460 hw_atl_rpb_dma_sys_lbk_set(self
, enable
);
1462 case AQ_HW_LOOPBACK_PKT_SYS
:
1463 hw_atl_tpo_tx_pkt_sys_lbk_en_set(self
, enable
);
1464 hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self
, enable
);
1466 case AQ_HW_LOOPBACK_DMA_NET
:
1467 hw_atl_rpf_vlan_prom_mode_en_set(self
, enable
);
1468 hw_atl_rpfl2promiscuous_mode_en_set(self
, enable
);
1469 hw_atl_tpb_tx_tx_clk_gate_en_set(self
, !enable
);
1470 hw_atl_tpb_tx_dma_net_lbk_en_set(self
, enable
);
1471 hw_atl_rpb_dma_net_lbk_set(self
, enable
);
1480 const struct aq_hw_ops hw_atl_ops_b0
= {
1481 .hw_set_mac_address
= hw_atl_b0_hw_mac_addr_set
,
1482 .hw_init
= hw_atl_b0_hw_init
,
1483 .hw_reset
= hw_atl_b0_hw_reset
,
1484 .hw_start
= hw_atl_b0_hw_start
,
1485 .hw_ring_tx_start
= hw_atl_b0_hw_ring_tx_start
,
1486 .hw_ring_tx_stop
= hw_atl_b0_hw_ring_tx_stop
,
1487 .hw_ring_rx_start
= hw_atl_b0_hw_ring_rx_start
,
1488 .hw_ring_rx_stop
= hw_atl_b0_hw_ring_rx_stop
,
1489 .hw_stop
= hw_atl_b0_hw_stop
,
1491 .hw_ring_tx_xmit
= hw_atl_b0_hw_ring_tx_xmit
,
1492 .hw_ring_tx_head_update
= hw_atl_b0_hw_ring_tx_head_update
,
1494 .hw_ring_rx_receive
= hw_atl_b0_hw_ring_rx_receive
,
1495 .hw_ring_rx_fill
= hw_atl_b0_hw_ring_rx_fill
,
1497 .hw_irq_enable
= hw_atl_b0_hw_irq_enable
,
1498 .hw_irq_disable
= hw_atl_b0_hw_irq_disable
,
1499 .hw_irq_read
= hw_atl_b0_hw_irq_read
,
1501 .hw_ring_rx_init
= hw_atl_b0_hw_ring_rx_init
,
1502 .hw_ring_tx_init
= hw_atl_b0_hw_ring_tx_init
,
1503 .hw_packet_filter_set
= hw_atl_b0_hw_packet_filter_set
,
1504 .hw_filter_l2_set
= hw_atl_b0_hw_fl2_set
,
1505 .hw_filter_l2_clear
= hw_atl_b0_hw_fl2_clear
,
1506 .hw_filter_l3l4_set
= hw_atl_b0_hw_fl3l4_set
,
1507 .hw_filter_vlan_set
= hw_atl_b0_hw_vlan_set
,
1508 .hw_filter_vlan_ctrl
= hw_atl_b0_hw_vlan_ctrl
,
1509 .hw_multicast_list_set
= hw_atl_b0_hw_multicast_list_set
,
1510 .hw_interrupt_moderation_set
= hw_atl_b0_hw_interrupt_moderation_set
,
1511 .hw_rss_set
= hw_atl_b0_hw_rss_set
,
1512 .hw_rss_hash_set
= hw_atl_b0_hw_rss_hash_set
,
1513 .hw_get_regs
= hw_atl_utils_hw_get_regs
,
1514 .hw_get_hw_stats
= hw_atl_utils_get_hw_stats
,
1515 .hw_get_fw_version
= hw_atl_utils_get_fw_version
,
1517 .hw_tx_tc_mode_get
= hw_atl_b0_tx_tc_mode_get
,
1518 .hw_rx_tc_mode_get
= hw_atl_b0_rx_tc_mode_get
,
1520 .hw_ring_hwts_rx_fill
= hw_atl_b0_hw_ring_hwts_rx_fill
,
1521 .hw_ring_hwts_rx_receive
= hw_atl_b0_hw_ring_hwts_rx_receive
,
1523 .hw_get_ptp_ts
= hw_atl_b0_get_ptp_ts
,
1524 .hw_adj_sys_clock
= hw_atl_b0_adj_sys_clock
,
1525 .hw_set_sys_clock
= hw_atl_b0_set_sys_clock
,
1526 .hw_ts_to_sys_clock
= hw_atl_b0_ts_to_sys_clock
,
1527 .hw_adj_clock_freq
= hw_atl_b0_adj_clock_freq
,
1528 .hw_gpio_pulse
= hw_atl_b0_gpio_pulse
,
1529 .hw_extts_gpio_enable
= hw_atl_b0_extts_gpio_enable
,
1530 .hw_get_sync_ts
= hw_atl_b0_get_sync_ts
,
1531 .rx_extract_ts
= hw_atl_b0_rx_extract_ts
,
1532 .extract_hwts
= hw_atl_b0_extract_hwts
,
1533 .hw_set_offload
= hw_atl_b0_hw_offload_set
,
1534 .hw_set_loopback
= hw_atl_b0_set_loopback
,
1535 .hw_set_fc
= hw_atl_b0_set_fc
,