/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"
static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
				 struct aq_hw_caps_s *aq_hw_caps,
				 unsigned short device,
				 unsigned short subsystem_device)
{
	memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));

	if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;

	if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
	}

	return 0;
}
static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
					unsigned int port,
					struct aq_hw_ops *ops)
{
	struct hw_atl_s *self = NULL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		goto err_exit;

	self->base.aq_pci_func = aq_pci_func;

	self->base.not_ff_addr = 0x10U;

err_exit:
	return (struct aq_hw_s *)self;
}
static void hw_atl_a0_destroy(struct aq_hw_s *self)
{
	kfree(self);
}
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;

	glb_glb_reg_res_dis_set(self, 1U);
	pci_pci_reg_res_dis_set(self, 0U);
	rx_rx_reg_res_dis_set(self, 0U);
	tx_tx_reg_res_dis_set(self, 0U);

	glb_soft_res_set(self, 1);

	/* check 10 times by 1ms */
	AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
	if (err < 0)
		goto err_exit;

	itr_irq_reg_res_dis_set(self, 0U);
	itr_res_irq_set(self, 1U);

	/* check 10 times by 1ms */
	AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
	if (err < 0)
		goto err_exit;

	hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
	u32 tc = 0U;
	u32 buff_size = 0U;
	unsigned int i_priority = 0U;
	bool is_rx_flow_control = false;

	/* TPS Descriptor rate init */
	tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
	tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
	tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
	tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

	/* Tx buf size */
	buff_size = HW_ATL_A0_TXBUF_MAX;

	tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	tpb_tx_buff_hi_threshold_per_tc_set(self,
					    (buff_size * (1024 / 32U) * 66U) /
					    100U, tc);
	tpb_tx_buff_lo_threshold_per_tc_set(self,
					    (buff_size * (1024 / 32U) * 50U) /
					    100U, tc);

	/* QoS Rx buf size per TC */
	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
	buff_size = HW_ATL_A0_RXBUF_MAX;

	rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	rpb_rx_buff_hi_threshold_per_tc_set(self,
					    (buff_size *
					    (1024U / 32U) * 66U) /
					    100U, tc);
	rpb_rx_buff_lo_threshold_per_tc_set(self,
					    (buff_size *
					    (1024U / 32U) * 50U) /
					    100U, tc);
	rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

	/* QoS 802.1p priority -> TC mapping */
	for (i_priority = 8U; i_priority--;)
		rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = NULL;
	int err = 0;
	unsigned int i = 0U;
	unsigned int addr = 0U;

	cfg = self->aq_nic_cfg;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		rpf_rss_key_wr_data_set(self, key_data);
		rpf_rss_key_addr_set(self, addr);
		rpf_rss_key_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
{
	u8 *indirection_table = rss_params->indirection_table;
	u32 i = 0U;
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	int err = 0;
	u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
		    HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];

	memset(bitary, 0, sizeof(bitary));

	for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--;) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<
			((i * 3U) & 0xFU));
	}

	for (i = AQ_DIMOF(bitary); i--;) {
		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		rpf_rss_redir_tbl_addr_set(self, i);
		rpf_rss_redir_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	/* TX checksums offloads*/
	tpo_ipv4header_crc_offload_en_set(self, 1);
	tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksums offloads*/
	rpo_ipv4header_crc_offload_en_set(self, 1);
	rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads*/
	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
	thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
			0x00010000U : 0x00000000U);
	tdm_tx_dca_en_set(self, 0U);
	tdm_tx_dca_mode_set(self, 0U);

	tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	reg_rx_flr_rss_control1set(self, cfg->is_rss ?
				   0xB3333333U : 0x00000000U);

	/* Multicast filters */
	for (i = HW_ATL_A0_MAC_MAX; i--;) {
		rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		rpfl2unicast_flr_act_set(self, 1U, i);
	}

	reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters */
	rpf_vlan_outer_etht_set(self, 0x88A8U);
	rpf_vlan_inner_etht_set(self, 0x8100U);
	rpf_vlan_prom_mode_en_set(self, 1);

	/* Rx Interrupts */
	rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	rpfl2broadcast_flr_act_set(self, 1U);
	rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	rdm_rx_dca_en_set(self, 0U);
	rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	int err = 0;
	unsigned int h = 0U;
	unsigned int l = 0U;

	if (!mac_addr) {
		err = -EINVAL;
		goto err_exit;
	}
	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
	rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
	rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
	rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_a0_hw_init(struct aq_hw_s *self,
			     struct aq_nic_cfg_s *aq_nic_cfg,
			     u8 *mac_addr)
{
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
		{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
		{ 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
		{ 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */
	};
	int err = 0;

	self->aq_nic_cfg = aq_nic_cfg;

	hw_atl_utils_hw_chip_features_init(self,
					   &PHAL_ATLANTIC_A0->chip_features);

	hw_atl_a0_hw_init_tx_path(self);
	hw_atl_a0_hw_init_rx_path(self);

	hw_atl_a0_hw_mac_addr_set(self, mac_addr);

	hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);

	reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
	reg_tx_dma_debug_ctl_set(self, 0x000000b8U);

	hw_atl_a0_hw_qos_set(self);

	hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	hw_atl_utils_update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	reg_irq_glb_ctl_set(self,
			    aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
			    [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);

	itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	reg_gen_irq_map_set(self,
			    ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
			    ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
			    ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
			    ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);

	hw_atl_a0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	tdm_tx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	rdm_rx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
	tpb_tx_buff_en_set(self, 1);
	rpb_rx_buff_en_set(self, 1);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
	return 0;
}
static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int pkt_len = 0U;
	unsigned int frag_count = 0U;
	bool is_gso = false;

	buff = &ring->buff_ring[ring->sw_tail];
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_A0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_txc) {
			txd->ctl |= (buff->len_l3 << 31) |
				(buff->len_l2 << 24) |
				HW_ATL_A0_TXD_CTL_CMD_TCP |
				HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl2 |= (buff->mss << 16) |
				(buff->len_l4 << 8) |
				(buff->len_l3 >> 1);

			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			is_gso = true;

			if (buff->is_ipv6)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
		} else {
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
					((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);

			if (is_gso) {
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
				txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
			}

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
				is_gso = false;
			}
		}

		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_a0_hw_tx_ring_tail_update(self, ring);
	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	rdm_rx_desc_en_set(self, false, aq_ring->idx);

	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
					   aq_ring->idx);

	reg_rx_dma_desc_base_addressmswset(self,
					   dma_desc_addr_msw, aq_ring->idx);

	rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	rdm_rx_desc_data_buff_size_set(self,
				       AQ_CFG_RX_FRAME_MAX / 1024U,
				       aq_ring->idx);

	rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
					   aq_ring->idx);

	reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
					   aq_ring->idx);

	tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
{
	for (; sw_tail_old != ring->sw_tail;
		sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
							HW_ATL_A0_RXD_SIZE];

		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;
	}

	reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	int err = 0;
	unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);

	if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
		err = -ENXIO;
		goto err_exit;
	}
	ring->hw_head = hw_head_;
	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
					struct aq_ring_s *ring)
{
	struct device *ndev = aq_nic_get_dev(ring->aq_nic);

	for (; ring->hw_head != ring->sw_tail;
		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];

		unsigned int is_err = 1U;
		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;

		if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
			if ((1U << 4) &
			    reg_rx_dma_desc_status_get(self, ring->idx)) {
				rdm_rx_desc_en_set(self, false, ring->idx);
				rdm_rx_desc_res_set(self, true, ring->idx);
				rdm_rx_desc_res_set(self, false, ring->idx);
				rdm_rx_desc_en_set(self, true, ring->idx);
			}

			if (ring->hw_head ||
			    (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
				break;
			} else if (!(rxd_wb->status & 0x1U)) {
				struct hw_atl_rxd_wb_s *rxd_wb1 =
					(struct hw_atl_rxd_wb_s *)
					(&ring->dx_ring[(1U) *
						HW_ATL_A0_RXD_SIZE]);

				if ((rxd_wb1->status & 0x1U)) {
					rxd_wb->pkt_len = 1514U;
					rxd_wb->status = 3U;
				} else {
					break;
				}
			}
		}

		buff = &ring->buff_ring[ring->hw_head];

		if (0x3U != (rxd_wb->status & 0x3U))
			rxd_wb->status |= 4;

		is_err = (0x0000001CU & rxd_wb->status);
		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
		pkt_type = 0xFFU & (rxd_wb->type >> 4);

		if (is_rx_check_sum_enabled) {
			if (0x0U == (pkt_type & 0x3U))
				buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;

			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;

			/* Checksum offload workaround for small packets */
			if (rxd_wb->pkt_len <= 60) {
				buff->is_ip_cso = 0U;
				buff->is_cso_err = 0U;
			}
		}

		is_err &= ~0x18U;
		is_err &= ~0x04U;

		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);

		if (is_err || rxd_wb->type & 0x1000U) {
			/* status error or DMA error */
			buff->is_error = 1U;
		} else {
			if (self->aq_nic_cfg->is_rss) {
				/* last 4 byte */
				u16 rss_type = rxd_wb->type & 0xFU;

				if (rss_type && rss_type < 0x8U) {
					buff->is_hash_l4 = (rss_type == 0x4 ||
							    rss_type == 0x5);
					buff->rss_hash = rxd_wb->rss_hash;
				}
			}

			if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
				buff->len = rxd_wb->pkt_len %
					AQ_CFG_RX_FRAME_MAX;
				buff->len = buff->len ?
					buff->len : AQ_CFG_RX_FRAME_MAX;
				buff->next = 0U;
				buff->is_eop = 1U;
			} else {
				/* jumbo */
				buff->next = aq_ring_next_dx(ring,
							     ring->hw_head);
				++ring->stats.rx.jumbo_packets;
			}
		}
	}

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	itr_irq_msk_setlsw_set(self, LODWORD(mask) |
			       (1U << HW_ATL_A0_ERR_INT));
	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	itr_irq_status_clearlsw_set(self, LODWORD(mask));

	if ((1U << 16) & reg_gen_irq_status_get(self))
		atomic_inc(&PHAL_ATLANTIC_A0->dpc);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	*mask = itr_irq_statuslsw_get(self);
	return aq_hw_err_from_flags(self);
}
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
					  unsigned int packet_filter)
{
	unsigned int i = 0U;

	rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
	rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
	rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	self->aq_nic_cfg->is_mc_list_enabled =
			IS_FILTER_ENABLED(IFF_MULTICAST);

	for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
		rpfl2_uc_flr_en_set(self,
				    (self->aq_nic_cfg->is_mc_list_enabled &&
				    (i <= self->aq_nic_cfg->mc_list_count)) ?
				    1U : 0U, i);

	return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED
static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_CFG_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	int err = 0;

	if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (self->aq_nic_cfg->mc_list_count = 0U;
		self->aq_nic_cfg->mc_list_count < count;
		++self->aq_nic_cfg->mc_list_count) {
		u32 i = self->aq_nic_cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);

		rpfl2unicast_dest_addresslsw_set(self,
						 l, HW_ATL_A0_MAC_MIN + i);

		rpfl2unicast_dest_addressmsw_set(self,
						 h, HW_ATL_A0_MAC_MIN + i);

		rpfl2_uc_flr_en_set(self,
				    (self->aq_nic_cfg->is_mc_list_enabled),
				    HW_ATL_A0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_rx;

	if (self->aq_nic_cfg->itr) {
		if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
			u32 itr_ = (self->aq_nic_cfg->itr >> 1);

			itr_ = min(AQ_CFG_IRQ_MASK, itr_);

			itr_rx = 0x80000000U | (itr_ << 0x10);
		} else {
			u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);

			if (n < self->aq_link_status.mbps) {
				itr_rx = 0U;
			} else {
				static unsigned int hw_timers_tbl_[] = {
					0x01CU, /* 10Gbit */
					0x039U, /* 5Gbit */
					0x039U, /* 5Gbit 5GS */
					0x073U, /* 2.5Gbit */
					0x120U, /* 1Gbit */
					0x1FFU, /* 100Mbit */
				};

				unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

				itr_rx = 0x80000000U |
					(hw_timers_tbl_[speed_index] << 0x10U);
			}

			aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
			aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
		}
	} else {
		itr_rx = 0U;
	}

	for (i = HW_ATL_A0_RINGS_MAX; i--;)
		reg_irq_thr_set(self, itr_rx, i);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
	hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	tdm_tx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	rdm_rx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
{
	int err = 0;

	err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}
static struct aq_hw_ops hw_atl_ops_ = {
	.create               = hw_atl_a0_create,
	.destroy              = hw_atl_a0_destroy,
	.get_hw_caps          = hw_atl_a0_get_hw_caps,

	.hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
	.hw_set_mac_address   = hw_atl_a0_hw_mac_addr_set,
	.hw_get_link_status   = hw_atl_utils_mpi_get_link_status,
	.hw_set_link_speed    = hw_atl_a0_hw_set_speed,
	.hw_init              = hw_atl_a0_hw_init,
	.hw_deinit            = hw_atl_utils_hw_deinit,
	.hw_set_power         = hw_atl_utils_hw_set_power,
	.hw_reset             = hw_atl_a0_hw_reset,
	.hw_start             = hw_atl_a0_hw_start,
	.hw_ring_tx_start     = hw_atl_a0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_a0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_a0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_a0_hw_ring_rx_stop,
	.hw_stop              = hw_atl_a0_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_a0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_a0_hw_ring_tx_head_update,

	.hw_ring_rx_receive      = hw_atl_a0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_a0_hw_ring_rx_fill,

	.hw_irq_enable           = hw_atl_a0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_a0_hw_irq_disable,
	.hw_irq_read             = hw_atl_a0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl_a0_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl_a0_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl_a0_hw_packet_filter_set,
	.hw_multicast_list_set       = hw_atl_a0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl_a0_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_a0_hw_rss_hash_set,
	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
	.hw_update_stats             = hw_atl_utils_update_stats,
	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
};
struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
{
	bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
	bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
			  (pdev->device == HW_ATL_DEVICE_ID_D100) ||
			  (pdev->device == HW_ATL_DEVICE_ID_D107) ||
			  (pdev->device == HW_ATL_DEVICE_ID_D108) ||
			  (pdev->device == HW_ATL_DEVICE_ID_D109));

	bool is_rev_ok = (pdev->revision == 1U);

	return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
}