/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"
static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
                                 struct aq_hw_caps_s *aq_hw_caps)
{
        memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
        return 0;
}
static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
                                        struct aq_hw_ops *ops)
{
        struct hw_atl_s *self = NULL;

        self = kzalloc(sizeof(*self), GFP_KERNEL);
        if (!self)
                goto err_exit;

        self->base.aq_pci_func = aq_pci_func;
        self->base.not_ff_addr = 0x10U;

err_exit:
        return (struct aq_hw_s *)self;
}
static void hw_atl_a0_destroy(struct aq_hw_s *self)
{
        kfree(self);
}
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;

        glb_glb_reg_res_dis_set(self, 1U);
        pci_pci_reg_res_dis_set(self, 0U);
        rx_rx_reg_res_dis_set(self, 0U);
        tx_tx_reg_res_dis_set(self, 0U);

        glb_soft_res_set(self, 1);

        /* check 10 times by 1ms */
        AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);

        itr_irq_reg_res_dis_set(self, 0U);
        itr_res_irq_set(self, 1U);

        /* check 10 times by 1ms */
        AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);

        hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);

        err = aq_hw_err_from_flags(self);

        return err;
}
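/*
 * Note on the reset sequence above: the global register-reset-disable bit
 * is set while the PCI, RX and TX ones are cleared, a global soft reset is
 * then triggered and polled (10 attempts, 1 ms apart, per the
 * AQ_HW_WAIT_FOR arguments), the interrupt block is reset the same way,
 * and finally the firmware is told about the reset through MPI_RESET.
 */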
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;
        bool is_rx_flow_control = false;

        /* TPS Descriptor rate init */
        tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        /* TPS TC credits init */
        tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
        tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

        tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
        tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
        tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
        tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

        /* QoS Tx buf size per TC */
        buff_size = HW_ATL_A0_TXBUF_MAX;

        tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        tpb_tx_buff_hi_threshold_per_tc_set(self,
                                            (buff_size * (1024 / 32U) * 66U) /
                                            100U, tc);
        tpb_tx_buff_lo_threshold_per_tc_set(self,
                                            (buff_size * (1024 / 32U) * 50U) /
                                            100U, tc);

        /* QoS Rx buf size per TC */
        is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
        buff_size = HW_ATL_A0_RXBUF_MAX;

        rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        rpb_rx_buff_hi_threshold_per_tc_set(self,
                                            (buff_size *
                                             (1024U / 32U) * 66U) /
                                            100U, tc);
        rpb_rx_buff_lo_threshold_per_tc_set(self,
                                            (buff_size *
                                             (1024U / 32U) * 50U) /
                                            100U, tc);
        rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
                rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

        return aq_hw_err_from_flags(self);
}
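/*
 * Note on the threshold arithmetic above: the per-TC buffer size is given
 * in kilobytes, so (buff_size * (1024 / 32U)) appears to convert it into
 * the 32-byte units the threshold registers use; the high and low
 * watermarks are then set to roughly 66% and 50% of the buffer.
 */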
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
                                     struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = NULL;
        int err = 0;
        unsigned int i = 0U;
        unsigned int addr = 0U;

        cfg = self->aq_nic_cfg;

        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                rpf_rss_key_wr_data_set(self, key_data);
                rpf_rss_key_addr_set(self, addr);
                rpf_rss_key_wr_en_set(self, 1U);
                AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
        }

        err = aq_hw_err_from_flags(self);

        return err;
}
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u8 *indirection_table = rss_params->indirection_table;
        u32 i = 0U;
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        int err = 0;
        u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
                    HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];

        memset(bitary, 0, sizeof(bitary));

        for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                         ((i * 3U) & 0xFU));
        }

        for (i = AQ_DIMOF(bitary); i--;) {
                rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                rpf_rss_redir_tbl_addr_set(self, i);
                rpf_rss_redir_wr_en_set(self, 1U);
                AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
        }

        err = aq_hw_err_from_flags(self);

        return err;
}
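/*
 * Note on the packing loop above: each redirection entry is a 3-bit queue
 * index (entry i lands at bit offset i * 3), and the entries are packed
 * into the u16 bitary[] array so they can be written to the redirection
 * table 16 bits at a time through the wr_data/addr/wr_en registers.
 */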
static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
                                    struct aq_nic_cfg_s *aq_nic_cfg)
{
        /* TX checksums offloads */
        tpo_ipv4header_crc_offload_en_set(self, 1);
        tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* RX checksums offloads */
        rpo_ipv4header_crc_offload_en_set(self, 1);
        rpo_tcp_udp_crc_offload_en_set(self, 1);

        /* LSO offloads */
        tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
        thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
                        0x00010000U : 0x00000000U);
        tdm_tx_dca_en_set(self, 0U);
        tdm_tx_dca_mode_set(self, 0U);

        tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        reg_rx_flr_rss_control1set(self, cfg->is_rss ?
                                   0xB3333333U : 0x00000000U);

        /* Multicast filters */
        for (i = HW_ATL_A0_MAC_MAX; i--;) {
                rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                rpfl2unicast_flr_act_set(self, 1U, i);
        }

        reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* VLAN filters */
        rpf_vlan_outer_etht_set(self, 0x88A8U);
        rpf_vlan_inner_etht_set(self, 0x8100U);
        rpf_vlan_prom_mode_en_set(self, 1);

        /* Rx interrupts */
        rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        rpfl2broadcast_flr_act_set(self, 1U);
        rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        rdm_rx_dca_en_set(self, 0U);
        rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
        unsigned int h = 0U;
        unsigned int l = 0U;
        int err = 0;

        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
            (mac_addr[4] << 8) | mac_addr[5];

        rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
        rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
        rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
        rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);

        err = aq_hw_err_from_flags(self);

        return err;
}
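/*
 * Note on the address split above: the two leading bytes of the MAC
 * address are written to the MSW filter register and the remaining four
 * bytes to the LSW register; the unicast filter slot is disabled while
 * both halves are updated and re-enabled afterwards.
 */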
static int hw_atl_a0_hw_init(struct aq_hw_s *self,
                             struct aq_nic_cfg_s *aq_nic_cfg,
                             u8 *mac_addr)
{
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
                { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
                { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
                { 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */
        };
        int err = 0;

        self->aq_nic_cfg = aq_nic_cfg;

        hw_atl_utils_hw_chip_features_init(self,
                                           &PHAL_ATLANTIC_A0->chip_features);

        hw_atl_a0_hw_init_tx_path(self);
        hw_atl_a0_hw_init_rx_path(self);

        hw_atl_a0_hw_mac_addr_set(self, mac_addr);

        hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);

        reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
        reg_tx_dma_debug_ctl_set(self, 0x000000b8U);

        hw_atl_a0_hw_qos_set(self);
        hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        reg_irq_glb_ctl_set(self,
                            aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                                 [(aq_nic_cfg->vecs > 1U) ?
                                                  1 : 0]);

        itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        reg_gen_irq_map_set(self,
                            ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
                            ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
                            ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
                            ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);

        hw_atl_a0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}
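/*
 * Note on aq_hw_atl_igcr_table_ above: it holds one pair of global
 * interrupt control values per IRQ type (invalid/legacy/MSI/MSI-X); the
 * second value of each pair is used when more than one interrupt vector
 * is configured (aq_nic_cfg->vecs > 1U).
 */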
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        tdm_tx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        rdm_rx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
        tpb_tx_buff_en_set(self, 1);
        rpb_rx_buff_en_set(self, 1);
        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
        return 0;
}
static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int pkt_len = 0U;
        unsigned int frag_count = 0U;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                        HW_ATL_A0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_txc) {
                        /* TSO context descriptor */
                        txd->ctl |= (buff->len_l3 << 31) |
                                    (buff->len_l2 << 24) |
                                    HW_ATL_A0_TXD_CTL_CMD_TCP |
                                    HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl2 |= (buff->mss << 16) |
                                     (buff->len_l4 << 8) |
                                     (buff->len_l3 >> 1);

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;

                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
                } else {
                        /* data descriptor */
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
                                     ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
                                txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
                        }

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
                        }
                }

                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_a0_hw_tx_ring_tail_update(self, ring);
        return aq_hw_err_from_flags(self);
}
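/*
 * Note on the descriptor flow above: for a TSO send the first descriptor
 * is a context descriptor (DESC_TYPE_TXC) carrying MSS and L2/L3/L4
 * header lengths, and the following data descriptors (DESC_TYPE_TXD)
 * carry the buffer address and length; the final fragment sets EOP and
 * requests a write-back, and the hardware tail pointer is advanced once
 * after the whole chain has been written.
 */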
static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        rdm_rx_desc_en_set(self, false, aq_ring->idx);

        rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                           aq_ring->idx);

        reg_rx_dma_desc_base_addressmswset(self,
                                           dma_desc_addr_msw, aq_ring->idx);

        rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        rdm_rx_desc_data_buff_size_set(self,
                                       AQ_CFG_RX_FRAME_MAX / 1024U,
                                       aq_ring->idx);

        rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                           aq_ring->idx);

        reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                           aq_ring->idx);

        tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
             sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                                        HW_ATL_A0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
        }

        reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        int err = 0;
        unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);

        if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
                                        struct aq_ring_s *ring)
{
        struct device *ndev = aq_nic_get_dev(ring->aq_nic);

        for (; ring->hw_head != ring->sw_tail;
             ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];

                unsigned int is_err = 1U;
                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;

                if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
                        if ((1U << 4) &
                            reg_rx_dma_desc_status_get(self, ring->idx)) {
                                rdm_rx_desc_en_set(self, false, ring->idx);
                                rdm_rx_desc_res_set(self, true, ring->idx);
                                rdm_rx_desc_res_set(self, false, ring->idx);
                                rdm_rx_desc_en_set(self, true, ring->idx);
                        }

                        if (ring->hw_head ||
                            (rdm_rx_desc_head_ptr_get(self,
                                                      ring->idx) < 2U)) {
                                break;
                        } else if (!(rxd_wb->status & 0x1U)) {
                                struct hw_atl_rxd_wb_s *rxd_wb1 =
                                        (struct hw_atl_rxd_wb_s *)
                                        (&ring->dx_ring[(1U) *
                                                        HW_ATL_A0_RXD_SIZE]);

                                if ((rxd_wb1->status & 0x1U)) {
                                        rxd_wb->pkt_len = 1514U;
                                        rxd_wb->status = 3U;
                                } else {
                                        break;
                                }
                        }
                }

                buff = &ring->buff_ring[ring->hw_head];

                if (0x3U != (rxd_wb->status & 0x3U))
                        buff->is_error = 1U;

                is_err = (0x0000001CU & rxd_wb->status);
                is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
                pkt_type = 0xFFU & (rxd_wb->type >> 4);

                if (is_rx_check_sum_enabled) {
                        if (0x0U == (pkt_type & 0x3U))
                                buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;

                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;

                        /* Checksum offload workaround for small packets */
                        if (rxd_wb->pkt_len <= 60) {
                                buff->is_ip_cso = 0U;
                                buff->is_cso_err = 0U;
                        }
                }

                dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);

                if (is_err || rxd_wb->type & 0x1000U) {
                        /* status error or DMA error */
                        buff->is_error = 1U;
                } else {
                        if (self->aq_nic_cfg->is_rss) {
                                u16 rss_type = rxd_wb->type & 0xFU;

                                if (rss_type && rss_type < 0x8U) {
                                        buff->is_hash_l4 = (rss_type == 0x4 ||
                                                            rss_type == 0x5);
                                        buff->rss_hash = rxd_wb->rss_hash;
                                }
                        }

                        if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                                buff->len = rxd_wb->pkt_len %
                                        AQ_CFG_RX_FRAME_MAX;
                                buff->len = buff->len ?
                                        buff->len : AQ_CFG_RX_FRAME_MAX;
                                buff->next = 0U;
                                buff->is_eop = 1U;
                        } else {
                                /* jumbo */
                                buff->next = aq_ring_next_dx(ring,
                                                             ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}
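/*
 * Note on the write-back parsing above: bits 0 and 2 of rxd_wb->status are
 * used as the descriptor-done indication, pkt_type is taken from bits
 * [11:4] of rxd_wb->type, the checksum error flags come from status bits
 * [4:2], and the low nibble of rxd_wb->type selects the RSS hash type.
 * Packets of 60 bytes or less discard the checksum offload result as a
 * hardware workaround.
 */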
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        itr_irq_msk_setlsw_set(self, LODWORD(mask) |
                               (1U << HW_ATL_A0_ERR_INT));
        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        itr_irq_status_clearlsw_set(self, LODWORD(mask));

        if ((1U << 16) & reg_gen_irq_status_get(self))
                atomic_inc(&PHAL_ATLANTIC_A0->dpc);

        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = itr_irq_statuslsw_get(self);
        return aq_hw_err_from_flags(self);
}
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
                                          unsigned int packet_filter)
{
        unsigned int i = 0U;

        rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
        rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
        rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

        self->aq_nic_cfg->is_mc_list_enabled =
                        IS_FILTER_ENABLED(IFF_MULTICAST);

        for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
                rpfl2_uc_flr_en_set(self,
                                    (self->aq_nic_cfg->is_mc_list_enabled &&
                                     (i <= self->aq_nic_cfg->mc_list_count)) ?
                                    1U : 0U, i);

        return aq_hw_err_from_flags(self);
}
#undef IS_FILTER_ENABLED
static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
                                           [AQ_CFG_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
{
        int err = 0;

        if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
        for (self->aq_nic_cfg->mc_list_count = 0U;
             self->aq_nic_cfg->mc_list_count < count;
             ++self->aq_nic_cfg->mc_list_count) {
                u32 i = self->aq_nic_cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);

                rpfl2unicast_dest_addresslsw_set(self,
                                                 l, HW_ATL_A0_MAC_MIN + i);

                rpfl2unicast_dest_addressmsw_set(self,
                                                 h, HW_ATL_A0_MAC_MIN + i);

                rpfl2_uc_flr_en_set(self,
                                    (self->aq_nic_cfg->is_mc_list_enabled),
                                    HW_ATL_A0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_rx;

        if (self->aq_nic_cfg->itr) {
                if (self->aq_nic_cfg->itr !=
                    AQ_CFG_INTERRUPT_MODERATION_AUTO) {
                        u32 itr_ = (self->aq_nic_cfg->itr >> 1);

                        itr_ = min(AQ_CFG_IRQ_MASK, itr_);

                        itr_rx = 0x80000000U | (itr_ << 0x10);
                } else {
                        u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);

                        if (n < self->aq_link_status.mbps) {
                                itr_rx = 0U;
                        } else {
                                static unsigned int hw_timers_tbl_[] = {
                                        0x01CU, /* 10Gbit */
                                        0x039U, /* 5Gbit */
                                        0x039U, /* 5Gbit 5GS */
                                        0x073U, /* 2.5Gbit */
                                        0x120U, /* 1Gbit */
                                        0x1FFU, /* 100Mbit */
                                };

                                unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                                itr_rx = 0x80000000U |
                                         (hw_timers_tbl_[speed_index] << 0x10U);
                        }

                        aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
                        aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
                }
        } else {
                itr_rx = 0U;
        }

        for (i = HW_ATL_A0_RINGS_MAX; i--;)
                reg_irq_thr_set(self, itr_rx, i);

        return aq_hw_err_from_flags(self);
}
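/*
 * Note on the itr_rx value above: bit 31 enables interrupt throttling for
 * the ring and the timer value sits in the upper half-word (<< 0x10).  A
 * fixed setting uses the configured interval halved and clamped to
 * AQ_CFG_IRQ_MASK, while the automatic mode presumably picks a timer from
 * hw_timers_tbl_[] based on the negotiated link speed.
 */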
static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
        hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        tdm_tx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        rdm_rx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
{
        int err = 0;

        err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);

        return err;
}
static struct aq_hw_ops hw_atl_ops_ = {
        .create = hw_atl_a0_create,
        .destroy = hw_atl_a0_destroy,
        .get_hw_caps = hw_atl_a0_get_hw_caps,

        .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
        .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
        .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
        .hw_set_link_speed = hw_atl_a0_hw_set_speed,
        .hw_init = hw_atl_a0_hw_init,
        .hw_deinit = hw_atl_utils_hw_deinit,
        .hw_set_power = hw_atl_utils_hw_set_power,
        .hw_reset = hw_atl_a0_hw_reset,
        .hw_start = hw_atl_a0_hw_start,
        .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
        .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop,
        .hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start,
        .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop,
        .hw_stop = hw_atl_a0_hw_stop,

        .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit,
        .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update,

        .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive,
        .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill,

        .hw_irq_enable = hw_atl_a0_hw_irq_enable,
        .hw_irq_disable = hw_atl_a0_hw_irq_disable,
        .hw_irq_read = hw_atl_a0_hw_irq_read,

        .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
        .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
        .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
        .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
        .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
        .hw_rss_set = hw_atl_a0_hw_rss_set,
        .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
        .hw_get_regs = hw_atl_utils_hw_get_regs,
        .hw_update_stats = hw_atl_utils_update_stats,
        .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version = hw_atl_utils_get_fw_version,
};
struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
{
        bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
        bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
                          (pdev->device == HW_ATL_DEVICE_ID_D100) ||
                          (pdev->device == HW_ATL_DEVICE_ID_D107) ||
                          (pdev->device == HW_ATL_DEVICE_ID_D108) ||
                          (pdev->device == HW_ATL_DEVICE_ID_D109));

        bool is_rev_ok = (pdev->revision == 1U);

        return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
}
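/*
 * Note: hw_atl_a0_get_ops_by_id() is the only non-static symbol here; the
 * PCI probe path is expected to call it with the candidate device and bind
 * to the returned hw_atl_ops_ table only when the aQuantia vendor ID, a
 * known device ID and hardware revision 1 (presumably the A0 silicon) all
 * match.
 */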