/* Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
#include "bna.h"
#include "bfi.h"

static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)

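/*
 * Note: the two "soft reset" macros above only re-arm the pending state;
 * no command is sent to the firmware here.  The next pass through
 * bna_rxf_cfg_apply() replays the full VLAN table and RSS configuration.
 */
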
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				   enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				     enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				      enum bna_cleanup_type cleanup);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
		   enum bna_rxf_event);

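/*
 * RxF state machine overview, as implied by the handlers below:
 *
 *   stopped --RXF_E_START--> cfg_wait --(no pending config)--> started
 *   cfg_wait --RXF_E_STOP--> last_resp_wait --RXF_E_FW_RESP--> stopped
 *   started --RXF_E_CONFIG--> cfg_wait
 *   any state --RXF_E_FAIL--> stopped (after a config reset)
 *
 * Each firmware response re-enters bna_rxf_cfg_apply() until no more
 * configuration is pending.
 */
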
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		  enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

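/*
 * All bna_bfi_*_req() helpers below follow the same pattern: build the
 * request in the pre-allocated rxf->bfi_enet_cmd union, then post it to
 * the firmware message queue.  Only one RxF command is in flight at a
 * time; the completion arrives back as an RXF_E_FW_RESP event.
 */
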
static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
			  0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
			  0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

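/*
 * The VLAN filter table is downloaded to the firmware one block at a
 * time; block_idx selects which BFI_ENET_VLAN_BLOCK_SIZE-bit window of
 * vlan_filter_table is sent.  When VLAN filtering is disabled, every
 * mask word is written as all-ones, i.e. all VLAN IDs are accepted.
 */
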
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

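/*
 * The Toeplitz hash key is kept host-endian in rss_cfg and converted
 * word by word on its way to the firmware, matching the htonl()/htons()
 * convention used by every request in this file.
 */
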
static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}

static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}

static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		  enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}

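/*
 * Multicast CAM entries are reference counted through bna_mcam_handle:
 * several bna_mac entries may map to one hardware handle, and the
 * delete request is only issued (on a hard cleanup) when the last
 * reference goes away.
 */
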
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		mac = list_first_entry(&rxf->mcast_pending_del_q,
				       struct bna_mac, qe);
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		mac = list_first_entry(&rxf->mcast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}

static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~BIT(block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}

static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		mac = list_first_entry(&rxf->mcast_pending_del_q,
				       struct bna_mac, qe);
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		mac = list_first_entry(&rxf->mcast_active_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

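/*
 * bna_rxf_cfg_apply() issues at most one firmware command per call and
 * returns 1 while work remains.  The cfg_wait state re-invokes it on
 * each RXF_E_FW_RESP until it returns 0, which drives the RxF into the
 * started state.
 */
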
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}

static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}

void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			  struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			  struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
				ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
	     struct bna_rx *rx,
	     struct bna_rx_config *q_config,
	     struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
	       (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		mac = list_first_entry(&rxf->ucast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
	}

	if (rxf->ucast_pending_mac) {
		list_add_tail(&rxf->ucast_pending_mac->qe,
			      bna_ucam_mod_free_q(rxf->rx->bna));
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		mac = list_first_entry(&rxf->mcast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
	}

	ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = NULL;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
	    bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	ether_addr_copy(mac->addr, addr);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
{
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		mac = list_first_entry(&rxf->ucast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, &ucam_mod->free_q);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		mac = list_first_entry(&rxf->ucast_active_q,
				       struct bna_mac, qe);
		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		ether_addr_copy(del_mac->addr, mac->addr);
		del_mac->handle = mac->handle;
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		list_move_tail(&mac->qe, &ucam_mod->free_q);
	}

	/* Allocate nodes for the new list */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		mac = list_first_entry(&list_head, struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		mac = list_first_entry(&list_head, struct bna_mac, qe);
		list_move_tail(&mac->qe, &ucam_mod->free_q);
	}

	return BNA_CB_UCAST_CAM_FULL;
}

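/*
 * The listset operations replace the whole address list in one shot:
 * current active entries are scheduled for deletion, the new list is
 * staged on the pending_add queue, and a single RXF_E_CONFIG event lets
 * the state machine reconcile the difference with the firmware.
 */
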
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
{
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		mac = list_first_entry(&rxf->mcast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, &mcam_mod->free_q);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		mac = list_first_entry(&rxf->mcast_active_q,
				       struct bna_mac, qe);
		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
		ether_addr_copy(del_mac->addr, mac->addr);
		del_mac->handle = mac->handle;
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		list_move_tail(&mac->qe, &mcam_mod->free_q);
	}

	/* Allocate nodes for the new list */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		mac = list_first_entry(&list_head, struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		mac = list_first_entry(&list_head, struct bna_mac, qe);
		list_move_tail(&mac->qe, &mcam_mod->free_q);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_mcast_delall(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		mac = list_first_entry(&rxf->mcast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		mac = list_first_entry(&rxf->mcast_active_q,
				       struct bna_mac, qe);
		list_del(&mac->qe);
		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
		need_hw_config = 1;
	}

	if (need_hw_config)
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

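/*
 * vlan_filter_table is a host-side bitmap of one bit per VLAN ID.  Add
 * and delete only touch the bitmap and mark the ID's block dirty in
 * vlan_pending_bitmask; the dirty block is flushed to the firmware by
 * bna_rxf_vlan_cfg_apply() on the next config pass.
 */
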
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		mac = list_first_entry(&rxf->ucast_pending_del_q,
				       struct bna_mac, qe);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		ether_addr_copy(rxf->ucast_active_mac.addr,
				rxf->ucast_pending_mac->addr);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				  BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		mac = list_first_entry(&rxf->ucast_pending_add_q,
				       struct bna_mac, qe);
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		mac = list_first_entry(&rxf->ucast_pending_del_q,
				       struct bna_mac, qe);
		if (cleanup == BNA_SOFT_CLEANUP)
			list_move_tail(&mac->qe,
				       bna_ucam_mod_del_q(rxf->rx->bna));
		else {
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			list_move_tail(&mac->qe,
				       bna_ucam_mod_del_q(rxf->rx->bna));
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		mac = list_first_entry(&rxf->ucast_active_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
					  BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				       rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
			      rxf->rxmode_pending_bitmask) ||
	    (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask) ||
	    (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
				     rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask) ||
	    (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				       rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
	    (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}

#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define	call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)

#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)

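/*
 * bfi_enet_datapath_q_init() hands the firmware everything it needs to
 * walk a queue: the DMA address of the queue page table (QPT), the
 * address of the first queue page, and the page count/size.  See
 * bna_rxq_qpt_setup() below for how the QPT itself is filled in.
 */
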
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

bfa_fsm_state_decl(bna_rx, stopped,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
		   struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
		   struct bna_rx, enum bna_rx_event);

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
			      enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait(struct bna_rx *rx,
				 enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				     enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

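/*
 * Normal Rx bring-up, per the handlers above:
 *   stopped -> start_wait (enet start posted) -> rxf_start_wait -> started
 * and tear-down:
 *   started -> rxf_stop_wait -> stop_wait -> cleanup_wait -> stopped
 * failed/quiesce_wait absorb an IOC failure and restart the sequence
 * once the driver finishes its cleanup.
 */
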
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0; i < rx->num_paths; i++) {
		rxp = rxp ? list_next_entry(rxp, qe)
			: list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						 &q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* fall through: the large RxQ is set up as well */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						 &q0->qpt);
			if (q0->multi_buffer)
				/* multi-buffer is enabled by allocating
				 * a new rx with new set of resources.
				 * q0->buffer_size should be initialized to
				 * fragment size.
				 */
				cfg_req->rx_cfg.multi_buffer =
					BNA_STATUS_T_ENABLED;
			else
				q0->buffer_size =
					bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;

		default:
			BUG_ON(1);
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					 &rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;

	default:
		BUG_ON(1);
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
			 &req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}

static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
	    (rx_mod->rxp_free_count == 0) ||
	    (rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
		    (rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
		    (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	return 1;
}

static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;

	rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
	list_del(&rxq->qe);
	rx_mod->rxq_free_count--;

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxp *rxp = NULL;

	rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
	list_del(&rxp->qe);
	rx_mod->rxp_free_count--;

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx = NULL;

	BUG_ON(list_empty(&rx_mod->rx_free_q));
	if (type == BNA_RX_T_REGULAR)
		rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
	else
		rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);

	rx_mod->rx_free_count--;
	list_move_tail(&rx->qe, &rx_mod->rx_active_q);

	return rx;
}

static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *qe;

	/* Return the rx to the free list, sorted by ascending rid */
	list_for_each_prev(qe, &rx_mod->rx_free_q)
		if (((struct bna_rx *)qe)->rid < rx->rid)
			break;

	list_add(&rx->qe, qe);
	rx_mod->rx_free_count++;
}

static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		 struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}

static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		  struct bna_rxp *rxp,
		  u32 page_count,
		  u32 page_size,
		  struct bna_mem_descr *qpt_mem,
		  struct bna_mem_descr *swqpt_mem,
		  struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}

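/*
 * The queue page table built above is the indirection the hardware uses
 * to address a queue scattered over non-contiguous pages: entry i of
 * kv_qpt_ptr holds the DMA address of page i, while rcb->sw_qpt keeps
 * the matching kernel virtual addresses for the driver's own use.
 */
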
static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		   u32 page_count,
		   u32 page_size,
		   struct bna_mem_descr *qpt_mem,
		   struct bna_mem_descr *swqpt_mem,
		   struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}

static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}

static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}

void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}

void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->bna = NULL;
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}

void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}

void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
			  sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
	     i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}

void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}

void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q0_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
	cq_depth = roundup_pow_of_two(dq_depth + hq_depth);

	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	dq_depth = roundup_pow_of_two(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		hq_depth = roundup_pow_of_two(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;

	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * cpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * dpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * hpage_count;
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}

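/*
 * Sizing example (assuming PAGE_SIZE = 4096 and a hypothetical
 * BFI_CQ_WI_SIZE of 16 bytes): with q0_depth = 1024 and no header
 * queue, cq_depth = roundup_pow_of_two(1024) = 1024 and cq_size is
 * 16 KiB after page alignment, so cpage_count = SIZE_TO_PAGES(16384)
 * = 4 pages per path.  The numbers are illustrative only; the real
 * work-item sizes come from the BFI headers.
 */
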
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	struct bna_mem_descr *hqunmap_mem;
	struct bna_mem_descr *dqunmap_mem;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	u32 dpage_count, hpage_count;
	u32 hq_idx, dq_idx, rcb_idx;
	u32 cq_depth, i;
	u32 page_count;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
	hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
			PAGE_SIZE;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
			i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = BIT(rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
		rcb_idx++; dq_idx++;
		q0->rcb->q_depth = rx_cfg->q0_depth;
		q0->q_depth = rx_cfg->q0_depth;
		q0->multi_buffer = rx_cfg->q0_multi_buf;
		q0->buffer_size = rx_cfg->q0_buf_size;
		q0->num_vecs = rx_cfg->q0_num_vecs;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
			rcb_idx++; hq_idx++;
			q1->rcb->q_depth = rx_cfg->q1_depth;
			q1->q_depth = rx_cfg->q1_depth;
			q1->multi_buffer = BNA_STATUS_T_DISABLED;
			q1->num_vecs = 1;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->q1_buf_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		cq_depth = rx_cfg->q0_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			 0 : rx_cfg->q1_depth);
		/* if multi-buffer is enabled sum of q0_depth
		 * and q1_depth need not be a power of 2
		 */
		cq_depth = roundup_pow_of_two(cq_depth);
		rxp->cq.ccb->q_depth = cq_depth;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= BIT(rx->rid);

	return rx;
}
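
/* Teardown mirrors bna_rx_create(): every per-path queue is handed back to
 * the rx_mod free lists after the bnad callbacks have released their view
 * of the RCBs/CCB, and the rid bit is cleared so the id can be reused.
 */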
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
		list_del(&rxp->qe);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q)
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			break;
		}

	rx_mod->rid_mask &= ~BIT(rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}
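
/* BNA_SOFT_CLEANUP bypasses the stop state machine entirely: the hardware
 * is assumed unreachable, so the caller's stop callback fires immediately.
 */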
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat as if already stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}
void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
void
bna_rx_vlan_strip_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_strip_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask)
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid))
			goto err_return;

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = NULL;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
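
/* Dynamic interrupt moderation: dim_vector[load][bias] maps an observed
 * packet-rate bucket (and a small- vs large-packet bias) to a coalescing
 * timeout, which bna_rx_dim_update() below applies per completion queue.
 */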
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};
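
/* TX */

/* call_tx_stop_cbfn() consumes the one-shot stop callback: the stored
 * pointers are cleared before the callback is invoked, so a re-entrant
 * stop cannot fire it twice.
 */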
#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START			= 1,
	TX_E_STOP			= 2,
	TX_E_FAIL			= 3,
	TX_E_STARTED			= 4,
	TX_E_STOPPED			= 5,
	TX_E_CLEANUP_DONE		= 7,
	TX_E_BW_UPDATE			= 8,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
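
/* Tx state machine: stopped -> start_wait -> started on TX_E_START/STARTED,
 * stop_wait/cleanup_wait on the stop path, prio_*_wait for bandwidth
 * (priority) reconfiguration, and failed/quiesce_wait for IOC failure
 * recovery.  The entry handlers do the work; the event handlers mostly
 * just route between states.
 */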
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~BNA_TX_F_BW_UPDATED;
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~BNA_TX_F_BW_UPDATED;
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & BNA_TX_F_BW_UPDATED) {
			tx->flags &= ~BNA_TX_F_BW_UPDATED;
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/**
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
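
/* Build a BFI_ENET_H2I_TX_CFG_SET_REQ for the firmware: one q_cfg slot per
 * TxQ (queue page table, priority, IB index address and MSI-X vector),
 * followed by the IB and VLAN attributes, then post it on the message queue.
 */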
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0; i < tx->num_txq; i++) {
		txq = txq ? list_next_entry(txq, qe)
			: list_first_entry(&tx->txq_q, struct bna_txq, qe);
		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
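
/* The queue page table (QPT) gives the hardware the DMA address of every
 * page of the TxQ ring, while sw_qpt keeps the kernel-virtual equivalents
 * so the driver can walk the same ring from the CPU side.
 */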
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
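
/* Regular Tx objects are allocated from the head of the free list and
 * loopback ones from the tail, so the two types tend to occupy opposite
 * ends of the rid space.
 */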
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR)
		tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
	else
		tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
	list_del(&tx->qe);
	tx->type = type;

	return tx;
}
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_move_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	/* Keep the free list sorted by rid */
	list_for_each_prev(qe, &tx_mod->tx_free_q)
		if (((struct bna_tx *)qe)->rid < tx->rid)
			break;

	list_add(&tx->qe, qe);
}
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
	     i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}
void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
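
/* Resource sizing for one Tx object: a TCB per queue, QPT/software-QPT/page
 * memory derived from txq_depth rounded up to whole pages, the IB index
 * segment, and one MSI-X completion vector per TxQ.
 */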
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	int q_size;
	int page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
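
/* bna_tx_create() pairs with bna_tx_res_req(): it pulls a bna_tx and its
 * TxQs off the free lists, wires each TxQ's TCB and IB into the mdl[]
 * blocks allocated above, and leaves the Tx in the stopped state until
 * bna_tx_enable() is called.
 */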
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
					PAGE_SIZE;

	/**
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
		list_move_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = BIT(txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= BIT(tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
	bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}
void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}

static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
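
/* The Tx module owns the arrays backing all bna_tx/bna_txq objects and the
 * free/active lists; the start/stop/fail handlers below fan the enet-level
 * events out to every active Tx of the matching type.
 */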
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}
);