drivers/net/ethernet/brocade/bna/bna_tx_rx.c
f3bd5173 1/*
2732ba56 2 * Linux network driver for QLogic BR-series Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15 * Copyright (c) 2014-2015 QLogic Corporation
f3bd5173 16 * All rights reserved
2732ba56 17 * www.qlogic.com
18 */
19#include "bna.h"
20#include "bfi.h"
21
1aa8b471 22/* IB */
23static void
24bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
25{
26 ib->coalescing_timeo = coalescing_timeo;
27 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
28 (u32)ib->coalescing_timeo, 0);
29}
30
1aa8b471 31/* RXF */
32
33#define bna_rxf_vlan_cfg_soft_reset(rxf) \
34do { \
35 (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
36 (rxf)->vlan_strip_pending = true; \
37} while (0)
38
39#define bna_rxf_rss_cfg_soft_reset(rxf) \
40do { \
41 if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
42 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
43 BNA_RSS_F_CFG_PENDING | \
44 BNA_RSS_F_STATUS_PENDING); \
45} while (0)
46
47static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
50static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
51static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
52static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
53static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
54static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
55 enum bna_cleanup_type cleanup);
56static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
57 enum bna_cleanup_type cleanup);
58static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
59 enum bna_cleanup_type cleanup);
60
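/*
 * RXF state machine: stopped, paused, cfg_wait, started, fltr_clr_wait and
 * last_resp_wait. Each state declared below has an entry routine and an
 * event handler; events are delivered via bfa_fsm_send_event().
 */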
61bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
62 enum bna_rxf_event);
63bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
64 enum bna_rxf_event);
65bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
66 enum bna_rxf_event);
67bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
68 enum bna_rxf_event);
69bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
70 enum bna_rxf_event);
71bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
72 enum bna_rxf_event);
73
74static void
75bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
76{
77 call_rxf_stop_cbfn(rxf);
78}
79
80static void
81bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
82{
83 switch (event) {
84 case RXF_E_START:
85 if (rxf->flags & BNA_RXF_F_PAUSED) {
86 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
87 call_rxf_start_cbfn(rxf);
88 } else
89 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
90 break;
91
92 case RXF_E_STOP:
93 call_rxf_stop_cbfn(rxf);
94 break;
95
96 case RXF_E_FAIL:
97 /* No-op */
98 break;
99
100 case RXF_E_CONFIG:
101 call_rxf_cam_fltr_cbfn(rxf);
102 break;
103
104 case RXF_E_PAUSE:
105 rxf->flags |= BNA_RXF_F_PAUSED;
106 call_rxf_pause_cbfn(rxf);
107 break;
108
109 case RXF_E_RESUME:
110 rxf->flags &= ~BNA_RXF_F_PAUSED;
111 call_rxf_resume_cbfn(rxf);
112 break;
113
114 default:
115 bfa_sm_fault(event);
116 }
117}
118
119static void
120bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
121{
122 call_rxf_pause_cbfn(rxf);
123}
124
125static void
126bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
127{
128 switch (event) {
129 case RXF_E_STOP:
130 case RXF_E_FAIL:
131 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
132 break;
133
134 case RXF_E_CONFIG:
135 call_rxf_cam_fltr_cbfn(rxf);
136 break;
137
138 case RXF_E_RESUME:
139 rxf->flags &= ~BNA_RXF_F_PAUSED;
140 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
141 break;
142
143 default:
144 bfa_sm_fault(event);
145 }
146}
147
148static void
149bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
150{
151 if (!bna_rxf_cfg_apply(rxf)) {
152 /* No more pending config updates */
153 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
154 }
155}
156
157static void
158bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
159{
160 switch (event) {
161 case RXF_E_STOP:
162 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
163 break;
164
165 case RXF_E_FAIL:
166 bna_rxf_cfg_reset(rxf);
167 call_rxf_start_cbfn(rxf);
168 call_rxf_cam_fltr_cbfn(rxf);
169 call_rxf_resume_cbfn(rxf);
170 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
171 break;
172
173 case RXF_E_CONFIG:
174 /* No-op */
175 break;
176
177 case RXF_E_PAUSE:
178 rxf->flags |= BNA_RXF_F_PAUSED;
179 call_rxf_start_cbfn(rxf);
180 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
181 break;
182
183 case RXF_E_FW_RESP:
184 if (!bna_rxf_cfg_apply(rxf)) {
185 /* No more pending config updates */
186 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
187 }
188 break;
189
190 default:
191 bfa_sm_fault(event);
192 }
193}
194
195static void
196bna_rxf_sm_started_entry(struct bna_rxf *rxf)
197{
198 call_rxf_start_cbfn(rxf);
199 call_rxf_cam_fltr_cbfn(rxf);
200 call_rxf_resume_cbfn(rxf);
201}
202
203static void
204bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
205{
206 switch (event) {
207 case RXF_E_STOP:
208 case RXF_E_FAIL:
209 bna_rxf_cfg_reset(rxf);
210 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
211 break;
212
213 case RXF_E_CONFIG:
214 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
215 break;
216
217 case RXF_E_PAUSE:
218 rxf->flags |= BNA_RXF_F_PAUSED;
219 if (!bna_rxf_fltr_clear(rxf))
220 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
221 else
222 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
223 break;
224
225 default:
226 bfa_sm_fault(event);
227 }
228}
229
230static void
231bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
232{
233}
234
235static void
236bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
237{
238 switch (event) {
239 case RXF_E_FAIL:
240 bna_rxf_cfg_reset(rxf);
241 call_rxf_pause_cbfn(rxf);
242 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
243 break;
244
245 case RXF_E_FW_RESP:
246 if (!bna_rxf_fltr_clear(rxf)) {
247 /* No more pending CAM entries to clear */
248 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
249 }
250 break;
251
252 default:
253 bfa_sm_fault(event);
254 }
255}
256
257static void
258bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
259{
260}
261
262static void
263bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
264{
265 switch (event) {
266 case RXF_E_FAIL:
267 case RXF_E_FW_RESP:
268 bna_rxf_cfg_reset(rxf);
269 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
270 break;
271
272 default:
273 bfa_sm_fault(event);
274 }
275}
276
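/*
 * The bna_bfi_* helpers below build a BFI enet request in rxf->bfi_enet_cmd,
 * stamp the message header with this Rx's resource id and post it to the
 * firmware message queue; the reply is reported back as RXF_E_FW_RESP.
 */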
277static void
278bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
279 enum bfi_enet_h2i_msgs req_type)
280{
281 struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
282
283 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
284 req->mh.num_entries = htons(
285 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
d6b30598 286 ether_addr_copy(req->mac_addr, mac->addr);
287 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
288 sizeof(struct bfi_enet_ucast_req), &req->mh);
289 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
290}
291
292static void
293bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
294{
295 struct bfi_enet_mcast_add_req *req =
296 &rxf->bfi_enet_cmd.mcast_add_req;
297
298 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
299 0, rxf->rx->rid);
300 req->mh.num_entries = htons(
301 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
d6b30598 302 ether_addr_copy(req->mac_addr, mac->addr);
303 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
304 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
305 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
306}
307
308static void
309bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
310{
311 struct bfi_enet_mcast_del_req *req =
312 &rxf->bfi_enet_cmd.mcast_del_req;
313
314 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
315 0, rxf->rx->rid);
316 req->mh.num_entries = htons(
317 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
318 req->handle = htons(handle);
319 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
320 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
321 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
322}
323
324static void
325bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
326{
327 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
328
329 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
330 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
331 req->mh.num_entries = htons(
332 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
333 req->enable = status;
334 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
335 sizeof(struct bfi_enet_enable_req), &req->mh);
336 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
337}
338
339static void
340bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
341{
342 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
343
344 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
345 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
346 req->mh.num_entries = htons(
347 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
348 req->enable = status;
349 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
350 sizeof(struct bfi_enet_enable_req), &req->mh);
351 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
352}
353
354static void
355bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
356{
357 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
358 int i;
359 int j;
360
361 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
362 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
363 req->mh.num_entries = htons(
364 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
365 req->block_idx = block_idx;
366 for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
367 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
368 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
369 req->bit_mask[i] =
370 htonl(rxf->vlan_filter_table[j]);
371 else
372 req->bit_mask[i] = 0xFFFFFFFF;
373 }
374 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
375 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
376 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
377}
378
379static void
380bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
381{
382 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
383
384 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
385 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
386 req->mh.num_entries = htons(
387 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
388 req->enable = rxf->vlan_strip_status;
389 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
390 sizeof(struct bfi_enet_enable_req), &req->mh);
391 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
392}
393
394static void
395bna_bfi_rit_cfg(struct bna_rxf *rxf)
396{
397 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
398
399 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
400 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
401 req->mh.num_entries = htons(
402 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
403 req->size = htons(rxf->rit_size);
404 memcpy(&req->table[0], rxf->rit, rxf->rit_size);
405 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
406 sizeof(struct bfi_enet_rit_req), &req->mh);
407 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
408}
409
410static void
411bna_bfi_rss_cfg(struct bna_rxf *rxf)
412{
413 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
414 int i;
415
416 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
417 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
418 req->mh.num_entries = htons(
419 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
420 req->cfg.type = rxf->rss_cfg.hash_type;
421 req->cfg.mask = rxf->rss_cfg.hash_mask;
422 for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
423 req->cfg.key[i] =
424 htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
425 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
426 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
427 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
428}
429
430static void
431bna_bfi_rss_enable(struct bna_rxf *rxf)
432{
433 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
434
435 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
436 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
437 req->mh.num_entries = htons(
438 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
439 req->enable = rxf->rss_status;
440 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
441 sizeof(struct bfi_enet_enable_req), &req->mh);
442 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
443}
444
445/* This function gets the multicast MAC that has already been added to CAM */
446static struct bna_mac *
447bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
448{
449 struct bna_mac *mac;
450 struct list_head *qe;
451
452 list_for_each(qe, &rxf->mcast_active_q) {
453 mac = (struct bna_mac *)qe;
454 if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
455 return mac;
456 }
457
458 list_for_each(qe, &rxf->mcast_pending_del_q) {
459 mac = (struct bna_mac *)qe;
460 if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
461 return mac;
462 }
463
464 return NULL;
465}
466
467static struct bna_mcam_handle *
468bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
469{
470 struct bna_mcam_handle *mchandle;
471 struct list_head *qe;
472
473 list_for_each(qe, &rxf->mcast_handle_q) {
474 mchandle = (struct bna_mcam_handle *)qe;
475 if (mchandle->handle == handle)
476 return mchandle;
477 }
478
479 return NULL;
480}
481
482static void
483bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
484{
485 struct bna_mac *mcmac;
486 struct bna_mcam_handle *mchandle;
487
488 mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
489 mchandle = bna_rxf_mchandle_get(rxf, handle);
490 if (mchandle == NULL) {
491 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
492 mchandle->handle = handle;
493 mchandle->refcnt = 0;
494 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
495 }
496 mchandle->refcnt++;
497 mcmac->handle = mchandle;
498}
499
500static int
501bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
502 enum bna_cleanup_type cleanup)
503{
504 struct bna_mcam_handle *mchandle;
505 int ret = 0;
506
507 mchandle = mac->handle;
508 if (mchandle == NULL)
509 return ret;
510
511 mchandle->refcnt--;
512 if (mchandle->refcnt == 0) {
513 if (cleanup == BNA_HARD_CLEANUP) {
514 bna_bfi_mcast_del_req(rxf, mchandle->handle);
515 ret = 1;
516 }
517 list_del(&mchandle->qe);
518 bfa_q_qe_init(&mchandle->qe);
519 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
520 }
521 mac->handle = NULL;
522
523 return ret;
524}
525
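/*
 * Apply pending multicast changes: process deletions first to keep the CAM
 * usage in check, then one addition. Returns 1 if a request was posted.
 */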
526static int
527bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
528{
529 struct bna_mac *mac = NULL;
530 struct list_head *qe;
531 int ret;
532
20b298f5 533 /* First delete multicast entries to maintain the count */
534 while (!list_empty(&rxf->mcast_pending_del_q)) {
535 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
536 bfa_q_qe_init(qe);
537 mac = (struct bna_mac *)qe;
538 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
20b298f5 539 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
540 if (ret)
541 return ret;
542 }
543
544 /* Add multicast entries */
545 if (!list_empty(&rxf->mcast_pending_add_q)) {
546 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
547 bfa_q_qe_init(qe);
548 mac = (struct bna_mac *)qe;
549 list_add_tail(&mac->qe, &rxf->mcast_active_q);
550 bna_bfi_mcast_add_req(rxf, mac);
551 return 1;
552 }
553
554 return 0;
555}
556
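/*
 * VLAN filter updates go to the firmware one block at a time: pick the
 * lowest-numbered pending block, clear its pending bit and send it.
 */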
557static int
558bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
559{
560 u8 vlan_pending_bitmask;
561 int block_idx = 0;
562
563 if (rxf->vlan_pending_bitmask) {
564 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
565 while (!(vlan_pending_bitmask & 0x1)) {
566 block_idx++;
567 vlan_pending_bitmask >>= 1;
568 }
569 rxf->vlan_pending_bitmask &= ~(1 << block_idx);
570 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
571 return 1;
572 }
573
574 return 0;
575}
576
577static int
578bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
579{
580 struct list_head *qe;
581 struct bna_mac *mac;
582 int ret;
583
584 /* Throw away delete pending mcast entries */
585 while (!list_empty(&rxf->mcast_pending_del_q)) {
586 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
587 bfa_q_qe_init(qe);
588 mac = (struct bna_mac *)qe;
589 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
20b298f5 590 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
591 if (ret)
592 return ret;
593 }
594
595 /* Move active mcast entries to pending_add_q */
596 while (!list_empty(&rxf->mcast_active_q)) {
597 bfa_q_deq(&rxf->mcast_active_q, &qe);
598 bfa_q_qe_init(qe);
599 list_add_tail(qe, &rxf->mcast_pending_add_q);
600 mac = (struct bna_mac *)qe;
601 if (bna_rxf_mcast_del(rxf, mac, cleanup))
602 return 1;
603 }
604
605 return 0;
606}
607
608static int
609bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
610{
611 if (rxf->rss_pending) {
612 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
613 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
614 bna_bfi_rit_cfg(rxf);
615 return 1;
616 }
617
618 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
619 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
620 bna_bfi_rss_cfg(rxf);
621 return 1;
622 }
623
624 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
625 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
626 bna_bfi_rss_enable(rxf);
627 return 1;
628 }
629 }
630
631 return 0;
632}
633
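/*
 * Apply pending configuration in a fixed order: ucast, mcast, promisc,
 * allmulti, VLAN filter, VLAN strip, RSS. Only one request is outstanding
 * at a time; the cfg_wait state re-enters here on each RXF_E_FW_RESP until
 * this returns 0.
 */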
634static int
635bna_rxf_cfg_apply(struct bna_rxf *rxf)
636{
637 if (bna_rxf_ucast_cfg_apply(rxf))
638 return 1;
639
640 if (bna_rxf_mcast_cfg_apply(rxf))
641 return 1;
642
643 if (bna_rxf_promisc_cfg_apply(rxf))
644 return 1;
645
646 if (bna_rxf_allmulti_cfg_apply(rxf))
647 return 1;
648
649 if (bna_rxf_vlan_cfg_apply(rxf))
650 return 1;
651
652 if (bna_rxf_vlan_strip_cfg_apply(rxf))
653 return 1;
654
655 if (bna_rxf_rss_cfg_apply(rxf))
656 return 1;
657
658 return 0;
659}
660
661/* Only software reset */
662static int
663bna_rxf_fltr_clear(struct bna_rxf *rxf)
664{
665 if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
666 return 1;
667
668 if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
669 return 1;
670
671 if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
672 return 1;
673
674 if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
675 return 1;
676
677 return 0;
678}
679
680static void
681bna_rxf_cfg_reset(struct bna_rxf *rxf)
682{
683 bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
684 bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
685 bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
686 bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
687 bna_rxf_vlan_cfg_soft_reset(rxf);
688 bna_rxf_rss_cfg_soft_reset(rxf);
689}
690
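/*
 * Fill the RIT with the CQ id of each Rx path; the table is pushed to the
 * firmware by bna_bfi_rit_cfg() when RSS is configured.
 */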
691static void
692bna_rit_init(struct bna_rxf *rxf, int rit_size)
693{
694 struct bna_rx *rx = rxf->rx;
695 struct bna_rxp *rxp;
696 struct list_head *qe;
697 int offset = 0;
698
699 rxf->rit_size = rit_size;
700 list_for_each(qe, &rx->rxp_q) {
701 rxp = (struct bna_rxp *)qe;
702 rxf->rit[offset] = rxp->cq.ccb->id;
703 offset++;
704 }
705
706}
707
708void
709bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
710{
711 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
712}
713
714void
715bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
716 struct bfi_msgq_mhdr *msghdr)
717{
718 struct bfi_enet_rsp *rsp =
17b6f244 719 container_of(msghdr, struct bfi_enet_rsp, mh);
720
721 if (rsp->error) {
722 /* Clear ucast from cache */
723 rxf->ucast_active_set = 0;
724 }
725
726 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
727}
728
729void
730bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
731 struct bfi_msgq_mhdr *msghdr)
732{
733 struct bfi_enet_mcast_add_req *req =
734 &rxf->bfi_enet_cmd.mcast_add_req;
735 struct bfi_enet_mcast_add_rsp *rsp =
17b6f244 736 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
737
738 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
739 ntohs(rsp->handle));
740 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
741}
742
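/*
 * One-time RXF setup: initialize the unicast/multicast pending and active
 * lists, latch RSS, VLAN-filter and VLAN-strip settings from q_config and
 * start the state machine in the stopped state.
 */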
743static void
744bna_rxf_init(struct bna_rxf *rxf,
745 struct bna_rx *rx,
746 struct bna_rx_config *q_config,
747 struct bna_res_info *res_info)
748{
749 rxf->rx = rx;
750
751 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
752 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
753 rxf->ucast_pending_set = 0;
754 rxf->ucast_active_set = 0;
755 INIT_LIST_HEAD(&rxf->ucast_active_q);
756 rxf->ucast_pending_mac = NULL;
757
758 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
759 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
760 INIT_LIST_HEAD(&rxf->mcast_active_q);
761 INIT_LIST_HEAD(&rxf->mcast_handle_q);
762
763 if (q_config->paused)
764 rxf->flags |= BNA_RXF_F_PAUSED;
765
766 rxf->rit = (u8 *)
767 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
768 bna_rit_init(rxf, q_config->num_paths);
769
770 rxf->rss_status = q_config->rss_status;
771 if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
772 rxf->rss_cfg = q_config->rss_config;
773 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
774 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
775 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
776 }
777
778 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
779 memset(rxf->vlan_filter_table, 0,
780 (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
781 rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
782 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
783
784 rxf->vlan_strip_status = q_config->vlan_strip_status;
785
786 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
787}
788
789static void
790bna_rxf_uninit(struct bna_rxf *rxf)
791{
792 struct bna_mac *mac;
793
794 rxf->ucast_pending_set = 0;
795 rxf->ucast_active_set = 0;
796
797 while (!list_empty(&rxf->ucast_pending_add_q)) {
798 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
799 bfa_q_qe_init(&mac->qe);
20b298f5 800 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
801 }
802
803 if (rxf->ucast_pending_mac) {
804 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
805 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
806 rxf->ucast_pending_mac);
807 rxf->ucast_pending_mac = NULL;
808 }
809
810 while (!list_empty(&rxf->mcast_pending_add_q)) {
811 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
812 bfa_q_qe_init(&mac->qe);
20b298f5 813 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
814 }
815
816 rxf->rxmode_pending = 0;
817 rxf->rxmode_pending_bitmask = 0;
818 if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
819 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
820 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
821 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
822
823 rxf->rss_pending = 0;
824 rxf->vlan_strip_pending = false;
825
826 rxf->flags = 0;
827
828 rxf->rx = NULL;
829}
830
831static void
832bna_rx_cb_rxf_started(struct bna_rx *rx)
833{
834 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
835}
836
837static void
838bna_rxf_start(struct bna_rxf *rxf)
839{
840 rxf->start_cbfn = bna_rx_cb_rxf_started;
841 rxf->start_cbarg = rxf->rx;
842 bfa_fsm_send_event(rxf, RXF_E_START);
843}
844
845static void
846bna_rx_cb_rxf_stopped(struct bna_rx *rx)
847{
848 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
849}
850
851static void
852bna_rxf_stop(struct bna_rxf *rxf)
853{
854 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
855 rxf->stop_cbarg = rxf->rx;
856 bfa_fsm_send_event(rxf, RXF_E_STOP);
857}
858
859static void
860bna_rxf_fail(struct bna_rxf *rxf)
861{
862 bfa_fsm_send_event(rxf, RXF_E_FAIL);
863}
864
865enum bna_cb_status
866bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
867 void (*cbfn)(struct bnad *, struct bna_rx *))
868{
869 struct bna_rxf *rxf = &rx->rxf;
870
871 if (rxf->ucast_pending_mac == NULL) {
872 rxf->ucast_pending_mac =
20b298f5 873 bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
874 if (rxf->ucast_pending_mac == NULL)
875 return BNA_CB_UCAST_CAM_FULL;
876 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
877 }
878
e2f9ecfc 879 ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
880 rxf->ucast_pending_set = 1;
881 rxf->cam_fltr_cbfn = cbfn;
882 rxf->cam_fltr_cbarg = rx->bna->bnad;
883
884 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
885
886 return BNA_CB_SUCCESS;
887}
888
889enum bna_cb_status
890bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
891 void (*cbfn)(struct bnad *, struct bna_rx *))
892{
893 struct bna_rxf *rxf = &rx->rxf;
894 struct bna_mac *mac;
895
896 /* Check if already added or pending addition */
897 if (bna_mac_find(&rxf->mcast_active_q, addr) ||
898 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
899 if (cbfn)
900 cbfn(rx->bna->bnad, rx);
901 return BNA_CB_SUCCESS;
902 }
903
20b298f5 904 mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
905 if (mac == NULL)
906 return BNA_CB_MCAST_LIST_FULL;
907 bfa_q_qe_init(&mac->qe);
e2f9ecfc 908 ether_addr_copy(mac->addr, addr);
909 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
910
911 rxf->cam_fltr_cbfn = cbfn;
912 rxf->cam_fltr_cbarg = rx->bna->bnad;
913
914 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
915
916 return BNA_CB_SUCCESS;
917}
918
919enum bna_cb_status
920bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
921 void (*cbfn)(struct bnad *, struct bna_rx *))
922{
923 struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
924 struct bna_rxf *rxf = &rx->rxf;
925 struct list_head list_head;
926 struct list_head *qe;
927 u8 *mcaddr;
928 struct bna_mac *mac, *del_mac;
929 int i;
930
931 /* Purge the pending_add_q */
932 while (!list_empty(&rxf->ucast_pending_add_q)) {
933 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
934 bfa_q_qe_init(qe);
935 mac = (struct bna_mac *)qe;
936 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
937 }
938
939 /* Schedule active_q entries for deletion */
940 while (!list_empty(&rxf->ucast_active_q)) {
941 bfa_q_deq(&rxf->ucast_active_q, &qe);
942 mac = (struct bna_mac *)qe;
943 bfa_q_qe_init(&mac->qe);
944
945 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
946 memcpy(del_mac, mac, sizeof(*del_mac));
947 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
948 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
949 }
950
951 /* Allocate nodes */
952 INIT_LIST_HEAD(&list_head);
953 for (i = 0, mcaddr = uclist; i < count; i++) {
954 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
955 if (mac == NULL)
956 goto err_return;
957 bfa_q_qe_init(&mac->qe);
e2f9ecfc 958 ether_addr_copy(mac->addr, mcaddr);
959 list_add_tail(&mac->qe, &list_head);
960 mcaddr += ETH_ALEN;
961 }
962
963 /* Add the new entries */
964 while (!list_empty(&list_head)) {
965 bfa_q_deq(&list_head, &qe);
966 mac = (struct bna_mac *)qe;
967 bfa_q_qe_init(&mac->qe);
968 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
969 }
970
971 rxf->cam_fltr_cbfn = cbfn;
972 rxf->cam_fltr_cbarg = rx->bna->bnad;
973 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
974
975 return BNA_CB_SUCCESS;
976
977err_return:
978 while (!list_empty(&list_head)) {
979 bfa_q_deq(&list_head, &qe);
980 mac = (struct bna_mac *)qe;
981 bfa_q_qe_init(&mac->qe);
982 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
983 }
984
985 return BNA_CB_UCAST_CAM_FULL;
986}
987
988enum bna_cb_status
989bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
990 void (*cbfn)(struct bnad *, struct bna_rx *))
991{
20b298f5 992 struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
993 struct bna_rxf *rxf = &rx->rxf;
994 struct list_head list_head;
995 struct list_head *qe;
996 u8 *mcaddr;
20b298f5 997 struct bna_mac *mac, *del_mac;
998 int i;
999
1000 /* Purge the pending_add_q */
1001 while (!list_empty(&rxf->mcast_pending_add_q)) {
1002 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1003 bfa_q_qe_init(qe);
1004 mac = (struct bna_mac *)qe;
20b298f5 1005 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1006 }
1007
1008 /* Schedule active_q entries for deletion */
1009 while (!list_empty(&rxf->mcast_active_q)) {
1010 bfa_q_deq(&rxf->mcast_active_q, &qe);
1011 mac = (struct bna_mac *)qe;
1012 bfa_q_qe_init(&mac->qe);
1013
1014 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
1015
fe1624cf 1016 memcpy(del_mac, mac, sizeof(*del_mac));
1017 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1018 mac->handle = NULL;
1019 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1020 }
1021
1022 /* Allocate nodes */
1023 INIT_LIST_HEAD(&list_head);
1024 for (i = 0, mcaddr = mclist; i < count; i++) {
1025 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
1026 if (mac == NULL)
1027 goto err_return;
1028 bfa_q_qe_init(&mac->qe);
e2f9ecfc 1029 ether_addr_copy(mac->addr, mcaddr);
1030 list_add_tail(&mac->qe, &list_head);
1031
1032 mcaddr += ETH_ALEN;
1033 }
1034
1035 /* Add the new entries */
1036 while (!list_empty(&list_head)) {
1037 bfa_q_deq(&list_head, &qe);
1038 mac = (struct bna_mac *)qe;
1039 bfa_q_qe_init(&mac->qe);
1040 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1041 }
1042
1043 rxf->cam_fltr_cbfn = cbfn;
1044 rxf->cam_fltr_cbarg = rx->bna->bnad;
1045 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1046
1047 return BNA_CB_SUCCESS;
1048
1049err_return:
1050 while (!list_empty(&list_head)) {
1051 bfa_q_deq(&list_head, &qe);
1052 mac = (struct bna_mac *)qe;
1053 bfa_q_qe_init(&mac->qe);
20b298f5 1054 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1055 }
1056
1057 return BNA_CB_MCAST_LIST_FULL;
1058}
1059
1060void
1061bna_rx_mcast_delall(struct bna_rx *rx,
1062 void (*cbfn)(struct bnad *, struct bna_rx *))
1063{
1064 struct bna_rxf *rxf = &rx->rxf;
1065 struct list_head *qe;
1066 struct bna_mac *mac, *del_mac;
1067 int need_hw_config = 0;
1068
1069 /* Purge all entries from pending_add_q */
1070 while (!list_empty(&rxf->mcast_pending_add_q)) {
1071 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1072 mac = (struct bna_mac *)qe;
1073 bfa_q_qe_init(&mac->qe);
1074 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1075 }
1076
1077 /* Schedule all entries in active_q for deletion */
1078 while (!list_empty(&rxf->mcast_active_q)) {
1079 bfa_q_deq(&rxf->mcast_active_q, &qe);
1080 mac = (struct bna_mac *)qe;
1081 bfa_q_qe_init(&mac->qe);
1082
1083 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
1084
1085 memcpy(del_mac, mac, sizeof(*del_mac));
1086 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1087 mac->handle = NULL;
1088 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1089 need_hw_config = 1;
1090 }
1091
1092 if (need_hw_config) {
1093 rxf->cam_fltr_cbfn = cbfn;
1094 rxf->cam_fltr_cbarg = rx->bna->bnad;
1095 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1096 return;
1097 }
1098
1099 if (cbfn)
1100 (*cbfn)(rx->bna->bnad, rx);
1101}
1102
1103void
1104bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1105{
1106 struct bna_rxf *rxf = &rx->rxf;
1107 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1108 int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
1109 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1110
1111 rxf->vlan_filter_table[index] |= bit;
1112 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1113 rxf->vlan_pending_bitmask |= (1 << group_id);
1114 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1115 }
1116}
1117
1118void
1119bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1120{
1121 struct bna_rxf *rxf = &rx->rxf;
1122 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1123 int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
1124 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1125
1126 rxf->vlan_filter_table[index] &= ~bit;
1127 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1128 rxf->vlan_pending_bitmask |= (1 << group_id);
1129 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1130 }
1131}
1132
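/*
 * Unicast apply order: pending deletions first, then the default MAC set,
 * then any additional MAC entries. Returns 1 while a request is in flight.
 */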
1133static int
1134bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1135{
1136 struct bna_mac *mac = NULL;
1137 struct list_head *qe;
1138
 1139 /* Delete MAC addresses previously added */
1140 if (!list_empty(&rxf->ucast_pending_del_q)) {
1141 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1142 bfa_q_qe_init(qe);
1143 mac = (struct bna_mac *)qe;
1144 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
20b298f5 1145 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
1146 return 1;
1147 }
1148
1149 /* Set default unicast MAC */
1150 if (rxf->ucast_pending_set) {
1151 rxf->ucast_pending_set = 0;
1152 ether_addr_copy(rxf->ucast_active_mac.addr,
1153 rxf->ucast_pending_mac->addr);
1154 rxf->ucast_active_set = 1;
1155 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1156 BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1157 return 1;
1158 }
1159
1160 /* Add additional MAC entries */
1161 if (!list_empty(&rxf->ucast_pending_add_q)) {
1162 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1163 bfa_q_qe_init(qe);
1164 mac = (struct bna_mac *)qe;
1165 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1166 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1167 return 1;
1168 }
1169
1170 return 0;
1171}
1172
1173static int
1174bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1175{
1176 struct list_head *qe;
1177 struct bna_mac *mac;
1178
1179 /* Throw away delete pending ucast entries */
1180 while (!list_empty(&rxf->ucast_pending_del_q)) {
1181 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1182 bfa_q_qe_init(qe);
1183 mac = (struct bna_mac *)qe;
1184 if (cleanup == BNA_SOFT_CLEANUP)
1185 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1186 mac);
1187 else {
1188 bna_bfi_ucast_req(rxf, mac,
1189 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1190 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1191 mac);
1192 return 1;
1193 }
1194 }
1195
1196 /* Move active ucast entries to pending_add_q */
1197 while (!list_empty(&rxf->ucast_active_q)) {
1198 bfa_q_deq(&rxf->ucast_active_q, &qe);
1199 bfa_q_qe_init(qe);
1200 list_add_tail(qe, &rxf->ucast_pending_add_q);
1201 if (cleanup == BNA_HARD_CLEANUP) {
1202 mac = (struct bna_mac *)qe;
1203 bna_bfi_ucast_req(rxf, mac,
1204 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1205 return 1;
1206 }
1207 }
1208
1209 if (rxf->ucast_active_set) {
1210 rxf->ucast_pending_set = 1;
1211 rxf->ucast_active_set = 0;
1212 if (cleanup == BNA_HARD_CLEANUP) {
1213 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1214 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1215 return 1;
1216 }
1217 }
1218
1219 return 0;
1220}
1221
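/*
 * Promisc/allmulti changes are tracked as pending enable/disable bits in
 * rxmode_pending and reflected in rxmode_active once the corresponding
 * firmware request has been issued.
 */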
1222static int
1223bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1224{
1225 struct bna *bna = rxf->rx->bna;
1226
1227 /* Enable/disable promiscuous mode */
1228 if (is_promisc_enable(rxf->rxmode_pending,
1229 rxf->rxmode_pending_bitmask)) {
1230 /* move promisc configuration from pending -> active */
1231 promisc_inactive(rxf->rxmode_pending,
1232 rxf->rxmode_pending_bitmask);
1233 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1234 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1235 return 1;
1236 } else if (is_promisc_disable(rxf->rxmode_pending,
1237 rxf->rxmode_pending_bitmask)) {
1238 /* move promisc configuration from pending -> active */
1239 promisc_inactive(rxf->rxmode_pending,
1240 rxf->rxmode_pending_bitmask);
1241 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1242 bna->promisc_rid = BFI_INVALID_RID;
1243 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1244 return 1;
1245 }
1246
1247 return 0;
1248}
1249
1250static int
1251bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1252{
1253 struct bna *bna = rxf->rx->bna;
1254
1255 /* Clear pending promisc mode disable */
1256 if (is_promisc_disable(rxf->rxmode_pending,
1257 rxf->rxmode_pending_bitmask)) {
1258 promisc_inactive(rxf->rxmode_pending,
1259 rxf->rxmode_pending_bitmask);
1260 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1261 bna->promisc_rid = BFI_INVALID_RID;
1262 if (cleanup == BNA_HARD_CLEANUP) {
1263 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1264 return 1;
1265 }
1266 }
1267
1268 /* Move promisc mode config from active -> pending */
1269 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1270 promisc_enable(rxf->rxmode_pending,
1271 rxf->rxmode_pending_bitmask);
1272 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1273 if (cleanup == BNA_HARD_CLEANUP) {
1274 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1275 return 1;
1276 }
1277 }
1278
1279 return 0;
1280}
1281
1282static int
1283bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1284{
1285 /* Enable/disable allmulti mode */
1286 if (is_allmulti_enable(rxf->rxmode_pending,
1287 rxf->rxmode_pending_bitmask)) {
1288 /* move allmulti configuration from pending -> active */
1289 allmulti_inactive(rxf->rxmode_pending,
1290 rxf->rxmode_pending_bitmask);
1291 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1292 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1293 return 1;
1294 } else if (is_allmulti_disable(rxf->rxmode_pending,
1295 rxf->rxmode_pending_bitmask)) {
1296 /* move allmulti configuration from pending -> active */
1297 allmulti_inactive(rxf->rxmode_pending,
1298 rxf->rxmode_pending_bitmask);
1299 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1300 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1301 return 1;
1302 }
1303
1304 return 0;
1305}
1306
1307static int
1308bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1309{
1310 /* Clear pending allmulti mode disable */
1311 if (is_allmulti_disable(rxf->rxmode_pending,
1312 rxf->rxmode_pending_bitmask)) {
1313 allmulti_inactive(rxf->rxmode_pending,
1314 rxf->rxmode_pending_bitmask);
1315 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1316 if (cleanup == BNA_HARD_CLEANUP) {
1317 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1318 return 1;
1319 }
1320 }
1321
1322 /* Move allmulti mode config from active -> pending */
1323 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1324 allmulti_enable(rxf->rxmode_pending,
1325 rxf->rxmode_pending_bitmask);
1326 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1327 if (cleanup == BNA_HARD_CLEANUP) {
1328 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1329 return 1;
1330 }
1331 }
1332
1333 return 0;
1334}
1335
1336static int
1337bna_rxf_promisc_enable(struct bna_rxf *rxf)
1338{
1339 struct bna *bna = rxf->rx->bna;
1340 int ret = 0;
1341
1342 if (is_promisc_enable(rxf->rxmode_pending,
1343 rxf->rxmode_pending_bitmask) ||
1344 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1345 /* Do nothing if pending enable or already enabled */
1346 } else if (is_promisc_disable(rxf->rxmode_pending,
1347 rxf->rxmode_pending_bitmask)) {
1348 /* Turn off pending disable command */
1349 promisc_inactive(rxf->rxmode_pending,
1350 rxf->rxmode_pending_bitmask);
1351 } else {
1352 /* Schedule enable */
1353 promisc_enable(rxf->rxmode_pending,
1354 rxf->rxmode_pending_bitmask);
1355 bna->promisc_rid = rxf->rx->rid;
1356 ret = 1;
1357 }
1358
1359 return ret;
1360}
1361
1362static int
1363bna_rxf_promisc_disable(struct bna_rxf *rxf)
1364{
1365 struct bna *bna = rxf->rx->bna;
1366 int ret = 0;
1367
1368 if (is_promisc_disable(rxf->rxmode_pending,
1369 rxf->rxmode_pending_bitmask) ||
1370 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1371 /* Do nothing if pending disable or already disabled */
1372 } else if (is_promisc_enable(rxf->rxmode_pending,
1373 rxf->rxmode_pending_bitmask)) {
1374 /* Turn off pending enable command */
1375 promisc_inactive(rxf->rxmode_pending,
1376 rxf->rxmode_pending_bitmask);
1377 bna->promisc_rid = BFI_INVALID_RID;
1378 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1379 /* Schedule disable */
1380 promisc_disable(rxf->rxmode_pending,
1381 rxf->rxmode_pending_bitmask);
1382 ret = 1;
1383 }
1384
1385 return ret;
1386}
1387
1388static int
1389bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1390{
1391 int ret = 0;
1392
1393 if (is_allmulti_enable(rxf->rxmode_pending,
1394 rxf->rxmode_pending_bitmask) ||
1395 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1396 /* Do nothing if pending enable or already enabled */
1397 } else if (is_allmulti_disable(rxf->rxmode_pending,
1398 rxf->rxmode_pending_bitmask)) {
1399 /* Turn off pending disable command */
1400 allmulti_inactive(rxf->rxmode_pending,
1401 rxf->rxmode_pending_bitmask);
1402 } else {
1403 /* Schedule enable */
1404 allmulti_enable(rxf->rxmode_pending,
1405 rxf->rxmode_pending_bitmask);
1406 ret = 1;
1407 }
1408
1409 return ret;
1410}
1411
1412static int
1413bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1414{
1415 int ret = 0;
1416
1417 if (is_allmulti_disable(rxf->rxmode_pending,
1418 rxf->rxmode_pending_bitmask) ||
1419 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1420 /* Do nothing if pending disable or already disabled */
1421 } else if (is_allmulti_enable(rxf->rxmode_pending,
1422 rxf->rxmode_pending_bitmask)) {
1423 /* Turn off pending enable command */
1424 allmulti_inactive(rxf->rxmode_pending,
1425 rxf->rxmode_pending_bitmask);
1426 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1427 /* Schedule disable */
1428 allmulti_disable(rxf->rxmode_pending,
1429 rxf->rxmode_pending_bitmask);
1430 ret = 1;
1431 }
1432
1433 return ret;
1434}
1435
1436static int
1437bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1438{
1439 if (rxf->vlan_strip_pending) {
1440 rxf->vlan_strip_pending = false;
1441 bna_bfi_vlan_strip_enable(rxf);
1442 return 1;
1443 }
1444
1445 return 0;
1446}
1447
1aa8b471 1448/* RX */
1449
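/* Number of RxQs needed: one per path for SINGLE rxp_type, two otherwise */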
1450#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1451 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1452
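/* Number of PAGE_SIZE pages needed to hold "size" bytes, rounded up */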
1453#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1454 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1455
1456#define call_rx_stop_cbfn(rx) \
1457do { \
1458 if ((rx)->stop_cbfn) { \
1459 void (*cbfn)(void *, struct bna_rx *); \
1460 void *cbarg; \
1461 cbfn = (rx)->stop_cbfn; \
1462 cbarg = (rx)->stop_cbarg; \
1463 (rx)->stop_cbfn = NULL; \
1464 (rx)->stop_cbarg = NULL; \
1465 cbfn(cbarg, rx); \
1466 } \
1467} while (0)
1468
1469#define call_rx_stall_cbfn(rx) \
1470do { \
1471 if ((rx)->rx_stall_cbfn) \
1472 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1473} while (0)
1474
1475#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1476do { \
1477 struct bna_dma_addr cur_q_addr = \
1478 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1479 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1480 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1481 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1482 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1483 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1484 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1485} while (0)
1486
1487static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1488static void bna_rx_enet_stop(struct bna_rx *rx);
1489static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1490
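/*
 * Rx state machine. Normal bring-up: stopped -> start_wait -> rxf_start_wait
 * -> started. Teardown passes through rxf_stop_wait, stop_wait and
 * cleanup_wait; failed and quiesce_wait handle failure and restart.
 */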
1491bfa_fsm_state_decl(bna_rx, stopped,
1492 struct bna_rx, enum bna_rx_event);
1493bfa_fsm_state_decl(bna_rx, start_wait,
1494 struct bna_rx, enum bna_rx_event);
1495bfa_fsm_state_decl(bna_rx, start_stop_wait,
1496 struct bna_rx, enum bna_rx_event);
1497bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1498 struct bna_rx, enum bna_rx_event);
1499bfa_fsm_state_decl(bna_rx, started,
1500 struct bna_rx, enum bna_rx_event);
1501bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1502 struct bna_rx, enum bna_rx_event);
1503bfa_fsm_state_decl(bna_rx, stop_wait,
1504 struct bna_rx, enum bna_rx_event);
1505bfa_fsm_state_decl(bna_rx, cleanup_wait,
1506 struct bna_rx, enum bna_rx_event);
1507bfa_fsm_state_decl(bna_rx, failed,
1508 struct bna_rx, enum bna_rx_event);
1509bfa_fsm_state_decl(bna_rx, quiesce_wait,
1510 struct bna_rx, enum bna_rx_event);
1511
1512static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1513{
1514 call_rx_stop_cbfn(rx);
1515}
1516
1517static void bna_rx_sm_stopped(struct bna_rx *rx,
1518 enum bna_rx_event event)
1519{
1520 switch (event) {
1521 case RX_E_START:
1522 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1523 break;
1524
1525 case RX_E_STOP:
1526 call_rx_stop_cbfn(rx);
1527 break;
1528
1529 case RX_E_FAIL:
1530 /* no-op */
1531 break;
1532
1533 default:
1534 bfa_sm_fault(event);
1535 break;
1536 }
1537}
1538
1539static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1540{
1541 bna_bfi_rx_enet_start(rx);
1542}
1543
7f4341fe 1544static void
1545bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1546{
1547}
1548
1549static void
1550bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1551{
1552 switch (event) {
1553 case RX_E_FAIL:
1554 case RX_E_STOPPED:
1555 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1556 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1557 break;
1558
1559 case RX_E_STARTED:
1560 bna_rx_enet_stop(rx);
1561 break;
1562
1563 default:
1564 bfa_sm_fault(event);
1565 break;
1566 }
1567}
1568
1569static void bna_rx_sm_start_wait(struct bna_rx *rx,
1570 enum bna_rx_event event)
1571{
1572 switch (event) {
1573 case RX_E_STOP:
215a64a2 1574 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1575 break;
1576
1577 case RX_E_FAIL:
1578 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1579 break;
1580
1581 case RX_E_STARTED:
1582 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1583 break;
1584
1585 default:
1586 bfa_sm_fault(event);
1587 break;
1588 }
1589}
1590
1591static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1592{
1593 rx->rx_post_cbfn(rx->bna->bnad, rx);
1594 bna_rxf_start(&rx->rxf);
1595}
1596
7f4341fe 1597static void
1598bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1599{
1600}
1601
1602static void
1603bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1604{
1605 switch (event) {
1606 case RX_E_FAIL:
1607 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1608 bna_rxf_fail(&rx->rxf);
5bcf6ac0 1609 call_rx_stall_cbfn(rx);
1610 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1611 break;
1612
1613 case RX_E_RXF_STARTED:
1614 bna_rxf_stop(&rx->rxf);
1615 break;
1616
1617 case RX_E_RXF_STOPPED:
1618 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
5bcf6ac0 1619 call_rx_stall_cbfn(rx);
1620 bna_rx_enet_stop(rx);
1621 break;
1622
1623 default:
1624 bfa_sm_fault(event);
1625 break;
1626 }
1627
1628}
1629
1630static void
1631bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1632{
1633}
1634
1635static void
1636bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1637{
1638 switch (event) {
1639 case RX_E_FAIL:
1640 case RX_E_STOPPED:
1641 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1642 break;
1643
1644 case RX_E_STARTED:
1645 bna_rx_enet_stop(rx);
1646 break;
1647
1648 default:
1649 bfa_sm_fault(event);
1650 }
1651}
1652
7f4341fe 1653static void
1654bna_rx_sm_started_entry(struct bna_rx *rx)
1655{
1656 struct bna_rxp *rxp;
1657 struct list_head *qe_rxp;
1658 int is_regular = (rx->type == BNA_RX_T_REGULAR);
1659
1660 /* Start IB */
1661 list_for_each(qe_rxp, &rx->rxp_q) {
1662 rxp = (struct bna_rxp *)qe_rxp;
1663 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1664 }
1665
1666 bna_ethport_cb_rx_started(&rx->bna->ethport);
1667}
1668
1669static void
1670bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1671{
1672 switch (event) {
1673 case RX_E_STOP:
1674 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1675 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1676 bna_rxf_stop(&rx->rxf);
1677 break;
1678
1679 case RX_E_FAIL:
1680 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1681 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1682 bna_rxf_fail(&rx->rxf);
5bcf6ac0 1683 call_rx_stall_cbfn(rx);
1684 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1685 break;
1686
1687 default:
1688 bfa_sm_fault(event);
1689 break;
1690 }
1691}
1692
1693static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1694 enum bna_rx_event event)
1695{
1696 switch (event) {
1697 case RX_E_STOP:
1698 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1699 break;
1700
1701 case RX_E_FAIL:
1702 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1703 bna_rxf_fail(&rx->rxf);
5bcf6ac0 1704 call_rx_stall_cbfn(rx);
1705 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1706 break;
1707
1708 case RX_E_RXF_STARTED:
1709 bfa_fsm_set_state(rx, bna_rx_sm_started);
1710 break;
1711
1712 default:
1713 bfa_sm_fault(event);
1714 break;
1715 }
1716}
1717
7f4341fe 1718static void
1719bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1720{
1721}
1722
7f4341fe 1723static void
1724bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1725{
1726 switch (event) {
1727 case RX_E_FAIL:
1728 case RX_E_RXF_STOPPED:
1729 /* No-op */
1730 break;
1731
1732 case RX_E_CLEANUP_DONE:
1733 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1734 break;
1735
1736 default:
1737 bfa_sm_fault(event);
1738 break;
1739 }
1740}
1741
1742static void
1743bna_rx_sm_failed_entry(struct bna_rx *rx)
1744{
1745}
1746
1747static void
1748bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1749{
1750 switch (event) {
1751 case RX_E_START:
1752 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1753 break;
1754
1755 case RX_E_STOP:
1756 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1757 break;
1758
1759 case RX_E_FAIL:
1760 case RX_E_RXF_STARTED:
1761 case RX_E_RXF_STOPPED:
1762 /* No-op */
1763 break;
1764
1765 case RX_E_CLEANUP_DONE:
1766 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1767 break;
1768
1769 default:
1770 bfa_sm_fault(event);
1771 break;
1772} }
1773
1774static void
1775bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1776{
1777}
1778
1779static void
1780bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1781{
1782 switch (event) {
1783 case RX_E_STOP:
1784 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1785 break;
1786
1787 case RX_E_FAIL:
1788 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1789 break;
1790
1791 case RX_E_CLEANUP_DONE:
1792 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1793 break;
1794
1795 default:
1796 bfa_sm_fault(event);
1797 break;
1798 }
1799}
1800
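/*
 * Build the RX_CFG_SET_REQ for the firmware: per-path RxQ/CQ page tables,
 * buffer sizes and interrupt block (IB) settings, plus the Rx queue type,
 * then post it on the message queue.
 */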
1801static void
1802bna_bfi_rx_enet_start(struct bna_rx *rx)
1803{
1804 struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1805 struct bna_rxp *rxp = NULL;
1806 struct bna_rxq *q0 = NULL, *q1 = NULL;
1807 struct list_head *rxp_qe;
1808 int i;
1809
1810 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1811 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1812 cfg_req->mh.num_entries = htons(
1813 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1814
e29aa339 1815 cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1816 cfg_req->num_queue_sets = rx->num_paths;
1817 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1818 i < rx->num_paths;
1819 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1820 rxp = (struct bna_rxp *)rxp_qe;
1821
1822 GET_RXQS(rxp, q0, q1);
1823 switch (rxp->type) {
1824 case BNA_RXP_SLR:
1825 case BNA_RXP_HDS:
1826 /* Small RxQ */
1827 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1828 &q1->qpt);
1829 cfg_req->q_cfg[i].qs.rx_buffer_size =
1830 htons((u16)q1->buffer_size);
1831 /* Fall through */
1832
1833 case BNA_RXP_SINGLE:
1834 /* Large/Single RxQ */
1835 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1836 &q0->qpt);
1837 if (q0->multi_buffer)
1838 /* multi-buffer is enabled by allocating
 1839 * a new rx with a new set of resources.
1840 * q0->buffer_size should be initialized to
1841 * fragment size.
1842 */
1843 cfg_req->rx_cfg.multi_buffer =
1844 BNA_STATUS_T_ENABLED;
1845 else
1846 q0->buffer_size =
1847 bna_enet_mtu_get(&rx->bna->enet);
1848 cfg_req->q_cfg[i].ql.rx_buffer_size =
1849 htons((u16)q0->buffer_size);
1850 break;
1851
1852 default:
1853 BUG_ON(1);
1854 }
1855
1856 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1857 &rxp->cq.qpt);
1858
1859 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1860 rxp->cq.ib.ib_seg_host_addr.lsb;
1861 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1862 rxp->cq.ib.ib_seg_host_addr.msb;
1863 cfg_req->q_cfg[i].ib.intr.msix_index =
1864 htons((u16)rxp->cq.ib.intr_vector);
1865 }
1866
1867 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1868 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1869 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1870 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1871 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1872 ? BNA_STATUS_T_ENABLED :
1873 BNA_STATUS_T_DISABLED;
1874 cfg_req->ib_cfg.coalescing_timeout =
1875 htonl((u32)rxp->cq.ib.coalescing_timeo);
1876 cfg_req->ib_cfg.inter_pkt_timeout =
1877 htonl((u32)rxp->cq.ib.interpkt_timeo);
1878 cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1879
1880 switch (rxp->type) {
1881 case BNA_RXP_SLR:
1882 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1883 break;
1884
1885 case BNA_RXP_HDS:
1886 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1887 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1888 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1889 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1890 break;
1891
1892 case BNA_RXP_SINGLE:
1893 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1894 break;
1895
1896 default:
1897 BUG_ON(1);
1898 }
1899 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1900
1901 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1902 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1903 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1904}
1905
1906static void
1907bna_bfi_rx_enet_stop(struct bna_rx *rx)
1908{
1909 struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1910
1911 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1912 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1913 req->mh.num_entries = htons(
1914 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1915 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1916 &req->mh);
1917 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1918}
1919
1920static void
1921bna_rx_enet_stop(struct bna_rx *rx)
1922{
1923 struct bna_rxp *rxp;
1924 struct list_head *qe_rxp;
1925
1926 /* Stop IB */
1927 list_for_each(qe_rxp, &rx->rxp_q) {
1928 rxp = (struct bna_rxp *)qe_rxp;
1929 bna_ib_stop(rx->bna, &rxp->cq.ib);
1930 }
1931
1932 bna_bfi_rx_enet_stop(rx);
1933}
1934
1935static int
1936bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1937{
1938 if ((rx_mod->rx_free_count == 0) ||
1939 (rx_mod->rxp_free_count == 0) ||
1940 (rx_mod->rxq_free_count == 0))
1941 return 0;
1942
1943 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1944 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1945 (rx_mod->rxq_free_count < rx_cfg->num_paths))
1946 return 0;
1947 } else {
1948 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1949 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1950 return 0;
1951 }
1952
1953 return 1;
1954}
1955
1956static struct bna_rxq *
1957bna_rxq_get(struct bna_rx_mod *rx_mod)
1958{
1959 struct bna_rxq *rxq = NULL;
1960 struct list_head *qe = NULL;
1961
1962 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1963 rx_mod->rxq_free_count--;
1964 rxq = (struct bna_rxq *)qe;
1965 bfa_q_qe_init(&rxq->qe);
1966
1967 return rxq;
1968}
1969
1970static void
1971bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1972{
1973 bfa_q_qe_init(&rxq->qe);
1974 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1975 rx_mod->rxq_free_count++;
1976}
1977
1978static struct bna_rxp *
1979bna_rxp_get(struct bna_rx_mod *rx_mod)
1980{
1981 struct list_head *qe = NULL;
1982 struct bna_rxp *rxp = NULL;
1983
1984 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1985 rx_mod->rxp_free_count--;
1986 rxp = (struct bna_rxp *)qe;
1987 bfa_q_qe_init(&rxp->qe);
1988
1989 return rxp;
1990}
1991
1992static void
1993bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1994{
1995 bfa_q_qe_init(&rxp->qe);
1996 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1997 rx_mod->rxp_free_count++;
1998}
1999
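/*
 * BNA_RX_T_REGULAR objects are dequeued from the head of the free list,
 * other Rx types from the tail.
 */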
2000static struct bna_rx *
2001bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2002{
2003 struct list_head *qe = NULL;
2004 struct bna_rx *rx = NULL;
2005
2006 if (type == BNA_RX_T_REGULAR) {
2007 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2008 } else
2009 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
2010
2011 rx_mod->rx_free_count--;
2012 rx = (struct bna_rx *)qe;
2013 bfa_q_qe_init(&rx->qe);
2014 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2015 rx->type = type;
2016
2017 return rx;
2018}
2019
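/* Return an Rx to the free list, keeping the list sorted by rid */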
2020static void
2021bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2022{
2023 struct list_head *prev_qe = NULL;
2024 struct list_head *qe;
2025
2026 bfa_q_qe_init(&rx->qe);
2027
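	/* Re-insert into rx_free_q in ascending rid order; bna_rx_get()
	 * hands out regular RXs from the head and loopback RXs from the
	 * tail of this list.
	 */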
2028 list_for_each(qe, &rx_mod->rx_free_q) {
2029 if (((struct bna_rx *)qe)->rid < rx->rid)
2030 prev_qe = qe;
2031 else
2032 break;
2033 }
2034
2035 if (prev_qe == NULL) {
2036 /* This is the first entry */
2037 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
2038 } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
2039 /* This is the last entry */
2040 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2041 } else {
2042 /* Somewhere in the middle */
2043 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
2044 bfa_q_prev(&rx->qe) = prev_qe;
2045 bfa_q_next(prev_qe) = &rx->qe;
2046 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
2047 }
2048
2049 rx_mod->rx_free_count++;
2050}
2051
2052static void
2053bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
2054 struct bna_rxq *q1)
2055{
2056 switch (rxp->type) {
2057 case BNA_RXP_SINGLE:
2058 rxp->rxq.single.only = q0;
2059 rxp->rxq.single.reserved = NULL;
2060 break;
2061 case BNA_RXP_SLR:
2062 rxp->rxq.slr.large = q0;
2063 rxp->rxq.slr.small = q1;
2064 break;
2065 case BNA_RXP_HDS:
2066 rxp->rxq.hds.data = q0;
2067 rxp->rxq.hds.hdr = q1;
2068 break;
2069 default:
2070 break;
2071 }
2072}
2073
2074static void
2075bna_rxq_qpt_setup(struct bna_rxq *rxq,
2076 struct bna_rxp *rxp,
2077 u32 page_count,
2078 u32 page_size,
2079 struct bna_mem_descr *qpt_mem,
2080 struct bna_mem_descr *swqpt_mem,
2081 struct bna_mem_descr *page_mem)
2082{
2083 u8 *kva;
2084 u64 dma;
2085 struct bna_dma_addr bna_dma;
2086 int i;
2087
2088 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2089 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2090 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2091 rxq->qpt.page_count = page_count;
2092 rxq->qpt.page_size = page_size;
2093
2094 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2095 rxq->rcb->sw_q = page_mem->kva;
2096
2097 kva = page_mem->kva;
2098 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2099
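	/* Fill the h/w queue page table with the DMA address of each
	 * PAGE_SIZE chunk of the queue, and mirror the kernel virtual
	 * addresses in sw_qpt[] for driver-side access.
	 */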
2100 for (i = 0; i < rxq->qpt.page_count; i++) {
2101 rxq->rcb->sw_qpt[i] = kva;
2102 kva += PAGE_SIZE;
2103
2104 BNA_SET_DMA_ADDR(dma, &bna_dma);
f3bd5173 2105 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
5216562a 2106 bna_dma.lsb;
f3bd5173 2107 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2108 bna_dma.msb;
2109 dma += PAGE_SIZE;
2110 }
2111}
2112
2113static void
2114bna_rxp_cqpt_setup(struct bna_rxp *rxp,
2115 u32 page_count,
2116 u32 page_size,
2117 struct bna_mem_descr *qpt_mem,
2118 struct bna_mem_descr *swqpt_mem,
2119 struct bna_mem_descr *page_mem)
2120{
2121 u8 *kva;
2122 u64 dma;
2123 struct bna_dma_addr bna_dma;
2124 int i;
2125
2126 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2127 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2128 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2129 rxp->cq.qpt.page_count = page_count;
2130 rxp->cq.qpt.page_size = page_size;
2131
2132 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2133 rxp->cq.ccb->sw_q = page_mem->kva;
2134
2135 kva = page_mem->kva;
2136 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2137
2138 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2139 rxp->cq.ccb->sw_qpt[i] = kva;
2140 kva += PAGE_SIZE;
f3bd5173 2141
5216562a 2142 BNA_SET_DMA_ADDR(dma, &bna_dma);
f3bd5173 2143 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
5216562a 2144 bna_dma.lsb;
f3bd5173 2145 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2146 bna_dma.msb;
2147 dma += PAGE_SIZE;
2148 }
2149}
2150
2151static void
2152bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2153{
2154 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2155
2156 bfa_wc_down(&rx_mod->rx_stop_wc);
2157}
2158
2159static void
2160bna_rx_mod_cb_rx_stopped_all(void *arg)
2161{
2162 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2163
2164 if (rx_mod->stop_cbfn)
2165 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2166 rx_mod->stop_cbfn = NULL;
2167}
2168
2169static void
2170bna_rx_start(struct bna_rx *rx)
2171{
2172 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2173 if (rx->rx_flags & BNA_RX_F_ENABLED)
2174 bfa_fsm_send_event(rx, RX_E_START);
2175}
2176
2177static void
2178bna_rx_stop(struct bna_rx *rx)
2179{
2180 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2181 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2182 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2183 else {
2184 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2185 rx->stop_cbarg = &rx->bna->rx_mod;
2186 bfa_fsm_send_event(rx, RX_E_STOP);
2187 }
2188}
2189
2190static void
2191bna_rx_fail(struct bna_rx *rx)
2192{
2193 /* Indicate Enet is not enabled, and failed */
2194 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2195 bfa_fsm_send_event(rx, RX_E_FAIL);
2196}
2197
2198void
2199bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2200{
2201 struct bna_rx *rx;
2202 struct list_head *qe;
2203
2204 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2205 if (type == BNA_RX_T_LOOPBACK)
2206 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2207
2208 list_for_each(qe, &rx_mod->rx_active_q) {
2209 rx = (struct bna_rx *)qe;
2210 if (rx->type == type)
2211 bna_rx_start(rx);
2212 }
2213}
2214
2215void
2216bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2217{
2218 struct bna_rx *rx;
2219 struct list_head *qe;
2220
2221 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2222 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2223
2224 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2225
2226 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2227
2228 list_for_each(qe, &rx_mod->rx_active_q) {
2229 rx = (struct bna_rx *)qe;
2230 if (rx->type == type) {
2231 bfa_wc_up(&rx_mod->rx_stop_wc);
2232 bna_rx_stop(rx);
2233 }
2234 }
2235
2236 bfa_wc_wait(&rx_mod->rx_stop_wc);
2237}
2238
2239void
2240bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2241{
2242 struct bna_rx *rx;
2243 struct list_head *qe;
2244
2245 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2246 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2247
2248 list_for_each(qe, &rx_mod->rx_active_q) {
2249 rx = (struct bna_rx *)qe;
2250 bna_rx_fail(rx);
2251 }
2252}
2253
2254void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2255 struct bna_res_info *res_info)
2256{
2257 int index;
2258 struct bna_rx *rx_ptr;
2259 struct bna_rxp *rxp_ptr;
2260 struct bna_rxq *rxq_ptr;
2261
2262 rx_mod->bna = bna;
2263 rx_mod->flags = 0;
2264
2265 rx_mod->rx = (struct bna_rx *)
2266 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2267 rx_mod->rxp = (struct bna_rxp *)
2268 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2269 rx_mod->rxq = (struct bna_rxq *)
2270 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2271
2272 /* Initialize the queues */
2273 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2274 rx_mod->rx_free_count = 0;
2275 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2276 rx_mod->rxq_free_count = 0;
2277 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2278 rx_mod->rxp_free_count = 0;
2279 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2280
2281 /* Build RX queues */
2282 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2283 rx_ptr = &rx_mod->rx[index];
2284
2285 bfa_q_qe_init(&rx_ptr->qe);
2286 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2287 rx_ptr->bna = NULL;
2288 rx_ptr->rid = index;
2289 rx_ptr->stop_cbfn = NULL;
2290 rx_ptr->stop_cbarg = NULL;
2291
2292 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2293 rx_mod->rx_free_count++;
2294 }
2295
2296 /* build RX-path queue */
2297 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2298 rxp_ptr = &rx_mod->rxp[index];
2299 bfa_q_qe_init(&rxp_ptr->qe);
2300 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2301 rx_mod->rxp_free_count++;
2302 }
2303
2304 /* build RXQ queue */
2305 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2306 rxq_ptr = &rx_mod->rxq[index];
2307 bfa_q_qe_init(&rxq_ptr->qe);
2308 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2309 rx_mod->rxq_free_count++;
2310 }
2311}
2312
2313void
2314bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2315{
2316 struct list_head *qe;
2317 int i;
2318
2319 i = 0;
2320 list_for_each(qe, &rx_mod->rx_free_q)
2321 i++;
2322
2323 i = 0;
2324 list_for_each(qe, &rx_mod->rxp_free_q)
2325 i++;
2326
2327 i = 0;
2328 list_for_each(qe, &rx_mod->rxq_free_q)
2329 i++;
2330
2331 rx_mod->bna = NULL;
2332}
2333
2334void
2335bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2336{
2337 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2338 struct bna_rxp *rxp = NULL;
2339 struct bna_rxq *q0 = NULL, *q1 = NULL;
2340 struct list_head *rxp_qe;
2341 int i;
2342
2343 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2344 sizeof(struct bfi_enet_rx_cfg_rsp));
2345
2346 rx->hw_id = cfg_rsp->hw_id;
2347
2348 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2349 i < rx->num_paths;
2350 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2351 rxp = (struct bna_rxp *)rxp_qe;
2352 GET_RXQS(rxp, q0, q1);
2353
2354 /* Setup doorbells */
2355 rxp->cq.ccb->i_dbell->doorbell_addr =
2356 rx->bna->pcidev.pci_bar_kva
2357 + ntohl(cfg_rsp->q_handles[i].i_dbell);
2358 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2359 q0->rcb->q_dbell =
2360 rx->bna->pcidev.pci_bar_kva
2361 + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2362 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2363 if (q1) {
2364 q1->rcb->q_dbell =
2365 rx->bna->pcidev.pci_bar_kva
2366 + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2367 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2368 }
2369
2370 /* Initialize producer/consumer indexes */
2371 (*rxp->cq.ccb->hw_producer_index) = 0;
2372 rxp->cq.ccb->producer_index = 0;
2373 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2374 if (q1)
2375 q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2376 }
2377
2378 bfa_fsm_send_event(rx, RX_E_STARTED);
2379}
2380
2381void
2382bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2383{
2384 bfa_fsm_send_event(rx, RX_E_STOPPED);
2385}
2386
2387void
2388bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2389{
2390 u32 cq_size, hq_size, dq_size;
2391 u32 cpage_count, hpage_count, dpage_count;
2392 struct bna_mem_info *mem_info;
2393 u32 cq_depth;
2394 u32 hq_depth;
2395 u32 dq_depth;
2396
2397 dq_depth = q_cfg->q0_depth;
2398 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2399 cq_depth = dq_depth + hq_depth;
2400
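	/* Queue depths are rounded up to a power of 2, converted to
	 * bytes using the work-item size, aligned to PAGE_SIZE, and the
	 * resulting page counts size the QPT, shadow QPT and page
	 * memory requested below.
	 */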
2401 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2402 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2403 cq_size = ALIGN(cq_size, PAGE_SIZE);
2404 cpage_count = SIZE_TO_PAGES(cq_size);
2405
2406 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2407 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2408 dq_size = ALIGN(dq_size, PAGE_SIZE);
2409 dpage_count = SIZE_TO_PAGES(dq_size);
2410
2411 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2412 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2413 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2414 hq_size = ALIGN(hq_size, PAGE_SIZE);
2415 hpage_count = SIZE_TO_PAGES(hq_size);
2416 } else
2417 hpage_count = 0;
2418
2419 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2420 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2421 mem_info->mem_type = BNA_MEM_T_KVA;
2422 mem_info->len = sizeof(struct bna_ccb);
2423 mem_info->num = q_cfg->num_paths;
2424
2425 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2426 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2427 mem_info->mem_type = BNA_MEM_T_KVA;
2428 mem_info->len = sizeof(struct bna_rcb);
2429 mem_info->num = BNA_GET_RXQS(q_cfg);
2430
2431 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2432 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2433 mem_info->mem_type = BNA_MEM_T_DMA;
2434 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2435 mem_info->num = q_cfg->num_paths;
2436
2437 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2438 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2439 mem_info->mem_type = BNA_MEM_T_KVA;
2440 mem_info->len = cpage_count * sizeof(void *);
2441 mem_info->num = q_cfg->num_paths;
2442
2443 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2444 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2445 mem_info->mem_type = BNA_MEM_T_DMA;
2446 mem_info->len = PAGE_SIZE * cpage_count;
2447 mem_info->num = q_cfg->num_paths;
2448
2449 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2450 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2451 mem_info->mem_type = BNA_MEM_T_DMA;
2452 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2453 mem_info->num = q_cfg->num_paths;
2454
2455 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2456 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2457 mem_info->mem_type = BNA_MEM_T_KVA;
2458 mem_info->len = dpage_count * sizeof(void *);
2459 mem_info->num = q_cfg->num_paths;
2460
2461 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2462 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2463 mem_info->mem_type = BNA_MEM_T_DMA;
2464 mem_info->len = PAGE_SIZE * dpage_count;
2465 mem_info->num = q_cfg->num_paths;
2466
2467 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2468 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2469 mem_info->mem_type = BNA_MEM_T_DMA;
2470 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2471 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2472
2473 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2474 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2475 mem_info->mem_type = BNA_MEM_T_KVA;
2476 mem_info->len = hpage_count * sizeof(void *);
2477 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2478
2479 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2480 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2481 mem_info->mem_type = BNA_MEM_T_DMA;
2482 mem_info->len = PAGE_SIZE * hpage_count;
2483 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2484
2485 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2486 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2487 mem_info->mem_type = BNA_MEM_T_DMA;
2488 mem_info->len = BFI_IBIDX_SIZE;
2489 mem_info->num = q_cfg->num_paths;
2490
2491 res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2492 mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2493 mem_info->mem_type = BNA_MEM_T_KVA;
2494 mem_info->len = BFI_ENET_RSS_RIT_MAX;
2495 mem_info->num = 1;
2496
2497 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2498 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2499 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2500}
2501
2502struct bna_rx *
2503bna_rx_create(struct bna *bna, struct bnad *bnad,
2504 struct bna_rx_config *rx_cfg,
d91d25d5 2505 const struct bna_rx_event_cbfn *rx_cbfn,
2506 struct bna_res_info *res_info,
2507 void *priv)
2508{
2509 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2510 struct bna_rx *rx;
2511 struct bna_rxp *rxp;
2512 struct bna_rxq *q0;
2513 struct bna_rxq *q1;
2514 struct bna_intr_info *intr_info;
2515 struct bna_mem_descr *hqunmap_mem;
2516 struct bna_mem_descr *dqunmap_mem;
2517 struct bna_mem_descr *ccb_mem;
2518 struct bna_mem_descr *rcb_mem;
2519 struct bna_mem_descr *cqpt_mem;
2520 struct bna_mem_descr *cswqpt_mem;
2521 struct bna_mem_descr *cpage_mem;
2522 struct bna_mem_descr *hqpt_mem;
2523 struct bna_mem_descr *dqpt_mem;
2524 struct bna_mem_descr *hsqpt_mem;
2525 struct bna_mem_descr *dsqpt_mem;
2526 struct bna_mem_descr *hpage_mem;
2527 struct bna_mem_descr *dpage_mem;
2528 u32 dpage_count, hpage_count;
2529 u32 hq_idx, dq_idx, rcb_idx;
2530 u32 cq_depth, i;
2531 u32 page_count;
2532
2533 if (!bna_rx_res_check(rx_mod, rx_cfg))
2534 return NULL;
2535
2536 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2537 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2538 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2539 dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2540 hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2541 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2542 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2543 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2544 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2545 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2546 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2547 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2548 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2549 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2550
2551 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2552 PAGE_SIZE;
f3bd5173 2553
2554 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2555 PAGE_SIZE;
f3bd5173 2556
2557 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2558 PAGE_SIZE;
2559
2560 rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2561 rx->bna = bna;
2562 rx->rx_flags = 0;
2563 INIT_LIST_HEAD(&rx->rxp_q);
2564 rx->stop_cbfn = NULL;
2565 rx->stop_cbarg = NULL;
2566 rx->priv = priv;
2567
2568 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2569 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2570 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2571 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
5bcf6ac0 2572 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2573 /* Following callbacks are mandatory */
2574 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2575 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2576
2577 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2578 switch (rx->type) {
2579 case BNA_RX_T_REGULAR:
2580 if (!(rx->bna->rx_mod.flags &
2581 BNA_RX_MOD_F_ENET_LOOPBACK))
2582 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2583 break;
2584 case BNA_RX_T_LOOPBACK:
2585 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2586 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2587 break;
2588 }
2589 }
2590
2591 rx->num_paths = rx_cfg->num_paths;
2592 for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2593 i < rx->num_paths; i++) {
2594 rxp = bna_rxp_get(rx_mod);
2595 list_add_tail(&rxp->qe, &rx->rxp_q);
2596 rxp->type = rx_cfg->rxp_type;
2597 rxp->rx = rx;
2598 rxp->cq.rx = rx;
2599
2600 q0 = bna_rxq_get(rx_mod);
2601 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2602 q1 = NULL;
2603 else
2604 q1 = bna_rxq_get(rx_mod);
2605
2606 if (1 == intr_info->num)
2607 rxp->vector = intr_info->idl[0].vector;
2608 else
2609 rxp->vector = intr_info->idl[i].vector;
2610
2611 /* Setup IB */
2612
2613 rxp->cq.ib.ib_seg_host_addr.lsb =
2614 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2615 rxp->cq.ib.ib_seg_host_addr.msb =
2616 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2617 rxp->cq.ib.ib_seg_host_addr_kva =
2618 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2619 rxp->cq.ib.intr_type = intr_info->intr_type;
2620 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2621 rxp->cq.ib.intr_vector = rxp->vector;
2622 else
2623 rxp->cq.ib.intr_vector = (1 << rxp->vector);
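		/* MSI-X carries the vector number itself; INTx encodes
		 * the vector as a bitmask.
		 */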
2624 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2625 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2626 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2627
2628 bna_rxp_add_rxqs(rxp, q0, q1);
2629
2630 /* Setup large Q */
2631
2632 q0->rx = rx;
2633 q0->rxp = rxp;
2634
2635 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2636 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2637 rcb_idx++; dq_idx++;
2638 q0->rcb->q_depth = rx_cfg->q0_depth;
2639 q0->q_depth = rx_cfg->q0_depth;
2640 q0->multi_buffer = rx_cfg->q0_multi_buf;
2641 q0->buffer_size = rx_cfg->q0_buf_size;
2642 q0->num_vecs = rx_cfg->q0_num_vecs;
2643 q0->rcb->rxq = q0;
2644 q0->rcb->bnad = bna->bnad;
2645 q0->rcb->id = 0;
2646 q0->rx_packets = q0->rx_bytes = 0;
2647 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2648
2649 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
5216562a 2650 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2651
2652 if (rx->rcb_setup_cbfn)
2653 rx->rcb_setup_cbfn(bnad, q0->rcb);
2654
2655 /* Setup small Q */
2656
2657 if (q1) {
2658 q1->rx = rx;
2659 q1->rxp = rxp;
2660
2661 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2662 q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2663 rcb_idx++; hq_idx++;
2664 q1->rcb->q_depth = rx_cfg->q1_depth;
2665 q1->q_depth = rx_cfg->q1_depth;
2666 q1->multi_buffer = BNA_STATUS_T_DISABLED;
2667 q1->num_vecs = 1;
2668 q1->rcb->rxq = q1;
2669 q1->rcb->bnad = bna->bnad;
2670 q1->rcb->id = 1;
2671 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2672 rx_cfg->hds_config.forced_offset
e29aa339 2673 : rx_cfg->q1_buf_size;
2674 q1->rx_packets = q1->rx_bytes = 0;
2675 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2676
2677 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2678 &hqpt_mem[i], &hsqpt_mem[i],
5216562a 2679 &hpage_mem[i]);
2680
2681 if (rx->rcb_setup_cbfn)
2682 rx->rcb_setup_cbfn(bnad, q1->rcb);
2683 }
2684
2685 /* Setup CQ */
2686
2687 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2688 cq_depth = rx_cfg->q0_depth +
2689 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2690 0 : rx_cfg->q1_depth);
 2691		/* if multi-buffer is enabled, the sum of q0_depth
 2692		 * and q1_depth need not be a power of 2
2693 */
2694 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2695 rxp->cq.ccb->q_depth = cq_depth;
2696 rxp->cq.ccb->cq = &rxp->cq;
2697 rxp->cq.ccb->rcb[0] = q0->rcb;
2698 q0->rcb->ccb = rxp->cq.ccb;
2699 if (q1) {
2700 rxp->cq.ccb->rcb[1] = q1->rcb;
2701 q1->rcb->ccb = rxp->cq.ccb;
2702 }
2703 rxp->cq.ccb->hw_producer_index =
2704 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2705 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2706 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2707 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2708 rxp->cq.ccb->rx_coalescing_timeo =
2709 rxp->cq.ib.coalescing_timeo;
2710 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2711 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2712 rxp->cq.ccb->bnad = bna->bnad;
2713 rxp->cq.ccb->id = i;
2714
2715 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
5216562a 2716 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2717
2718 if (rx->ccb_setup_cbfn)
2719 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2720 }
2721
2722 rx->hds_cfg = rx_cfg->hds_config;
2723
2724 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2725
2726 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2727
2728 rx_mod->rid_mask |= (1 << rx->rid);
2729
2730 return rx;
2731}
2732
2733void
2734bna_rx_destroy(struct bna_rx *rx)
2735{
2736 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2737 struct bna_rxq *q0 = NULL;
2738 struct bna_rxq *q1 = NULL;
2739 struct bna_rxp *rxp;
2740 struct list_head *qe;
2741
2742 bna_rxf_uninit(&rx->rxf);
2743
2744 while (!list_empty(&rx->rxp_q)) {
2745 bfa_q_deq(&rx->rxp_q, &rxp);
2746 GET_RXQS(rxp, q0, q1);
2747 if (rx->rcb_destroy_cbfn)
2748 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2749 q0->rcb = NULL;
2750 q0->rxp = NULL;
2751 q0->rx = NULL;
2752 bna_rxq_put(rx_mod, q0);
2753
2754 if (q1) {
2755 if (rx->rcb_destroy_cbfn)
2756 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2757 q1->rcb = NULL;
2758 q1->rxp = NULL;
2759 q1->rx = NULL;
2760 bna_rxq_put(rx_mod, q1);
2761 }
2762 rxp->rxq.slr.large = NULL;
2763 rxp->rxq.slr.small = NULL;
2764
2765 if (rx->ccb_destroy_cbfn)
2766 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2767 rxp->cq.ccb = NULL;
2768 rxp->rx = NULL;
2769 bna_rxp_put(rx_mod, rxp);
2770 }
2771
2772 list_for_each(qe, &rx_mod->rx_active_q) {
2773 if (qe == &rx->qe) {
2774 list_del(&rx->qe);
2775 bfa_q_qe_init(&rx->qe);
2776 break;
2777 }
2778 }
2779
2780 rx_mod->rid_mask &= ~(1 << rx->rid);
2781
2782 rx->bna = NULL;
2783 rx->priv = NULL;
2784 bna_rx_put(rx_mod, rx);
2785}
2786
2787void
2788bna_rx_enable(struct bna_rx *rx)
2789{
2790 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2791 return;
2792
2793 rx->rx_flags |= BNA_RX_F_ENABLED;
2794 if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2795 bfa_fsm_send_event(rx, RX_E_START);
2796}
2797
2798void
2799bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2800 void (*cbfn)(void *, struct bna_rx *))
2801{
2802 if (type == BNA_SOFT_CLEANUP) {
 2803		/* h/w should not be accessed. Treat as if we're stopped */
2804 (*cbfn)(rx->bna->bnad, rx);
2805 } else {
2806 rx->stop_cbfn = cbfn;
2807 rx->stop_cbarg = rx->bna->bnad;
2808
2809 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2810
2811 bfa_fsm_send_event(rx, RX_E_STOP);
2812 }
2813}
2814
2815void
2816bna_rx_cleanup_complete(struct bna_rx *rx)
2817{
2818 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2819}
2820
2821void
2822bna_rx_vlan_strip_enable(struct bna_rx *rx)
2823{
2824 struct bna_rxf *rxf = &rx->rxf;
2825
2826 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2827 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2828 rxf->vlan_strip_pending = true;
2829 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2830 }
2831}
2832
2833void
2834bna_rx_vlan_strip_disable(struct bna_rx *rx)
2835{
2836 struct bna_rxf *rxf = &rx->rxf;
2837
2838 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2839 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2840 rxf->vlan_strip_pending = true;
2841 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2842 }
2843}
2844
2845enum bna_cb_status
2846bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2847 enum bna_rxmode bitmask,
2848 void (*cbfn)(struct bnad *, struct bna_rx *))
2849{
2850 struct bna_rxf *rxf = &rx->rxf;
2851 int need_hw_config = 0;
2852
2853 /* Error checks */
2854
2855 if (is_promisc_enable(new_mode, bitmask)) {
2856 /* If promisc mode is already enabled elsewhere in the system */
2857 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2858 (rx->bna->promisc_rid != rxf->rx->rid))
2859 goto err_return;
2860
2861 /* If default mode is already enabled in the system */
2862 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2863 goto err_return;
2864
2865 /* Trying to enable promiscuous and default mode together */
2866 if (is_default_enable(new_mode, bitmask))
2867 goto err_return;
2868 }
2869
2870 if (is_default_enable(new_mode, bitmask)) {
2871 /* If default mode is already enabled elsewhere in the system */
2872 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2873 (rx->bna->default_mode_rid != rxf->rx->rid)) {
2874 goto err_return;
2875 }
2876
2877 /* If promiscuous mode is already enabled in the system */
2878 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2879 goto err_return;
2880 }
2881
2882 /* Process the commands */
2883
2884 if (is_promisc_enable(new_mode, bitmask)) {
2885 if (bna_rxf_promisc_enable(rxf))
2886 need_hw_config = 1;
2887 } else if (is_promisc_disable(new_mode, bitmask)) {
2888 if (bna_rxf_promisc_disable(rxf))
2889 need_hw_config = 1;
2890 }
2891
2892 if (is_allmulti_enable(new_mode, bitmask)) {
2893 if (bna_rxf_allmulti_enable(rxf))
2894 need_hw_config = 1;
2895 } else if (is_allmulti_disable(new_mode, bitmask)) {
2896 if (bna_rxf_allmulti_disable(rxf))
2897 need_hw_config = 1;
2898 }
2899
2900 /* Trigger h/w if needed */
2901
2902 if (need_hw_config) {
2903 rxf->cam_fltr_cbfn = cbfn;
2904 rxf->cam_fltr_cbarg = rx->bna->bnad;
2905 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2906 } else if (cbfn)
2907 (*cbfn)(rx->bna->bnad, rx);
2908
2909 return BNA_CB_SUCCESS;
2910
2911err_return:
2912 return BNA_CB_FAIL;
2913}
2914
2915void
2916bna_rx_vlanfilter_enable(struct bna_rx *rx)
2917{
2918 struct bna_rxf *rxf = &rx->rxf;
2919
2920 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2921 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2922 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2923 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2924 }
2925}
2926
2927void
2928bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2929{
2930 struct bna_rxp *rxp;
2931 struct list_head *qe;
2932
2933 list_for_each(qe, &rx->rxp_q) {
2934 rxp = (struct bna_rxp *)qe;
2935 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2936 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2937 }
2938}
2939
2940void
2941bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2942{
2943 int i, j;
2944
2945 for (i = 0; i < BNA_LOAD_T_MAX; i++)
2946 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2947 bna->rx_mod.dim_vector[i][j] = vector[i][j];
2948}
2949
2950void
2951bna_rx_dim_update(struct bna_ccb *ccb)
2952{
2953 struct bna *bna = ccb->cq->rx->bna;
2954 u32 load, bias;
2955 u32 pkt_rt, small_rt, large_rt;
2956 u8 coalescing_timeo;
2957
2958 if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2959 (ccb->pkt_rate.large_pkt_cnt == 0))
2960 return;
2961
2962 /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2963
2964 small_rt = ccb->pkt_rate.small_pkt_cnt;
2965 large_rt = ccb->pkt_rate.large_pkt_cnt;
2966
2967 pkt_rt = small_rt + large_rt;
2968
2969 if (pkt_rt < BNA_PKT_RATE_10K)
2970 load = BNA_LOAD_T_LOW_4;
2971 else if (pkt_rt < BNA_PKT_RATE_20K)
2972 load = BNA_LOAD_T_LOW_3;
2973 else if (pkt_rt < BNA_PKT_RATE_30K)
2974 load = BNA_LOAD_T_LOW_2;
2975 else if (pkt_rt < BNA_PKT_RATE_40K)
2976 load = BNA_LOAD_T_LOW_1;
2977 else if (pkt_rt < BNA_PKT_RATE_50K)
2978 load = BNA_LOAD_T_HIGH_1;
2979 else if (pkt_rt < BNA_PKT_RATE_60K)
2980 load = BNA_LOAD_T_HIGH_2;
2981 else if (pkt_rt < BNA_PKT_RATE_80K)
2982 load = BNA_LOAD_T_HIGH_3;
2983 else
2984 load = BNA_LOAD_T_HIGH_4;
2985
2986 if (small_rt > (large_rt << 1))
2987 bias = 0;
2988 else
2989 bias = 1;
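	/* bias 0: small packets dominate (more than twice the large
	 * packet rate); bias 1 otherwise. load and bias index the
	 * preconfigured dim_vector[][] table.
	 */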
2990
2991 ccb->pkt_rate.small_pkt_cnt = 0;
2992 ccb->pkt_rate.large_pkt_cnt = 0;
2993
2994 coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2995 ccb->rx_coalescing_timeo = coalescing_timeo;
2996
2997 /* Set it to IB */
2998 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2999}
3000
3001const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
3002 {12, 12},
3003 {6, 10},
3004 {5, 10},
3005 {4, 8},
3006 {3, 6},
3007 {3, 6},
3008 {2, 4},
3009 {1, 2},
3010};
3011
3012/* TX */
3013
3014#define call_tx_stop_cbfn(tx) \
3015do { \
3016 if ((tx)->stop_cbfn) { \
3017 void (*cbfn)(void *, struct bna_tx *); \
3018 void *cbarg; \
3019 cbfn = (tx)->stop_cbfn; \
3020 cbarg = (tx)->stop_cbarg; \
3021 (tx)->stop_cbfn = NULL; \
3022 (tx)->stop_cbarg = NULL; \
3023 cbfn(cbarg, (tx)); \
3024 } \
3025} while (0)
3026
3027#define call_tx_prio_change_cbfn(tx) \
3028do { \
3029 if ((tx)->prio_change_cbfn) { \
3030 void (*cbfn)(struct bnad *, struct bna_tx *); \
3031 cbfn = (tx)->prio_change_cbfn; \
3032 (tx)->prio_change_cbfn = NULL; \
3033 cbfn((tx)->bna->bnad, (tx)); \
3034 } \
3035} while (0)
3036
3037static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
3038static void bna_bfi_tx_enet_start(struct bna_tx *tx);
3039static void bna_tx_enet_stop(struct bna_tx *tx);
3040
3041enum bna_tx_event {
3042 TX_E_START = 1,
3043 TX_E_STOP = 2,
3044 TX_E_FAIL = 3,
3045 TX_E_STARTED = 4,
3046 TX_E_STOPPED = 5,
3047 TX_E_PRIO_CHANGE = 6,
3048 TX_E_CLEANUP_DONE = 7,
3049 TX_E_BW_UPDATE = 8,
3050};
3051
3052bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
3053bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
3054bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
3055bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
3056bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
3057 enum bna_tx_event);
3058bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3059 enum bna_tx_event);
3060bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
3061 enum bna_tx_event);
3062bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
3063bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
3064 enum bna_tx_event);
3065
3066static void
3067bna_tx_sm_stopped_entry(struct bna_tx *tx)
3068{
3069 call_tx_stop_cbfn(tx);
3070}
3071
3072static void
3073bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3074{
3075 switch (event) {
3076 case TX_E_START:
3077 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3078 break;
3079
3080 case TX_E_STOP:
3081 call_tx_stop_cbfn(tx);
3082 break;
3083
3084 case TX_E_FAIL:
3085 /* No-op */
3086 break;
3087
3088 case TX_E_PRIO_CHANGE:
3089 call_tx_prio_change_cbfn(tx);
3090 break;
3091
3092 case TX_E_BW_UPDATE:
3093 /* No-op */
3094 break;
3095
3096 default:
3097 bfa_sm_fault(event);
3098 }
3099}
3100
3101static void
3102bna_tx_sm_start_wait_entry(struct bna_tx *tx)
3103{
3104 bna_bfi_tx_enet_start(tx);
3105}
3106
3107static void
3108bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
3109{
3110 switch (event) {
3111 case TX_E_STOP:
3112 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3113 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3114 break;
3115
3116 case TX_E_FAIL:
3117 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3118 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3119 break;
3120
3121 case TX_E_STARTED:
3122 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
3123 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
3124 BNA_TX_F_BW_UPDATED);
3125 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3126 } else
3127 bfa_fsm_set_state(tx, bna_tx_sm_started);
3128 break;
3129
3130 case TX_E_PRIO_CHANGE:
3131 tx->flags |= BNA_TX_F_PRIO_CHANGED;
3132 break;
3133
3134 case TX_E_BW_UPDATE:
3135 tx->flags |= BNA_TX_F_BW_UPDATED;
3136 break;
3137
3138 default:
3139 bfa_sm_fault(event);
3140 }
3141}
3142
3143static void
3144bna_tx_sm_started_entry(struct bna_tx *tx)
3145{
3146 struct bna_txq *txq;
3147 struct list_head *qe;
3148 int is_regular = (tx->type == BNA_TX_T_REGULAR);
3149
3150 list_for_each(qe, &tx->txq_q) {
3151 txq = (struct bna_txq *)qe;
3152 txq->tcb->priority = txq->priority;
3153 /* Start IB */
3154 bna_ib_start(tx->bna, &txq->ib, is_regular);
3155 }
3156 tx->tx_resume_cbfn(tx->bna->bnad, tx);
3157}
3158
3159static void
3160bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3161{
3162 switch (event) {
3163 case TX_E_STOP:
3164 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3165 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3166 bna_tx_enet_stop(tx);
3167 break;
3168
3169 case TX_E_FAIL:
3170 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3171 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3172 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3173 break;
3174
3175 case TX_E_PRIO_CHANGE:
3176 case TX_E_BW_UPDATE:
3177 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3178 break;
3179
3180 default:
3181 bfa_sm_fault(event);
3182 }
3183}
3184
3185static void
3186bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3187{
3188}
3189
3190static void
3191bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3192{
3193 switch (event) {
3194 case TX_E_FAIL:
3195 case TX_E_STOPPED:
3196 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3197 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3198 break;
3199
3200 case TX_E_STARTED:
3201 /**
3202 * We are here due to start_wait -> stop_wait transition on
3203 * TX_E_STOP event
3204 */
3205 bna_tx_enet_stop(tx);
3206 break;
3207
3208 case TX_E_PRIO_CHANGE:
3209 case TX_E_BW_UPDATE:
3210 /* No-op */
3211 break;
3212
3213 default:
3214 bfa_sm_fault(event);
3215 }
3216}
3217
3218static void
3219bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3220{
3221}
3222
3223static void
3224bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3225{
3226 switch (event) {
3227 case TX_E_FAIL:
3228 case TX_E_PRIO_CHANGE:
3229 case TX_E_BW_UPDATE:
3230 /* No-op */
3231 break;
3232
3233 case TX_E_CLEANUP_DONE:
3234 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3235 break;
3236
3237 default:
3238 bfa_sm_fault(event);
3239 }
3240}
3241
3242static void
3243bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3244{
3245 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3246 bna_tx_enet_stop(tx);
3247}
3248
3249static void
3250bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3251{
3252 switch (event) {
3253 case TX_E_STOP:
3254 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3255 break;
3256
3257 case TX_E_FAIL:
3258 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3259 call_tx_prio_change_cbfn(tx);
3260 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3261 break;
3262
3263 case TX_E_STOPPED:
3264 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3265 break;
3266
3267 case TX_E_PRIO_CHANGE:
3268 case TX_E_BW_UPDATE:
3269 /* No-op */
3270 break;
3271
3272 default:
3273 bfa_sm_fault(event);
3274 }
3275}
3276
3277static void
3278bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3279{
3280 call_tx_prio_change_cbfn(tx);
3281 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3282}
3283
3284static void
3285bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3286{
3287 switch (event) {
3288 case TX_E_STOP:
3289 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3290 break;
3291
3292 case TX_E_FAIL:
3293 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3294 break;
3295
3296 case TX_E_PRIO_CHANGE:
3297 case TX_E_BW_UPDATE:
3298 /* No-op */
3299 break;
3300
3301 case TX_E_CLEANUP_DONE:
3302 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3303 break;
3304
3305 default:
3306 bfa_sm_fault(event);
3307 }
3308}
3309
3310static void
3311bna_tx_sm_failed_entry(struct bna_tx *tx)
3312{
3313}
3314
3315static void
3316bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3317{
3318 switch (event) {
3319 case TX_E_START:
3320 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3321 break;
3322
3323 case TX_E_STOP:
3324 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3325 break;
3326
3327 case TX_E_FAIL:
3328 /* No-op */
3329 break;
3330
3331 case TX_E_CLEANUP_DONE:
3332 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3333 break;
3334
3335 default:
3336 bfa_sm_fault(event);
3337 }
3338}
3339
3340static void
3341bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3342{
3343}
3344
3345static void
3346bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3347{
3348 switch (event) {
3349 case TX_E_STOP:
3350 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3351 break;
3352
3353 case TX_E_FAIL:
3354 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3355 break;
3356
3357 case TX_E_CLEANUP_DONE:
3358 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3359 break;
3360
3361 case TX_E_BW_UPDATE:
3362 /* No-op */
3363 break;
3364
3365 default:
3366 bfa_sm_fault(event);
3367 }
3368}
3369
3370static void
3371bna_bfi_tx_enet_start(struct bna_tx *tx)
3372{
3373 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3374 struct bna_txq *txq = NULL;
3375 struct list_head *qe;
3376 int i;
3377
3378 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3379 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3380 cfg_req->mh.num_entries = htons(
3381 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3382
3383 cfg_req->num_queues = tx->num_txq;
3384 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3385 i < tx->num_txq;
3386 i++, qe = bfa_q_next(qe)) {
3387 txq = (struct bna_txq *)qe;
3388
3389 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3390 cfg_req->q_cfg[i].q.priority = txq->priority;
3391
3392 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3393 txq->ib.ib_seg_host_addr.lsb;
3394 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3395 txq->ib.ib_seg_host_addr.msb;
3396 cfg_req->q_cfg[i].ib.intr.msix_index =
3397 htons((u16)txq->ib.intr_vector);
3398 }
3399
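	/* All TxQs of a Tx are created with identical IB settings, so
	 * the values taken from the last txq visited above apply to the
	 * shared ib_cfg fields.
	 */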
3400 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3401 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3402 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3403 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3404 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3405 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3406 cfg_req->ib_cfg.coalescing_timeout =
3407 htonl((u32)txq->ib.coalescing_timeo);
3408 cfg_req->ib_cfg.inter_pkt_timeout =
3409 htonl((u32)txq->ib.interpkt_timeo);
3410 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3411
3412 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3413 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
6654cf60 3414 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3415 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3416
3417 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3418 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3419 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3420}
3421
3422static void
3423bna_bfi_tx_enet_stop(struct bna_tx *tx)
3424{
3425 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3426
3427 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3428 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3429 req->mh.num_entries = htons(
3430 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3431 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3432 &req->mh);
3433 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3434}
3435
3436static void
3437bna_tx_enet_stop(struct bna_tx *tx)
3438{
3439 struct bna_txq *txq;
3440 struct list_head *qe;
3441
3442 /* Stop IB */
3443 list_for_each(qe, &tx->txq_q) {
3444 txq = (struct bna_txq *)qe;
3445 bna_ib_stop(tx->bna, &txq->ib);
3446 }
3447
3448 bna_bfi_tx_enet_stop(tx);
3449}
3450
3451static void
3452bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3453 struct bna_mem_descr *qpt_mem,
3454 struct bna_mem_descr *swqpt_mem,
3455 struct bna_mem_descr *page_mem)
3456{
3457 u8 *kva;
3458 u64 dma;
3459 struct bna_dma_addr bna_dma;
3460 int i;
3461
3462 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3463 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3464 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3465 txq->qpt.page_count = page_count;
3466 txq->qpt.page_size = page_size;
3467
3468 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3469 txq->tcb->sw_q = page_mem->kva;
3470
3471 kva = page_mem->kva;
3472 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3473
3474 for (i = 0; i < page_count; i++) {
3475 txq->tcb->sw_qpt[i] = kva;
3476 kva += PAGE_SIZE;
f3bd5173 3477
5216562a 3478 BNA_SET_DMA_ADDR(dma, &bna_dma);
f3bd5173 3479 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
5216562a 3480 bna_dma.lsb;
f3bd5173 3481 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3482 bna_dma.msb;
3483 dma += PAGE_SIZE;
3484 }
3485}
3486
3487static struct bna_tx *
3488bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3489{
3490 struct list_head *qe = NULL;
3491 struct bna_tx *tx = NULL;
3492
3493 if (list_empty(&tx_mod->tx_free_q))
3494 return NULL;
3495 if (type == BNA_TX_T_REGULAR) {
3496 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3497 } else {
3498 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3499 }
3500 tx = (struct bna_tx *)qe;
3501 bfa_q_qe_init(&tx->qe);
3502 tx->type = type;
3503
3504 return tx;
3505}
3506
3507static void
3508bna_tx_free(struct bna_tx *tx)
3509{
3510 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3511 struct bna_txq *txq;
3512 struct list_head *prev_qe;
3513 struct list_head *qe;
3514
3515 while (!list_empty(&tx->txq_q)) {
3516 bfa_q_deq(&tx->txq_q, &txq);
3517 bfa_q_qe_init(&txq->qe);
3518 txq->tcb = NULL;
3519 txq->tx = NULL;
3520 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3521 }
3522
3523 list_for_each(qe, &tx_mod->tx_active_q) {
3524 if (qe == &tx->qe) {
3525 list_del(&tx->qe);
3526 bfa_q_qe_init(&tx->qe);
3527 break;
3528 }
3529 }
3530
3531 tx->bna = NULL;
3532 tx->priv = NULL;
3533
3534 prev_qe = NULL;
3535 list_for_each(qe, &tx_mod->tx_free_q) {
3536 if (((struct bna_tx *)qe)->rid < tx->rid)
3537 prev_qe = qe;
3538 else {
3539 break;
3540 }
3541 }
3542
3543 if (prev_qe == NULL) {
3544 /* This is the first entry */
3545 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3546 } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3547 /* This is the last entry */
3548 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3549 } else {
3550 /* Somewhere in the middle */
3551 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3552 bfa_q_prev(&tx->qe) = prev_qe;
3553 bfa_q_next(prev_qe) = &tx->qe;
3554 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3555 }
3556}
3557
3558static void
3559bna_tx_start(struct bna_tx *tx)
3560{
3561 tx->flags |= BNA_TX_F_ENET_STARTED;
3562 if (tx->flags & BNA_TX_F_ENABLED)
3563 bfa_fsm_send_event(tx, TX_E_START);
3564}
3565
3566static void
3567bna_tx_stop(struct bna_tx *tx)
3568{
3569 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3570 tx->stop_cbarg = &tx->bna->tx_mod;
3571
3572 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3573 bfa_fsm_send_event(tx, TX_E_STOP);
3574}
3575
3576static void
3577bna_tx_fail(struct bna_tx *tx)
3578{
3579 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3580 bfa_fsm_send_event(tx, TX_E_FAIL);
3581}
3582
3583void
3584bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3585{
3586 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3587 struct bna_txq *txq = NULL;
3588 struct list_head *qe;
3589 int i;
3590
3591 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3592 sizeof(struct bfi_enet_tx_cfg_rsp));
3593
3594 tx->hw_id = cfg_rsp->hw_id;
3595
3596 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3597 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3598 txq = (struct bna_txq *)qe;
3599
3600 /* Setup doorbells */
3601 txq->tcb->i_dbell->doorbell_addr =
3602 tx->bna->pcidev.pci_bar_kva
3603 + ntohl(cfg_rsp->q_handles[i].i_dbell);
3604 txq->tcb->q_dbell =
3605 tx->bna->pcidev.pci_bar_kva
3606 + ntohl(cfg_rsp->q_handles[i].q_dbell);
3607 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3608
3609 /* Initialize producer/consumer indexes */
3610 (*txq->tcb->hw_consumer_index) = 0;
3611 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3612 }
3613
3614 bfa_fsm_send_event(tx, TX_E_STARTED);
3615}
3616
3617void
3618bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3619{
3620 bfa_fsm_send_event(tx, TX_E_STOPPED);
3621}
3622
3623void
3624bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3625{
3626 struct bna_tx *tx;
3627 struct list_head *qe;
3628
3629 list_for_each(qe, &tx_mod->tx_active_q) {
3630 tx = (struct bna_tx *)qe;
3631 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3632 }
3633}
3634
3635void
3636bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3637{
3638 u32 q_size;
3639 u32 page_count;
3640 struct bna_mem_info *mem_info;
3641
3642 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3643 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3644 mem_info->mem_type = BNA_MEM_T_KVA;
3645 mem_info->len = sizeof(struct bna_tcb);
3646 mem_info->num = num_txq;
3647
3648 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3649 q_size = ALIGN(q_size, PAGE_SIZE);
3650 page_count = q_size >> PAGE_SHIFT;
3651
3652 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3653 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3654 mem_info->mem_type = BNA_MEM_T_DMA;
3655 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3656 mem_info->num = num_txq;
3657
3658 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3659 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3660 mem_info->mem_type = BNA_MEM_T_KVA;
3661 mem_info->len = page_count * sizeof(void *);
3662 mem_info->num = num_txq;
3663
3664 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3665 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3666 mem_info->mem_type = BNA_MEM_T_DMA;
3667 mem_info->len = PAGE_SIZE * page_count;
3668 mem_info->num = num_txq;
3669
3670 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3671 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3672 mem_info->mem_type = BNA_MEM_T_DMA;
3673 mem_info->len = BFI_IBIDX_SIZE;
3674 mem_info->num = num_txq;
3675
3676 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3677 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3678 BNA_INTR_T_MSIX;
3679 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3680}
3681
3682struct bna_tx *
3683bna_tx_create(struct bna *bna, struct bnad *bnad,
3684 struct bna_tx_config *tx_cfg,
d91d25d5 3685 const struct bna_tx_event_cbfn *tx_cbfn,
3686 struct bna_res_info *res_info, void *priv)
3687{
3688 struct bna_intr_info *intr_info;
3689 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3690 struct bna_tx *tx;
3691 struct bna_txq *txq;
3692 struct list_head *qe;
3693 int page_count;
3694 int i;
3695
3696 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3697 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3698 PAGE_SIZE;
3699
3700 /**
3701 * Get resources
3702 */
3703
3704 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3705 return NULL;
3706
3707 /* Tx */
3708
3709 tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3710 if (!tx)
3711 return NULL;
3712 tx->bna = bna;
3713 tx->priv = priv;
3714
3715 /* TxQs */
3716
3717 INIT_LIST_HEAD(&tx->txq_q);
3718 for (i = 0; i < tx_cfg->num_txq; i++) {
3719 if (list_empty(&tx_mod->txq_free_q))
3720 goto err_return;
3721
3722 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3723 bfa_q_qe_init(&txq->qe);
3724 list_add_tail(&txq->qe, &tx->txq_q);
3725 txq->tx = tx;
3726 }
3727
3728 /*
3729 * Initialize
3730 */
3731
3732 /* Tx */
3733
3734 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3735 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3736 /* Following callbacks are mandatory */
3737 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3738 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3739 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3740
3741 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3742
3743 tx->num_txq = tx_cfg->num_txq;
3744
3745 tx->flags = 0;
3746 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3747 switch (tx->type) {
3748 case BNA_TX_T_REGULAR:
3749 if (!(tx->bna->tx_mod.flags &
3750 BNA_TX_MOD_F_ENET_LOOPBACK))
3751 tx->flags |= BNA_TX_F_ENET_STARTED;
3752 break;
3753 case BNA_TX_T_LOOPBACK:
3754 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3755 tx->flags |= BNA_TX_F_ENET_STARTED;
3756 break;
3757 }
3758 }
3759
3760 /* TxQ */
3761
3762 i = 0;
3763 list_for_each(qe, &tx->txq_q) {
3764 txq = (struct bna_txq *)qe;
3765 txq->tcb = (struct bna_tcb *)
3766 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3767 txq->tx_packets = 0;
3768 txq->tx_bytes = 0;
3769
3770 /* IB */
3771 txq->ib.ib_seg_host_addr.lsb =
3772 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3773 txq->ib.ib_seg_host_addr.msb =
3774 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3775 txq->ib.ib_seg_host_addr_kva =
3776 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3777 txq->ib.intr_type = intr_info->intr_type;
3778 txq->ib.intr_vector = (intr_info->num == 1) ?
3779 intr_info->idl[0].vector :
3780 intr_info->idl[i].vector;
3781 if (intr_info->intr_type == BNA_INTR_T_INTX)
3782 txq->ib.intr_vector = (1 << txq->ib.intr_vector);
3783 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
d3f92aec 3784 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3785 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3786
3787 /* TCB */
3788
3789 txq->tcb->q_depth = tx_cfg->txq_depth;
3790 txq->tcb->unmap_q = (void *)
3791 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3792 txq->tcb->hw_consumer_index =
3793 (u32 *)txq->ib.ib_seg_host_addr_kva;
3794 txq->tcb->i_dbell = &txq->ib.door_bell;
3795 txq->tcb->intr_type = txq->ib.intr_type;
3796 txq->tcb->intr_vector = txq->ib.intr_vector;
3797 txq->tcb->txq = txq;
3798 txq->tcb->bnad = bnad;
3799 txq->tcb->id = i;
3800
3801 /* QPT, SWQPT, Pages */
5216562a 3802 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3803 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3804 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3805 &res_info[BNA_TX_RES_MEM_T_PAGE].
5216562a 3806 res_u.mem_info.mdl[i]);
3807
3808 /* Callback to bnad for setting up TCB */
3809 if (tx->tcb_setup_cbfn)
3810 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3811
3812 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3813 txq->priority = txq->tcb->id;
3814 else
3815 txq->priority = tx_mod->default_prio;
3816
3817 i++;
3818 }
3819
3820 tx->txf_vlan_id = 0;
3821
3822 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3823
3824 tx_mod->rid_mask |= (1 << tx->rid);
3825
3826 return tx;
3827
3828err_return:
3829 bna_tx_free(tx);
3830 return NULL;
3831}
3832
3833void
3834bna_tx_destroy(struct bna_tx *tx)
3835{
3836 struct bna_txq *txq;
3837 struct list_head *qe;
3838
3839 list_for_each(qe, &tx->txq_q) {
3840 txq = (struct bna_txq *)qe;
3841 if (tx->tcb_destroy_cbfn)
3842 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3843 }
3844
3845 tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
3846 bna_tx_free(tx);
3847}
3848
3849void
3850bna_tx_enable(struct bna_tx *tx)
3851{
3852 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3853 return;
3854
3855 tx->flags |= BNA_TX_F_ENABLED;
3856
3857 if (tx->flags & BNA_TX_F_ENET_STARTED)
3858 bfa_fsm_send_event(tx, TX_E_START);
3859}
3860
3861void
3862bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3863 void (*cbfn)(void *, struct bna_tx *))
3864{
3865 if (type == BNA_SOFT_CLEANUP) {
3866 (*cbfn)(tx->bna->bnad, tx);
3867 return;
3868 }
3869
3870 tx->stop_cbfn = cbfn;
3871 tx->stop_cbarg = tx->bna->bnad;
3872
3873 tx->flags &= ~BNA_TX_F_ENABLED;
3874
3875 bfa_fsm_send_event(tx, TX_E_STOP);
3876}
3877
3878void
3879bna_tx_cleanup_complete(struct bna_tx *tx)
3880{
3881 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3882}
3883
3884static void
3885bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3886{
3887 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3888
3889 bfa_wc_down(&tx_mod->tx_stop_wc);
3890}
3891
3892static void
3893bna_tx_mod_cb_tx_stopped_all(void *arg)
3894{
3895 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3896
3897 if (tx_mod->stop_cbfn)
3898 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3899 tx_mod->stop_cbfn = NULL;
3900}
3901
3902void
3903bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3904 struct bna_res_info *res_info)
3905{
3906 int i;
3907
3908 tx_mod->bna = bna;
3909 tx_mod->flags = 0;
3910
3911 tx_mod->tx = (struct bna_tx *)
3912 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3913 tx_mod->txq = (struct bna_txq *)
3914 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3915
3916 INIT_LIST_HEAD(&tx_mod->tx_free_q);
3917 INIT_LIST_HEAD(&tx_mod->tx_active_q);
3918
3919 INIT_LIST_HEAD(&tx_mod->txq_free_q);
3920
3921 for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3922 tx_mod->tx[i].rid = i;
3923 bfa_q_qe_init(&tx_mod->tx[i].qe);
3924 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3925 bfa_q_qe_init(&tx_mod->txq[i].qe);
3926 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3927 }
3928
3929 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3930 tx_mod->default_prio = 0;
3931 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3932 tx_mod->iscsi_prio = -1;
3933}
3934
3935void
3936bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3937{
3938 struct list_head *qe;
3939 int i;
3940
3941 i = 0;
3942 list_for_each(qe, &tx_mod->tx_free_q)
3943 i++;
3944
3945 i = 0;
3946 list_for_each(qe, &tx_mod->txq_free_q)
3947 i++;
3948
3949 tx_mod->bna = NULL;
3950}
3951
3952void
3953bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3954{
3955 struct bna_tx *tx;
3956 struct list_head *qe;
3957
3958 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3959 if (type == BNA_TX_T_LOOPBACK)
3960 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3961
3962 list_for_each(qe, &tx_mod->tx_active_q) {
3963 tx = (struct bna_tx *)qe;
3964 if (tx->type == type)
3965 bna_tx_start(tx);
3966 }
3967}
3968
3969void
3970bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3971{
3972 struct bna_tx *tx;
3973 struct list_head *qe;
3974
3975 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3976 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3977
3978 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3979
3980 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3981
3982 list_for_each(qe, &tx_mod->tx_active_q) {
3983 tx = (struct bna_tx *)qe;
3984 if (tx->type == type) {
3985 bfa_wc_up(&tx_mod->tx_stop_wc);
3986 bna_tx_stop(tx);
3987 }
3988 }
3989
3990 bfa_wc_wait(&tx_mod->tx_stop_wc);
3991}
3992
3993void
3994bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3995{
3996 struct bna_tx *tx;
3997 struct list_head *qe;
3998
3999 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
4000 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
4001
4002 list_for_each(qe, &tx_mod->tx_active_q) {
4003 tx = (struct bna_tx *)qe;
4004 bna_tx_fail(tx);
4005 }
4006}
4007
4008void
4009bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
4010{
4011 struct bna_txq *txq;
4012 struct list_head *qe;
4013
4014 list_for_each(qe, &tx->txq_q) {
4015 txq = (struct bna_txq *)qe;
4016 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
4017 }
4018}