/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfi.h"

/* IB */
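/*
 * Cache the coalescing timeout and precompute the doorbell value used
 * to acknowledge IB interrupts with that timeout.
 */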
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

/* RXF */

#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)

static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);

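/*
 * RXF state machine states; bfa_fsm_state_decl() expands to the
 * prototypes of each state's entry function and event handler.
 */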
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

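/*
 * BFI request builders: each fills a request in the rxf command area,
 * stamps the message header, and posts it to the firmware msgq.
 */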
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

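/*
 * Sync one 32-word block of the VLAN filter table to firmware; when
 * filtering is disabled all bits are set so every VLAN is accepted.
 */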
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}

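/*
 * Multicast CAM handles are shared: a firmware handle is refcounted
 * across the MACs that use it and released on the last delete.
 */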
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}

static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}

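/* Apply one pending mcast update; nonzero means a request was posted */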
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}

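/* Pick the lowest pending VLAN block (lowest set bit) and sync it */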
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}

static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}

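/*
 * Apply pending config in a fixed order, posting one firmware request
 * per call; returns 1 while updates remain, 0 when fully applied.
 */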
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}

static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}

static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}

void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

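/*
 * Initialize the RXF from the Rx configuration: reset CAM bookkeeping,
 * build the RIT, seed RSS and VLAN state, and enter the stopped state.
 */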
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

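/*
 * Driver-facing filter entry points: each queues the requested change
 * on the rxf and raises RXF_E_CONFIG; cbfn is invoked once firmware
 * has acknowledged the update.
 */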
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	return BNA_CB_UCAST_CAM_FULL;
}

enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_mcast_delall(struct bna_rx *rx,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
		return;
	}

	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);
}

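/* VLAN add/del update the shadow table and schedule a block resync */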
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

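/* Apply one pending ucast change: deletes first, then the default set,
 * then additional entries.
 */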
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
				mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
				mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}

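/*
 * The enable/disable helpers below only flip pending rxmode bits; a
 * return of 1 tells the caller a hardware update still has to be applied.
 */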
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}

/* RX */

#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)

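/*
 * Copy a queue's page table address and first-page pointer into a BFI
 * queue descriptor, converting page count and size to wire byte order.
 */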
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)

static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

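/*
 * Rx state machine; handler and entry prototypes come from
 * bfa_fsm_state_decl(), as for the RXF state machine above.
 */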
bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
	}
}

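/*
 * Entering started: kick the IB on every path's CQ, then tell the
 * ethport that Rx traffic can flow.
 */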
7f4341fe | 1652 | static void |
f3bd5173 RM |
1653 | bna_rx_sm_started_entry(struct bna_rx *rx) |
1654 | { | |
1655 | struct bna_rxp *rxp; | |
1656 | struct list_head *qe_rxp; | |
1657 | int is_regular = (rx->type == BNA_RX_T_REGULAR); | |
1658 | ||
1659 | /* Start IB */ | |
1660 | list_for_each(qe_rxp, &rx->rxp_q) { | |
1661 | rxp = (struct bna_rxp *)qe_rxp; | |
1662 | bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); | |
1663 | } | |
1664 | ||
1665 | bna_ethport_cb_rx_started(&rx->bna->ethport); | |
1666 | } | |
1667 | ||
1668 | static void | |
1669 | bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) | |
1670 | { | |
1671 | switch (event) { | |
1672 | case RX_E_STOP: | |
1673 | bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); | |
1674 | bna_ethport_cb_rx_stopped(&rx->bna->ethport); | |
1675 | bna_rxf_stop(&rx->rxf); | |
1676 | break; | |
1677 | ||
1678 | case RX_E_FAIL: | |
1679 | bfa_fsm_set_state(rx, bna_rx_sm_failed); | |
1680 | bna_ethport_cb_rx_stopped(&rx->bna->ethport); | |
1681 | bna_rxf_fail(&rx->rxf); | |
1682 | call_rx_stall_cbfn(rx); | |
1683 | rx->rx_cleanup_cbfn(rx->bna->bnad, rx); |
1684 | break; | |
1685 | ||
1686 | default: | |
1687 | bfa_sm_fault(event); | |
1688 | break; | |
1689 | } | |
1690 | } | |
1691 | ||
1692 | static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, | |
1693 | enum bna_rx_event event) | |
1694 | { | |
1695 | switch (event) { | |
1696 | case RX_E_STOP: | |
1697 | bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); | |
1698 | break; | |
1699 | ||
1700 | case RX_E_FAIL: | |
1701 | bfa_fsm_set_state(rx, bna_rx_sm_failed); | |
1702 | bna_rxf_fail(&rx->rxf); | |
1703 | call_rx_stall_cbfn(rx); | |
1704 | rx->rx_cleanup_cbfn(rx->bna->bnad, rx); |
1705 | break; | |
1706 | ||
1707 | case RX_E_RXF_STARTED: | |
1708 | bfa_fsm_set_state(rx, bna_rx_sm_started); | |
1709 | break; | |
1710 | ||
1711 | default: | |
1712 | bfa_sm_fault(event); | |
1713 | break; | |
1714 | } | |
1715 | } | |
1716 | ||
1717 | static void | |
1718 | bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) |
1719 | { | |
1720 | } | |
1721 | ||
1722 | static void | |
1723 | bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) |
1724 | { | |
1725 | switch (event) { | |
1726 | case RX_E_FAIL: | |
1727 | case RX_E_RXF_STOPPED: | |
1728 | /* No-op */ | |
1729 | break; | |
1730 | ||
1731 | case RX_E_CLEANUP_DONE: | |
1732 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); | |
1733 | break; | |
1734 | ||
1735 | default: | |
1736 | bfa_sm_fault(event); | |
1737 | break; | |
1738 | } | |
1739 | } | |
1740 | ||
1741 | static void | |
1742 | bna_rx_sm_failed_entry(struct bna_rx *rx) | |
1743 | { | |
1744 | } | |
1745 | ||
1746 | static void | |
1747 | bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event) | |
1748 | { | |
1749 | switch (event) { | |
1750 | case RX_E_START: | |
1751 | bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait); | |
1752 | break; | |
1753 | ||
1754 | case RX_E_STOP: | |
1755 | bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); | |
1756 | break; | |
1757 | ||
1758 | case RX_E_FAIL: | |
1759 | case RX_E_RXF_STARTED: | |
1760 | case RX_E_RXF_STOPPED: | |
1761 | /* No-op */ | |
1762 | break; | |
1763 | ||
1764 | case RX_E_CLEANUP_DONE: | |
1765 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); | |
1766 | break; | |
1767 | ||
1768 | default: | |
1769 | bfa_sm_fault(event); | |
1770 | break; | |
1771 | } | |
1772 | } | |
1773 | static void | |
1774 | bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx) | |
1775 | { | |
1776 | } | |
1777 | ||
1778 | static void | |
1779 | bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event) | |
1780 | { | |
1781 | switch (event) { | |
1782 | case RX_E_STOP: | |
1783 | bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); | |
1784 | break; | |
1785 | ||
1786 | case RX_E_FAIL: | |
1787 | bfa_fsm_set_state(rx, bna_rx_sm_failed); | |
1788 | break; | |
1789 | ||
1790 | case RX_E_CLEANUP_DONE: | |
1791 | bfa_fsm_set_state(rx, bna_rx_sm_start_wait); | |
1792 | break; | |
1793 | ||
1794 | default: | |
1795 | bfa_sm_fault(event); | |
1796 | break; | |
1797 | } | |
1798 | } | |
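/*
 * Informal summary of the Rx FSM handlers above (the handlers are
 * authoritative):
 *
 *	start_wait -RX_E_STARTED-> rxf_start_wait -RX_E_RXF_STARTED-> started
 *	started -RX_E_STOP-> rxf_stop_wait -RX_E_RXF_STOPPED-> stop_wait
 *	stop_wait -RX_E_STOPPED-> cleanup_wait -RX_E_CLEANUP_DONE-> stopped
 *
 * RX_E_FAIL from a running state lands in failed; a later RX_E_START
 * goes through quiesce_wait, whose cleanup completion re-enters
 * start_wait. start_stop_wait covers a stop that arrives while the
 * enet-start request is still outstanding.
 */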
1799 | ||
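/*
 * Build a BFI_ENET_H2I_RX_CFG_SET_REQ for this Rx: one queue set per
 * path (CQ plus large/data RxQ, and a small/header RxQ for SLR/HDS
 * types), then the interrupt-block, HDS and VLAN-strip settings, and
 * post it to the firmware message queue.
 */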
1800 | static void | |
1801 | bna_bfi_rx_enet_start(struct bna_rx *rx) | |
1802 | { | |
1803 | struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; | |
1804 | struct bna_rxp *rxp = NULL; | |
1805 | struct bna_rxq *q0 = NULL, *q1 = NULL; | |
1806 | struct list_head *rxp_qe; | |
1807 | int i; | |
1808 | ||
1809 | bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, | |
1810 | BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); | |
1811 | cfg_req->mh.num_entries = htons( | |
1812 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); | |
1813 | ||
1814 | cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet); | |
1815 | cfg_req->num_queue_sets = rx->num_paths; |
1816 | for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); | |
1817 | i < rx->num_paths; | |
1818 | i++, rxp_qe = bfa_q_next(rxp_qe)) { | |
1819 | rxp = (struct bna_rxp *)rxp_qe; | |
1820 | ||
1821 | GET_RXQS(rxp, q0, q1); | |
1822 | switch (rxp->type) { | |
1823 | case BNA_RXP_SLR: | |
1824 | case BNA_RXP_HDS: | |
1825 | /* Small RxQ */ | |
1826 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, | |
1827 | &q1->qpt); | |
1828 | cfg_req->q_cfg[i].qs.rx_buffer_size = | |
1829 | htons((u16)q1->buffer_size); | |
1830 | /* Fall through */ | |
1831 | ||
1832 | case BNA_RXP_SINGLE: | |
1833 | /* Large/Single RxQ */ | |
1834 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, | |
1835 | &q0->qpt); | |
1836 | if (q0->multi_buffer) | |
1837 | /* Multi-buffer is enabled by allocating | |
1838 | * a new rx with a new set of resources; | |
1839 | * q0->buffer_size should be initialized to | |
1840 | * the fragment size. | |
1841 | */ | |
1842 | cfg_req->rx_cfg.multi_buffer = | |
1843 | BNA_STATUS_T_ENABLED; | |
1844 | else | |
1845 | q0->buffer_size = | |
1846 | bna_enet_mtu_get(&rx->bna->enet); | |
1847 | cfg_req->q_cfg[i].ql.rx_buffer_size = |
1848 | htons((u16)q0->buffer_size); | |
1849 | break; | |
1850 | ||
1851 | default: | |
1852 | BUG_ON(1); | |
1853 | } | |
1854 | ||
1855 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, | |
1856 | &rxp->cq.qpt); | |
1857 | ||
1858 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = | |
1859 | rxp->cq.ib.ib_seg_host_addr.lsb; | |
1860 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = | |
1861 | rxp->cq.ib.ib_seg_host_addr.msb; | |
1862 | cfg_req->q_cfg[i].ib.intr.msix_index = | |
1863 | htons((u16)rxp->cq.ib.intr_vector); | |
1864 | } | |
1865 | ||
1866 | cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; | |
1867 | cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; | |
1868 | cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; | |
1869 | cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; | |
1870 | cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) | |
1871 | ? BNA_STATUS_T_ENABLED : | |
1872 | BNA_STATUS_T_DISABLED; | |
1873 | cfg_req->ib_cfg.coalescing_timeout = | |
1874 | htonl((u32)rxp->cq.ib.coalescing_timeo); | |
1875 | cfg_req->ib_cfg.inter_pkt_timeout = | |
1876 | htonl((u32)rxp->cq.ib.interpkt_timeo); | |
1877 | cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; | |
1878 | ||
1879 | switch (rxp->type) { | |
1880 | case BNA_RXP_SLR: | |
1881 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; | |
1882 | break; | |
1883 | ||
1884 | case BNA_RXP_HDS: | |
1885 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; | |
1886 | cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; | |
1887 | cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; | |
1888 | cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; | |
1889 | break; | |
1890 | ||
1891 | case BNA_RXP_SINGLE: | |
1892 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; | |
1893 | break; | |
1894 | ||
1895 | default: | |
1896 | BUG_ON(1); | |
1897 | } | |
1898 | cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; | |
1899 | ||
1900 | bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, | |
1901 | sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); | |
1902 | bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); | |
1903 | } | |
1904 | ||
1905 | static void | |
1906 | bna_bfi_rx_enet_stop(struct bna_rx *rx) | |
1907 | { | |
1908 | struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; | |
1909 | ||
1910 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, | |
1911 | BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); | |
1912 | req->mh.num_entries = htons( | |
1913 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); | |
1914 | bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), | |
1915 | &req->mh); | |
1916 | bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); | |
1917 | } | |
1918 | ||
1919 | static void | |
1920 | bna_rx_enet_stop(struct bna_rx *rx) | |
1921 | { | |
1922 | struct bna_rxp *rxp; | |
1923 | struct list_head *qe_rxp; | |
1924 | ||
1925 | /* Stop IB */ | |
1926 | list_for_each(qe_rxp, &rx->rxp_q) { | |
1927 | rxp = (struct bna_rxp *)qe_rxp; | |
1928 | bna_ib_stop(rx->bna, &rxp->cq.ib); | |
1929 | } | |
1930 | ||
1931 | bna_bfi_rx_enet_stop(rx); | |
1932 | } | |
1933 | ||
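/*
 * A BNA_RXP_SINGLE path consumes one RxQ; SLR and HDS paths consume
 * two (large + small, or data + header), hence the 2 * num_paths RxQ
 * requirement in the non-SINGLE branch below.
 */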
1934 | static int | |
1935 | bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg) | |
1936 | { | |
1937 | if ((rx_mod->rx_free_count == 0) || | |
1938 | (rx_mod->rxp_free_count == 0) || | |
1939 | (rx_mod->rxq_free_count == 0)) | |
1940 | return 0; | |
1941 | ||
1942 | if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { | |
1943 | if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || | |
1944 | (rx_mod->rxq_free_count < rx_cfg->num_paths)) | |
1945 | return 0; | |
1946 | } else { | |
1947 | if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || | |
1948 | (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) | |
1949 | return 0; | |
1950 | } | |
1951 | ||
1952 | return 1; | |
1953 | } | |
1954 | ||
1955 | static struct bna_rxq * | |
1956 | bna_rxq_get(struct bna_rx_mod *rx_mod) | |
1957 | { | |
1958 | struct bna_rxq *rxq = NULL; | |
1959 | struct list_head *qe = NULL; | |
1960 | ||
1961 | bfa_q_deq(&rx_mod->rxq_free_q, &qe); | |
1962 | rx_mod->rxq_free_count--; | |
1963 | rxq = (struct bna_rxq *)qe; | |
1964 | bfa_q_qe_init(&rxq->qe); | |
1965 | ||
1966 | return rxq; | |
1967 | } | |
1968 | ||
1969 | static void | |
1970 | bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) | |
1971 | { | |
1972 | bfa_q_qe_init(&rxq->qe); | |
1973 | list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); | |
1974 | rx_mod->rxq_free_count++; | |
1975 | } | |
1976 | ||
1977 | static struct bna_rxp * | |
1978 | bna_rxp_get(struct bna_rx_mod *rx_mod) | |
1979 | { | |
1980 | struct list_head *qe = NULL; | |
1981 | struct bna_rxp *rxp = NULL; | |
1982 | ||
1983 | bfa_q_deq(&rx_mod->rxp_free_q, &qe); | |
1984 | rx_mod->rxp_free_count--; | |
1985 | rxp = (struct bna_rxp *)qe; | |
1986 | bfa_q_qe_init(&rxp->qe); | |
1987 | ||
1988 | return rxp; | |
1989 | } | |
1990 | ||
1991 | static void | |
1992 | bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) | |
1993 | { | |
1994 | bfa_q_qe_init(&rxp->qe); | |
1995 | list_add_tail(&rxp->qe, &rx_mod->rxp_free_q); | |
1996 | rx_mod->rxp_free_count++; | |
1997 | } | |
1998 | ||
1999 | static struct bna_rx * | |
2000 | bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type) | |
2001 | { | |
2002 | struct list_head *qe = NULL; | |
2003 | struct bna_rx *rx = NULL; | |
2004 | ||
2005 | if (type == BNA_RX_T_REGULAR) { | |
2006 | bfa_q_deq(&rx_mod->rx_free_q, &qe); | |
2007 | } else | |
2008 | bfa_q_deq_tail(&rx_mod->rx_free_q, &qe); | |
2009 | ||
2010 | rx_mod->rx_free_count--; | |
2011 | rx = (struct bna_rx *)qe; | |
2012 | bfa_q_qe_init(&rx->qe); | |
2013 | list_add_tail(&rx->qe, &rx_mod->rx_active_q); | |
2014 | rx->type = type; | |
2015 | ||
2016 | return rx; | |
2017 | } | |
2018 | ||
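/*
 * Return an rx to rx_free_q, keeping the list sorted by rid:
 * bna_rx_get() above dequeues regular Rx objects from the head and
 * loopback ones from the tail, so an ordered list keeps that split
 * deterministic across get/put cycles.
 */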
2019 | static void | |
2020 | bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx) | |
2021 | { | |
2022 | struct list_head *prev_qe = NULL; | |
2023 | struct list_head *qe; | |
2024 | ||
2025 | bfa_q_qe_init(&rx->qe); | |
2026 | ||
2027 | list_for_each(qe, &rx_mod->rx_free_q) { | |
2028 | if (((struct bna_rx *)qe)->rid < rx->rid) | |
2029 | prev_qe = qe; | |
2030 | else | |
2031 | break; | |
2032 | } | |
2033 | ||
2034 | if (prev_qe == NULL) { | |
2035 | /* This is the first entry */ | |
2036 | bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe); | |
2037 | } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) { | |
2038 | /* This is the last entry */ | |
2039 | list_add_tail(&rx->qe, &rx_mod->rx_free_q); | |
2040 | } else { | |
2041 | /* Somewhere in the middle */ | |
2042 | bfa_q_next(&rx->qe) = bfa_q_next(prev_qe); | |
2043 | bfa_q_prev(&rx->qe) = prev_qe; | |
2044 | bfa_q_next(prev_qe) = &rx->qe; | |
2045 | bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe; | |
2046 | } | |
2047 | ||
2048 | rx_mod->rx_free_count++; | |
2049 | } | |
2050 | ||
2051 | static void | |
2052 | bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0, | |
2053 | struct bna_rxq *q1) | |
2054 | { | |
2055 | switch (rxp->type) { | |
2056 | case BNA_RXP_SINGLE: | |
2057 | rxp->rxq.single.only = q0; | |
2058 | rxp->rxq.single.reserved = NULL; | |
2059 | break; | |
2060 | case BNA_RXP_SLR: | |
2061 | rxp->rxq.slr.large = q0; | |
2062 | rxp->rxq.slr.small = q1; | |
2063 | break; | |
2064 | case BNA_RXP_HDS: | |
2065 | rxp->rxq.hds.data = q0; | |
2066 | rxp->rxq.hds.hdr = q1; | |
2067 | break; | |
2068 | default: | |
2069 | break; | |
2070 | } | |
2071 | } | |
2072 | ||
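/*
 * Program the RxQ queue page table (QPT): the hardware-visible table
 * at kv_qpt_ptr receives the DMA address of every queue page, while
 * rcb->sw_qpt mirrors it with kernel virtual addresses for the
 * driver. page_mem must be one physically contiguous allocation; the
 * loop below slices it into PAGE_SIZE chunks.
 */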
2073 | static void | |
2074 | bna_rxq_qpt_setup(struct bna_rxq *rxq, | |
2075 | struct bna_rxp *rxp, | |
2076 | u32 page_count, | |
2077 | u32 page_size, | |
2078 | struct bna_mem_descr *qpt_mem, | |
2079 | struct bna_mem_descr *swqpt_mem, | |
2080 | struct bna_mem_descr *page_mem) | |
2081 | { | |
2082 | u8 *kva; | |
2083 | u64 dma; | |
2084 | struct bna_dma_addr bna_dma; | |
2085 | int i; |
2086 | ||
2087 | rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; | |
2088 | rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; | |
2089 | rxq->qpt.kv_qpt_ptr = qpt_mem->kva; | |
2090 | rxq->qpt.page_count = page_count; | |
2091 | rxq->qpt.page_size = page_size; | |
2092 | ||
2093 | rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; | |
2094 | rxq->rcb->sw_q = page_mem->kva; | |
2095 | ||
2096 | kva = page_mem->kva; | |
2097 | BNA_GET_DMA_ADDR(&page_mem->dma, dma); | |
2098 | |
2099 | for (i = 0; i < rxq->qpt.page_count; i++) { | |
2100 | rxq->rcb->sw_qpt[i] = kva; | |
2101 | kva += PAGE_SIZE; | |
2102 | ||
2103 | BNA_SET_DMA_ADDR(dma, &bna_dma); | |
2104 | ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = | |
2105 | bna_dma.lsb; | |
2106 | ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = | |
2107 | bna_dma.msb; | |
2108 | dma += PAGE_SIZE; | |
2109 | } |
2110 | } | |
2111 | ||
2112 | static void | |
2113 | bna_rxp_cqpt_setup(struct bna_rxp *rxp, | |
2114 | u32 page_count, | |
2115 | u32 page_size, | |
2116 | struct bna_mem_descr *qpt_mem, | |
2117 | struct bna_mem_descr *swqpt_mem, | |
2118 | struct bna_mem_descr *page_mem) | |
2119 | { | |
2120 | u8 *kva; | |
2121 | u64 dma; | |
2122 | struct bna_dma_addr bna_dma; | |
2123 | int i; |
2124 | ||
2125 | rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; | |
2126 | rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; | |
2127 | rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; | |
2128 | rxp->cq.qpt.page_count = page_count; | |
2129 | rxp->cq.qpt.page_size = page_size; | |
2130 | ||
2131 | rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; | |
2132 | rxp->cq.ccb->sw_q = page_mem->kva; | |
2133 | ||
2134 | kva = page_mem->kva; | |
2135 | BNA_GET_DMA_ADDR(&page_mem->dma, dma); | |
2136 | |
2137 | for (i = 0; i < rxp->cq.qpt.page_count; i++) { | |
2138 | rxp->cq.ccb->sw_qpt[i] = kva; | |
2139 | kva += PAGE_SIZE; | |
2140 | ||
2141 | BNA_SET_DMA_ADDR(dma, &bna_dma); | |
2142 | ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = | |
2143 | bna_dma.lsb; | |
2144 | ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = | |
2145 | bna_dma.msb; | |
2146 | dma += PAGE_SIZE; | |
2147 | } |
2148 | } | |
2149 | ||
2150 | static void | |
2151 | bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) | |
2152 | { | |
2153 | struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; | |
2154 | ||
2155 | bfa_wc_down(&rx_mod->rx_stop_wc); | |
2156 | } | |
2157 | ||
2158 | static void | |
2159 | bna_rx_mod_cb_rx_stopped_all(void *arg) | |
2160 | { | |
2161 | struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; | |
2162 | ||
2163 | if (rx_mod->stop_cbfn) | |
2164 | rx_mod->stop_cbfn(&rx_mod->bna->enet); | |
2165 | rx_mod->stop_cbfn = NULL; | |
2166 | } | |
2167 | ||
2168 | static void | |
2169 | bna_rx_start(struct bna_rx *rx) | |
2170 | { | |
2171 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; | |
2172 | if (rx->rx_flags & BNA_RX_F_ENABLED) | |
2173 | bfa_fsm_send_event(rx, RX_E_START); | |
2174 | } | |
2175 | ||
2176 | static void | |
2177 | bna_rx_stop(struct bna_rx *rx) | |
2178 | { | |
2179 | rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; | |
2180 | if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) | |
2181 | bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); | |
2182 | else { | |
2183 | rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; | |
2184 | rx->stop_cbarg = &rx->bna->rx_mod; | |
2185 | bfa_fsm_send_event(rx, RX_E_STOP); | |
2186 | } | |
2187 | } | |
2188 | ||
2189 | static void | |
2190 | bna_rx_fail(struct bna_rx *rx) | |
2191 | { | |
2192 | /* Indicate Enet is not enabled, and failed */ | |
2193 | rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; | |
2194 | bfa_fsm_send_event(rx, RX_E_FAIL); | |
2195 | } | |
2196 | ||
2197 | void | |
2198 | bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) | |
2199 | { | |
2200 | struct bna_rx *rx; | |
2201 | struct list_head *qe; | |
2202 | ||
2203 | rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; | |
2204 | if (type == BNA_RX_T_LOOPBACK) | |
2205 | rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; | |
2206 | ||
2207 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2208 | rx = (struct bna_rx *)qe; | |
2209 | if (rx->type == type) | |
2210 | bna_rx_start(rx); | |
2211 | } | |
2212 | } | |
2213 | ||
2214 | void | |
2215 | bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) | |
2216 | { | |
2217 | struct bna_rx *rx; | |
2218 | struct list_head *qe; | |
2219 | ||
2220 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; | |
2221 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; | |
2222 | ||
2223 | rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; | |
2224 | ||
2225 | bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); | |
2226 | ||
2227 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2228 | rx = (struct bna_rx *)qe; | |
2229 | if (rx->type == type) { | |
2230 | bfa_wc_up(&rx_mod->rx_stop_wc); | |
2231 | bna_rx_stop(rx); | |
2232 | } | |
2233 | } | |
2234 | ||
2235 | bfa_wc_wait(&rx_mod->rx_stop_wc); | |
2236 | } | |
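/*
 * bna_rx_mod_stop() above uses the BFA wait-counter to fan in the
 * per-Rx stop completions. A minimal sketch of the idiom, assuming
 * the usual bfa_wc semantics (init takes an initial reference that
 * bfa_wc_wait() drops, and the resume callback fires at zero):
 *
 *	bfa_wc_init(&wc, all_stopped_cbfn, arg);  // implicit wc_up()
 *	list_for_each(qe, &active_q)
 *		bfa_wc_up(&wc);                   // one per pending stop
 *	bfa_wc_wait(&wc);                         // drops the initial ref;
 *	                                          // all_stopped_cbfn runs
 *	                                          // once every completion
 *	                                          // has called wc_down()
 *
 * all_stopped_cbfn, arg and active_q are illustrative names only.
 */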
2237 | ||
2238 | void | |
2239 | bna_rx_mod_fail(struct bna_rx_mod *rx_mod) | |
2240 | { | |
2241 | struct bna_rx *rx; | |
2242 | struct list_head *qe; | |
2243 | ||
2244 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; | |
2245 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; | |
2246 | ||
2247 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2248 | rx = (struct bna_rx *)qe; | |
2249 | bna_rx_fail(rx); | |
2250 | } | |
2251 | } | |
2252 | ||
2253 | void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, | |
2254 | struct bna_res_info *res_info) | |
2255 | { | |
2256 | int index; | |
2257 | struct bna_rx *rx_ptr; | |
2258 | struct bna_rxp *rxp_ptr; | |
2259 | struct bna_rxq *rxq_ptr; | |
2260 | ||
2261 | rx_mod->bna = bna; | |
2262 | rx_mod->flags = 0; | |
2263 | ||
2264 | rx_mod->rx = (struct bna_rx *) | |
2265 | res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; | |
2266 | rx_mod->rxp = (struct bna_rxp *) | |
2267 | res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; | |
2268 | rx_mod->rxq = (struct bna_rxq *) | |
2269 | res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; | |
2270 | ||
2271 | /* Initialize the queues */ | |
2272 | INIT_LIST_HEAD(&rx_mod->rx_free_q); | |
2273 | rx_mod->rx_free_count = 0; | |
2274 | INIT_LIST_HEAD(&rx_mod->rxq_free_q); | |
2275 | rx_mod->rxq_free_count = 0; | |
2276 | INIT_LIST_HEAD(&rx_mod->rxp_free_q); | |
2277 | rx_mod->rxp_free_count = 0; | |
2278 | INIT_LIST_HEAD(&rx_mod->rx_active_q); | |
2279 | ||
2280 | /* Build RX queues */ | |
2281 | for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { | |
2282 | rx_ptr = &rx_mod->rx[index]; | |
2283 | ||
2284 | bfa_q_qe_init(&rx_ptr->qe); | |
2285 | INIT_LIST_HEAD(&rx_ptr->rxp_q); | |
2286 | rx_ptr->bna = NULL; | |
2287 | rx_ptr->rid = index; | |
2288 | rx_ptr->stop_cbfn = NULL; | |
2289 | rx_ptr->stop_cbarg = NULL; | |
2290 | ||
2291 | list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); | |
2292 | rx_mod->rx_free_count++; | |
2293 | } | |
2294 | ||
2295 | /* build RX-path queue */ | |
2296 | for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { | |
2297 | rxp_ptr = &rx_mod->rxp[index]; | |
2298 | bfa_q_qe_init(&rxp_ptr->qe); | |
2299 | list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); | |
2300 | rx_mod->rxp_free_count++; | |
2301 | } | |
2302 | ||
2303 | /* build RXQ queue */ | |
2304 | for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { | |
2305 | rxq_ptr = &rx_mod->rxq[index]; | |
2306 | bfa_q_qe_init(&rxq_ptr->qe); | |
2307 | list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); | |
2308 | rx_mod->rxq_free_count++; | |
2309 | } | |
2310 | } | |
2311 | ||
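/*
 * Teardown is just dropping the bna back-pointer; the loops below
 * only walk the free lists (the running count in 'i' is never used,
 * apparently a leftover sanity walk).
 */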
2312 | void | |
2313 | bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) | |
2314 | { | |
2315 | struct list_head *qe; | |
2316 | int i; | |
2317 | ||
2318 | i = 0; | |
2319 | list_for_each(qe, &rx_mod->rx_free_q) | |
2320 | i++; | |
2321 | ||
2322 | i = 0; | |
2323 | list_for_each(qe, &rx_mod->rxp_free_q) | |
2324 | i++; | |
2325 | ||
2326 | i = 0; | |
2327 | list_for_each(qe, &rx_mod->rxq_free_q) | |
2328 | i++; | |
2329 | ||
2330 | rx_mod->bna = NULL; | |
2331 | } | |
2332 | ||
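/*
 * Firmware response to the Rx cfg-set request: copy out the per-queue
 * handles, turn each doorbell offset into a pointer inside the mapped
 * PCI BAR, zero the producer/consumer indexes, and feed RX_E_STARTED
 * to the FSM.
 */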
2333 | void | |
2334 | bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) | |
2335 | { | |
2336 | struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; | |
2337 | struct bna_rxp *rxp = NULL; | |
2338 | struct bna_rxq *q0 = NULL, *q1 = NULL; | |
2339 | struct list_head *rxp_qe; | |
2340 | int i; | |
2341 | ||
2342 | bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, | |
2343 | sizeof(struct bfi_enet_rx_cfg_rsp)); | |
2344 | ||
2345 | rx->hw_id = cfg_rsp->hw_id; | |
2346 | ||
2347 | for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); | |
2348 | i < rx->num_paths; | |
2349 | i++, rxp_qe = bfa_q_next(rxp_qe)) { | |
2350 | rxp = (struct bna_rxp *)rxp_qe; | |
2351 | GET_RXQS(rxp, q0, q1); | |
2352 | ||
2353 | /* Setup doorbells */ | |
2354 | rxp->cq.ccb->i_dbell->doorbell_addr = | |
2355 | rx->bna->pcidev.pci_bar_kva | |
2356 | + ntohl(cfg_rsp->q_handles[i].i_dbell); | |
2357 | rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; | |
2358 | q0->rcb->q_dbell = | |
2359 | rx->bna->pcidev.pci_bar_kva | |
2360 | + ntohl(cfg_rsp->q_handles[i].ql_dbell); | |
2361 | q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; | |
2362 | if (q1) { | |
2363 | q1->rcb->q_dbell = | |
2364 | rx->bna->pcidev.pci_bar_kva | |
2365 | + ntohl(cfg_rsp->q_handles[i].qs_dbell); | |
2366 | q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; | |
2367 | } | |
2368 | ||
2369 | /* Initialize producer/consumer indexes */ | |
2370 | (*rxp->cq.ccb->hw_producer_index) = 0; | |
2371 | rxp->cq.ccb->producer_index = 0; | |
2372 | q0->rcb->producer_index = q0->rcb->consumer_index = 0; | |
2373 | if (q1) | |
2374 | q1->rcb->producer_index = q1->rcb->consumer_index = 0; | |
2375 | } | |
2376 | ||
2377 | bfa_fsm_send_event(rx, RX_E_STARTED); | |
2378 | } | |
2379 | ||
2380 | void | |
2381 | bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) | |
2382 | { | |
2383 | bfa_fsm_send_event(rx, RX_E_STOPPED); | |
2384 | } | |
2385 | ||
2386 | void | |
2387 | bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) | |
2388 | { | |
2389 | u32 cq_size, hq_size, dq_size; | |
2390 | u32 cpage_count, hpage_count, dpage_count; | |
2391 | struct bna_mem_info *mem_info; | |
2392 | u32 cq_depth; | |
2393 | u32 hq_depth; | |
2394 | u32 dq_depth; | |
2395 | ||
2396 | dq_depth = q_cfg->q0_depth; | |
2397 | hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth); | |
2398 | cq_depth = dq_depth + hq_depth; |
2399 | ||
2400 | BNA_TO_POWER_OF_2_HIGH(cq_depth); | |
2401 | cq_size = cq_depth * BFI_CQ_WI_SIZE; | |
2402 | cq_size = ALIGN(cq_size, PAGE_SIZE); | |
2403 | cpage_count = SIZE_TO_PAGES(cq_size); | |
2404 | ||
2405 | BNA_TO_POWER_OF_2_HIGH(dq_depth); | |
2406 | dq_size = dq_depth * BFI_RXQ_WI_SIZE; | |
2407 | dq_size = ALIGN(dq_size, PAGE_SIZE); | |
2408 | dpage_count = SIZE_TO_PAGES(dq_size); | |
2409 | ||
2410 | if (BNA_RXP_SINGLE != q_cfg->rxp_type) { | |
2411 | BNA_TO_POWER_OF_2_HIGH(hq_depth); | |
2412 | hq_size = hq_depth * BFI_RXQ_WI_SIZE; | |
2413 | hq_size = ALIGN(hq_size, PAGE_SIZE); | |
2414 | hpage_count = SIZE_TO_PAGES(hq_size); | |
2415 | } else | |
2416 | hpage_count = 0; | |
2417 | ||
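/*
 * Illustrative sizing with made-up numbers (the real work-item sizes
 * come from BFI_CQ_WI_SIZE/BFI_RXQ_WI_SIZE): q0_depth = 1000 rounds
 * up to dq_depth = 1024; at 8 bytes per RxQ work item that is 8192
 * bytes, i.e. two 4 KiB pages, so dpage_count = 2 and the data QPT
 * needs two bna_dma_addr entries per path.
 */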
2418 | res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; | |
2419 | mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; | |
2420 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2421 | mem_info->len = sizeof(struct bna_ccb); | |
2422 | mem_info->num = q_cfg->num_paths; | |
2423 | ||
2424 | res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; | |
2425 | mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; | |
2426 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2427 | mem_info->len = sizeof(struct bna_rcb); | |
2428 | mem_info->num = BNA_GET_RXQS(q_cfg); | |
2429 | ||
2430 | res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; | |
2431 | mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; | |
2432 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2433 | mem_info->len = cpage_count * sizeof(struct bna_dma_addr); | |
2434 | mem_info->num = q_cfg->num_paths; | |
2435 | ||
2436 | res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; | |
2437 | mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; | |
2438 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2439 | mem_info->len = cpage_count * sizeof(void *); | |
2440 | mem_info->num = q_cfg->num_paths; | |
2441 | ||
2442 | res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; | |
2443 | mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; | |
2444 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2445 | mem_info->len = PAGE_SIZE * cpage_count; | |
2446 | mem_info->num = q_cfg->num_paths; | |
2447 | ||
2448 | res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; | |
2449 | mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; | |
2450 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2451 | mem_info->len = dpage_count * sizeof(struct bna_dma_addr); | |
2452 | mem_info->num = q_cfg->num_paths; | |
2453 | ||
2454 | res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; | |
2455 | mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; | |
2456 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2457 | mem_info->len = dpage_count * sizeof(void *); | |
2458 | mem_info->num = q_cfg->num_paths; | |
2459 | ||
2460 | res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; | |
2461 | mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; | |
2462 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2463 | mem_info->len = PAGE_SIZE * dpage_count; | |
2464 | mem_info->num = q_cfg->num_paths; | |
2465 | ||
2466 | res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; | |
2467 | mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; | |
2468 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2469 | mem_info->len = hpage_count * sizeof(struct bna_dma_addr); | |
2470 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); | |
2471 | ||
2472 | res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; | |
2473 | mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; | |
2474 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2475 | mem_info->len = hpage_count * sizeof(void *); | |
2476 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); | |
2477 | ||
2478 | res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; | |
2479 | mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; | |
2480 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2481 | mem_info->len = PAGE_SIZE * hpage_count; | |
2482 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); | |
2483 | ||
2484 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; | |
2485 | mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; | |
2486 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2487 | mem_info->len = BFI_IBIDX_SIZE; | |
2488 | mem_info->num = q_cfg->num_paths; | |
2489 | ||
2490 | res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; | |
2491 | mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; | |
2492 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2493 | mem_info->len = BFI_ENET_RSS_RIT_MAX; | |
2494 | mem_info->num = 1; | |
2495 | ||
2496 | res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; | |
2497 | res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; | |
2498 | res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; | |
2499 | } | |
2500 | ||
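/*
 * Assemble an Rx from the pre-sized pools: carve rx/rxp/rxq objects
 * out of rx_mod, wire up RCBs, CCBs, unmap queues, interrupt blocks
 * and queue page tables from res_info (sized earlier by
 * bna_rx_res_req()), initialize the RxF, and leave the FSM stopped.
 */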
2501 | struct bna_rx * | |
2502 | bna_rx_create(struct bna *bna, struct bnad *bnad, | |
2503 | struct bna_rx_config *rx_cfg, | |
2504 | const struct bna_rx_event_cbfn *rx_cbfn, | |
2505 | struct bna_res_info *res_info, |
2506 | void *priv) | |
2507 | { | |
2508 | struct bna_rx_mod *rx_mod = &bna->rx_mod; | |
2509 | struct bna_rx *rx; | |
2510 | struct bna_rxp *rxp; | |
2511 | struct bna_rxq *q0; | |
2512 | struct bna_rxq *q1; | |
2513 | struct bna_intr_info *intr_info; | |
2514 | struct bna_mem_descr *hqunmap_mem; | |
2515 | struct bna_mem_descr *dqunmap_mem; | |
2516 | struct bna_mem_descr *ccb_mem; |
2517 | struct bna_mem_descr *rcb_mem; | |
2518 | struct bna_mem_descr *cqpt_mem; |
2519 | struct bna_mem_descr *cswqpt_mem; | |
2520 | struct bna_mem_descr *cpage_mem; | |
2521 | struct bna_mem_descr *hqpt_mem; | |
2522 | struct bna_mem_descr *dqpt_mem; | |
2523 | struct bna_mem_descr *hsqpt_mem; | |
2524 | struct bna_mem_descr *dsqpt_mem; | |
2525 | struct bna_mem_descr *hpage_mem; | |
2526 | struct bna_mem_descr *dpage_mem; | |
2527 | u32 dpage_count, hpage_count; | |
2528 | u32 hq_idx, dq_idx, rcb_idx; | |
2529 | u32 cq_depth, i; | |
2530 | u32 page_count; | |
2531 | |
2532 | if (!bna_rx_res_check(rx_mod, rx_cfg)) | |
2533 | return NULL; | |
2534 | ||
2535 | intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; | |
2536 | ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; | |
2537 | rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; | |
2538 | dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; | |
2539 | hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; | |
2540 | cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; |
2541 | cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; | |
2542 | cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; | |
2543 | hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; | |
2544 | dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; | |
2545 | hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; | |
2546 | dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; | |
2547 | hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; | |
2548 | dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; | |
2549 | ||
2550 | page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / | |
2551 | PAGE_SIZE; | |
2552 | ||
2553 | dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len / | |
2554 | PAGE_SIZE; | |
2555 | ||
2556 | hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len / | |
2557 | PAGE_SIZE; | |
2558 | ||
2559 | rx = bna_rx_get(rx_mod, rx_cfg->rx_type); | |
2560 | rx->bna = bna; | |
2561 | rx->rx_flags = 0; | |
2562 | INIT_LIST_HEAD(&rx->rxp_q); | |
2563 | rx->stop_cbfn = NULL; | |
2564 | rx->stop_cbarg = NULL; | |
2565 | rx->priv = priv; | |
2566 | ||
2567 | rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; | |
2568 | rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; | |
2569 | rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; | |
2570 | rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; | |
2571 | rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; | |
2572 | /* Following callbacks are mandatory */ | |
2573 | rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; | |
2574 | rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; | |
2575 | ||
2576 | if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { | |
2577 | switch (rx->type) { | |
2578 | case BNA_RX_T_REGULAR: | |
2579 | if (!(rx->bna->rx_mod.flags & | |
2580 | BNA_RX_MOD_F_ENET_LOOPBACK)) | |
2581 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; | |
2582 | break; | |
2583 | case BNA_RX_T_LOOPBACK: | |
2584 | if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) | |
2585 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; | |
2586 | break; | |
2587 | } | |
2588 | } | |
2589 | ||
2590 | rx->num_paths = rx_cfg->num_paths; | |
2591 | for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; | |
2592 | i < rx->num_paths; i++) { | |
2593 | rxp = bna_rxp_get(rx_mod); |
2594 | list_add_tail(&rxp->qe, &rx->rxp_q); | |
2595 | rxp->type = rx_cfg->rxp_type; | |
2596 | rxp->rx = rx; | |
2597 | rxp->cq.rx = rx; | |
2598 | ||
2599 | q0 = bna_rxq_get(rx_mod); | |
2600 | if (BNA_RXP_SINGLE == rx_cfg->rxp_type) | |
2601 | q1 = NULL; | |
2602 | else | |
2603 | q1 = bna_rxq_get(rx_mod); | |
2604 | ||
2605 | if (1 == intr_info->num) | |
2606 | rxp->vector = intr_info->idl[0].vector; | |
2607 | else | |
2608 | rxp->vector = intr_info->idl[i].vector; | |
2609 | ||
2610 | /* Setup IB */ | |
2611 | ||
2612 | rxp->cq.ib.ib_seg_host_addr.lsb = | |
2613 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; | |
2614 | rxp->cq.ib.ib_seg_host_addr.msb = | |
2615 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; | |
2616 | rxp->cq.ib.ib_seg_host_addr_kva = | |
2617 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; | |
2618 | rxp->cq.ib.intr_type = intr_info->intr_type; | |
2619 | if (intr_info->intr_type == BNA_INTR_T_MSIX) | |
2620 | rxp->cq.ib.intr_vector = rxp->vector; | |
2621 | else | |
2622 | rxp->cq.ib.intr_vector = (1 << rxp->vector); | |
2623 | rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; | |
2624 | rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; | |
2625 | rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; | |
2626 | ||
2627 | bna_rxp_add_rxqs(rxp, q0, q1); | |
2628 | ||
2629 | /* Setup large Q */ | |
2630 | ||
2631 | q0->rx = rx; | |
2632 | q0->rxp = rxp; | |
2633 | ||
2634 | q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; | |
2635 | q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; | |
2636 | rcb_idx++; dq_idx++; | |
2637 | q0->rcb->q_depth = rx_cfg->q0_depth; | |
2638 | q0->q_depth = rx_cfg->q0_depth; | |
2639 | q0->multi_buffer = rx_cfg->q0_multi_buf; | |
2640 | q0->buffer_size = rx_cfg->q0_buf_size; | |
2641 | q0->num_vecs = rx_cfg->q0_num_vecs; | |
2642 | q0->rcb->rxq = q0; |
2643 | q0->rcb->bnad = bna->bnad; | |
2644 | q0->rcb->id = 0; | |
2645 | q0->rx_packets = q0->rx_bytes = 0; | |
2646 | q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; | |
2647 | ||
2648 | bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, | |
2649 | &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); | |
2650 | |
2651 | if (rx->rcb_setup_cbfn) | |
2652 | rx->rcb_setup_cbfn(bnad, q0->rcb); | |
2653 | ||
2654 | /* Setup small Q */ | |
2655 | ||
2656 | if (q1) { | |
2657 | q1->rx = rx; | |
2658 | q1->rxp = rxp; | |
2659 | ||
2660 | q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; | |
2661 | q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; | |
2662 | rcb_idx++; hq_idx++; | |
2663 | q1->rcb->q_depth = rx_cfg->q1_depth; | |
2664 | q1->q_depth = rx_cfg->q1_depth; | |
2665 | q1->multi_buffer = BNA_STATUS_T_DISABLED; | |
2666 | q1->num_vecs = 1; | |
2667 | q1->rcb->rxq = q1; |
2668 | q1->rcb->bnad = bna->bnad; | |
2669 | q1->rcb->id = 1; | |
2670 | q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? | |
2671 | rx_cfg->hds_config.forced_offset | |
2672 | : rx_cfg->q1_buf_size; | |
2673 | q1->rx_packets = q1->rx_bytes = 0; |
2674 | q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; | |
2675 | ||
2676 | bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, | |
2677 | &hqpt_mem[i], &hsqpt_mem[i], | |
2678 | &hpage_mem[i]); | |
2679 | |
2680 | if (rx->rcb_setup_cbfn) | |
2681 | rx->rcb_setup_cbfn(bnad, q1->rcb); | |
2682 | } | |
2683 | ||
2684 | /* Setup CQ */ | |
2685 | ||
2686 | rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; | |
2687 | cq_depth = rx_cfg->q0_depth + | |
2688 | ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? | |
2689 | 0 : rx_cfg->q1_depth); | |
2690 | /* If multi-buffer is enabled, the sum of q0_depth | |
2691 | * and q1_depth need not be a power of 2, so round up. | |
2692 | */ | |
2693 | BNA_TO_POWER_OF_2_HIGH(cq_depth); | |
2694 | rxp->cq.ccb->q_depth = cq_depth; | |
2695 | rxp->cq.ccb->cq = &rxp->cq; |
2696 | rxp->cq.ccb->rcb[0] = q0->rcb; | |
2697 | q0->rcb->ccb = rxp->cq.ccb; | |
2698 | if (q1) { | |
2699 | rxp->cq.ccb->rcb[1] = q1->rcb; | |
2700 | q1->rcb->ccb = rxp->cq.ccb; | |
2701 | } | |
2702 | rxp->cq.ccb->hw_producer_index = | |
2703 | (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; | |
2704 | rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; | |
2705 | rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; | |
2706 | rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; | |
2707 | rxp->cq.ccb->rx_coalescing_timeo = | |
2708 | rxp->cq.ib.coalescing_timeo; | |
2709 | rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; | |
2710 | rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; | |
2711 | rxp->cq.ccb->bnad = bna->bnad; | |
2712 | rxp->cq.ccb->id = i; | |
2713 | ||
2714 | bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, | |
2715 | &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]); | |
2716 | |
2717 | if (rx->ccb_setup_cbfn) | |
2718 | rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); | |
2719 | } | |
2720 | ||
2721 | rx->hds_cfg = rx_cfg->hds_config; | |
2722 | ||
2723 | bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); | |
2724 | ||
2725 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); | |
2726 | ||
2727 | rx_mod->rid_mask |= (1 << rx->rid); | |
2728 | ||
2729 | return rx; | |
2730 | } | |
2731 | ||
2732 | void | |
2733 | bna_rx_destroy(struct bna_rx *rx) | |
2734 | { | |
2735 | struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; | |
2736 | struct bna_rxq *q0 = NULL; | |
2737 | struct bna_rxq *q1 = NULL; | |
2738 | struct bna_rxp *rxp; | |
2739 | struct list_head *qe; | |
2740 | ||
2741 | bna_rxf_uninit(&rx->rxf); | |
2742 | ||
2743 | while (!list_empty(&rx->rxp_q)) { | |
2744 | bfa_q_deq(&rx->rxp_q, &rxp); | |
2745 | GET_RXQS(rxp, q0, q1); | |
2746 | if (rx->rcb_destroy_cbfn) | |
2747 | rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); | |
2748 | q0->rcb = NULL; | |
2749 | q0->rxp = NULL; | |
2750 | q0->rx = NULL; | |
2751 | bna_rxq_put(rx_mod, q0); | |
2752 | ||
2753 | if (q1) { | |
2754 | if (rx->rcb_destroy_cbfn) | |
2755 | rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); | |
2756 | q1->rcb = NULL; | |
2757 | q1->rxp = NULL; | |
2758 | q1->rx = NULL; | |
2759 | bna_rxq_put(rx_mod, q1); | |
2760 | } | |
2761 | rxp->rxq.slr.large = NULL; | |
2762 | rxp->rxq.slr.small = NULL; | |
2763 | ||
2764 | if (rx->ccb_destroy_cbfn) | |
2765 | rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); | |
2766 | rxp->cq.ccb = NULL; | |
2767 | rxp->rx = NULL; | |
2768 | bna_rxp_put(rx_mod, rxp); | |
2769 | } | |
2770 | ||
2771 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2772 | if (qe == &rx->qe) { | |
2773 | list_del(&rx->qe); | |
2774 | bfa_q_qe_init(&rx->qe); | |
2775 | break; | |
2776 | } | |
2777 | } | |
2778 | ||
2779 | rx_mod->rid_mask &= ~(1 << rx->rid); | |
2780 | ||
2781 | rx->bna = NULL; | |
2782 | rx->priv = NULL; | |
2783 | bna_rx_put(rx_mod, rx); | |
2784 | } | |
2785 | ||
2786 | void | |
2787 | bna_rx_enable(struct bna_rx *rx) | |
2788 | { | |
2789 | if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) | |
2790 | return; | |
2791 | ||
2792 | rx->rx_flags |= BNA_RX_F_ENABLED; | |
2793 | if (rx->rx_flags & BNA_RX_F_ENET_STARTED) | |
2794 | bfa_fsm_send_event(rx, RX_E_START); | |
2795 | } | |
2796 | ||
2797 | void | |
2798 | bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, | |
2799 | void (*cbfn)(void *, struct bna_rx *)) | |
2800 | { | |
2801 | if (type == BNA_SOFT_CLEANUP) { | |
2802 | /* h/w should not be accessed. Treat it as if stopped */ | |
2803 | (*cbfn)(rx->bna->bnad, rx); | |
2804 | } else { | |
2805 | rx->stop_cbfn = cbfn; | |
2806 | rx->stop_cbarg = rx->bna->bnad; | |
2807 | ||
2808 | rx->rx_flags &= ~BNA_RX_F_ENABLED; | |
2809 | ||
2810 | bfa_fsm_send_event(rx, RX_E_STOP); | |
2811 | } | |
2812 | } | |
2813 | ||
2814 | void | |
2815 | bna_rx_cleanup_complete(struct bna_rx *rx) | |
2816 | { | |
2817 | bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); | |
2818 | } | |
2819 | ||
2820 | void |
2821 | bna_rx_vlan_strip_enable(struct bna_rx *rx) | |
2822 | { | |
2823 | struct bna_rxf *rxf = &rx->rxf; | |
2824 | ||
2825 | if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { | |
2826 | rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; | |
2827 | rxf->vlan_strip_pending = true; | |
2828 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | |
2829 | } | |
2830 | } | |
2831 | ||
2832 | void | |
2833 | bna_rx_vlan_strip_disable(struct bna_rx *rx) | |
2834 | { | |
2835 | struct bna_rxf *rxf = &rx->rxf; | |
2836 | ||
2837 | if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { | |
2838 | rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; | |
2839 | rxf->vlan_strip_pending = true; | |
2840 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | |
2841 | } | |
2842 | } | |
2843 | ||
2844 | enum bna_cb_status |
2845 | bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, | |
2846 | enum bna_rxmode bitmask, | |
2847 | void (*cbfn)(struct bnad *, struct bna_rx *)) | |
2848 | { | |
2849 | struct bna_rxf *rxf = &rx->rxf; | |
2850 | int need_hw_config = 0; | |
2851 | ||
2852 | /* Error checks */ | |
2853 | ||
2854 | if (is_promisc_enable(new_mode, bitmask)) { | |
2855 | /* If promisc mode is already enabled elsewhere in the system */ | |
2856 | if ((rx->bna->promisc_rid != BFI_INVALID_RID) && | |
2857 | (rx->bna->promisc_rid != rxf->rx->rid)) | |
2858 | goto err_return; | |
2859 | ||
2860 | /* If default mode is already enabled in the system */ | |
2861 | if (rx->bna->default_mode_rid != BFI_INVALID_RID) | |
2862 | goto err_return; | |
2863 | ||
2864 | /* Trying to enable promiscuous and default mode together */ | |
2865 | if (is_default_enable(new_mode, bitmask)) | |
2866 | goto err_return; | |
2867 | } | |
2868 | ||
2869 | if (is_default_enable(new_mode, bitmask)) { | |
2870 | /* If default mode is already enabled elsewhere in the system */ | |
2871 | if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && | |
2872 | (rx->bna->default_mode_rid != rxf->rx->rid)) { | |
2873 | goto err_return; | |
2874 | } | |
2875 | ||
2876 | /* If promiscuous mode is already enabled in the system */ | |
2877 | if (rx->bna->promisc_rid != BFI_INVALID_RID) | |
2878 | goto err_return; | |
2879 | } | |
2880 | ||
2881 | /* Process the commands */ | |
2882 | ||
2883 | if (is_promisc_enable(new_mode, bitmask)) { | |
2884 | if (bna_rxf_promisc_enable(rxf)) | |
2885 | need_hw_config = 1; | |
2886 | } else if (is_promisc_disable(new_mode, bitmask)) { | |
2887 | if (bna_rxf_promisc_disable(rxf)) | |
2888 | need_hw_config = 1; | |
2889 | } | |
2890 | ||
2891 | if (is_allmulti_enable(new_mode, bitmask)) { | |
2892 | if (bna_rxf_allmulti_enable(rxf)) | |
2893 | need_hw_config = 1; | |
2894 | } else if (is_allmulti_disable(new_mode, bitmask)) { | |
2895 | if (bna_rxf_allmulti_disable(rxf)) | |
2896 | need_hw_config = 1; | |
2897 | } | |
2898 | ||
2899 | /* Trigger h/w if needed */ | |
2900 | ||
2901 | if (need_hw_config) { | |
2902 | rxf->cam_fltr_cbfn = cbfn; | |
2903 | rxf->cam_fltr_cbarg = rx->bna->bnad; | |
2904 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | |
2905 | } else if (cbfn) | |
2906 | (*cbfn)(rx->bna->bnad, rx); | |
2907 | ||
2908 | return BNA_CB_SUCCESS; | |
2909 | ||
2910 | err_return: | |
2911 | return BNA_CB_FAIL; | |
2912 | } | |
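/*
 * A typical call, e.g. from the bnad set-rx-mode path (a sketch; the
 * BNA_RXMODE_PROMISC flag name is taken from bna_types.h and assumed
 * here):
 *
 *	enum bna_cb_status err;
 *
 *	err = bna_rx_mode_set(rx,
 *			      BNA_RXMODE_PROMISC,  // new mode bits
 *			      BNA_RXMODE_PROMISC,  // bits to apply
 *			      NULL);               // no completion cbfn
 *
 * Only the bits set in the bitmask argument are examined, and the
 * promisc/default exclusivity checks above can make the call return
 * BNA_CB_FAIL.
 */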
2913 | ||
2914 | void | |
2915 | bna_rx_vlanfilter_enable(struct bna_rx *rx) | |
2916 | { | |
2917 | struct bna_rxf *rxf = &rx->rxf; | |
2918 | ||
2919 | if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { | |
2920 | rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; | |
2921 | rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; | |
2922 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | |
2923 | } | |
2924 | } | |
2925 | ||
2926 | void | |
2927 | bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) | |
2928 | { | |
2929 | struct bna_rxp *rxp; | |
2930 | struct list_head *qe; | |
2931 | ||
2932 | list_for_each(qe, &rx->rxp_q) { | |
2933 | rxp = (struct bna_rxp *)qe; | |
2934 | rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; | |
2935 | bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); | |
2936 | } | |
2937 | } | |
2938 | ||
2939 | void | |
2940 | bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) | |
2941 | { | |
2942 | int i, j; | |
2943 | ||
2944 | for (i = 0; i < BNA_LOAD_T_MAX; i++) | |
2945 | for (j = 0; j < BNA_BIAS_T_MAX; j++) | |
2946 | bna->rx_mod.dim_vector[i][j] = vector[i][j]; | |
2947 | } | |
2948 | ||
2949 | void | |
2950 | bna_rx_dim_update(struct bna_ccb *ccb) | |
2951 | { | |
2952 | struct bna *bna = ccb->cq->rx->bna; | |
2953 | u32 load, bias; | |
2954 | u32 pkt_rt, small_rt, large_rt; | |
2955 | u8 coalescing_timeo; | |
2956 | ||
2957 | if ((ccb->pkt_rate.small_pkt_cnt == 0) && | |
2958 | (ccb->pkt_rate.large_pkt_cnt == 0)) | |
2959 | return; | |
2960 | ||
2961 | /* Arrive at preconfigured coalescing timeo value based on pkt rate */ | |
2962 | ||
2963 | small_rt = ccb->pkt_rate.small_pkt_cnt; | |
2964 | large_rt = ccb->pkt_rate.large_pkt_cnt; | |
2965 | ||
2966 | pkt_rt = small_rt + large_rt; | |
2967 | ||
2968 | if (pkt_rt < BNA_PKT_RATE_10K) | |
2969 | load = BNA_LOAD_T_LOW_4; | |
2970 | else if (pkt_rt < BNA_PKT_RATE_20K) | |
2971 | load = BNA_LOAD_T_LOW_3; | |
2972 | else if (pkt_rt < BNA_PKT_RATE_30K) | |
2973 | load = BNA_LOAD_T_LOW_2; | |
2974 | else if (pkt_rt < BNA_PKT_RATE_40K) | |
2975 | load = BNA_LOAD_T_LOW_1; | |
2976 | else if (pkt_rt < BNA_PKT_RATE_50K) | |
2977 | load = BNA_LOAD_T_HIGH_1; | |
2978 | else if (pkt_rt < BNA_PKT_RATE_60K) | |
2979 | load = BNA_LOAD_T_HIGH_2; | |
2980 | else if (pkt_rt < BNA_PKT_RATE_80K) | |
2981 | load = BNA_LOAD_T_HIGH_3; | |
2982 | else | |
2983 | load = BNA_LOAD_T_HIGH_4; | |
2984 | ||
2985 | if (small_rt > (large_rt << 1)) | |
2986 | bias = 0; | |
2987 | else | |
2988 | bias = 1; | |
2989 | ||
2990 | ccb->pkt_rate.small_pkt_cnt = 0; | |
2991 | ccb->pkt_rate.large_pkt_cnt = 0; | |
2992 | ||
2993 | coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; | |
2994 | ccb->rx_coalescing_timeo = coalescing_timeo; | |
2995 | ||
2996 | /* Set it to IB */ | |
2997 | bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); | |
2998 | } | |
2999 | ||
3000 | const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { | |
3001 | {12, 12}, | |
3002 | {6, 10}, | |
3003 | {5, 10}, | |
3004 | {4, 8}, | |
3005 | {3, 6}, | |
3006 | {3, 6}, | |
3007 | {2, 4}, | |
3008 | {1, 2}, | |
3009 | }; | |
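/*
 * Worked example of the DIM lookup in bna_rx_dim_update(), assuming
 * the BNA_PKT_RATE_* constants are literal packet counts: a CQ that
 * saw ~25K packets in the last sample, mostly large frames
 * (small_rt <= 2 * large_rt), classifies as BNA_LOAD_T_LOW_2 with
 * bias 1, so the new timeout is bna_napi_dim_vector[BNA_LOAD_T_LOW_2][1].
 */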
3010 | ||
3011 | /* TX */ | |
3012 | ||
3013 | #define call_tx_stop_cbfn(tx) \ |
3014 | do { \ | |
3015 | if ((tx)->stop_cbfn) { \ | |
3016 | void (*cbfn)(void *, struct bna_tx *); \ | |
3017 | void *cbarg; \ | |
3018 | cbfn = (tx)->stop_cbfn; \ | |
3019 | cbarg = (tx)->stop_cbarg; \ | |
3020 | (tx)->stop_cbfn = NULL; \ | |
3021 | (tx)->stop_cbarg = NULL; \ | |
3022 | cbfn(cbarg, (tx)); \ | |
3023 | } \ | |
3024 | } while (0) | |
3025 | ||
3026 | #define call_tx_prio_change_cbfn(tx) \ | |
3027 | do { \ | |
3028 | if ((tx)->prio_change_cbfn) { \ | |
3029 | void (*cbfn)(struct bnad *, struct bna_tx *); \ | |
3030 | cbfn = (tx)->prio_change_cbfn; \ | |
3031 | (tx)->prio_change_cbfn = NULL; \ | |
3032 | cbfn((tx)->bna->bnad, (tx)); \ | |
3033 | } \ | |
3034 | } while (0) | |
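/*
 * Both macros above consume the saved callback (clearing it) before
 * invoking it, so a callback that immediately re-arms stop_cbfn or
 * prio_change_cbfn is neither wiped out afterwards nor called twice.
 */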
3035 | ||
3036 | static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); | |
3037 | static void bna_bfi_tx_enet_start(struct bna_tx *tx); | |
3038 | static void bna_tx_enet_stop(struct bna_tx *tx); | |
3039 | ||
3040 | enum bna_tx_event { | |
3041 | TX_E_START = 1, | |
3042 | TX_E_STOP = 2, | |
3043 | TX_E_FAIL = 3, | |
3044 | TX_E_STARTED = 4, | |
3045 | TX_E_STOPPED = 5, | |
3046 | TX_E_PRIO_CHANGE = 6, | |
3047 | TX_E_CLEANUP_DONE = 7, | |
3048 | TX_E_BW_UPDATE = 8, | |
3049 | }; | |
3050 | ||
3051 | bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); | |
3052 | bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); | |
3053 | bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); | |
3054 | bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); | |
3055 | bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, | |
3056 | enum bna_tx_event); | |
3057 | bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, | |
3058 | enum bna_tx_event); | |
3059 | bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx, | |
3060 | enum bna_tx_event); | |
3061 | bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); | |
3062 | bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, | |
3063 | enum bna_tx_event); | |
3064 | ||
3065 | static void | |
3066 | bna_tx_sm_stopped_entry(struct bna_tx *tx) | |
3067 | { | |
3068 | call_tx_stop_cbfn(tx); | |
3069 | } | |
3070 | ||
3071 | static void | |
3072 | bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) | |
3073 | { | |
3074 | switch (event) { | |
3075 | case TX_E_START: | |
3076 | bfa_fsm_set_state(tx, bna_tx_sm_start_wait); | |
3077 | break; | |
3078 | ||
3079 | case TX_E_STOP: | |
3080 | call_tx_stop_cbfn(tx); | |
3081 | break; | |
3082 | ||
3083 | case TX_E_FAIL: | |
3084 | /* No-op */ | |
3085 | break; | |
3086 | ||
3087 | case TX_E_PRIO_CHANGE: | |
3088 | call_tx_prio_change_cbfn(tx); | |
3089 | break; | |
3090 | ||
3091 | case TX_E_BW_UPDATE: | |
3092 | /* No-op */ | |
3093 | break; | |
3094 | ||
3095 | default: | |
3096 | bfa_sm_fault(event); | |
3097 | } | |
3098 | } | |
3099 | ||
3100 | static void | |
3101 | bna_tx_sm_start_wait_entry(struct bna_tx *tx) | |
3102 | { | |
3103 | bna_bfi_tx_enet_start(tx); | |
3104 | } | |
3105 | ||
3106 | static void | |
3107 | bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) | |
3108 | { | |
3109 | switch (event) { | |
3110 | case TX_E_STOP: | |
3111 | tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); | |
3112 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); | |
3113 | break; | |
3114 | ||
3115 | case TX_E_FAIL: | |
3116 | tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); | |
3117 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); | |
3118 | break; | |
3119 | ||
3120 | case TX_E_STARTED: | |
3121 | if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) { | |
3122 | tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | | |
3123 | BNA_TX_F_BW_UPDATED); | |
3124 | bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); | |
3125 | } else | |
3126 | bfa_fsm_set_state(tx, bna_tx_sm_started); | |
3127 | break; | |
3128 | ||
3129 | case TX_E_PRIO_CHANGE: | |
3130 | tx->flags |= BNA_TX_F_PRIO_CHANGED; | |
3131 | break; | |
3132 | ||
3133 | case TX_E_BW_UPDATE: | |
3134 | tx->flags |= BNA_TX_F_BW_UPDATED; | |
3135 | break; | |
3136 | ||
3137 | default: | |
3138 | bfa_sm_fault(event); | |
3139 | } | |
3140 | } | |
3141 | ||
3142 | static void | |
3143 | bna_tx_sm_started_entry(struct bna_tx *tx) | |
3144 | { | |
3145 | struct bna_txq *txq; | |
3146 | struct list_head *qe; | |
3147 | int is_regular = (tx->type == BNA_TX_T_REGULAR); | |
3148 | ||
3149 | list_for_each(qe, &tx->txq_q) { | |
3150 | txq = (struct bna_txq *)qe; | |
3151 | txq->tcb->priority = txq->priority; | |
3152 | /* Start IB */ | |
3153 | bna_ib_start(tx->bna, &txq->ib, is_regular); | |
3154 | } | |
3155 | tx->tx_resume_cbfn(tx->bna->bnad, tx); | |
3156 | } | |
3157 | ||
3158 | static void | |
3159 | bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) | |
3160 | { | |
3161 | switch (event) { | |
3162 | case TX_E_STOP: | |
3163 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); | |
3164 | tx->tx_stall_cbfn(tx->bna->bnad, tx); | |
3165 | bna_tx_enet_stop(tx); | |
3166 | break; | |
3167 | ||
3168 | case TX_E_FAIL: | |
3169 | bfa_fsm_set_state(tx, bna_tx_sm_failed); | |
3170 | tx->tx_stall_cbfn(tx->bna->bnad, tx); | |
3171 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); | |
3172 | break; | |
3173 | ||
3174 | case TX_E_PRIO_CHANGE: | |
3175 | case TX_E_BW_UPDATE: | |
3176 | bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); | |
3177 | break; | |
3178 | ||
3179 | default: | |
3180 | bfa_sm_fault(event); | |
3181 | } | |
3182 | } | |
3183 | ||
3184 | static void | |
3185 | bna_tx_sm_stop_wait_entry(struct bna_tx *tx) | |
3186 | { | |
3187 | } | |
3188 | ||
3189 | static void | |
3190 | bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) | |
3191 | { | |
3192 | switch (event) { | |
3193 | case TX_E_FAIL: | |
3194 | case TX_E_STOPPED: | |
3195 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); | |
3196 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); | |
3197 | break; | |
3198 | ||
3199 | case TX_E_STARTED: | |
3200 | /* | |
3201 | * We are here due to start_wait -> stop_wait transition on | |
3202 | * TX_E_STOP event | |
3203 | */ | |
3204 | bna_tx_enet_stop(tx); | |
3205 | break; | |
3206 | ||
3207 | case TX_E_PRIO_CHANGE: | |
3208 | case TX_E_BW_UPDATE: | |
3209 | /* No-op */ | |
3210 | break; | |
3211 | ||
3212 | default: | |
3213 | bfa_sm_fault(event); | |
3214 | } | |
3215 | } | |
3216 | ||
3217 | static void | |
3218 | bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) | |
3219 | { | |
3220 | } | |
3221 | ||
3222 | static void | |
3223 | bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) | |
3224 | { | |
3225 | switch (event) { | |
3226 | case TX_E_FAIL: | |
3227 | case TX_E_PRIO_CHANGE: | |
3228 | case TX_E_BW_UPDATE: | |
3229 | /* No-op */ | |
3230 | break; | |
3231 | ||
3232 | case TX_E_CLEANUP_DONE: | |
3233 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); | |
3234 | break; | |
3235 | ||
3236 | default: | |
3237 | bfa_sm_fault(event); | |
3238 | } | |
3239 | } | |
3240 | ||
3241 | static void | |
3242 | bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) | |
3243 | { | |
3244 | tx->tx_stall_cbfn(tx->bna->bnad, tx); | |
3245 | bna_tx_enet_stop(tx); | |
3246 | } | |
3247 | ||
3248 | static void | |
3249 | bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) | |
3250 | { | |
3251 | switch (event) { | |
3252 | case TX_E_STOP: | |
3253 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); | |
3254 | break; | |
3255 | ||
3256 | case TX_E_FAIL: | |
3257 | bfa_fsm_set_state(tx, bna_tx_sm_failed); | |
3258 | call_tx_prio_change_cbfn(tx); | |
3259 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); | |
3260 | break; | |
3261 | ||
3262 | case TX_E_STOPPED: | |
3263 | bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); | |
3264 | break; | |
3265 | ||
3266 | case TX_E_PRIO_CHANGE: | |
3267 | case TX_E_BW_UPDATE: | |
3268 | /* No-op */ | |
3269 | break; | |
3270 | ||
3271 | default: | |
3272 | bfa_sm_fault(event); | |
3273 | } | |
3274 | } | |

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
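
/*
 * Note: taken together, the handlers above implement the teardown side of
 * the Tx state machine. Roughly (covering only the transitions visible in
 * this part of the file): stop_wait moves to cleanup_wait on
 * TX_E_STOPPED/TX_E_FAIL; prio_stop_wait moves to prio_cleanup_wait on
 * TX_E_STOPPED; failed moves to quiesce_wait on TX_E_START;
 * prio_cleanup_wait and quiesce_wait return to start_wait on
 * TX_E_CLEANUP_DONE; and cleanup_wait reaches stopped on TX_E_CLEANUP_DONE
 * once the driver-side cleanup has finished.
 */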

static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
	     i < tx->num_txq;
	     i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	/*
	 * txq here is the last TxQ left over from the loop above; all TxQs
	 * of a Tx are set up with the same interrupt type and IB parameters
	 * (see bna_tx_create()), so reading them from any one queue works.
	 */
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
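
/*
 * Note: bna_bfi_tx_enet_start() is asynchronous. It only posts a
 * BFI_ENET_H2I_TX_CFG_SET_REQ command to the firmware message queue; the
 * firmware's reply arrives later in bna_bfi_tx_enet_start_rsp() below,
 * which feeds TX_E_STARTED back into the state machine. The stop path
 * (bna_bfi_tx_enet_stop() / bna_bfi_tx_enet_stop_rsp()) follows the same
 * post-and-wait-for-response pattern.
 */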

static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}

static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
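
/*
 * Note: the queue page table (QPT) is the indirection the hardware uses to
 * walk a TxQ built from multiple DMA pages. kv_qpt_ptr points at an array
 * of {lsb, msb} DMA addresses, one per page, while tcb->sw_qpt holds the
 * matching kernel virtual addresses so the driver can reach the same pages.
 * The loop above fills both tables from a single contiguous allocation,
 * advancing the kernel pointer and the DMA address by PAGE_SIZE in lockstep.
 */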

static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR)
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	else
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}
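
/*
 * Note: regular Tx objects are taken from the head of tx_free_q and
 * loopback Tx objects from the tail. Since the free list is kept sorted
 * by rid (see bna_tx_free() below), this in effect hands low resource ids
 * to regular traffic and high ids to loopback.
 */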

static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
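
/*
 * Note: the tail of bna_tx_free() is an ordered re-insertion. It scans
 * tx_free_q for the last entry whose rid is smaller than the freed tx's
 * rid and splices the tx back in right after it, so the free list stays
 * sorted by rid; this is the invariant the head/tail dequeue policy in
 * bna_tx_get() relies on.
 */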

static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}

void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
	     i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
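
/*
 * Note: in the start response above, the firmware hands back per-queue
 * doorbell offsets (i_dbell/q_dbell) in network byte order; adding them to
 * pci_bar_kva turns each offset into a kernel virtual address inside the
 * mapped PCI BAR, which the transmit hot path can then write directly.
 */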

void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
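
/*
 * For illustration, assuming 4 KiB pages and, say, a 64-byte work item
 * (BFI_TXQ_WI_SIZE): a TxQ depth of 2048 entries needs 2048 * 64 = 128 KiB
 * of ring space, so page_count = 32 pages per TxQ, plus a 32-entry DMA
 * address table (QPT) and a 32-entry kernel-pointer shadow table (SWQPT).
 */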

struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
			PAGE_SIZE;

	/*
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
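
/*
 * Note: callers are expected to populate res_info (presumably sized via
 * bna_tx_res_req() above) before calling bna_tx_create(). The tcb_setup
 * and tcb_destroy callbacks are optional and guarded by NULL checks; the
 * stall/resume/cleanup callbacks are mandatory, as noted in the code.
 */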

void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}

void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
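
/*
 * Note: BNA_SOFT_CLEANUP bypasses the state machine entirely; the caller's
 * callback runs synchronously and the Tx keeps running. A hard cleanup
 * instead clears BNA_TX_F_ENABLED and drives the FSM through TX_E_STOP,
 * deferring the callback until the stop completes. bna_tx_cleanup_complete()
 * is presumably invoked by the bnad layer once its own cleanup work is done,
 * delivering the TX_E_CLEANUP_DONE event the wait states above expect.
 */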

static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}

void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}

void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	/*
	 * These loops only count the entries on the free lists and discard
	 * the result; they look like a leftover debug/sanity walk and have
	 * no functional effect.
	 */
	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}

void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
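
/*
 * Note: tx_stop_wc is a wait counter used to fold many per-Tx stop
 * completions into one "all Tx stopped" notification. bfa_wc_init() arms
 * it with bna_tx_mod_cb_tx_stopped_all() as the completion callback,
 * bfa_wc_up() takes one reference per Tx being stopped, and each Tx's stop
 * callback (bna_tx_mod_cb_tx_stopped() above) drops one. bfa_wc_wait()
 * releases the initial reference taken at init time, so if no Tx matched
 * the requested type the completion callback fires immediately.
 */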

void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}