Commit | Line | Data |
---|---|---|
11fdf7f2 TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright (c) 2007-2013 Broadcom Corporation. | |
7c673cae FG |
3 | * |
4 | * Eric Davis <edavis@broadcom.com> | |
5 | * David Christensen <davidch@broadcom.com> | |
6 | * Gary Zambrano <zambrano@broadcom.com> | |
7 | * | |
8 | * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. | |
11fdf7f2 | 9 | * Copyright (c) 2015-2018 Cavium Inc. |
7c673cae | 10 | * All rights reserved. |
11fdf7f2 | 11 | * www.cavium.com |
7c673cae FG |
12 | */ |
13 | ||
14 | #include "bnx2x.h" | |
15 | #include "ecore_init.h" | |
16 | ||
17 | /**** Exe Queue interfaces ****/ | |
18 | ||
19 | /** | |
20 | * ecore_exe_queue_init - init the Exe Queue object | |
21 | * | |
22 | * @o: pointer to the object | |
23 | * @exe_len: length | |
24 | * @owner: pointer to the owner | |
25 | @validate: validate function pointer |
 | @remove: remove function pointer |
26 | * @optimize: optimize function pointer | |
27 | * @exec: execute function pointer | |
28 | * @get: get function pointer | |
29 | */ | |
30 | static void | |
31 | ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused, | |
32 | struct ecore_exe_queue_obj *o, | |
33 | int exe_len, | |
34 | union ecore_qable_obj *owner, | |
35 | exe_q_validate validate, | |
36 | exe_q_remove remove, | |
37 | exe_q_optimize optimize, exe_q_execute exec, exe_q_get get) | |
38 | { | |
39 | ECORE_MEMSET(o, 0, sizeof(*o)); | |
40 | ||
41 | ECORE_LIST_INIT(&o->exe_queue); | |
42 | ECORE_LIST_INIT(&o->pending_comp); | |
43 | ||
44 | ECORE_SPIN_LOCK_INIT(&o->lock, sc); | |
45 | ||
46 | o->exe_chunk_len = exe_len; | |
47 | o->owner = owner; | |
48 | ||
49 | /* Owner specific callbacks */ | |
50 | o->validate = validate; | |
51 | o->remove = remove; | |
52 | o->optimize = optimize; | |
53 | o->execute = exec; | |
54 | o->get = get; | |
55 | ||
9f95a23c | 56 | ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d", |
7c673cae FG |
57 | exe_len); |
58 | } | |
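
For orientation, here is a minimal sketch (not the driver's actual init path) of how an owner object might wire its callbacks into a freshly created execution queue. The callback functions named below are the vlan_mac ones defined later in this file; the helper name, the cast and the chunk length of 1 are illustrative assumptions.

```c
/* Sketch only: wiring owner callbacks into an execution queue. */
static void example_init_mac_exe_queue(struct bnx2x_softc *sc,
				       struct ecore_vlan_mac_obj *mac_obj)
{
	/* Assumption: the qable union overlays the vlan_mac object. */
	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;

	ecore_exe_queue_init(sc, &mac_obj->exe_queue, 1 /* chunk length */,
			     qable_obj,
			     ecore_validate_vlan_mac,
			     ecore_remove_vlan_mac,
			     ecore_optimize_vlan_mac,
			     ecore_execute_vlan_mac,
			     ecore_exeq_get_mac);
}
```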
59 | ||
60 | static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused, | |
61 | struct ecore_exeq_elem *elem) | |
62 | { | |
9f95a23c | 63 | ECORE_MSG(sc, "Deleting an exe_queue element"); |
7c673cae FG |
64 | ECORE_FREE(sc, elem, sizeof(*elem)); |
65 | } | |
66 | ||
67 | static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o) | |
68 | { | |
69 | struct ecore_exeq_elem *elem; | |
70 | int cnt = 0; | |
71 | ||
72 | ECORE_SPIN_LOCK_BH(&o->lock); | |
73 | ||
74 | ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link, | |
75 | struct ecore_exeq_elem) cnt++; | |
76 | ||
77 | ECORE_SPIN_UNLOCK_BH(&o->lock); | |
78 | ||
79 | return cnt; | |
80 | } | |
81 | ||
82 | /** | |
83 | * ecore_exe_queue_add - add a new element to the execution queue | |
84 | * | |
85 | * @sc: driver handle | |
86 | * @o: queue | |
87 | @elem: new element (command) to add |
88 | * @restore: true - do not optimize the command | |
89 | * | |
90 | * If the element is optimized away or is illegal, it is freed. |
91 | */ | |
92 | static int ecore_exe_queue_add(struct bnx2x_softc *sc, | |
93 | struct ecore_exe_queue_obj *o, | |
94 | struct ecore_exeq_elem *elem, int restore) | |
95 | { | |
96 | int rc; | |
97 | ||
98 | ECORE_SPIN_LOCK_BH(&o->lock); | |
99 | ||
100 | if (!restore) { | |
101 | /* Try to optimize this command away against the queued commands */ |
102 | rc = o->optimize(sc, o->owner, elem); | |
103 | if (rc) | |
104 | goto free_and_exit; | |
105 | ||
106 | /* Check if this request is ok */ | |
107 | rc = o->validate(sc, o->owner, elem); | |
108 | if (rc) { | |
9f95a23c | 109 | ECORE_MSG(sc, "Preamble failed: %d", rc); |
7c673cae FG |
110 | goto free_and_exit; |
111 | } | |
112 | } | |
113 | ||
114 | /* If so, add it to the execution queue */ | |
115 | ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue); | |
116 | ||
117 | ECORE_SPIN_UNLOCK_BH(&o->lock); | |
118 | ||
119 | return ECORE_SUCCESS; | |
120 | ||
121 | free_and_exit: | |
122 | ecore_exe_queue_free_elem(sc, elem); | |
123 | ||
124 | ECORE_SPIN_UNLOCK_BH(&o->lock); | |
125 | ||
126 | return rc; | |
127 | } | |
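
As a usage illustration (a sketch only; the helper name is hypothetical and the caller is assumed to own a vlan_mac object), a single ADD command would typically be allocated with ecore_exe_queue_alloc_elem() and then handed to ecore_exe_queue_add() with restore == FALSE so that optimize() and validate() run first:

```c
static int example_queue_add_mac(struct bnx2x_softc *sc,
				 struct ecore_vlan_mac_obj *vobj,
				 const uint8_t *mac)
{
	struct ecore_exeq_elem *elem = ecore_exe_queue_alloc_elem(sc);

	if (!elem)
		return ECORE_NOMEM;

	elem->cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	ECORE_MEMCPY(elem->cmd_data.vlan_mac.u.mac.mac, mac, ETH_ALEN);
	elem->cmd_len = 1;	/* one rule in the ramrod data */

	/* restore == FALSE, so the element may be optimized away or rejected */
	return ecore_exe_queue_add(sc, &vobj->exe_queue, elem, FALSE);
}
```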
128 | ||
129 | static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, struct ecore_exe_queue_obj | |
130 | *o) | |
131 | { | |
132 | struct ecore_exeq_elem *elem; | |
133 | ||
134 | while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { | |
135 | elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp, | |
136 | struct ecore_exeq_elem, link); | |
137 | ||
138 | ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp); | |
139 | ecore_exe_queue_free_elem(sc, elem); | |
140 | } | |
141 | } | |
142 | ||
143 | static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, | |
144 | struct ecore_exe_queue_obj *o) | |
145 | { | |
146 | ECORE_SPIN_LOCK_BH(&o->lock); | |
147 | ||
148 | __ecore_exe_queue_reset_pending(sc, o); | |
149 | ||
150 | ECORE_SPIN_UNLOCK_BH(&o->lock); | |
151 | } | |
152 | ||
153 | /** | |
154 | * ecore_exe_queue_step - execute one execution chunk atomically | |
155 | * | |
156 | * @sc: driver handle | |
157 | * @o: queue | |
158 | * @ramrod_flags: flags | |
159 | * | |
160 | * (Should be called while holding the exe_queue->lock). | |
161 | */ | |
162 | static int ecore_exe_queue_step(struct bnx2x_softc *sc, | |
163 | struct ecore_exe_queue_obj *o, | |
164 | unsigned long *ramrod_flags) | |
165 | { | |
166 | struct ecore_exeq_elem *elem, spacer; | |
167 | int cur_len = 0, rc; | |
168 | ||
169 | ECORE_MEMSET(&spacer, 0, sizeof(spacer)); | |
170 | ||
171 | /* Next step should not be performed until the current is finished, | |
172 | * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to | |
173 | * properly clear object internals without sending any command to the FW | |
174 | * which also implies there won't be any completion to clear the | |
175 | * 'pending' list. | |
176 | */ | |
177 | if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { | |
178 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { | |
9f95a23c TL |
179 | ECORE_MSG(sc, |
180 | "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list"); | |
7c673cae FG |
181 | __ecore_exe_queue_reset_pending(sc, o); |
182 | } else { | |
183 | return ECORE_PENDING; | |
184 | } | |
185 | } | |
186 | ||
187 | /* Run through the pending commands list and create a next | |
188 | * execution chunk. | |
189 | */ | |
190 | while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) { | |
191 | elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue, | |
192 | struct ecore_exeq_elem, link); | |
193 | ECORE_DBG_BREAK_IF(!elem->cmd_len); | |
194 | ||
195 | if (cur_len + elem->cmd_len <= o->exe_chunk_len) { | |
196 | cur_len += elem->cmd_len; | |
197 | /* Prevent both lists from being empty when moving an |
198 | * element. This will allow the call of | |
199 | * ecore_exe_queue_empty() without locking. | |
200 | */ | |
201 | ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp); | |
202 | mb(); | |
203 | ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue); | |
204 | ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp); | |
205 | ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp); | |
206 | } else | |
207 | break; | |
208 | } | |
209 | ||
210 | /* Sanity check */ | |
211 | if (!cur_len) | |
212 | return ECORE_SUCCESS; | |
213 | ||
214 | rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags); | |
215 | if (rc < 0) | |
216 | /* In case of an error return the commands back to the queue | |
217 | * and reset the pending_comp. | |
218 | */ | |
219 | ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue); | |
220 | else if (!rc) | |
221 | /* If zero is returned, it means there are no outstanding pending |
222 | * completions and we may dismiss the pending list. | |
223 | */ | |
224 | __ecore_exe_queue_reset_pending(sc, o); | |
225 | ||
226 | return rc; | |
227 | } | |
228 | ||
229 | static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o) | |
230 | { | |
231 | int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue); | |
232 | ||
233 | /* Don't reorder!!! */ | |
234 | mb(); | |
235 | ||
236 | return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp); | |
237 | } | |
238 | ||
239 | static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct | |
240 | bnx2x_softc *sc | |
241 | __rte_unused) | |
242 | { | |
9f95a23c | 243 | ECORE_MSG(sc, "Allocating a new exe_queue element"); |
7c673cae FG |
244 | return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc); |
245 | } | |
246 | ||
247 | /************************ raw_obj functions ***********************************/ | |
248 | static int ecore_raw_check_pending(struct ecore_raw_obj *o) | |
249 | { | |
250 | /* | |
251 | * !! converts the value returned by ECORE_TEST_BIT such that it | |
252 | * is guaranteed not to be truncated regardless of int definition. | |
253 | * | |
254 | * Note we cannot simply define the function's return value type | |
255 | * to match the type returned by ECORE_TEST_BIT, as it varies by | |
256 | * platform/implementation. | |
257 | */ | |
258 | ||
259 | return !!ECORE_TEST_BIT(o->state, o->pstate); |
260 | } | |
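
The truncation concern described above can be reproduced in a standalone program (illustration only, assuming a 64-bit unsigned long): a masked test-bit result whose only set bit lies above bit 31 typically collapses to 0 when narrowed to int, while the !! normalisation keeps it at 1.

```c
#include <stdio.h>

/* Mimics a test-bit style macro that returns the raw masked word. */
static unsigned long raw_test_bit(int nr, const unsigned long *addr)
{
	return *addr & (1UL << nr);
}

int main(void)
{
	unsigned long pstate = 1UL << 32;	/* only bit 32 is set */

	printf("narrowed to int: %d\n", (int)raw_test_bit(32, &pstate)); /* typically 0 */
	printf("with !!:         %d\n", !!raw_test_bit(32, &pstate));    /* always 1    */
	return 0;
}
```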
261 | ||
262 | static void ecore_raw_clear_pending(struct ecore_raw_obj *o) | |
263 | { | |
264 | ECORE_SMP_MB_BEFORE_CLEAR_BIT(); | |
265 | ECORE_CLEAR_BIT(o->state, o->pstate); | |
266 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
267 | } | |
268 | ||
269 | static void ecore_raw_set_pending(struct ecore_raw_obj *o) | |
270 | { | |
271 | ECORE_SMP_MB_BEFORE_CLEAR_BIT(); | |
272 | ECORE_SET_BIT(o->state, o->pstate); | |
273 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
274 | } | |
275 | ||
276 | /** | |
277 | * ecore_state_wait - wait until the given bit(state) is cleared | |
278 | * | |
279 | * @sc: device handle | |
280 | * @state: state which is to be cleared | |
281 | * @state_p: state buffer | |
282 | * | |
283 | */ | |
284 | static int ecore_state_wait(struct bnx2x_softc *sc, int state, | |
285 | unsigned long *pstate) | |
286 | { | |
287 | /* can take a while if any port is running */ | |
288 | int cnt = 5000; | |
289 | ||
290 | if (CHIP_REV_IS_EMUL(sc)) | |
291 | cnt *= 20; | |
292 | ||
9f95a23c TL |
293 | ECORE_MSG(sc, "waiting for state to become %d", state); |
294 | /* Be over-protective and remind bnx2x_intr_legacy() to |
295 | * process the RAMROD |
296 | */ | |
297 | rte_atomic32_set(&sc->scan_fp, 1); | |
7c673cae FG |
298 | |
299 | ECORE_MIGHT_SLEEP(); | |
300 | while (cnt--) { | |
9f95a23c | 301 | bnx2x_intr_legacy(sc); |
7c673cae FG |
302 | if (!ECORE_TEST_BIT(state, pstate)) { |
303 | #ifdef ECORE_STOP_ON_ERROR | |
9f95a23c | 304 | ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt); |
7c673cae | 305 | #endif |
9f95a23c | 306 | rte_atomic32_set(&sc->scan_fp, 0); |
7c673cae FG |
307 | return ECORE_SUCCESS; |
308 | } | |
309 | ||
310 | ECORE_WAIT(sc, delay_us); | |
311 | ||
9f95a23c TL |
312 | if (sc->panic) { |
313 | rte_atomic32_set(&sc->scan_fp, 0); | |
7c673cae | 314 | return ECORE_IO; |
9f95a23c | 315 | } |
7c673cae FG |
316 | } |
317 | ||
318 | /* timeout! */ | |
9f95a23c TL |
319 | PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state); |
320 | rte_atomic32_set(&sc->scan_fp, 0); | |
7c673cae FG |
321 | #ifdef ECORE_STOP_ON_ERROR |
322 | ecore_panic(); | |
323 | #endif | |
324 | ||
325 | return ECORE_TIMEOUT; | |
326 | } | |
327 | ||
328 | static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw) | |
329 | { | |
330 | return ecore_state_wait(sc, raw->state, raw->pstate); | |
331 | } | |
332 | ||
333 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ | |
334 | /* credit handling callbacks */ | |
335 | static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset) | |
336 | { | |
337 | struct ecore_credit_pool_obj *mp = o->macs_pool; | |
338 | ||
339 | ECORE_DBG_BREAK_IF(!mp); | |
340 | ||
341 | return mp->get_entry(mp, offset); | |
342 | } | |
343 | ||
344 | static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o) | |
345 | { | |
346 | struct ecore_credit_pool_obj *mp = o->macs_pool; | |
347 | ||
348 | ECORE_DBG_BREAK_IF(!mp); | |
349 | ||
350 | return mp->get(mp, 1); | |
351 | } | |
352 | ||
353 | static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset) | |
354 | { | |
355 | struct ecore_credit_pool_obj *mp = o->macs_pool; | |
356 | ||
357 | return mp->put_entry(mp, offset); | |
358 | } | |
359 | ||
360 | static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o) | |
361 | { | |
362 | struct ecore_credit_pool_obj *mp = o->macs_pool; | |
363 | ||
364 | return mp->put(mp, 1); | |
365 | } | |
366 | ||
367 | /** | |
368 | * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac | |
369 | * head list. | |
370 | * | |
371 | * @sc: device handle | |
372 | * @o: vlan_mac object | |
373 | * | |
374 | * @details: Non-blocking implementation; should be called under execution | |
375 | * queue lock. | |
376 | */ | |
377 | static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused, | |
378 | struct ecore_vlan_mac_obj *o) | |
379 | { | |
380 | if (o->head_reader) { | |
9f95a23c | 381 | ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy"); |
7c673cae FG |
382 | return ECORE_BUSY; |
383 | } | |
384 | ||
9f95a23c | 385 | ECORE_MSG(sc, "vlan_mac_lock writer - Taken"); |
7c673cae FG |
386 | return ECORE_SUCCESS; |
387 | } | |
388 | ||
389 | /** | |
390 | * __ecore_vlan_mac_h_exec_pending - execute a step on behalf of a previous step |
391 | * which wasn't able to run because the vlan mac head list lock was taken. |
392 | * | |
393 | * @sc: device handle | |
394 | * @o: vlan_mac object | |
395 | * | |
396 | * @details Should be called under execution queue lock; notice it might release | |
397 | * and reclaim it during its run. | |
398 | */ | |
399 | static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc, | |
400 | struct ecore_vlan_mac_obj *o) | |
401 | { | |
402 | int rc; | |
403 | unsigned long ramrod_flags = o->saved_ramrod_flags; | |
404 | ||
9f95a23c | 405 | ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu", |
7c673cae FG |
406 | ramrod_flags); |
407 | o->head_exe_request = FALSE; | |
408 | o->saved_ramrod_flags = 0; | |
409 | rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags); | |
410 | if (rc != ECORE_SUCCESS) { | |
9f95a23c | 411 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
412 | "execution of pending commands failed with rc %d", |
413 | rc); | |
414 | #ifdef ECORE_STOP_ON_ERROR | |
415 | ecore_panic(); | |
416 | #endif | |
417 | } | |
418 | } | |
419 | ||
420 | /** | |
421 | * __ecore_vlan_mac_h_pend - pend an execution step which couldn't run |
422 | * because the vlan mac head list lock was taken. |
423 | * | |
424 | * @sc: device handle | |
425 | * @o: vlan_mac object | |
426 | * @ramrod_flags: ramrod flags of missed execution | |
427 | * | |
428 | * @details Should be called under execution queue lock. | |
429 | */ | |
430 | static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused, | |
431 | struct ecore_vlan_mac_obj *o, | |
432 | unsigned long ramrod_flags) | |
433 | { | |
434 | o->head_exe_request = TRUE; | |
435 | o->saved_ramrod_flags = ramrod_flags; | |
9f95a23c | 436 | ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu", |
7c673cae FG |
437 | ramrod_flags); |
438 | } | |
439 | ||
440 | /** | |
441 | * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock | |
442 | * | |
443 | * @sc: device handle | |
444 | * @o: vlan_mac object | |
445 | * | |
446 | * @details Should be called under execution queue lock. Notice if a pending | |
447 | * execution exists, it would perform it - possibly releasing and | |
448 | * reclaiming the execution queue lock. | |
449 | */ | |
450 | static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, | |
451 | struct ecore_vlan_mac_obj *o) | |
452 | { | |
453 | /* It's possible a new pending execution was added since this writer | |
454 | * executed. If so, execute again. [Ad infinitum] | |
455 | */ | |
456 | while (o->head_exe_request) { | |
9f95a23c TL |
457 | ECORE_MSG(sc, |
458 | "vlan_mac_lock - writer release encountered a pending request"); | |
7c673cae FG |
459 | __ecore_vlan_mac_h_exec_pending(sc, o); |
460 | } | |
461 | } | |
462 | ||
463 | /** | |
464 | * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock | |
465 | * | |
466 | * @sc: device handle | |
467 | * @o: vlan_mac object | |
468 | * | |
469 | * @details Notice if a pending execution exists, it would perform it - | |
470 | * possibly releasing and reclaiming the execution queue lock. | |
471 | */ | |
472 | void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, | |
473 | struct ecore_vlan_mac_obj *o) | |
474 | { | |
475 | ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); | |
476 | __ecore_vlan_mac_h_write_unlock(sc, o); | |
477 | ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); | |
478 | } | |
479 | ||
480 | /** | |
481 | * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock | |
482 | * | |
483 | * @sc: device handle | |
484 | * @o: vlan_mac object | |
485 | * | |
486 | * @details Should be called under the execution queue lock. May sleep. May | |
487 | * release and reclaim execution queue lock during its run. | |
488 | */ | |
489 | static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused, | |
490 | struct ecore_vlan_mac_obj *o) | |
491 | { | |
492 | /* If we got here, we're holding lock --> no WRITER exists */ | |
493 | o->head_reader++; | |
9f95a23c TL |
494 | ECORE_MSG(sc, |
495 | "vlan_mac_lock - locked reader - number %d", o->head_reader); | |
7c673cae FG |
496 | |
497 | return ECORE_SUCCESS; | |
498 | } | |
499 | ||
500 | /** | |
501 | * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock | |
502 | * | |
503 | * @sc: device handle | |
504 | * @o: vlan_mac object | |
505 | * | |
506 | * @details May sleep. Claims and releases execution queue lock during its run. | |
507 | */ | |
508 | static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc, | |
509 | struct ecore_vlan_mac_obj *o) | |
510 | { | |
511 | int rc; | |
512 | ||
513 | ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); | |
514 | rc = __ecore_vlan_mac_h_read_lock(sc, o); | |
515 | ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); | |
516 | ||
517 | return rc; | |
518 | } | |
519 | ||
520 | /** | |
521 | * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock | |
522 | * | |
523 | * @sc: device handle | |
524 | * @o: vlan_mac object | |
525 | * | |
526 | * @details Should be called under execution queue lock. Notice if a pending | |
527 | * execution exists, it would be performed if this was the last | |
528 | * reader, possibly releasing and reclaiming the execution queue lock. |
529 | */ | |
530 | static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, | |
531 | struct ecore_vlan_mac_obj *o) | |
532 | { | |
533 | if (!o->head_reader) { | |
9f95a23c | 534 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
535 | "Need to release vlan mac reader lock, but lock isn't taken"); |
536 | #ifdef ECORE_STOP_ON_ERROR | |
537 | ecore_panic(); | |
538 | #endif | |
539 | } else { | |
540 | o->head_reader--; | |
9f95a23c TL |
541 | ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d", |
542 | o->head_reader); | |
7c673cae FG |
543 | } |
544 | ||
545 | /* It's possible a new pending execution was added, and that this reader | |
546 | * was last - if so we need to execute the command. | |
547 | */ | |
548 | if (!o->head_reader && o->head_exe_request) { | |
9f95a23c | 549 | ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request"); |
7c673cae FG |
550 | |
551 | /* Writer release will do the trick */ | |
552 | __ecore_vlan_mac_h_write_unlock(sc, o); | |
553 | } | |
554 | } | |
555 | ||
556 | /** | |
557 | * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock | |
558 | * | |
559 | * @sc: device handle | |
560 | * @o: vlan_mac object | |
561 | * | |
562 | * @details Notice if a pending execution exists, it would be performed if this | |
563 | * was the last reader. Claims and releases the execution queue lock | |
564 | * during its run. | |
565 | */ | |
566 | void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, | |
567 | struct ecore_vlan_mac_obj *o) | |
568 | { | |
569 | ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); | |
570 | __ecore_vlan_mac_h_read_unlock(sc, o); | |
571 | ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); | |
572 | } | |
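
A minimal traversal sketch (hypothetical helper, not driver code) showing the intended reader-lock discipline around the registry head list; the same pattern is used by ecore_get_n_elements() just below:

```c
static int example_count_registry(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o, int *cnt)
{
	struct ecore_vlan_mac_registry_elem *pos;
	int rc;

	*cnt = 0;

	rc = ecore_vlan_mac_h_read_lock(sc, o);
	if (rc != ECORE_SUCCESS)
		return rc;

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		(*cnt)++;

	/* May run a pended execution if we were the last reader */
	ecore_vlan_mac_h_read_unlock(sc, o);

	return ECORE_SUCCESS;
}
```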
573 | ||
574 | /** | |
575 | * ecore_get_n_elements - get n elements from the vlan mac registry |
576 | * | |
577 | * @sc: device handle | |
578 | * @o: vlan_mac object | |
579 | * @n: number of elements to get | |
580 | * @base: base address for element placement | |
581 | @stride: stride between elements (in bytes) |
 | @size: size of each element to copy (in bytes) |
582 | */ | |
583 | static int ecore_get_n_elements(struct bnx2x_softc *sc, | |
584 | struct ecore_vlan_mac_obj *o, int n, | |
585 | uint8_t * base, uint8_t stride, uint8_t size) | |
586 | { | |
587 | struct ecore_vlan_mac_registry_elem *pos; | |
588 | uint8_t *next = base; | |
589 | int counter = 0, read_lock; | |
590 | ||
9f95a23c | 591 | ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)"); |
7c673cae FG |
592 | read_lock = ecore_vlan_mac_h_read_lock(sc, o); |
593 | if (read_lock != ECORE_SUCCESS) | |
9f95a23c | 594 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
595 | "get_n_elements failed to get vlan mac reader lock; Access without lock"); |
596 | ||
597 | /* traverse list */ | |
598 | ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, | |
599 | struct ecore_vlan_mac_registry_elem) { | |
600 | if (counter < n) { | |
601 | ECORE_MEMCPY(next, &pos->u, size); | |
602 | counter++; | |
9f95a23c TL |
603 | ECORE_MSG |
604 | (sc, "copied element number %d to address %p element was:", | |
7c673cae FG |
605 | counter, next); |
606 | next += stride + size; | |
607 | } | |
608 | } | |
609 | ||
610 | if (read_lock == ECORE_SUCCESS) { | |
9f95a23c | 611 | ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)"); |
7c673cae FG |
612 | ecore_vlan_mac_h_read_unlock(sc, o); |
613 | } | |
614 | ||
615 | return counter * ETH_ALEN; | |
616 | } | |
617 | ||
618 | /* check_add() callbacks */ | |
619 | static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused, | |
620 | struct ecore_vlan_mac_obj *o, | |
621 | union ecore_classification_ramrod_data *data) | |
622 | { | |
623 | struct ecore_vlan_mac_registry_elem *pos; | |
624 | ||
9f95a23c | 625 | ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command", |
7c673cae FG |
626 | data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], |
627 | data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]); | |
628 | ||
629 | if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac)) | |
630 | return ECORE_INVAL; | |
631 | ||
632 | /* Check if a requested MAC already exists */ | |
633 | ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, | |
634 | struct ecore_vlan_mac_registry_elem) | |
635 | if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) && | |
636 | (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) | |
637 | return ECORE_EXISTS; | |
638 | ||
639 | return ECORE_SUCCESS; | |
640 | } | |
641 | ||
642 | /* check_del() callbacks */ | |
643 | static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc | |
644 | *sc | |
645 | __rte_unused, | |
646 | struct | |
647 | ecore_vlan_mac_obj | |
648 | *o, union | |
649 | ecore_classification_ramrod_data | |
650 | *data) | |
651 | { | |
652 | struct ecore_vlan_mac_registry_elem *pos; | |
653 | ||
9f95a23c | 654 | ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command", |
7c673cae FG |
655 | data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], |
656 | data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]); | |
657 | ||
658 | ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, | |
659 | struct ecore_vlan_mac_registry_elem) | |
660 | if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && | |
661 | (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) | |
662 | return pos; | |
663 | ||
664 | return NULL; | |
665 | } | |
666 | ||
667 | /* check_move() callback */ | |
668 | static int ecore_check_move(struct bnx2x_softc *sc, | |
669 | struct ecore_vlan_mac_obj *src_o, | |
670 | struct ecore_vlan_mac_obj *dst_o, | |
671 | union ecore_classification_ramrod_data *data) | |
672 | { | |
673 | struct ecore_vlan_mac_registry_elem *pos; | |
674 | int rc; | |
675 | ||
676 | /* Check if we can delete the requested configuration from the first | |
677 | * object. | |
678 | */ | |
679 | pos = src_o->check_del(sc, src_o, data); | |
680 | ||
681 | /* check if configuration can be added */ | |
682 | rc = dst_o->check_add(sc, dst_o, data); | |
683 | ||
684 | /* If this classification can not be added (is already set) | |
685 | * or can't be deleted - return an error. | |
686 | */ | |
687 | if (rc || !pos) | |
688 | return FALSE; | |
689 | ||
690 | return TRUE; | |
691 | } | |
692 | ||
693 | static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc, | |
694 | __rte_unused struct ecore_vlan_mac_obj | |
695 | *src_o, __rte_unused struct ecore_vlan_mac_obj | |
696 | *dst_o, __rte_unused union | |
697 | ecore_classification_ramrod_data *data) | |
698 | { | |
699 | return FALSE; | |
700 | } | |
701 | ||
702 | static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj | |
703 | *o) | |
704 | { | |
705 | struct ecore_raw_obj *raw = &o->raw; | |
706 | uint8_t rx_tx_flag = 0; | |
707 | ||
708 | if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || | |
709 | (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) | |
710 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; | |
711 | ||
712 | if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || | |
713 | (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) | |
714 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; | |
715 | ||
716 | return rx_tx_flag; | |
717 | } | |
718 | ||
719 | static void ecore_set_mac_in_nig(struct bnx2x_softc *sc, | |
720 | int add, unsigned char *dev_addr, int index) | |
721 | { | |
722 | uint32_t wb_data[2]; | |
723 | uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM : | |
724 | NIG_REG_LLH0_FUNC_MEM; | |
725 | ||
726 | if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc)) | |
727 | return; | |
728 | ||
729 | if (index > ECORE_LLH_CAM_MAX_PF_LINE) | |
730 | return; | |
731 | ||
9f95a23c | 732 | ECORE_MSG(sc, "Going to %s LLH configuration at entry %d", |
7c673cae FG |
733 | (add ? "ADD" : "DELETE"), index); |
734 | ||
735 | if (add) { | |
736 | /* LLH_FUNC_MEM is a uint64_t WB register */ | |
737 | reg_offset += 8 * index; | |
738 | ||
739 | wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | | |
740 | (dev_addr[4] << 8) | dev_addr[5]); | |
741 | wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); | |
742 | ||
743 | ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2); | |
744 | } | |
745 | ||
746 | REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : | |
747 | NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add); | |
748 | } | |
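
As a worked illustration of the byte packing above (standalone example, not driver code): for the MAC address 00:11:22:33:44:55 the two 32-bit words written to the LLH function memory come out as 0x22334455 and 0x00000011.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dev_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t wb_data[2];

	wb_data[0] = (dev_addr[2] << 24) | (dev_addr[3] << 16) |
		     (dev_addr[4] << 8) | dev_addr[5];
	wb_data[1] = (dev_addr[0] << 8) | dev_addr[1];

	printf("%08x %08x\n", wb_data[0], wb_data[1]); /* 22334455 00000011 */
	return 0;
}
```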
749 | ||
750 | /** | |
751 | * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod | |
752 | * | |
753 | * @sc: device handle | |
754 | * @o: queue for which we want to configure this rule | |
755 | * @add: if TRUE the command is an ADD command, DEL otherwise | |
756 | * @opcode: CLASSIFY_RULE_OPCODE_XXX | |
757 | * @hdr: pointer to a header to setup | |
758 | * | |
759 | */ | |
760 | static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o, | |
761 | int add, int opcode, | |
762 | struct eth_classify_cmd_header | |
763 | *hdr) | |
764 | { | |
765 | struct ecore_raw_obj *raw = &o->raw; | |
766 | ||
767 | hdr->client_id = raw->cl_id; | |
768 | hdr->func_id = raw->func_id; | |
769 | ||
770 | /* Rx and/or Tx (internal switching) configuration? */ |
771 | hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o); | |
772 | ||
773 | if (add) | |
774 | hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; | |
775 | ||
776 | hdr->cmd_general_data |= | |
777 | (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); | |
778 | } | |
779 | ||
780 | /** | |
781 | * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header | |
782 | * | |
783 | * @cid: connection id | |
784 | * @type: ECORE_FILTER_XXX_PENDING | |
785 | * @hdr: pointer to header to setup | |
786 | * @rule_cnt: number of rules configured in this ramrod data buffer |
787 | * | |
788 | * Currently we always configure one rule; the echo field is set to contain the CID |
789 | * and an opcode type. |
790 | */ | |
791 | static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header | |
792 | *hdr, int rule_cnt) | |
793 | { | |
794 | hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) | | |
795 | (type << ECORE_SWCID_SHIFT)); | |
796 | hdr->rule_cnt = (uint8_t) rule_cnt; | |
797 | } | |
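
Since the echo word packs both the connection id and the pending-command type, a completion handler can recover them by reversing the packing. The sketch below is illustrative only; ECORE_SWCID_MASK and ECORE_SWCID_SHIFT come from the driver headers, and the echo value is assumed to have already been converted from little-endian.

```c
static void example_decode_echo(uint32_t echo, uint32_t *cid, int *type)
{
	*cid  = echo & ECORE_SWCID_MASK;	/* low bits: connection id */
	*type = echo >> ECORE_SWCID_SHIFT;	/* high bits: ECORE_FILTER_*_PENDING */
}
```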
798 | ||
799 | /* hw_config() callbacks */ | |
800 | static void ecore_set_one_mac_e2(struct bnx2x_softc *sc, | |
801 | struct ecore_vlan_mac_obj *o, | |
802 | struct ecore_exeq_elem *elem, int rule_idx, | |
803 | __rte_unused int cam_offset) | |
804 | { | |
805 | struct ecore_raw_obj *raw = &o->raw; | |
806 | struct eth_classify_rules_ramrod_data *data = | |
807 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); | |
808 | int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; | |
809 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; | |
810 | int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; | |
811 | unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; | |
812 | uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac; | |
813 | ||
814 | /* Set LLH CAM entry: currently only iSCSI and ETH macs are | |
815 | * relevant. In addition, current implementation is tuned for a | |
816 | * single ETH MAC. | |
817 | * | |
818 | * When PF configuration of multiple unicast ETH MACs in switch- |
819 | * independent mode is required (NetQ, multiple netdev MACs, |
820 | * etc.), consider better utilisation of the 8 per-function MAC |
821 | * entries in the LLH register. There are also |
822 | * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the |
823 | * total number of CAM entries to 16. |
824 | * | |
825 | * Currently we won't configure NIG for MACs other than a primary ETH | |
826 | * MAC and iSCSI L2 MAC. | |
827 | * | |
828 | * If this MAC is moving from one Queue to another, no need to change | |
829 | * NIG configuration. | |
830 | */ | |
831 | if (cmd != ECORE_VLAN_MAC_MOVE) { | |
832 | if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags)) | |
833 | ecore_set_mac_in_nig(sc, add, mac, | |
834 | ECORE_LLH_CAM_ISCSI_ETH_LINE); | |
835 | else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags)) | |
836 | ecore_set_mac_in_nig(sc, add, mac, | |
837 | ECORE_LLH_CAM_ETH_LINE); | |
838 | } | |
839 | ||
840 | /* Reset the ramrod data buffer for the first rule */ | |
841 | if (rule_idx == 0) | |
842 | ECORE_MEMSET(data, 0, sizeof(*data)); | |
843 | ||
844 | /* Setup a command header */ | |
845 | ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC, | |
846 | &rule_entry->mac.header); | |
847 | ||
9f95a23c | 848 | ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d", |
7c673cae FG |
849 | (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], |
850 | mac[4], mac[5], raw->cl_id); | |
851 | ||
852 | /* Set a MAC itself */ | |
853 | ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, | |
854 | &rule_entry->mac.mac_mid, | |
855 | &rule_entry->mac.mac_lsb, mac); | |
856 | rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac; | |
857 | ||
858 | /* MOVE: Add a rule that will add this MAC to the target Queue */ | |
859 | if (cmd == ECORE_VLAN_MAC_MOVE) { | |
860 | rule_entry++; | |
861 | rule_cnt++; | |
862 | ||
863 | /* Setup ramrod data */ | |
864 | ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data. | |
865 | vlan_mac.target_obj, TRUE, | |
866 | CLASSIFY_RULE_OPCODE_MAC, | |
867 | &rule_entry->mac.header); | |
868 | ||
869 | /* Set a MAC itself */ | |
870 | ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, | |
871 | &rule_entry->mac.mac_mid, | |
872 | &rule_entry->mac.mac_lsb, mac); | |
873 | rule_entry->mac.inner_mac = | |
874 | elem->cmd_data.vlan_mac.u.mac.is_inner_mac; | |
875 | } | |
876 | ||
877 | /* Set the ramrod data header */ | |
878 | ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, | |
879 | rule_cnt); | |
880 | } | |
881 | ||
882 | /** | |
883 | * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod | |
884 | * | |
885 | * @sc: device handle | |
886 | * @o: queue | |
887 | * @type: ECORE_FILTER_XXX_PENDING |
888 | * @cam_offset: offset in cam memory | |
889 | * @hdr: pointer to a header to setup | |
890 | * | |
891 | * E1H | |
892 | */ | |
893 | static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj | |
894 | *o, int type, int cam_offset, struct mac_configuration_hdr | |
895 | *hdr) | |
896 | { | |
897 | struct ecore_raw_obj *r = &o->raw; | |
898 | ||
899 | hdr->length = 1; | |
900 | hdr->offset = (uint8_t) cam_offset; | |
901 | hdr->client_id = ECORE_CPU_TO_LE16(0xff); | |
902 | hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | | |
903 | (type << ECORE_SWCID_SHIFT)); | |
904 | } | |
905 | ||
906 | static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj | |
907 | *o, int add, int opcode, | |
908 | uint8_t * mac, | |
909 | uint16_t vlan_id, struct | |
910 | mac_configuration_entry | |
911 | *cfg_entry) | |
912 | { | |
913 | struct ecore_raw_obj *r = &o->raw; | |
914 | uint32_t cl_bit_vec = (1 << r->cl_id); | |
915 | ||
916 | cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec); | |
917 | cfg_entry->pf_id = r->func_id; | |
918 | cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id); | |
919 | ||
920 | if (add) { | |
921 | ECORE_SET_FLAG(cfg_entry->flags, | |
922 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | |
923 | T_ETH_MAC_COMMAND_SET); | |
924 | ECORE_SET_FLAG(cfg_entry->flags, | |
925 | MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, | |
926 | opcode); | |
927 | ||
928 | /* Set a MAC in a ramrod data */ | |
929 | ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr, | |
930 | &cfg_entry->middle_mac_addr, | |
931 | &cfg_entry->lsb_mac_addr, mac); | |
932 | } else | |
933 | ECORE_SET_FLAG(cfg_entry->flags, | |
934 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | |
935 | T_ETH_MAC_COMMAND_INVALIDATE); | |
936 | } | |
937 | ||
938 | static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc | |
939 | __rte_unused, | |
940 | struct ecore_vlan_mac_obj *o, | |
941 | int type, int cam_offset, | |
942 | int add, uint8_t * mac, | |
943 | uint16_t vlan_id, int opcode, | |
944 | struct mac_configuration_cmd | |
945 | *config) | |
946 | { | |
947 | struct mac_configuration_entry *cfg_entry = &config->config_table[0]; | |
948 | ||
949 | ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr); | |
950 | ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id, | |
951 | cfg_entry); | |
952 | ||
9f95a23c | 953 | ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d", |
7c673cae FG |
954 | (add ? "setting" : "clearing"), |
955 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], | |
956 | o->raw.cl_id, cam_offset); | |
957 | } | |
958 | ||
959 | /** | |
960 | * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data | |
961 | * | |
962 | * @sc: device handle | |
963 | * @o: ecore_vlan_mac_obj | |
964 | * @elem: ecore_exeq_elem | |
965 | * @rule_idx: rule_idx | |
966 | * @cam_offset: cam_offset | |
967 | */ | |
968 | static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc, | |
969 | struct ecore_vlan_mac_obj *o, | |
970 | struct ecore_exeq_elem *elem, | |
971 | __rte_unused int rule_idx, int cam_offset) | |
972 | { | |
973 | struct ecore_raw_obj *raw = &o->raw; | |
974 | struct mac_configuration_cmd *config = | |
975 | (struct mac_configuration_cmd *)(raw->rdata); | |
976 | /* 57711 does not support the MOVE command, |
977 | * so it's either ADD or DEL | |
978 | */ | |
979 | int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? | |
980 | TRUE : FALSE; | |
981 | ||
982 | /* Reset the ramrod data buffer */ | |
983 | ECORE_MEMSET(config, 0, sizeof(*config)); | |
984 | ||
985 | ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state, | |
986 | cam_offset, add, | |
987 | elem->cmd_data.vlan_mac.u.mac.mac, 0, | |
988 | ETH_VLAN_FILTER_ANY_VLAN, config); | |
989 | } | |
990 | ||
991 | /** | |
992 | * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element | |
993 | * | |
994 | * @sc: device handle | |
995 | * @p: command parameters | |
996 | * @ppos: pointer to the cookie | |
997 | * | |
998 | * reconfigure next MAC/VLAN/VLAN-MAC element from the | |
999 | * previously configured elements list. | |
1000 | * | |
1001 | * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is taken |
1002 | * into account. |
1003 | * |
1004 | * The cookie should be given back in the next call so the function handles the |
1005 | * next element. If *ppos is set to NULL the iteration restarts. If the returned |
1006 | * *ppos == NULL, the last element has been handled. |
1007 | * |
1008 | * | |
1009 | */ | |
1010 | static int ecore_vlan_mac_restore(struct bnx2x_softc *sc, | |
1011 | struct ecore_vlan_mac_ramrod_params *p, | |
1012 | struct ecore_vlan_mac_registry_elem **ppos) | |
1013 | { | |
1014 | struct ecore_vlan_mac_registry_elem *pos; | |
1015 | struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; | |
1016 | ||
1017 | /* If list is empty - there is nothing to do here */ | |
1018 | if (ECORE_LIST_IS_EMPTY(&o->head)) { | |
1019 | *ppos = NULL; | |
1020 | return 0; | |
1021 | } | |
1022 | ||
1023 | /* make a step... */ | |
1024 | if (*ppos == NULL) | |
1025 | *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct | |
1026 | ecore_vlan_mac_registry_elem, | |
1027 | link); | |
1028 | else | |
1029 | *ppos = ECORE_LIST_NEXT(*ppos, link, | |
1030 | struct ecore_vlan_mac_registry_elem); | |
1031 | ||
1032 | pos = *ppos; | |
1033 | ||
1034 | /* If it's the last step - return NULL */ | |
1035 | if (ECORE_LIST_IS_LAST(&pos->link, &o->head)) | |
1036 | *ppos = NULL; | |
1037 | ||
1038 | /* Prepare a 'user_req' */ | |
1039 | ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u)); | |
1040 | ||
1041 | /* Set the command */ | |
1042 | p->user_req.cmd = ECORE_VLAN_MAC_ADD; | |
1043 | ||
1044 | /* Set vlan_mac_flags */ | |
1045 | p->user_req.vlan_mac_flags = pos->vlan_mac_flags; | |
1046 | ||
1047 | /* Set a restore bit */ | |
1048 | ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags); | |
1049 | ||
1050 | return ecore_config_vlan_mac(sc, p); | |
1051 | } | |
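
The cookie semantics documented above lend themselves to a simple loop. The sketch below is a hypothetical caller (assuming p->ramrod_flags already carries the desired RAMROD_COMP_WAIT setting), not the driver's restore path:

```c
static int example_restore_all(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_ramrod_params *p)
{
	struct ecore_vlan_mac_registry_elem *pos = NULL;	/* NULL restarts the iterator */
	int rc;

	do {
		rc = ecore_vlan_mac_restore(sc, p, &pos);
		if (rc < 0)
			return rc;
	} while (pos != NULL);	/* NULL cookie: last element handled */

	return ECORE_SUCCESS;
}
```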
1052 | ||
1053 | /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a | |
1054 | * pointer to an element with a specific criteria and NULL if such an element | |
1055 | * hasn't been found. | |
1056 | */ | |
1057 | static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o, | |
1058 | struct ecore_exeq_elem *elem) | |
1059 | { | |
1060 | struct ecore_exeq_elem *pos; | |
1061 | struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; | |
1062 | ||
1063 | /* Check pending for execution commands */ | |
1064 | ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, | |
1065 | struct ecore_exeq_elem) | |
1066 | if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data, | |
1067 | sizeof(*data)) && | |
1068 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) | |
1069 | return pos; | |
1070 | ||
1071 | return NULL; | |
1072 | } | |
1073 | ||
1074 | /** | |
1075 | * ecore_validate_vlan_mac_add - check if an ADD command can be executed | |
1076 | * | |
1077 | * @sc: device handle | |
1078 | * @qo: ecore_qable_obj | |
1079 | * @elem: ecore_exeq_elem | |
1080 | * | |
1081 | * Checks that the requested configuration can be added. If yes and if | |
1082 | * requested, consume CAM credit. | |
1083 | * | |
1084 | * The 'validate' is run after the 'optimize'. | |
1085 | * | |
1086 | */ | |
1087 | static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc, | |
1088 | union ecore_qable_obj *qo, | |
1089 | struct ecore_exeq_elem *elem) | |
1090 | { | |
1091 | struct ecore_vlan_mac_obj *o = &qo->vlan_mac; | |
1092 | struct ecore_exe_queue_obj *exeq = &o->exe_queue; | |
1093 | int rc; | |
1094 | ||
1095 | /* Check the registry */ | |
1096 | rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u); | |
1097 | if (rc) { | |
9f95a23c TL |
1098 | ECORE_MSG(sc, |
1099 | "ADD command is not allowed considering current registry state."); | |
7c673cae FG |
1100 | return rc; |
1101 | } | |
1102 | ||
1103 | /* Check if there is a pending ADD command for this | |
1104 | * MAC/VLAN/VLAN-MAC. Return an error if there is. | |
1105 | */ | |
1106 | if (exeq->get(exeq, elem)) { | |
9f95a23c | 1107 | ECORE_MSG(sc, "There is a pending ADD command already"); |
7c673cae FG |
1108 | return ECORE_EXISTS; |
1109 | } | |
1110 | ||
1111 | /* Consume the credit if not requested not to */ | |
1112 | if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, | |
1113 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | |
1114 | o->get_credit(o))) | |
1115 | return ECORE_INVAL; | |
1116 | ||
1117 | return ECORE_SUCCESS; | |
1118 | } | |
1119 | ||
1120 | /** | |
1121 | * ecore_validate_vlan_mac_del - check if the DEL command can be executed | |
1122 | * | |
1123 | * @sc: device handle | |
1124 | * @qo: quable object to check | |
1125 | * @elem: element that needs to be deleted | |
1126 | * | |
1127 | * Checks that the requested configuration can be deleted. If yes and if | |
1128 | * requested, returns a CAM credit. | |
1129 | * | |
1130 | * The 'validate' is run after the 'optimize'. | |
1131 | */ | |
1132 | static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc, | |
1133 | union ecore_qable_obj *qo, | |
1134 | struct ecore_exeq_elem *elem) | |
1135 | { | |
1136 | struct ecore_vlan_mac_obj *o = &qo->vlan_mac; | |
1137 | struct ecore_vlan_mac_registry_elem *pos; | |
1138 | struct ecore_exe_queue_obj *exeq = &o->exe_queue; | |
1139 | struct ecore_exeq_elem query_elem; | |
1140 | ||
1141 | /* If this classification cannot be deleted (it doesn't exist) |
1142 | * - return ECORE_EXISTS. |
1143 | */ | |
1144 | pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); | |
1145 | if (!pos) { | |
9f95a23c TL |
1146 | ECORE_MSG(sc, |
1147 | "DEL command is not allowed considering current registry state"); | |
7c673cae FG |
1148 | return ECORE_EXISTS; |
1149 | } | |
1150 | ||
1151 | /* Check if there are pending DEL or MOVE commands for this | |
1152 | * MAC/VLAN/VLAN-MAC. Return an error if so. | |
1153 | */ | |
1154 | ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); | |
1155 | ||
1156 | /* Check for MOVE commands */ | |
1157 | query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE; | |
1158 | if (exeq->get(exeq, &query_elem)) { | |
9f95a23c | 1159 | PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already"); |
7c673cae FG |
1160 | return ECORE_INVAL; |
1161 | } | |
1162 | ||
1163 | /* Check for DEL commands */ | |
1164 | if (exeq->get(exeq, elem)) { | |
9f95a23c | 1165 | ECORE_MSG(sc, "There is a pending DEL command already"); |
7c673cae FG |
1166 | return ECORE_EXISTS; |
1167 | } | |
1168 | ||
1169 | /* Return the credit to the credit pool if not requested not to */ | |
1170 | if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, | |
1171 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | |
1172 | o->put_credit(o))) { | |
9f95a23c | 1173 | PMD_DRV_LOG(ERR, sc, "Failed to return a credit"); |
7c673cae FG |
1174 | return ECORE_INVAL; |
1175 | } | |
1176 | ||
1177 | return ECORE_SUCCESS; | |
1178 | } | |
1179 | ||
1180 | /** | |
1181 | * ecore_validate_vlan_mac_move - check if the MOVE command can be executed | |
1182 | * | |
1183 | * @sc: device handle | |
1184 | * @qo: quable object to check (source) | |
1185 | * @elem: element that needs to be moved | |
1186 | * | |
1187 | * Checks that the requested configuration can be moved. If yes and if | |
1188 | * requested, returns a CAM credit. | |
1189 | * | |
1190 | * The 'validate' is run after the 'optimize'. | |
1191 | */ | |
1192 | static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc, | |
1193 | union ecore_qable_obj *qo, | |
1194 | struct ecore_exeq_elem *elem) | |
1195 | { | |
1196 | struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac; | |
1197 | struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; | |
1198 | struct ecore_exeq_elem query_elem; | |
1199 | struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue; | |
1200 | struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue; | |
1201 | ||
1202 | /* Check if we can perform this operation based on the current registry | |
1203 | * state. | |
1204 | */ | |
1205 | if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { | |
9f95a23c TL |
1206 | ECORE_MSG(sc, |
1207 | "MOVE command is not allowed considering current registry state"); | |
7c673cae FG |
1208 | return ECORE_INVAL; |
1209 | } | |
1210 | ||
1211 | /* Check if there is an already pending DEL or MOVE command for the | |
1212 | * source object or ADD command for a destination object. Return an | |
1213 | * error if so. | |
1214 | */ | |
1215 | ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); | |
1216 | ||
1217 | /* Check DEL on source */ | |
1218 | query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; | |
1219 | if (src_exeq->get(src_exeq, &query_elem)) { | |
9f95a23c | 1220 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
1221 | "There is a pending DEL command on the source queue already"); |
1222 | return ECORE_INVAL; | |
1223 | } | |
1224 | ||
1225 | /* Check MOVE on source */ | |
1226 | if (src_exeq->get(src_exeq, elem)) { | |
9f95a23c | 1227 | ECORE_MSG(sc, "There is a pending MOVE command already"); |
7c673cae FG |
1228 | return ECORE_EXISTS; |
1229 | } | |
1230 | ||
1231 | /* Check ADD on destination */ | |
1232 | query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; | |
1233 | if (dest_exeq->get(dest_exeq, &query_elem)) { | |
9f95a23c | 1234 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
1235 | "There is a pending ADD command on the destination queue already"); |
1236 | return ECORE_INVAL; | |
1237 | } | |
1238 | ||
1239 | /* Consume the credit if not requested not to */ | |
1240 | if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST, | |
1241 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | |
1242 | dest_o->get_credit(dest_o))) | |
1243 | return ECORE_INVAL; | |
1244 | ||
1245 | if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, | |
1246 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | |
1247 | src_o->put_credit(src_o))) { | |
1248 | /* return the credit taken from dest... */ | |
1249 | dest_o->put_credit(dest_o); | |
1250 | return ECORE_INVAL; | |
1251 | } | |
1252 | ||
1253 | return ECORE_SUCCESS; | |
1254 | } | |
1255 | ||
1256 | static int ecore_validate_vlan_mac(struct bnx2x_softc *sc, | |
1257 | union ecore_qable_obj *qo, | |
1258 | struct ecore_exeq_elem *elem) | |
1259 | { | |
1260 | switch (elem->cmd_data.vlan_mac.cmd) { | |
1261 | case ECORE_VLAN_MAC_ADD: | |
1262 | return ecore_validate_vlan_mac_add(sc, qo, elem); | |
1263 | case ECORE_VLAN_MAC_DEL: | |
1264 | return ecore_validate_vlan_mac_del(sc, qo, elem); | |
1265 | case ECORE_VLAN_MAC_MOVE: | |
1266 | return ecore_validate_vlan_mac_move(sc, qo, elem); | |
1267 | default: | |
1268 | return ECORE_INVAL; | |
1269 | } | |
1270 | } | |
1271 | ||
1272 | static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc, | |
1273 | union ecore_qable_obj *qo, | |
1274 | struct ecore_exeq_elem *elem) | |
1275 | { | |
1276 | int rc = 0; | |
1277 | ||
1278 | /* If consumption wasn't required, nothing to do */ | |
1279 | if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, | |
1280 | &elem->cmd_data.vlan_mac.vlan_mac_flags)) | |
1281 | return ECORE_SUCCESS; | |
1282 | ||
1283 | switch (elem->cmd_data.vlan_mac.cmd) { | |
1284 | case ECORE_VLAN_MAC_ADD: | |
1285 | case ECORE_VLAN_MAC_MOVE: | |
1286 | rc = qo->vlan_mac.put_credit(&qo->vlan_mac); | |
1287 | break; | |
1288 | case ECORE_VLAN_MAC_DEL: | |
1289 | rc = qo->vlan_mac.get_credit(&qo->vlan_mac); | |
1290 | break; | |
1291 | default: | |
1292 | return ECORE_INVAL; | |
1293 | } | |
1294 | ||
1295 | if (rc != TRUE) | |
1296 | return ECORE_INVAL; | |
1297 | ||
1298 | return ECORE_SUCCESS; | |
1299 | } | |
1300 | ||
1301 | /** | |
1302 | * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes. | |
1303 | * | |
1304 | * @sc: device handle | |
1305 | * @o: ecore_vlan_mac_obj | |
1306 | * | |
1307 | */ | |
1308 | static int ecore_wait_vlan_mac(struct bnx2x_softc *sc, | |
1309 | struct ecore_vlan_mac_obj *o) | |
1310 | { | |
1311 | int cnt = 5000, rc; | |
1312 | struct ecore_exe_queue_obj *exeq = &o->exe_queue; | |
1313 | struct ecore_raw_obj *raw = &o->raw; | |
1314 | ||
1315 | while (cnt--) { | |
1316 | /* Wait for the current command to complete */ | |
1317 | rc = raw->wait_comp(sc, raw); | |
1318 | if (rc) | |
1319 | return rc; | |
1320 | ||
1321 | /* Wait until there are no pending commands */ | |
1322 | if (!ecore_exe_queue_empty(exeq)) | |
1323 | ECORE_WAIT(sc, 1000); | |
1324 | else | |
1325 | return ECORE_SUCCESS; | |
1326 | } | |
1327 | ||
1328 | return ECORE_TIMEOUT; | |
1329 | } | |
1330 | ||
1331 | static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc, | |
1332 | struct ecore_vlan_mac_obj *o, | |
1333 | unsigned long *ramrod_flags) | |
1334 | { | |
1335 | int rc = ECORE_SUCCESS; | |
1336 | ||
1337 | ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); | |
1338 | ||
9f95a23c | 1339 | ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock"); |
7c673cae FG |
1340 | rc = __ecore_vlan_mac_h_write_trylock(sc, o); |
1341 | ||
1342 | if (rc != ECORE_SUCCESS) { | |
1343 | __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags); | |
1344 | ||
1345 | /** The calling function should not differentiate between this case |
1346 | * and the case in which there is already a pending ramrod | |
1347 | */ | |
1348 | rc = ECORE_PENDING; | |
1349 | } else { | |
1350 | rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags); | |
1351 | } | |
1352 | ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); | |
1353 | ||
1354 | return rc; | |
1355 | } | |
1356 | ||
1357 | /** | |
1358 | * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod | |
1359 | * | |
1360 | * @sc: device handle | |
1361 | * @o: ecore_vlan_mac_obj | |
1362 | * @cqe: completion element from the event ring |
1363 | * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk |
1364 | * | |
1365 | */ | |
1366 | static int ecore_complete_vlan_mac(struct bnx2x_softc *sc, | |
1367 | struct ecore_vlan_mac_obj *o, | |
1368 | union event_ring_elem *cqe, | |
1369 | unsigned long *ramrod_flags) | |
1370 | { | |
1371 | struct ecore_raw_obj *r = &o->raw; | |
1372 | int rc; | |
1373 | ||
1374 | /* Reset pending list */ | |
1375 | ecore_exe_queue_reset_pending(sc, &o->exe_queue); | |
1376 | ||
1377 | /* Clear pending */ | |
1378 | r->clear_pending(r); | |
1379 | ||
1380 | /* If ramrod failed this is most likely a SW bug */ | |
1381 | if (cqe->message.error) | |
1382 | return ECORE_INVAL; | |
1383 | ||
1384 | /* Run the next bulk of pending commands if requested */ | |
1385 | if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) { | |
1386 | rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags); | |
1387 | if (rc < 0) | |
1388 | return rc; | |
1389 | } | |
1390 | ||
1391 | /* If there is more work to do return PENDING */ | |
1392 | if (!ecore_exe_queue_empty(&o->exe_queue)) | |
1393 | return ECORE_PENDING; | |
1394 | ||
1395 | return ECORE_SUCCESS; | |
1396 | } | |
1397 | ||
1398 | /** | |
1399 | * ecore_optimize_vlan_mac - optimize ADD and DEL commands. | |
1400 | * | |
1401 | * @sc: device handle | |
1402 | * @o: ecore_qable_obj | |
1403 | * @elem: ecore_exeq_elem | |
1404 | */ | |
1405 | static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc, | |
1406 | union ecore_qable_obj *qo, | |
1407 | struct ecore_exeq_elem *elem) | |
1408 | { | |
1409 | struct ecore_exeq_elem query, *pos; | |
1410 | struct ecore_vlan_mac_obj *o = &qo->vlan_mac; | |
1411 | struct ecore_exe_queue_obj *exeq = &o->exe_queue; | |
1412 | ||
1413 | ECORE_MEMCPY(&query, elem, sizeof(query)); | |
1414 | ||
1415 | switch (elem->cmd_data.vlan_mac.cmd) { | |
1416 | case ECORE_VLAN_MAC_ADD: | |
1417 | query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; | |
1418 | break; | |
1419 | case ECORE_VLAN_MAC_DEL: | |
1420 | query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; | |
1421 | break; | |
1422 | default: | |
1423 | /* Don't handle anything other than ADD or DEL */ | |
1424 | return 0; | |
1425 | } | |
1426 | ||
1427 | /* If we found the appropriate element - delete it */ | |
1428 | pos = exeq->get(exeq, &query); | |
1429 | if (pos) { | |
1430 | ||
1431 | /* Return the credit of the optimized command */ | |
1432 | if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, | |
1433 | &pos->cmd_data.vlan_mac.vlan_mac_flags)) { | |
1434 | if ((query.cmd_data.vlan_mac.cmd == | |
1435 | ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) { | |
9f95a23c | 1436 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
1437 | "Failed to return the credit for the optimized ADD command"); |
1438 | return ECORE_INVAL; | |
1439 | } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ | |
9f95a23c | 1440 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
1441 | "Failed to recover the credit from the optimized DEL command"); |
1442 | return ECORE_INVAL; | |
1443 | } | |
1444 | } | |
1445 | ||
9f95a23c | 1446 | ECORE_MSG(sc, "Optimizing %s command", |
7c673cae FG |
1447 | (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? |
1448 | "ADD" : "DEL"); | |
1449 | ||
1450 | ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue); | |
1451 | ecore_exe_queue_free_elem(sc, pos); | |
1452 | return 1; | |
1453 | } | |
1454 | ||
1455 | return 0; | |
1456 | } | |
1457 | ||
1458 | /** | |
1459 | * ecore_vlan_mac_get_registry_elem - prepare a registry element | |
1460 | * | |
1461 | * @sc: device handle | |
1462 | * @o: vlan_mac object |
1463 | * @elem: execution queue element carrying the command |
1464 | * @restore: if TRUE this is a restore flow (reuse the existing registry entry) |
1465 | * @re: output pointer for the resulting registry element |
1466 | * | |
1467 | * prepare a registry element according to the current command request. | |
1468 | */ | |
1469 | static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc, | |
1470 | struct ecore_vlan_mac_obj *o, | |
1471 | struct ecore_exeq_elem *elem, | |
1472 | int restore, struct | |
1473 | ecore_vlan_mac_registry_elem | |
1474 | **re) | |
1475 | { | |
1476 | enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; | |
1477 | struct ecore_vlan_mac_registry_elem *reg_elem; | |
1478 | ||
1479 | /* Allocate a new registry element if needed. */ | |
1480 | if (!restore && | |
1481 | ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) { | |
1482 | reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc); | |
1483 | if (!reg_elem) | |
1484 | return ECORE_NOMEM; | |
1485 | ||
1486 | /* Get a new CAM offset */ | |
1487 | if (!o->get_cam_offset(o, ®_elem->cam_offset)) { | |
1488 | /* This shall never happen, because we have checked the | |
1489 | * CAM availability in the 'validate'. | |
1490 | */ | |
1491 | ECORE_DBG_BREAK_IF(1); | |
1492 | ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); | |
1493 | return ECORE_INVAL; | |
1494 | } | |
1495 | ||
9f95a23c | 1496 | ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset); |
7c673cae FG |
1497 | |
1498 | /* Set a VLAN-MAC data */ | |
1499 | ECORE_MEMCPY(®_elem->u, &elem->cmd_data.vlan_mac.u, | |
1500 | sizeof(reg_elem->u)); | |
1501 | ||
1502 | /* Copy the flags (needed for DEL and RESTORE flows) */ | |
1503 | reg_elem->vlan_mac_flags = | |
1504 | elem->cmd_data.vlan_mac.vlan_mac_flags; | |
1505 | } else /* DEL, RESTORE */ | |
1506 | reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); | |
1507 | ||
1508 | *re = reg_elem; | |
1509 | return ECORE_SUCCESS; | |
1510 | } | |
1511 | ||
1512 | /** | |
1513 | * ecore_execute_vlan_mac - execute vlan mac command | |
1514 | * | |
1515 | * @sc: device handle | |
1516 | * @qo: queueable object (the vlan_mac member is used) |
1517 | * @exe_chunk: list of commands to execute in a single ramrod |
1518 | * @ramrod_flags: ramrod flags (RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY, ...) |
1519 | * | |
1520 | * go and send a ramrod! | |
1521 | */ | |
1522 | static int ecore_execute_vlan_mac(struct bnx2x_softc *sc, | |
1523 | union ecore_qable_obj *qo, | |
1524 | ecore_list_t * exe_chunk, | |
1525 | unsigned long *ramrod_flags) | |
1526 | { | |
1527 | struct ecore_exeq_elem *elem; | |
1528 | struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; | |
1529 | struct ecore_raw_obj *r = &o->raw; | |
1530 | int rc, idx = 0; | |
1531 | int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags); | |
1532 | int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags); | |
1533 | struct ecore_vlan_mac_registry_elem *reg_elem; | |
1534 | enum ecore_vlan_mac_cmd cmd; | |
1535 | ||
1536 | /* If DRIVER_ONLY execution is requested, cleanup a registry | |
1537 | * and exit. Otherwise send a ramrod to FW. | |
1538 | */ | |
1539 | if (!drv_only) { | |
1540 | ||
1541 | /* Set pending */ | |
1542 | r->set_pending(r); | |
1543 | ||
1544 | /* Fill the ramrod data */ | |
1545 | ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, | |
1546 | struct ecore_exeq_elem) { | |
1547 | cmd = elem->cmd_data.vlan_mac.cmd; | |
1548 | /* For a MOVE command the entry is added to the target object, so |
1549 | * use that object for the CAM search. |
1550 | */ | |
1551 | if (cmd == ECORE_VLAN_MAC_MOVE) | |
1552 | cam_obj = elem->cmd_data.vlan_mac.target_obj; | |
1553 | else | |
1554 | cam_obj = o; | |
1555 | ||
1556 | rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj, | |
1557 | elem, restore, | |
1558 | ®_elem); | |
1559 | if (rc) | |
1560 | goto error_exit; | |
1561 | ||
1562 | ECORE_DBG_BREAK_IF(!reg_elem); | |
1563 | ||
1564 | /* Push a new entry into the registry */ | |
1565 | if (!restore && | |
1566 | ((cmd == ECORE_VLAN_MAC_ADD) || | |
1567 | (cmd == ECORE_VLAN_MAC_MOVE))) | |
1568 | ECORE_LIST_PUSH_HEAD(®_elem->link, | |
1569 | &cam_obj->head); | |
1570 | ||
1571 | /* Configure a single command in a ramrod data buffer */ | |
1572 | o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset); | |
1573 | ||
1574 | /* MOVE command consumes 2 entries in the ramrod data */ | |
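| /* (a MOVE is built from two classification rules: one that removes the |
| * entry from the source object and one that adds it to the target) */ |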
1575 | if (cmd == ECORE_VLAN_MAC_MOVE) | |
1576 | idx += 2; | |
1577 | else | |
1578 | idx++; | |
1579 | } | |
1580 | ||
1581 | /* | |
1582 | * No need for an explicit memory barrier here: the ordering of the |
1583 | * write to the SPQ element with respect to the update of the SPQ |
1584 | * producer (which involves a memory read) is guaranteed by the full |
1585 | * memory barrier inside ecore_sp_post(), so nothing weaker is |
1586 | * required at this point. |
1587 | */ | |
1588 | ||
1589 | rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid, | |
1590 | r->rdata_mapping, ETH_CONNECTION_TYPE); | |
1591 | if (rc) | |
1592 | goto error_exit; | |
1593 | } | |
1594 | ||
1595 | /* Now, when we are done with the ramrod - clean up the registry */ | |
1596 | ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { | |
1597 | cmd = elem->cmd_data.vlan_mac.cmd; | |
1598 | if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) { | |
1599 | reg_elem = o->check_del(sc, o, | |
1600 | &elem->cmd_data.vlan_mac.u); | |
1601 | ||
1602 | ECORE_DBG_BREAK_IF(!reg_elem); | |
1603 | ||
1604 | o->put_cam_offset(o, reg_elem->cam_offset); | |
1605 | ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head); | |
1606 | ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); | |
1607 | } | |
1608 | } | |
1609 | ||
1610 | if (!drv_only) | |
1611 | return ECORE_PENDING; | |
1612 | else | |
1613 | return ECORE_SUCCESS; | |
1614 | ||
1615 | error_exit: | |
1616 | r->clear_pending(r); | |
1617 | ||
1618 | /* Cleanup a registry in case of a failure */ | |
1619 | ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { | |
1620 | cmd = elem->cmd_data.vlan_mac.cmd; | |
1621 | ||
1622 | if (cmd == ECORE_VLAN_MAC_MOVE) | |
1623 | cam_obj = elem->cmd_data.vlan_mac.target_obj; | |
1624 | else | |
1625 | cam_obj = o; | |
1626 | ||
1627 | /* Delete all newly added above entries */ | |
1628 | if (!restore && | |
1629 | ((cmd == ECORE_VLAN_MAC_ADD) || | |
1630 | (cmd == ECORE_VLAN_MAC_MOVE))) { | |
1631 | reg_elem = o->check_del(sc, cam_obj, | |
1632 | &elem->cmd_data.vlan_mac.u); | |
1633 | if (reg_elem) { | |
1634 | ECORE_LIST_REMOVE_ENTRY(®_elem->link, | |
1635 | &cam_obj->head); | |
1636 | ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); | |
1637 | } | |
1638 | } | |
1639 | } | |
1640 | ||
1641 | return rc; | |
1642 | } | |
1643 | ||
1644 | static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct | |
1645 | ecore_vlan_mac_ramrod_params *p) | |
1646 | { | |
1647 | struct ecore_exeq_elem *elem; | |
1648 | struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; | |
1649 | int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags); | |
1650 | ||
1651 | /* Allocate the execution queue element */ | |
1652 | elem = ecore_exe_queue_alloc_elem(sc); | |
1653 | if (!elem) | |
1654 | return ECORE_NOMEM; | |
1655 | ||
1656 | /* Set the command 'length' */ | |
1657 | switch (p->user_req.cmd) { | |
1658 | case ECORE_VLAN_MAC_MOVE: | |
1659 | elem->cmd_len = 2; | |
1660 | break; | |
1661 | default: | |
1662 | elem->cmd_len = 1; | |
1663 | } | |
1664 | ||
1665 | /* Fill the object specific info */ | |
1666 | ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, | |
1667 | sizeof(p->user_req)); | |
1668 | ||
1669 | /* Try to add a new command to the pending list */ | |
1670 | return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore); | |
1671 | } | |
1672 | ||
1673 | /** | |
1674 | * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. | |
1675 | * | |
1676 | * @sc: device handle | |
1677 | * @p: | |
1678 | * | |
1679 | */ | |
1680 | int ecore_config_vlan_mac(struct bnx2x_softc *sc, | |
1681 | struct ecore_vlan_mac_ramrod_params *p) | |
1682 | { | |
1683 | int rc = ECORE_SUCCESS; | |
1684 | struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; | |
1685 | unsigned long *ramrod_flags = &p->ramrod_flags; | |
1686 | int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags); | |
1687 | struct ecore_raw_obj *raw = &o->raw; | |
1688 | ||
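| /* RAMROD_CONT means "continue": do not queue a new command, only keep |
| * executing the commands that are already pending on the exe queue. */ |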
1689 | /* | |
1690 | * Add new elements to the execution list for commands that require it. | |
1691 | */ | |
1692 | if (!cont) { | |
1693 | rc = ecore_vlan_mac_push_new_cmd(sc, p); | |
1694 | if (rc) | |
1695 | return rc; | |
1696 | } | |
1697 | ||
1698 | /* If nothing will be executed further in this iteration we want to | |
1699 | * return PENDING if there are pending commands | |
1700 | */ | |
1701 | if (!ecore_exe_queue_empty(&o->exe_queue)) | |
1702 | rc = ECORE_PENDING; | |
1703 | ||
1704 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { | |
1705 | ECORE_MSG(sc, |
1706 | "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit."); | |
1707 | raw->clear_pending(raw); |
1708 | } | |
1709 | ||
1710 | /* Execute commands if required */ | |
1711 | if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) || | |
1712 | ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) { | |
1713 | rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj, | |
1714 | &p->ramrod_flags); | |
1715 | if (rc < 0) | |
1716 | return rc; | |
1717 | } | |
1718 | ||
1719 | /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set | |
1720 | * then the user wants to wait until the last command is done. |
1721 | */ | |
1722 | if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { | |
1723 | /* Wait at most the current exe_queue length iterations plus |
1724 | * one (for the current pending command). | |
1725 | */ | |
1726 | int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1; | |
1727 | ||
1728 | while (!ecore_exe_queue_empty(&o->exe_queue) && | |
1729 | max_iterations--) { | |
1730 | ||
1731 | /* Wait for the current command to complete */ | |
1732 | rc = raw->wait_comp(sc, raw); | |
1733 | if (rc) | |
1734 | return rc; | |
1735 | ||
1736 | /* Make a next step */ | |
1737 | rc = __ecore_vlan_mac_execute_step(sc, | |
1738 | p->vlan_mac_obj, | |
1739 | &p->ramrod_flags); | |
1740 | if (rc < 0) | |
1741 | return rc; | |
1742 | } | |
1743 | ||
1744 | return ECORE_SUCCESS; | |
1745 | } | |
1746 | ||
1747 | return rc; | |
1748 | } | |
1749 | ||
1750 | /** | |
1751 | * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec | |
1752 | * | |
1753 | * @sc: device handle | |
1754 | * @o: vlan_mac object to delete entries from |
1755 | * @vlan_mac_flags: only entries with these flags are deleted |
1756 | * @ramrod_flags: execution flags to be used for this deletion |
1757 | * |
1758 | * Returns zero if the last operation has completed successfully and there are |
1759 | * no more elements left, a positive value if the last operation has completed |
1760 | * successfully and there are more previously configured elements, or a |
1761 | * negative value if the current operation has failed. |
1762 | */ | |
1763 | static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc, | |
1764 | struct ecore_vlan_mac_obj *o, | |
1765 | unsigned long *vlan_mac_flags, | |
1766 | unsigned long *ramrod_flags) | |
1767 | { | |
1768 | struct ecore_vlan_mac_registry_elem *pos = NULL; | |
1769 | int rc = 0, read_lock; | |
1770 | struct ecore_vlan_mac_ramrod_params p; | |
1771 | struct ecore_exe_queue_obj *exeq = &o->exe_queue; | |
1772 | struct ecore_exeq_elem *exeq_pos, *exeq_pos_n; | |
1773 | ||
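| /* Deletion is done in two stages: first drop any not-yet-executed |
| * exe-queue commands that match the given flags, then queue a DEL |
| * command for every matching registry entry and execute them all in |
| * one final pass (RAMROD_CONT). */ |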
1774 | /* Clear pending commands first */ | |
1775 | ||
1776 | ECORE_SPIN_LOCK_BH(&exeq->lock); | |
1777 | ||
1778 | ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n, | |
1779 | &exeq->exe_queue, link, | |
1780 | struct ecore_exeq_elem) { | |
1781 | if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == | |
1782 | *vlan_mac_flags) { | |
1783 | rc = exeq->remove(sc, exeq->owner, exeq_pos); | |
1784 | if (rc) { | |
9f95a23c | 1785 | PMD_DRV_LOG(ERR, sc, "Failed to remove command"); |
1786 | ECORE_SPIN_UNLOCK_BH(&exeq->lock); |
1787 | return rc; | |
1788 | } | |
1789 | ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link, | |
1790 | &exeq->exe_queue); | |
1791 | ecore_exe_queue_free_elem(sc, exeq_pos); | |
1792 | } | |
1793 | } | |
1794 | ||
1795 | ECORE_SPIN_UNLOCK_BH(&exeq->lock); | |
1796 | ||
1797 | /* Prepare a command request */ | |
1798 | ECORE_MEMSET(&p, 0, sizeof(p)); | |
1799 | p.vlan_mac_obj = o; | |
1800 | p.ramrod_flags = *ramrod_flags; | |
1801 | p.user_req.cmd = ECORE_VLAN_MAC_DEL; | |
1802 | ||
1803 | /* Add all but the last VLAN-MAC to the execution queue without actually | |
1804 | * executing anything. |
1805 | */ | |
1806 | ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); | |
1807 | ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags); | |
1808 | ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags); | |
1809 | ||
9f95a23c | 1810 | ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)"); |
1811 | read_lock = ecore_vlan_mac_h_read_lock(sc, o); |
1812 | if (read_lock != ECORE_SUCCESS) | |
1813 | return read_lock; | |
1814 | ||
1815 | ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, | |
1816 | struct ecore_vlan_mac_registry_elem) { | |
1817 | if (pos->vlan_mac_flags == *vlan_mac_flags) { | |
1818 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; | |
1819 | ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u)); | |
1820 | rc = ecore_config_vlan_mac(sc, &p); | |
1821 | if (rc < 0) { | |
9f95a23c | 1822 | PMD_DRV_LOG(ERR, sc, |
1823 | "Failed to add a new DEL command"); |
1824 | ecore_vlan_mac_h_read_unlock(sc, o); | |
1825 | return rc; | |
1826 | } | |
1827 | } | |
1828 | } | |
1829 | ||
9f95a23c | 1830 | ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)"); |
1831 | ecore_vlan_mac_h_read_unlock(sc, o); |
1832 | ||
1833 | p.ramrod_flags = *ramrod_flags; | |
1834 | ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags); | |
1835 | ||
1836 | return ecore_config_vlan_mac(sc, &p); | |
1837 | } | |
1838 | ||
1839 | static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id, | |
1840 | uint32_t cid, uint8_t func_id, | |
1841 | void *rdata, | |
1842 | ecore_dma_addr_t rdata_mapping, int state, | |
1843 | unsigned long *pstate, ecore_obj_type type) | |
1844 | { | |
1845 | raw->func_id = func_id; | |
1846 | raw->cid = cid; | |
1847 | raw->cl_id = cl_id; | |
1848 | raw->rdata = rdata; | |
1849 | raw->rdata_mapping = rdata_mapping; | |
1850 | raw->state = state; | |
1851 | raw->pstate = pstate; | |
1852 | raw->obj_type = type; | |
1853 | raw->check_pending = ecore_raw_check_pending; | |
1854 | raw->clear_pending = ecore_raw_clear_pending; | |
1855 | raw->set_pending = ecore_raw_set_pending; | |
1856 | raw->wait_comp = ecore_raw_wait; | |
1857 | } | |
1858 | ||
1859 | static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o, | |
1860 | uint8_t cl_id, uint32_t cid, | |
1861 | uint8_t func_id, void *rdata, | |
1862 | ecore_dma_addr_t rdata_mapping, | |
1863 | int state, unsigned long *pstate, | |
1864 | ecore_obj_type type, | |
1865 | struct ecore_credit_pool_obj | |
1866 | *macs_pool, struct ecore_credit_pool_obj | |
1867 | *vlans_pool) | |
1868 | { | |
1869 | ECORE_LIST_INIT(&o->head); | |
1870 | o->head_reader = 0; | |
1871 | o->head_exe_request = FALSE; | |
1872 | o->saved_ramrod_flags = 0; | |
1873 | ||
1874 | o->macs_pool = macs_pool; | |
1875 | o->vlans_pool = vlans_pool; | |
1876 | ||
1877 | o->delete_all = ecore_vlan_mac_del_all; | |
1878 | o->restore = ecore_vlan_mac_restore; | |
1879 | o->complete = ecore_complete_vlan_mac; | |
1880 | o->wait = ecore_wait_vlan_mac; | |
1881 | ||
1882 | ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, | |
1883 | state, pstate, type); | |
1884 | } | |
1885 | ||
1886 | void ecore_init_mac_obj(struct bnx2x_softc *sc, | |
1887 | struct ecore_vlan_mac_obj *mac_obj, | |
1888 | uint8_t cl_id, uint32_t cid, uint8_t func_id, | |
1889 | void *rdata, ecore_dma_addr_t rdata_mapping, int state, | |
1890 | unsigned long *pstate, ecore_obj_type type, | |
1891 | struct ecore_credit_pool_obj *macs_pool) | |
1892 | { | |
1893 | union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj; | |
1894 | ||
1895 | ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, | |
1896 | rdata_mapping, state, pstate, type, | |
1897 | macs_pool, NULL); | |
1898 | ||
1899 | /* CAM credit pool handling */ | |
1900 | mac_obj->get_credit = ecore_get_credit_mac; | |
1901 | mac_obj->put_credit = ecore_put_credit_mac; | |
1902 | mac_obj->get_cam_offset = ecore_get_cam_offset_mac; | |
1903 | mac_obj->put_cam_offset = ecore_put_cam_offset_mac; | |
1904 | ||
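| /* E1x chips use the older per-MAC SET_MAC ramrod, so the execution |
| * chunk is a single command; E2 and newer chips use the |
| * CLASSIFICATION_RULES ramrod, which can carry up to |
| * CLASSIFY_RULES_COUNT rules at once. */ |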
1905 | if (CHIP_IS_E1x(sc)) { | |
1906 | mac_obj->set_one_rule = ecore_set_one_mac_e1x; | |
1907 | mac_obj->check_del = ecore_check_mac_del; | |
1908 | mac_obj->check_add = ecore_check_mac_add; | |
1909 | mac_obj->check_move = ecore_check_move_always_err; | |
1910 | mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; | |
1911 | ||
1912 | /* Exe Queue */ | |
1913 | ecore_exe_queue_init(sc, | |
1914 | &mac_obj->exe_queue, 1, qable_obj, | |
1915 | ecore_validate_vlan_mac, | |
1916 | ecore_remove_vlan_mac, | |
1917 | ecore_optimize_vlan_mac, | |
1918 | ecore_execute_vlan_mac, | |
1919 | ecore_exeq_get_mac); | |
1920 | } else { | |
1921 | mac_obj->set_one_rule = ecore_set_one_mac_e2; | |
1922 | mac_obj->check_del = ecore_check_mac_del; | |
1923 | mac_obj->check_add = ecore_check_mac_add; | |
1924 | mac_obj->check_move = ecore_check_move; | |
1925 | mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; | |
1926 | mac_obj->get_n_elements = ecore_get_n_elements; | |
1927 | ||
1928 | /* Exe Queue */ | |
1929 | ecore_exe_queue_init(sc, | |
1930 | &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, | |
1931 | qable_obj, ecore_validate_vlan_mac, | |
1932 | ecore_remove_vlan_mac, | |
1933 | ecore_optimize_vlan_mac, | |
1934 | ecore_execute_vlan_mac, | |
1935 | ecore_exeq_get_mac); | |
1936 | } | |
1937 | } | |
1938 | ||
1939 | /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ | |
1940 | static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct | |
1941 | tstorm_eth_mac_filter_config | |
1942 | *mac_filters, uint16_t pf_id) | |
1943 | { | |
1944 | size_t size = sizeof(struct tstorm_eth_mac_filter_config); | |
1945 | ||
1946 | uint32_t addr = BAR_TSTRORM_INTMEM + | |
1947 | TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); | |
1948 | ||
1949 | ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters); | |
1950 | } | |
1951 | ||
1952 | static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc, | |
1953 | struct ecore_rx_mode_ramrod_params *p) | |
1954 | { | |
1955 | /* update the sc MAC filter structure */ | |
1956 | uint32_t mask = (1 << p->cl_id); | |
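| /* Each client (cl_id) owns one bit in every accept/drop field below; |
| * only this client's bit is set or cleared. */ |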
1957 | ||
1958 | struct tstorm_eth_mac_filter_config *mac_filters = | |
1959 | (struct tstorm_eth_mac_filter_config *)p->rdata; | |
1960 | ||
1961 | /* initial setting is drop-all */ | |
1962 | uint8_t drop_all_ucast = 1, drop_all_mcast = 1; | |
1963 | uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; | |
1964 | uint8_t unmatched_unicast = 0; | |
1965 | ||
1966 | /* In e1x we only take into account the rx accept flag since tx switching |
1967 | * isn't enabled. */ | |
1968 | if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags)) | |
1969 | /* accept matched ucast */ | |
1970 | drop_all_ucast = 0; | |
1971 | ||
1972 | if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags)) | |
1973 | /* accept matched mcast */ | |
1974 | drop_all_mcast = 0; | |
1975 | ||
1976 | if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { | |
1977 | /* accept all ucast */ |
1978 | drop_all_ucast = 0; | |
1979 | accp_all_ucast = 1; | |
1980 | } | |
1981 | if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { | |
1982 | /* accept all mcast */ | |
1983 | drop_all_mcast = 0; | |
1984 | accp_all_mcast = 1; | |
1985 | } | |
1986 | if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags)) | |
1987 | /* accept (all) bcast */ | |
1988 | accp_all_bcast = 1; | |
1989 | if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags)) | |
1990 | /* accept unmatched unicasts */ | |
1991 | unmatched_unicast = 1; | |
1992 | ||
1993 | mac_filters->ucast_drop_all = drop_all_ucast ? | |
1994 | mac_filters->ucast_drop_all | mask : | |
1995 | mac_filters->ucast_drop_all & ~mask; | |
1996 | ||
1997 | mac_filters->mcast_drop_all = drop_all_mcast ? | |
1998 | mac_filters->mcast_drop_all | mask : | |
1999 | mac_filters->mcast_drop_all & ~mask; | |
2000 | ||
2001 | mac_filters->ucast_accept_all = accp_all_ucast ? | |
2002 | mac_filters->ucast_accept_all | mask : | |
2003 | mac_filters->ucast_accept_all & ~mask; | |
2004 | ||
2005 | mac_filters->mcast_accept_all = accp_all_mcast ? | |
2006 | mac_filters->mcast_accept_all | mask : | |
2007 | mac_filters->mcast_accept_all & ~mask; | |
2008 | ||
2009 | mac_filters->bcast_accept_all = accp_all_bcast ? | |
2010 | mac_filters->bcast_accept_all | mask : | |
2011 | mac_filters->bcast_accept_all & ~mask; | |
2012 | ||
2013 | mac_filters->unmatched_unicast = unmatched_unicast ? | |
2014 | mac_filters->unmatched_unicast | mask : | |
2015 | mac_filters->unmatched_unicast & ~mask; | |
2016 | ||
9f95a23c | 2017 | ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x" |
2018 | " accp_mcast 0x%x accp_bcast 0x%x", |
2019 | mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, | |
2020 | mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, | |
2021 | mac_filters->bcast_accept_all); | |
2022 | ||
2023 | /* write the MAC filter structure */ | |
2024 | __storm_memset_mac_filters(sc, mac_filters, p->func_id); | |
2025 | ||
2026 | /* The operation is completed */ | |
2027 | ECORE_CLEAR_BIT(p->state, p->pstate); | |
2028 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
2029 | ||
2030 | return ECORE_SUCCESS; | |
2031 | } | |
2032 | ||
2033 | /* Setup ramrod data */ | |
2034 | static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header | |
2035 | *hdr, uint8_t rule_cnt) | |
2036 | { | |
2037 | hdr->echo = ECORE_CPU_TO_LE32(cid); | |
2038 | hdr->rule_cnt = rule_cnt; | |
2039 | } | |
2040 | ||
2041 | static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd | |
2042 | *cmd, int clear_accept_all) | |
2043 | { | |
2044 | uint16_t state; | |
2045 | ||
2046 | /* start with 'drop-all' */ | |
2047 | state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | | |
2048 | ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | |
2049 | ||
2050 | if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags)) | |
2051 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | |
2052 | ||
2053 | if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags)) | |
2054 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | |
2055 | ||
2056 | if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) { | |
2057 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | |
2058 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; | |
2059 | } | |
2060 | ||
2061 | if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) { | |
2062 | state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; | |
2063 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | |
2064 | } | |
2065 | if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags)) | |
2066 | state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; | |
2067 | ||
2068 | if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) { | |
2069 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | |
2070 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; | |
2071 | } | |
2072 | if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags)) | |
2073 | state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; | |
2074 | ||
2075 | /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ | |
2076 | if (clear_accept_all) { | |
2077 | state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; | |
2078 | state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; | |
2079 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; | |
2080 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; | |
2081 | } | |
2082 | ||
2083 | cmd->state = ECORE_CPU_TO_LE16(state); | |
2084 | } | |
2085 | ||
2086 | static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc, | |
2087 | struct ecore_rx_mode_ramrod_params *p) | |
2088 | { | |
2089 | struct eth_filter_rules_ramrod_data *data = p->rdata; | |
2090 | int rc; | |
2091 | uint8_t rule_idx = 0; | |
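| /* Up to four rules may be filled in below: Tx and Rx for the regular |
| * L2 client, plus Tx and Rx for the FCoE client if requested. */ |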
2092 | ||
2093 | /* Reset the ramrod data buffer */ | |
2094 | ECORE_MEMSET(data, 0, sizeof(*data)); | |
2095 | ||
2096 | /* Setup ramrod data */ | |
2097 | ||
2098 | /* Tx (internal switching) */ | |
2099 | if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { | |
2100 | data->rules[rule_idx].client_id = p->cl_id; | |
2101 | data->rules[rule_idx].func_id = p->func_id; | |
2102 | ||
2103 | data->rules[rule_idx].cmd_general_data = | |
2104 | ETH_FILTER_RULES_CMD_TX_CMD; | |
2105 | ||
2106 | ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags, | |
2107 | &(data->rules[rule_idx++]), | |
2108 | FALSE); | |
2109 | } | |
2110 | ||
2111 | /* Rx */ | |
2112 | if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { | |
2113 | data->rules[rule_idx].client_id = p->cl_id; | |
2114 | data->rules[rule_idx].func_id = p->func_id; | |
2115 | ||
2116 | data->rules[rule_idx].cmd_general_data = | |
2117 | ETH_FILTER_RULES_CMD_RX_CMD; | |
2118 | ||
2119 | ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags, | |
2120 | &(data->rules[rule_idx++]), | |
2121 | FALSE); | |
2122 | } | |
2123 | ||
2124 | /* If FCoE Queue configuration has been requested, configure the Rx and |
2125 | * internal switching modes for this queue in separate rules. |
2126 | * |
2127 | * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort: |
2128 | * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. | |
2129 | */ | |
2130 | if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { | |
2131 | /* Tx (internal switching) */ | |
2132 | if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { | |
2133 | data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); | |
2134 | data->rules[rule_idx].func_id = p->func_id; | |
2135 | ||
2136 | data->rules[rule_idx].cmd_general_data = | |
2137 | ETH_FILTER_RULES_CMD_TX_CMD; | |
2138 | ||
2139 | ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags, | |
2140 | &(data->rules | |
2141 | [rule_idx++]), TRUE); | |
2142 | } | |
2143 | ||
2144 | /* Rx */ | |
2145 | if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { | |
2146 | data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); | |
2147 | data->rules[rule_idx].func_id = p->func_id; | |
2148 | ||
2149 | data->rules[rule_idx].cmd_general_data = | |
2150 | ETH_FILTER_RULES_CMD_RX_CMD; | |
2151 | ||
2152 | ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags, | |
2153 | &(data->rules | |
2154 | [rule_idx++]), TRUE); | |
2155 | } | |
2156 | } | |
2157 | ||
2158 | /* Set the ramrod header (most importantly - number of rules to | |
2159 | * configure). | |
2160 | */ | |
2161 | ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); | |
2162 | ||
2163 | ECORE_MSG |
2164 | (sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx", | |
2165 | data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); |
2166 | ||
2167 | /* No need for an explicit memory barrier here: the ordering of the |
2168 | * write to the SPQ element with respect to the update of the SPQ |
2169 | * producer (which involves a memory read) is guaranteed by the full |
2170 | * memory barrier inside ecore_sp_post(), so nothing weaker is |
2171 | * required at this point. |
2172 | */ | |
2173 | ||
2174 | /* Send a ramrod */ | |
2175 | rc = ecore_sp_post(sc, | |
2176 | RAMROD_CMD_ID_ETH_FILTER_RULES, | |
2177 | p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE); | |
2178 | if (rc) | |
2179 | return rc; | |
2180 | ||
2181 | /* Ramrod completion is pending */ | |
2182 | return ECORE_PENDING; | |
2183 | } | |
2184 | ||
2185 | static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc, | |
2186 | struct ecore_rx_mode_ramrod_params *p) | |
2187 | { | |
2188 | return ecore_state_wait(sc, p->state, p->pstate); | |
2189 | } | |
2190 | ||
2191 | static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc, | |
2192 | __rte_unused struct | |
2193 | ecore_rx_mode_ramrod_params *p) | |
2194 | { | |
2195 | /* Do nothing */ | |
2196 | return ECORE_SUCCESS; | |
2197 | } | |
2198 | ||
2199 | int ecore_config_rx_mode(struct bnx2x_softc *sc, | |
2200 | struct ecore_rx_mode_ramrod_params *p) | |
2201 | { | |
2202 | int rc; | |
2203 | ||
2204 | /* Configure the new classification in the chip */ | |
2205 | if (p->rx_mode_obj->config_rx_mode) { | |
2206 | rc = p->rx_mode_obj->config_rx_mode(sc, p); | |
2207 | if (rc < 0) | |
2208 | return rc; | |
2209 | ||
2210 | /* Wait for a ramrod completion if was requested */ | |
2211 | if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { | |
2212 | rc = p->rx_mode_obj->wait_comp(sc, p); | |
2213 | if (rc) | |
2214 | return rc; | |
2215 | } | |
2216 | } else { | |
9f95a23c | 2217 | ECORE_MSG(sc, "ERROR: config_rx_mode is NULL"); |
2218 | return -1; |
2219 | } | |
2220 | ||
2221 | return rc; | |
2222 | } | |
2223 | ||
2224 | void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o) | |
2225 | { | |
2226 | if (CHIP_IS_E1x(sc)) { | |
2227 | o->wait_comp = ecore_empty_rx_mode_wait; | |
2228 | o->config_rx_mode = ecore_set_rx_mode_e1x; | |
2229 | } else { | |
2230 | o->wait_comp = ecore_wait_rx_mode_comp_e2; | |
2231 | o->config_rx_mode = ecore_set_rx_mode_e2; | |
2232 | } | |
2233 | } | |
2234 | ||
2235 | /********************* Multicast verbs: SET, CLEAR ****************************/ | |
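| /* Multicast filtering here is approximate: each MAC is hashed (CRC32) |
| * into one of 256 bins, and the device accepts any frame whose |
| * destination MAC falls into a bin that is set. */ |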
2236 | static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac) | |
2237 | { | |
2238 | return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff; | |
2239 | } | |
2240 | ||
2241 | struct ecore_mcast_mac_elem { | |
2242 | ecore_list_entry_t link; | |
2243 | uint8_t mac[ETH_ALEN]; | |
2244 | uint8_t pad[2]; /* For a natural alignment of the following buffer */ | |
2245 | }; | |
2246 | ||
2247 | struct ecore_pending_mcast_cmd { | |
2248 | ecore_list_entry_t link; | |
2249 | int type; /* ECORE_MCAST_CMD_X */ | |
2250 | union { | |
2251 | ecore_list_t macs_head; | |
2252 | uint32_t macs_num; /* Needed for DEL command */ | |
2253 | int next_bin; /* Needed for RESTORE flow with aprox match */ | |
2254 | } data; | |
2255 | ||
2256 | int done; /* set to TRUE when the command has been handled; |
2257 | * practically used in 57712 handling only, where one pending |
2258 | * command may be handled in a few operations. Since on other |
2259 | * chips every operation is completed in a single ramrod, |
2260 | * there is no need to use this field. |
2261 | */ | |
2262 | }; | |
2263 | ||
2264 | static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o) | |
2265 | { | |
2266 | if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) || | |
2267 | o->raw.wait_comp(sc, &o->raw)) | |
2268 | return ECORE_TIMEOUT; | |
2269 | ||
2270 | return ECORE_SUCCESS; | |
2271 | } | |
2272 | ||
2273 | static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused, | |
2274 | struct ecore_mcast_obj *o, | |
2275 | struct ecore_mcast_ramrod_params *p, | |
2276 | enum ecore_mcast_cmd cmd) | |
2277 | { | |
2278 | int total_sz; | |
2279 | struct ecore_pending_mcast_cmd *new_cmd; | |
2280 | struct ecore_mcast_mac_elem *cur_mac = NULL; | |
2281 | struct ecore_mcast_list_elem *pos; | |
2282 | int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ? | |
2283 | p->mcast_list_len : 0); | |
2284 | ||
2285 | /* If the command is empty ("handle pending commands only"), break */ | |
2286 | if (!p->mcast_list_len) | |
2287 | return ECORE_SUCCESS; | |
2288 | ||
2289 | total_sz = sizeof(*new_cmd) + | |
2290 | macs_list_len * sizeof(struct ecore_mcast_mac_elem); | |
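| /* A single allocation holds the command header immediately followed by |
| * the array of MAC elements (used only by the ADD command). */ |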
2291 | ||
2292 | /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ | |
2293 | new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc); | |
2294 | ||
2295 | if (!new_cmd) | |
2296 | return ECORE_NOMEM; | |
2297 | ||
9f95a23c | 2298 | ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d", |
2299 | cmd, macs_list_len); |
2300 | ||
2301 | ECORE_LIST_INIT(&new_cmd->data.macs_head); | |
2302 | ||
2303 | new_cmd->type = cmd; | |
2304 | new_cmd->done = FALSE; | |
2305 | ||
2306 | switch (cmd) { | |
2307 | case ECORE_MCAST_CMD_ADD: | |
2308 | cur_mac = (struct ecore_mcast_mac_elem *) | |
2309 | ((uint8_t *) new_cmd + sizeof(*new_cmd)); | |
2310 | ||
2311 | /* Push the MACs of the current command into the pending command | |
2312 | * MACs list: FIFO | |
2313 | */ | |
2314 | ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link, | |
2315 | struct ecore_mcast_list_elem) { | |
2316 | ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN); | |
2317 | ECORE_LIST_PUSH_TAIL(&cur_mac->link, | |
2318 | &new_cmd->data.macs_head); | |
2319 | cur_mac++; | |
2320 | } | |
2321 | ||
2322 | break; | |
2323 | ||
2324 | case ECORE_MCAST_CMD_DEL: | |
2325 | new_cmd->data.macs_num = p->mcast_list_len; | |
2326 | break; | |
2327 | ||
2328 | case ECORE_MCAST_CMD_RESTORE: | |
2329 | new_cmd->data.next_bin = 0; | |
2330 | break; | |
2331 | ||
2332 | default: | |
2333 | ECORE_FREE(sc, new_cmd, total_sz); | |
9f95a23c | 2334 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); |
2335 | return ECORE_INVAL; |
2336 | } | |
2337 | ||
2338 | /* Push the new pending command to the tail of the pending list: FIFO */ | |
2339 | ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head); | |
2340 | ||
2341 | o->set_sched(o); | |
2342 | ||
2343 | return ECORE_PENDING; | |
2344 | } | |
2345 | ||
2346 | /** | |
2347 | * ecore_mcast_get_next_bin - get the next set bin (index) | |
2348 | * | |
2349 | * @o: multicast object to scan |
2350 | * @last: index to start looking from (inclusive) |
2351 | * | |
2352 | * returns the next found (set) bin or a negative value if none is found. | |
2353 | */ | |
2354 | static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last) | |
2355 | { | |
2356 | int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; | |
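| /* The approximate-match registry is a bit vector with one bit per bin, |
| * packed into 64-bit words (BIT_VEC64_ELEM_SZ bits each). */ |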
2357 | ||
2358 | for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) { | |
2359 | if (o->registry.aprox_match.vec[i]) | |
2360 | for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { | |
2361 | int cur_bit = j + BIT_VEC64_ELEM_SZ * i; | |
2362 | if (BIT_VEC64_TEST_BIT | |
2363 | (o->registry.aprox_match.vec, cur_bit)) { | |
2364 | return cur_bit; | |
2365 | } | |
2366 | } | |
2367 | inner_start = 0; | |
2368 | } | |
2369 | ||
2370 | /* None found */ | |
2371 | return -1; | |
2372 | } | |
2373 | ||
2374 | /** | |
2375 | * ecore_mcast_clear_first_bin - find the first set bin and clear it | |
2376 | * | |
2377 | * @o: multicast object |
2378 | * | |
2379 | * returns the index of the found bin or -1 if none is found | |
2380 | */ | |
2381 | static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o) | |
2382 | { | |
2383 | int cur_bit = ecore_mcast_get_next_bin(o, 0); | |
2384 | ||
2385 | if (cur_bit >= 0) | |
2386 | BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); | |
2387 | ||
2388 | return cur_bit; | |
2389 | } | |
2390 | ||
2391 | static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o) | |
2392 | { | |
2393 | struct ecore_raw_obj *raw = &o->raw; | |
2394 | uint8_t rx_tx_flag = 0; | |
2395 | ||
2396 | if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || | |
2397 | (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) | |
2398 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; | |
2399 | ||
2400 | if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || | |
2401 | (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) | |
2402 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; | |
2403 | ||
2404 | return rx_tx_flag; | |
2405 | } | |
2406 | ||
2407 | static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused, | |
2408 | struct ecore_mcast_obj *o, int idx, | |
2409 | union ecore_mcast_config_data *cfg_data, | |
2410 | enum ecore_mcast_cmd cmd) | |
2411 | { | |
2412 | struct ecore_raw_obj *r = &o->raw; | |
2413 | struct eth_multicast_rules_ramrod_data *data = | |
2414 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); | |
2415 | uint8_t func_id = r->func_id; | |
2416 | uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o); | |
2417 | int bin; | |
2418 | ||
2419 | if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) | |
2420 | rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; | |
2421 | ||
2422 | data->rules[idx].cmd_general_data |= rx_tx_add_flag; | |
2423 | ||
2424 | /* Get a bin and update the bins vector */ |
2425 | switch (cmd) { | |
2426 | case ECORE_MCAST_CMD_ADD: | |
2427 | bin = ecore_mcast_bin_from_mac(cfg_data->mac); | |
2428 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); | |
2429 | break; | |
2430 | ||
2431 | case ECORE_MCAST_CMD_DEL: | |
2432 | /* If there were no more bins to clear | |
2433 | * (ecore_mcast_clear_first_bin() returns -1) then we would | |
2434 | * clear any (0xff) bin. | |
2435 | * See ecore_mcast_validate_e2() for explanation when it may | |
2436 | * happen. | |
2437 | */ | |
2438 | bin = ecore_mcast_clear_first_bin(o); | |
2439 | break; | |
2440 | ||
2441 | case ECORE_MCAST_CMD_RESTORE: | |
2442 | bin = cfg_data->bin; | |
2443 | break; | |
2444 | ||
2445 | default: | |
9f95a23c | 2446 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); |
2447 | return; |
2448 | } | |
2449 | ||
9f95a23c | 2450 | ECORE_MSG(sc, "%s bin %d", |
2451 | ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? |
2452 | "Setting" : "Clearing"), bin); | |
2453 | ||
2454 | data->rules[idx].bin_id = (uint8_t) bin; | |
2455 | data->rules[idx].func_id = func_id; | |
2456 | data->rules[idx].engine_id = o->engine_id; | |
2457 | } | |
2458 | ||
2459 | /** | |
2460 | * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry | |
2461 | * | |
2462 | * @sc: device handle | |
2463 | * @o: multicast object |
2464 | * @start_bin: index in the registry to start from (including) | |
2465 | * @rdata_idx: index in the ramrod data to start from | |
2466 | * | |
2467 | * returns last handled bin index or -1 if all bins have been handled | |
2468 | */ | |
2469 | static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc, | |
2470 | struct ecore_mcast_obj *o, | |
2471 | int start_bin, int *rdata_idx) | |
2472 | { | |
2473 | int cur_bin, cnt = *rdata_idx; | |
2474 | union ecore_mcast_config_data cfg_data = { NULL }; | |
2475 | ||
2476 | /* go through the registry and configure the bins from it */ | |
2477 | for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0; | |
2478 | cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) { | |
2479 | ||
2480 | cfg_data.bin = (uint8_t) cur_bin; | |
2481 | o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE); | |
2482 | ||
2483 | cnt++; | |
2484 | ||
9f95a23c | 2485 | ECORE_MSG(sc, "About to configure a bin %d", cur_bin); |
2486 | |
2487 | /* Break if we reached the maximum number | |
2488 | * of rules. | |
2489 | */ | |
2490 | if (cnt >= o->max_cmd_len) | |
2491 | break; | |
2492 | } | |
2493 | ||
2494 | *rdata_idx = cnt; | |
2495 | ||
2496 | return cur_bin; | |
2497 | } | |
2498 | ||
2499 | static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc, | |
2500 | struct ecore_mcast_obj *o, | |
2501 | struct ecore_pending_mcast_cmd | |
2502 | *cmd_pos, int *line_idx) | |
2503 | { | |
2504 | struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n; | |
2505 | int cnt = *line_idx; | |
2506 | union ecore_mcast_config_data cfg_data = { NULL }; | |
2507 | ||
2508 | ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n, | |
2509 | &cmd_pos->data.macs_head, link, | |
2510 | struct ecore_mcast_mac_elem) { | |
2511 | ||
2512 | cfg_data.mac = &pmac_pos->mac[0]; | |
2513 | o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type); | |
2514 | ||
2515 | cnt++; | |
2516 | ||
2517 | ECORE_MSG |
2518 | (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", | |
2519 | pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], |
2520 | pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); | |
2521 | ||
2522 | ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link, | |
2523 | &cmd_pos->data.macs_head); | |
2524 | ||
2525 | /* Break if we reached the maximum number | |
2526 | * of rules. | |
2527 | */ | |
2528 | if (cnt >= o->max_cmd_len) | |
2529 | break; | |
2530 | } | |
2531 | ||
2532 | *line_idx = cnt; | |
2533 | ||
2534 | /* if no more MACs to configure - we are done */ | |
2535 | if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head)) | |
2536 | cmd_pos->done = TRUE; | |
2537 | } | |
2538 | ||
2539 | static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc, | |
2540 | struct ecore_mcast_obj *o, | |
2541 | struct ecore_pending_mcast_cmd | |
2542 | *cmd_pos, int *line_idx) | |
2543 | { | |
2544 | int cnt = *line_idx; | |
2545 | ||
2546 | while (cmd_pos->data.macs_num) { | |
2547 | o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type); | |
2548 | ||
2549 | cnt++; | |
2550 | ||
2551 | cmd_pos->data.macs_num--; | |
2552 | ||
9f95a23c | 2553 | ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d", |
2554 | cmd_pos->data.macs_num, cnt); |
2555 | ||
2556 | /* Break if we reached the maximum | |
2557 | * number of rules. | |
2558 | */ | |
2559 | if (cnt >= o->max_cmd_len) | |
2560 | break; | |
2561 | } | |
2562 | ||
2563 | *line_idx = cnt; | |
2564 | ||
2565 | /* If we cleared all bins - we are done */ | |
2566 | if (!cmd_pos->data.macs_num) | |
2567 | cmd_pos->done = TRUE; | |
2568 | } | |
2569 | ||
2570 | static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc, | |
2571 | struct ecore_mcast_obj *o, struct | |
2572 | ecore_pending_mcast_cmd | |
2573 | *cmd_pos, int *line_idx) | |
2574 | { | |
2575 | cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin, | |
2576 | line_idx); | |
2577 | ||
2578 | if (cmd_pos->data.next_bin < 0) | |
2579 | /* If o->set_restore returned -1 we are done */ | |
2580 | cmd_pos->done = TRUE; | |
2581 | else | |
2582 | /* Start from the next bin next time */ | |
2583 | cmd_pos->data.next_bin++; | |
2584 | } | |
2585 | ||
2586 | static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct | |
2587 | ecore_mcast_ramrod_params | |
2588 | *p) | |
2589 | { | |
2590 | struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n; | |
2591 | int cnt = 0; | |
2592 | struct ecore_mcast_obj *o = p->mcast_obj; | |
2593 | ||
2594 | ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n, | |
2595 | &o->pending_cmds_head, link, | |
2596 | struct ecore_pending_mcast_cmd) { | |
2597 | switch (cmd_pos->type) { | |
2598 | case ECORE_MCAST_CMD_ADD: | |
2599 | ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt); | |
2600 | break; | |
2601 | ||
2602 | case ECORE_MCAST_CMD_DEL: | |
2603 | ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt); | |
2604 | break; | |
2605 | ||
2606 | case ECORE_MCAST_CMD_RESTORE: | |
2607 | ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos, | |
2608 | &cnt); | |
2609 | break; | |
2610 | ||
2611 | default: | |
2612 | PMD_DRV_LOG(ERR, sc, |
2613 | "Unknown command: %d", cmd_pos->type); | |
2614 | return ECORE_INVAL; |
2615 | } | |
2616 | ||
2617 | /* If the command has been completed - remove it from the list | |
2618 | * and free the memory | |
2619 | */ | |
2620 | if (cmd_pos->done) { | |
2621 | ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, | |
2622 | &o->pending_cmds_head); | |
2623 | ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len); | |
2624 | } | |
2625 | ||
2626 | /* Break if we reached the maximum number of rules */ | |
2627 | if (cnt >= o->max_cmd_len) | |
2628 | break; | |
2629 | } | |
2630 | ||
2631 | return cnt; | |
2632 | } | |
2633 | ||
2634 | static void ecore_mcast_hdl_add(struct bnx2x_softc *sc, | |
2635 | struct ecore_mcast_obj *o, | |
2636 | struct ecore_mcast_ramrod_params *p, | |
2637 | int *line_idx) | |
2638 | { | |
2639 | struct ecore_mcast_list_elem *mlist_pos; | |
2640 | union ecore_mcast_config_data cfg_data = { NULL }; | |
2641 | int cnt = *line_idx; | |
2642 | ||
2643 | ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, | |
2644 | struct ecore_mcast_list_elem) { | |
2645 | cfg_data.mac = mlist_pos->mac; | |
2646 | o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD); | |
2647 | ||
2648 | cnt++; | |
2649 | ||
2650 | ECORE_MSG |
2651 | (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", | |
2652 | mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], |
2653 | mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]); | |
2654 | } | |
2655 | ||
2656 | *line_idx = cnt; | |
2657 | } | |
2658 | ||
2659 | static void ecore_mcast_hdl_del(struct bnx2x_softc *sc, | |
2660 | struct ecore_mcast_obj *o, | |
2661 | struct ecore_mcast_ramrod_params *p, | |
2662 | int *line_idx) | |
2663 | { | |
2664 | int cnt = *line_idx, i; | |
2665 | ||
2666 | for (i = 0; i < p->mcast_list_len; i++) { | |
2667 | o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL); | |
2668 | ||
2669 | cnt++; | |
2670 | ||
2671 | ECORE_MSG(sc, |
2672 | "Deleting MAC. %d left", p->mcast_list_len - i - 1); | |
2673 | } |
2674 | ||
2675 | *line_idx = cnt; | |
2676 | } | |
2677 | ||
2678 | /** | |
2679 | * ecore_mcast_handle_current_cmd - handle the current (not yet pending) command |
2680 | * |
2681 | * @sc: device handle |
2682 | * @p: multicast ramrod parameters |
2683 | * @cmd: command to handle (ADD/DEL/RESTORE) |
2684 | * @start_cnt: first line in the ramrod data that may be used | |
2685 | * | |
11fdf7f2 | 2686 | * This function is called if there is enough room for the current command in |
2687 | * the ramrod data. |
2688 | * Returns number of lines filled in the ramrod data in total. | |
2689 | */ | |
2690 | static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct | |
2691 | ecore_mcast_ramrod_params *p, | |
2692 | enum ecore_mcast_cmd cmd, | |
2693 | int start_cnt) | |
2694 | { | |
2695 | struct ecore_mcast_obj *o = p->mcast_obj; | |
2696 | int cnt = start_cnt; | |
2697 | ||
9f95a23c | 2698 | ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len); |
2699 | |
2700 | switch (cmd) { | |
2701 | case ECORE_MCAST_CMD_ADD: | |
2702 | ecore_mcast_hdl_add(sc, o, p, &cnt); | |
2703 | break; | |
2704 | ||
2705 | case ECORE_MCAST_CMD_DEL: | |
2706 | ecore_mcast_hdl_del(sc, o, p, &cnt); | |
2707 | break; | |
2708 | ||
2709 | case ECORE_MCAST_CMD_RESTORE: | |
2710 | o->hdl_restore(sc, o, 0, &cnt); | |
2711 | break; | |
2712 | ||
2713 | default: | |
9f95a23c | 2714 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); |
2715 | return ECORE_INVAL; |
2716 | } | |
2717 | ||
2718 | /* The current command has been handled */ | |
2719 | p->mcast_list_len = 0; | |
2720 | ||
2721 | return cnt; | |
2722 | } | |
2723 | ||
2724 | static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc, | |
2725 | struct ecore_mcast_ramrod_params *p, | |
2726 | enum ecore_mcast_cmd cmd) | |
2727 | { | |
2728 | struct ecore_mcast_obj *o = p->mcast_obj; | |
2729 | int reg_sz = o->get_registry_size(o); | |
2730 | ||
2731 | switch (cmd) { | |
2732 | /* DEL command deletes all currently configured MACs */ | |
2733 | case ECORE_MCAST_CMD_DEL: | |
2734 | o->set_registry_size(o, 0); | |
11fdf7f2 | 2735 | /* fall-through */ |
2736 | |
2737 | /* RESTORE command will restore the entire multicast configuration */ | |
2738 | case ECORE_MCAST_CMD_RESTORE: | |
2739 | /* Here we set the approximate amount of work to do, which in | |
2740 | * fact may turn out to be less, as some MACs in postponed ADD |
2741 | * command(s) scheduled before this command may fall into | |
2742 | * the same bin and the actual number of bins set in the | |
2743 | * registry would be less than we estimated here. See | |
2744 | * ecore_mcast_set_one_rule_e2() for further details. | |
2745 | */ | |
2746 | p->mcast_list_len = reg_sz; | |
2747 | break; | |
2748 | ||
2749 | case ECORE_MCAST_CMD_ADD: | |
2750 | case ECORE_MCAST_CMD_CONT: | |
2751 | /* Here we assume that all new MACs will fall into new bins. | |
2752 | * However we will correct the real registry size after we | |
2753 | * handle all pending commands. | |
2754 | */ | |
2755 | o->set_registry_size(o, reg_sz + p->mcast_list_len); | |
2756 | break; | |
2757 | ||
2758 | default: | |
9f95a23c | 2759 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); |
2760 | return ECORE_INVAL; |
2761 | } | |
2762 | ||
2763 | /* Increase the total number of MACs pending to be configured */ | |
2764 | o->total_pending_num += p->mcast_list_len; | |
2765 | ||
2766 | return ECORE_SUCCESS; | |
2767 | } | |
2768 | ||
2769 | static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc, | |
2770 | struct ecore_mcast_ramrod_params *p, | |
2771 | int old_num_bins) | |
2772 | { | |
2773 | struct ecore_mcast_obj *o = p->mcast_obj; | |
2774 | ||
2775 | o->set_registry_size(o, old_num_bins); | |
2776 | o->total_pending_num -= p->mcast_list_len; | |
2777 | } | |
2778 | ||
2779 | /** | |
2780 | * ecore_mcast_set_rdata_hdr_e2 - sets the header values |
2781 | * |
2782 | * @sc: device handle |
2783 | * @p: multicast ramrod parameters |
2784 | * @len: number of rules to handle | |
2785 | */ | |
2786 | static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc | |
2787 | *sc, struct ecore_mcast_ramrod_params | |
2788 | *p, uint8_t len) | |
2789 | { | |
2790 | struct ecore_raw_obj *r = &p->mcast_obj->raw; | |
2791 | struct eth_multicast_rules_ramrod_data *data = | |
2792 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); | |
2793 | ||
2794 | data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | | |
2795 | (ECORE_FILTER_MCAST_PENDING << | |
2796 | ECORE_SWCID_SHIFT)); | |
2797 | data->header.rule_cnt = len; | |
2798 | } | |
2799 | ||
2800 | /** | |
2801 | * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins | |
2802 | * | |
2803 | * @sc: device handle | |
2804 | * @o: multicast object |
2805 | * | |
2806 | * Recalculate the actual number of set bins in the registry using Brian | |
2807 | * Kernighan's algorithm: its complexity is proportional to the number of set bins. |
2808 | */ | |
2809 | static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o) | |
2810 | { | |
2811 | int i, cnt = 0; | |
2812 | uint64_t elem; | |
2813 | ||
2814 | for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) { | |
2815 | elem = o->registry.aprox_match.vec[i]; | |
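| /* Kernighan's trick: "elem &= elem - 1" clears the lowest set bit, |
| * e.g. 0b1100 -> 0b1000 -> 0, so the inner loop runs once per set bit. */ |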
2816 | for (; elem; cnt++) | |
2817 | elem &= elem - 1; | |
2818 | } | |
2819 | ||
2820 | o->set_registry_size(o, cnt); | |
2821 | ||
2822 | return ECORE_SUCCESS; | |
2823 | } | |
2824 | ||
2825 | static int ecore_mcast_setup_e2(struct bnx2x_softc *sc, | |
2826 | struct ecore_mcast_ramrod_params *p, | |
2827 | enum ecore_mcast_cmd cmd) | |
2828 | { | |
2829 | struct ecore_raw_obj *raw = &p->mcast_obj->raw; | |
2830 | struct ecore_mcast_obj *o = p->mcast_obj; | |
2831 | struct eth_multicast_rules_ramrod_data *data = | |
2832 | (struct eth_multicast_rules_ramrod_data *)(raw->rdata); | |
2833 | int cnt = 0, rc; | |
2834 | ||
2835 | /* Reset the ramrod data buffer */ | |
2836 | ECORE_MEMSET(data, 0, sizeof(*data)); | |
2837 | ||
2838 | cnt = ecore_mcast_handle_pending_cmds_e2(sc, p); | |
2839 | ||
2840 | /* If there are no more pending commands - clear SCHEDULED state */ | |
2841 | if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) | |
2842 | o->clear_sched(o); | |
2843 | ||
11fdf7f2 | 2844 | /* The condition below may be TRUE if there was enough room in the ramrod |
2845 | * data for all pending commands and for the current |
2846 | * command. Otherwise the current command would have been added | |
2847 | * to the pending commands and p->mcast_list_len would have been | |
2848 | * zeroed. | |
2849 | */ | |
2850 | if (p->mcast_list_len > 0) | |
2851 | cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt); | |
2852 | ||
2853 | /* We've pulled out some MACs - update the total number of | |
2854 | * outstanding. | |
2855 | */ | |
2856 | o->total_pending_num -= cnt; | |
2857 | ||
2858 | /* send a ramrod */ | |
2859 | ECORE_DBG_BREAK_IF(o->total_pending_num < 0); | |
2860 | ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len); | |
2861 | ||
2862 | ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt); | |
2863 | ||
2864 | /* Update a registry size if there are no more pending operations. | |
2865 | * | |
2866 | * We don't want to change the value of the registry size if there are | |
2867 | * pending operations because we want it to always be equal to the | |
2868 | * exact or the approximate number (see ecore_mcast_validate_e2()) of | |
2869 | * set bins after the last requested operation in order to properly | |
2870 | * evaluate the size of the next DEL/RESTORE operation. | |
2871 | * | |
2872 | * Note that we update the registry itself during command(s) handling | |
2873 | * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we | |
2874 | * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but | |
2875 | * with a limited amount of update commands (per MAC/bin) and we don't | |
2876 | * know in this scope what the actual state of bins configuration is | |
2877 | * going to be after this ramrod. | |
2878 | */ | |
2879 | if (!o->total_pending_num) | |
2880 | ecore_mcast_refresh_registry_e2(o); | |
2881 | ||
2882 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear | |
2883 | * RAMROD_PENDING status immediately. | |
2884 | */ | |
2885 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | |
2886 | raw->clear_pending(raw); | |
2887 | return ECORE_SUCCESS; | |
2888 | } else { | |
2889 | /* No need for an explicit memory barrier here: the ordering of the |
2890 | * write to the SPQ element with respect to the update of the SPQ |
2891 | * producer (which involves a memory read) is guaranteed by the full |
2892 | * memory barrier inside ecore_sp_post(), so nothing weaker is |
2893 | * required at this point. |
2894 | */ | |
2895 | ||
2896 | /* Send a ramrod */ | |
2897 | rc = ecore_sp_post(sc, | |
2898 | RAMROD_CMD_ID_ETH_MULTICAST_RULES, | |
2899 | raw->cid, | |
2900 | raw->rdata_mapping, ETH_CONNECTION_TYPE); | |
2901 | if (rc) | |
2902 | return rc; | |
2903 | ||
2904 | /* Ramrod completion is pending */ | |
2905 | return ECORE_PENDING; | |
2906 | } | |
2907 | } | |
2908 | ||
2909 | static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc, | |
2910 | struct ecore_mcast_ramrod_params *p, | |
2911 | enum ecore_mcast_cmd cmd) | |
2912 | { | |
2913 | /* Mark, that there is a work to do */ | |
2914 | if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE)) | |
2915 | p->mcast_list_len = 1; | |
2916 | ||
2917 | return ECORE_SUCCESS; | |
2918 | } | |
2919 | ||
2920 | static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc, | |
2921 | __rte_unused struct ecore_mcast_ramrod_params | |
2922 | *p, __rte_unused int old_num_bins) | |
2923 | { | |
2924 | /* Do nothing */ | |
2925 | } | |
2926 | ||
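| /* The 256-bin filter is stored as ECORE_MC_HASH_SIZE 32-bit words: |
| * bit >> 5 selects the word and bit & 0x1f the bit within it. */ |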
2927 | #define ECORE_57711_SET_MC_FILTER(filter, bit) \ | |
2928 | do { \ | |
2929 | (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ | |
2930 | } while (0) | |
2931 | ||
2932 | static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused, | |
2933 | struct ecore_mcast_obj *o, | |
2934 | struct ecore_mcast_ramrod_params *p, | |
2935 | uint32_t * mc_filter) | |
2936 | { | |
2937 | struct ecore_mcast_list_elem *mlist_pos; | |
2938 | int bit; | |
2939 | ||
2940 | ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, | |
2941 | struct ecore_mcast_list_elem) { | |
2942 | bit = ecore_mcast_bin_from_mac(mlist_pos->mac); | |
2943 | ECORE_57711_SET_MC_FILTER(mc_filter, bit); | |
2944 | ||
2945 | ECORE_MSG |
2946 | (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d", | |
2947 | mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], |
2948 | mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], | |
2949 | bit); | |
2950 | ||
2951 | /* bookkeeping... */ | |
2952 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit); | |
2953 | } | |
2954 | } | |
2955 | ||
2956 | static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc | |
2957 | __rte_unused, | |
2958 | struct ecore_mcast_obj *o, | |
2959 | uint32_t * mc_filter) | |
2960 | { | |
2961 | int bit; | |
2962 | ||
2963 | for (bit = ecore_mcast_get_next_bin(o, 0); | |
2964 | bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) { | |
2965 | ECORE_57711_SET_MC_FILTER(mc_filter, bit); | |
9f95a23c | 2966 | ECORE_MSG(sc, "About to set bin %d", bit); |
2967 | } |
2968 | } | |
2969 | ||
2970 | /* On 57711 we write the multicast MACs' approximate match | |
2971 | * table directly into the TSTORM's internal RAM, so we don't |
2972 | * really need any special tricks to make it work. |
2973 | */ | |
2974 | static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc, | |
2975 | struct ecore_mcast_ramrod_params *p, | |
2976 | enum ecore_mcast_cmd cmd) | |
2977 | { | |
2978 | int i; | |
2979 | struct ecore_mcast_obj *o = p->mcast_obj; | |
2980 | struct ecore_raw_obj *r = &o->raw; | |
2981 | ||
2982 | /* If CLEAR_ONLY has been requested - clear the registry | |
2983 | * and clear a pending bit. | |
2984 | */ | |
2985 | if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | |
2986 | uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 }; | |
2987 | ||
2988 | /* Set the multicast filter bits before writing it into | |
2989 | * the internal memory. | |
2990 | */ | |
2991 | switch (cmd) { | |
2992 | case ECORE_MCAST_CMD_ADD: | |
2993 | ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter); | |
2994 | break; | |
2995 | ||
2996 | case ECORE_MCAST_CMD_DEL: | |
9f95a23c | 2997 | ECORE_MSG(sc, "Invalidating multicast MACs configuration"); |
2998 | |
2999 | /* clear the registry */ | |
3000 | ECORE_MEMSET(o->registry.aprox_match.vec, 0, | |
3001 | sizeof(o->registry.aprox_match.vec)); | |
3002 | break; | |
3003 | ||
3004 | case ECORE_MCAST_CMD_RESTORE: | |
3005 | ecore_mcast_hdl_restore_e1h(sc, o, mc_filter); | |
3006 | break; | |
3007 | ||
3008 | default: | |
9f95a23c | 3009 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); |
3010 | return ECORE_INVAL; |
3011 | } | |
3012 | ||
3013 | /* Set the mcast filter in the internal memory */ | |
3014 | for (i = 0; i < ECORE_MC_HASH_SIZE; i++) | |
3015 | REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]); | |
3016 | } else | |
3017 | /* clear the registry */ | |
3018 | ECORE_MEMSET(o->registry.aprox_match.vec, 0, | |
3019 | sizeof(o->registry.aprox_match.vec)); | |
3020 | ||
3021 | /* We are done */ | |
3022 | r->clear_pending(r); | |
3023 | ||
3024 | return ECORE_SUCCESS; | |
3025 | } | |
3026 | ||
3027 | static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o) | |
3028 | { | |
3029 | return o->registry.aprox_match.num_bins_set; | |
3030 | } | |
3031 | ||
3032 | static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o, | |
3033 | int n) | |
3034 | { | |
3035 | o->registry.aprox_match.num_bins_set = n; | |
3036 | } | |
3037 | ||
3038 | int ecore_config_mcast(struct bnx2x_softc *sc, | |
3039 | struct ecore_mcast_ramrod_params *p, | |
3040 | enum ecore_mcast_cmd cmd) | |
3041 | { | |
3042 | struct ecore_mcast_obj *o = p->mcast_obj; | |
3043 | struct ecore_raw_obj *r = &o->raw; | |
3044 | int rc = 0, old_reg_size; | |
3045 | ||
3046 | /* This is needed to recover the number of currently configured mcast MACs |
3047 | * in case of failure. | |
3048 | */ | |
3049 | old_reg_size = o->get_registry_size(o); | |
3050 | ||
3051 | /* Do some calculations and checks */ | |
3052 | rc = o->validate(sc, p, cmd); | |
3053 | if (rc) | |
3054 | return rc; | |
3055 | ||
3056 | /* Return if there is no work to do */ | |
3057 | if ((!p->mcast_list_len) && (!o->check_sched(o))) | |
3058 | return ECORE_SUCCESS; | |
3059 | ||
3060 | ECORE_MSG |
3061 | (sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d", | |
3062 | o->total_pending_num, p->mcast_list_len, o->max_cmd_len); |
3063 | ||
3064 | /* Enqueue the current command to the pending list if we can't complete | |
3065 | * it in the current iteration | |
3066 | */ | |
3067 | if (r->check_pending(r) || | |
3068 | ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { | |
3069 | rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd); | |
3070 | if (rc < 0) | |
3071 | goto error_exit1; | |
3072 | ||
3073 | /* As long as the current command is in a command list we | |
3074 | * don't need to handle it separately. | |
3075 | */ | |
3076 | p->mcast_list_len = 0; | |
3077 | } | |
3078 | ||
3079 | if (!r->check_pending(r)) { | |
3080 | ||
3081 | /* Set 'pending' state */ | |
3082 | r->set_pending(r); | |
3083 | ||
3084 | /* Configure the new classification in the chip */ | |
3085 | rc = o->config_mcast(sc, p, cmd); | |
3086 | if (rc < 0) | |
3087 | goto error_exit2; | |
3088 | ||
3089 | /* Wait for a ramrod completion if one was requested */ | |
3090 | if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) | |
3091 | rc = o->wait_comp(sc, o); | |
3092 | } | |
3093 | ||
3094 | return rc; | |
3095 | ||
3096 | error_exit2: | |
3097 | r->clear_pending(r); | |
3098 | ||
3099 | error_exit1: | |
3100 | o->revert(sc, p, old_reg_size); | |
3101 | ||
3102 | return rc; | |
3103 | } | |
3104 | ||
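/* Illustrative usage sketch, not part of the driver sources: it assumes an
 * initialized struct bnx2x_softc *sc whose multicast object was set up with
 * ecore_init_mcast_obj(), a caller-built MAC list already linked into the
 * ramrod params (list construction elided), and the hypothetical names
 * sc->mcast_obj and mc_count.
 *
 *	struct ecore_mcast_ramrod_params rparams = { 0 };
 *	int rc;
 *
 *	rparams.mcast_obj = &sc->mcast_obj;	// field name assumed
 *	rparams.mcast_list_len = mc_count;	// number of MACs in the list
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &rparams.ramrod_flags);
 *
 *	rc = ecore_config_mcast(sc, &rparams, ECORE_MCAST_CMD_ADD);
 *	if (rc < 0)
 *		PMD_DRV_LOG(ERR, sc, "Failed to configure multicast MACs: %d", rc);
 */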
3105 | static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o) | |
3106 | { | |
3107 | ECORE_SMP_MB_BEFORE_CLEAR_BIT(); | |
3108 | ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate); | |
3109 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
3110 | } | |
3111 | ||
3112 | static void ecore_mcast_set_sched(struct ecore_mcast_obj *o) | |
3113 | { | |
3114 | ECORE_SMP_MB_BEFORE_CLEAR_BIT(); | |
3115 | ECORE_SET_BIT(o->sched_state, o->raw.pstate); | |
3116 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
3117 | } | |
3118 | ||
3119 | static int ecore_mcast_check_sched(struct ecore_mcast_obj *o) | |
3120 | { | |
3121 | return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate); | |
3122 | } | |
3123 | ||
3124 | static int ecore_mcast_check_pending(struct ecore_mcast_obj *o) | |
3125 | { | |
3126 | return o->raw.check_pending(&o->raw) || o->check_sched(o); | |
3127 | } | |
3128 | ||
3129 | void ecore_init_mcast_obj(struct bnx2x_softc *sc, | |
3130 | struct ecore_mcast_obj *mcast_obj, | |
3131 | uint8_t mcast_cl_id, uint32_t mcast_cid, | |
3132 | uint8_t func_id, uint8_t engine_id, void *rdata, | |
3133 | ecore_dma_addr_t rdata_mapping, int state, | |
3134 | unsigned long *pstate, ecore_obj_type type) | |
3135 | { | |
3136 | ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj)); | |
3137 | ||
3138 | ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, | |
3139 | rdata, rdata_mapping, state, pstate, type); | |
3140 | ||
3141 | mcast_obj->engine_id = engine_id; | |
3142 | ||
3143 | ECORE_LIST_INIT(&mcast_obj->pending_cmds_head); | |
3144 | ||
3145 | mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED; | |
3146 | mcast_obj->check_sched = ecore_mcast_check_sched; | |
3147 | mcast_obj->set_sched = ecore_mcast_set_sched; | |
3148 | mcast_obj->clear_sched = ecore_mcast_clear_sched; | |
3149 | ||
3150 | if (CHIP_IS_E1H(sc)) { | |
3151 | mcast_obj->config_mcast = ecore_mcast_setup_e1h; | |
3152 | mcast_obj->enqueue_cmd = NULL; | |
3153 | mcast_obj->hdl_restore = NULL; | |
3154 | mcast_obj->check_pending = ecore_mcast_check_pending; | |
3155 | ||
3156 | /* 57711 doesn't send a ramrod, so it has unlimited credit | |
3157 | * for one command. | |
3158 | */ | |
3159 | mcast_obj->max_cmd_len = -1; | |
3160 | mcast_obj->wait_comp = ecore_mcast_wait; | |
3161 | mcast_obj->set_one_rule = NULL; | |
3162 | mcast_obj->validate = ecore_mcast_validate_e1h; | |
3163 | mcast_obj->revert = ecore_mcast_revert_e1h; | |
3164 | mcast_obj->get_registry_size = | |
3165 | ecore_mcast_get_registry_size_aprox; | |
3166 | mcast_obj->set_registry_size = | |
3167 | ecore_mcast_set_registry_size_aprox; | |
3168 | } else { | |
3169 | mcast_obj->config_mcast = ecore_mcast_setup_e2; | |
3170 | mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; | |
3171 | mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2; | |
3172 | mcast_obj->check_pending = ecore_mcast_check_pending; | |
3173 | mcast_obj->max_cmd_len = 16; | |
3174 | mcast_obj->wait_comp = ecore_mcast_wait; | |
3175 | mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2; | |
3176 | mcast_obj->validate = ecore_mcast_validate_e2; | |
3177 | mcast_obj->revert = ecore_mcast_revert_e2; | |
3178 | mcast_obj->get_registry_size = | |
3179 | ecore_mcast_get_registry_size_aprox; | |
3180 | mcast_obj->set_registry_size = | |
3181 | ecore_mcast_set_registry_size_aprox; | |
3182 | } | |
3183 | } | |
3184 | ||
3185 | /*************************** Credit handling **********************************/ | |
3186 | ||
3187 | /** | |
3188 | * atomic_add_ifless - add if the result is less than a given value. | |
3189 | * | |
3190 | * @v: pointer of type ecore_atomic_t | |
3191 | * @a: the amount to add to v... | |
3192 | * @u: ...if (v + a) is less than u. | |
3193 | * | |
3194 | * returns TRUE if (v + a) was less than u, and FALSE otherwise. | |
3195 | * | |
3196 | */ | |
3197 | static int __atomic_add_ifless(ecore_atomic_t *v, int a, int u) | |
3198 | { | |
3199 | int c, old; | |
3200 | ||
3201 | c = ECORE_ATOMIC_READ(v); | |
3202 | for (;;) { | |
3203 | if (ECORE_UNLIKELY(c + a >= u)) | |
3204 | return FALSE; | |
3205 | ||
3206 | old = ECORE_ATOMIC_CMPXCHG((v), c, c + a); | |
3207 | if (ECORE_LIKELY(old == c)) | |
3208 | break; | |
3209 | c = old; | |
3210 | } | |
3211 | ||
3212 | return TRUE; | |
3213 | } | |
3214 | ||
3215 | /** | |
3216 | * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value. | |
3217 | * | |
3218 | * @v: pointer of type ecore_atomic_t | |
3219 | * @a: the amount to subtract from v... | |
3220 | * @u: ...if (v - a) is greater than or equal to u. | |
3221 | * | |
3222 | * returns TRUE if (v - a) was greater than or equal to u, and FALSE | |
3223 | * otherwise. | |
3224 | */ | |
3225 | static int __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u) | |
3226 | { | |
3227 | int c, old; | |
3228 | ||
3229 | c = ECORE_ATOMIC_READ(v); | |
3230 | for (;;) { | |
3231 | if (ECORE_UNLIKELY(c - a < u)) | |
3232 | return FALSE; | |
3233 | ||
3234 | old = ECORE_ATOMIC_CMPXCHG((v), c, c - a); | |
3235 | if (ECORE_LIKELY(old == c)) | |
3236 | break; | |
3237 | c = old; | |
3238 | } | |
3239 | ||
3240 | return TRUE; | |
3241 | } | |
3242 | ||
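/* Minimal worked example of how the two compare-and-swap helpers above give
 * a bounded credit counter (the figures are made up for illustration):
 * with pool_sz == 8 and credit currently 3,
 *
 *	__atomic_dec_ifmoe(&o->credit, 5, 0);	// 3 - 5 < 0  -> FALSE, credit stays 3
 *	__atomic_dec_ifmoe(&o->credit, 2, 0);	// 3 - 2 >= 0 -> TRUE,  credit becomes 1
 *	__atomic_add_ifless(&o->credit, 7, 9);	// 1 + 7 < 9  -> TRUE,  credit becomes 8
 *	__atomic_add_ifless(&o->credit, 1, 9);	// 8 + 1 >= 9 -> FALSE, credit stays 8
 *
 * which is exactly the get/put behaviour wrapped by ecore_credit_pool_get()
 * and ecore_credit_pool_put() below (u == 0 and u == pool_sz + 1).
 */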
3243 | static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt) | |
3244 | { | |
3245 | int rc; | |
3246 | ||
3247 | ECORE_SMP_MB(); | |
3248 | rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); | |
3249 | ECORE_SMP_MB(); | |
3250 | ||
3251 | return rc; | |
3252 | } | |
3253 | ||
3254 | static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt) | |
3255 | { | |
3256 | int rc; | |
3257 | ||
3258 | ECORE_SMP_MB(); | |
3259 | ||
3260 | /* Don't allow a refill if credit + cnt > pool_sz */ | |
3261 | rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); | |
3262 | ||
3263 | ECORE_SMP_MB(); | |
3264 | ||
3265 | return rc; | |
3266 | } | |
3267 | ||
3268 | static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o) | |
3269 | { | |
3270 | int cur_credit; | |
3271 | ||
3272 | ECORE_SMP_MB(); | |
3273 | cur_credit = ECORE_ATOMIC_READ(&o->credit); | |
3274 | ||
3275 | return cur_credit; | |
3276 | } | |
3277 | ||
3278 | static int ecore_credit_pool_always_TRUE(__rte_unused struct | |
3279 | ecore_credit_pool_obj *o, | |
3280 | __rte_unused int cnt) | |
3281 | { | |
3282 | return TRUE; | |
3283 | } | |
3284 | ||
3285 | static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o, | |
3286 | int *offset) | |
3287 | { | |
3288 | int idx, vec, i; | |
3289 | ||
3290 | *offset = -1; | |
3291 | ||
3292 | /* Find "internal cam-offset" then add to base for this object... */ | |
3293 | for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) { | |
3294 | ||
3295 | /* Skip the current vector if there are no free entries in it */ | |
3296 | if (!o->pool_mirror[vec]) | |
3297 | continue; | |
3298 | ||
3299 | /* If we've got here we are going to find a free entry */ | |
3300 | for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; | |
3301 | i < BIT_VEC64_ELEM_SZ; idx++, i++) | |
3302 | ||
3303 | if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { | |
3304 | /* Got one!! */ | |
3305 | BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); | |
3306 | *offset = o->base_pool_offset + idx; | |
3307 | return TRUE; | |
3308 | } | |
3309 | } | |
3310 | ||
3311 | return FALSE; | |
3312 | } | |
3313 | ||
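/* Worked example for the scan above (assuming BIT_VEC64_ELEM_SZ == 64):
 * if the first free bit in the mirror is entry 70, it is found in vector
 * word vec == 1 at bit 6, the bit is cleared to mark the entry as taken,
 * and *offset is returned as base_pool_offset + 70.
 */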
3314 | static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o, | |
3315 | int offset) | |
3316 | { | |
3317 | if (offset < o->base_pool_offset) | |
3318 | return FALSE; | |
3319 | ||
3320 | offset -= o->base_pool_offset; | |
3321 | ||
3322 | if (offset >= o->pool_sz) | |
3323 | return FALSE; | |
3324 | ||
3325 | /* Return the entry to the pool */ | |
3326 | BIT_VEC64_SET_BIT(o->pool_mirror, offset); | |
3327 | ||
3328 | return TRUE; | |
3329 | } | |
3330 | ||
3331 | static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct | |
3332 | ecore_credit_pool_obj *o, | |
3333 | __rte_unused int offset) | |
3334 | { | |
3335 | return TRUE; | |
3336 | } | |
3337 | ||
3338 | static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct | |
3339 | ecore_credit_pool_obj *o, | |
3340 | __rte_unused int *offset) | |
3341 | { | |
3342 | *offset = -1; | |
3343 | return TRUE; | |
3344 | } | |
3345 | ||
3346 | /** | |
3347 | * ecore_init_credit_pool - initialize credit pool internals. | |
3348 | * | |
3349 | * @p: credit pool object to initialize. | |
3350 | * @base: Base entry in the CAM to use. | |
3351 | * @credit: pool size. | |
3352 | * | |
3353 | * If base is negative no CAM entries handling will be performed. | |
3354 | * If credit is negative pool operations will always succeed (unlimited pool). | |
3355 | * | |
3356 | */ | |
3357 | static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p, | |
3358 | int base, int credit) | |
3359 | { | |
3360 | /* Zero the object first */ | |
3361 | ECORE_MEMSET(p, 0, sizeof(*p)); | |
3362 | ||
3363 | /* Set the table to all 1s */ | |
3364 | ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); | |
3365 | ||
3366 | /* Init a pool as full */ | |
3367 | ECORE_ATOMIC_SET(&p->credit, credit); | |
3368 | ||
3369 | /* The total pool size */ | |
3370 | p->pool_sz = credit; | |
3371 | ||
3372 | p->base_pool_offset = base; | |
3373 | ||
3374 | /* Commit the change */ | |
3375 | ECORE_SMP_MB(); | |
3376 | ||
3377 | p->check = ecore_credit_pool_check; | |
3378 | ||
3379 | /* if pool credit is negative - disable the checks */ | |
3380 | if (credit >= 0) { | |
3381 | p->put = ecore_credit_pool_put; | |
3382 | p->get = ecore_credit_pool_get; | |
3383 | p->put_entry = ecore_credit_pool_put_entry; | |
3384 | p->get_entry = ecore_credit_pool_get_entry; | |
3385 | } else { | |
3386 | p->put = ecore_credit_pool_always_TRUE; | |
3387 | p->get = ecore_credit_pool_always_TRUE; | |
3388 | p->put_entry = ecore_credit_pool_put_entry_always_TRUE; | |
3389 | p->get_entry = ecore_credit_pool_get_entry_always_TRUE; | |
3390 | } | |
3391 | ||
3392 | /* If base is negative - disable entries handling */ | |
3393 | if (base < 0) { | |
3394 | p->put_entry = ecore_credit_pool_put_entry_always_TRUE; | |
3395 | p->get_entry = ecore_credit_pool_get_entry_always_TRUE; | |
3396 | } | |
3397 | } | |
3398 | ||
3399 | void ecore_init_mac_credit_pool(struct bnx2x_softc *sc, | |
3400 | struct ecore_credit_pool_obj *p, | |
3401 | uint8_t func_id, uint8_t func_num) | |
3402 | { | |
3403 | ||
3404 | #define ECORE_CAM_SIZE_EMUL 5 | |
3405 | ||
3406 | int cam_sz; | |
3407 | ||
3408 | if (CHIP_IS_E1H(sc)) { | |
3409 | /* CAM credit is equally divided between all active functions | |
3410 | * on the PORT! | |
3411 | */ | |
11fdf7f2 | 3412 | if (func_num > 0) { |
7c673cae FG |
3413 | if (!CHIP_REV_IS_SLOW(sc)) |
3414 | cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num)); | |
3415 | else | |
3416 | cam_sz = ECORE_CAM_SIZE_EMUL; | |
3417 | ecore_init_credit_pool(p, func_id * cam_sz, cam_sz); | |
3418 | } else { | |
3419 | /* this should never happen! Block MAC operations. */ | |
3420 | ecore_init_credit_pool(p, 0, 0); | |
3421 | } | |
3422 | ||
3423 | } else { | |
3424 | ||
3425 | /* | |
3426 | * CAM credit is equally divided between all active functions | |
3427 | * on the PATH. | |
3428 | */ | |
11fdf7f2 | 3429 | if (func_num > 0) { |
7c673cae FG |
3430 | if (!CHIP_REV_IS_SLOW(sc)) |
3431 | cam_sz = (MAX_MAC_CREDIT_E2 / func_num); | |
3432 | else | |
3433 | cam_sz = ECORE_CAM_SIZE_EMUL; | |
3434 | ||
3435 | /* No need for CAM entries handling for 57712 and | |
3436 | * newer. | |
3437 | */ | |
3438 | ecore_init_credit_pool(p, -1, cam_sz); | |
3439 | } else { | |
3440 | /* this should never happen! Block MAC operations. */ | |
3441 | ecore_init_credit_pool(p, 0, 0); | |
3442 | } | |
3443 | } | |
3444 | } | |
3445 | ||
3446 | void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc, | |
3447 | struct ecore_credit_pool_obj *p, | |
3448 | uint8_t func_id, uint8_t func_num) | |
3449 | { | |
3450 | if (CHIP_IS_E1x(sc)) { | |
3451 | /* There is no VLAN credit in HW on 57711; only | |
3452 | * MAC / MAC-VLAN can be set. | |
3453 | */ | |
3454 | ecore_init_credit_pool(p, 0, -1); | |
3455 | } else { | |
3456 | /* CAM credit is equally divided between all active functions | |
3457 | * on the PATH. | |
3458 | */ | |
3459 | if (func_num > 0) { | |
3460 | int credit = MAX_VLAN_CREDIT_E2 / func_num; | |
3461 | ecore_init_credit_pool(p, func_id * credit, credit); | |
3462 | } else | |
3463 | /* this should never happen! Block VLAN operations. */ | |
3464 | ecore_init_credit_pool(p, 0, 0); | |
3465 | } | |
3466 | } | |
3467 | ||
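/* Illustrative initialization sketch (not taken verbatim from the driver):
 * the pools are typically created once per function during init; the pool
 * variable names below are assumptions made for the example only.
 *
 *	struct ecore_credit_pool_obj macs_pool;
 *	struct ecore_credit_pool_obj vlans_pool;
 *
 *	ecore_init_mac_credit_pool(sc, &macs_pool, func_id, func_num);
 *	ecore_init_vlan_credit_pool(sc, &vlans_pool, func_id, func_num);
 *
 *	// classification objects later draw from the pools, e.g.:
 *	if (!macs_pool.get(&macs_pool, 1))
 *		PMD_DRV_LOG(ERR, sc, "no MAC CAM credit left");
 */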
3468 | /****************** RSS Configuration ******************/ | |
3469 | ||
3470 | /** | |
3471 | * ecore_setup_rss - configure RSS | |
3472 | * | |
3473 | * @sc: device handle | |
3474 | * @p: rss configuration | |
3475 | * | |
3476 | * Sends an UPDATE ramrod for that matter. | |
3477 | */ | |
3478 | static int ecore_setup_rss(struct bnx2x_softc *sc, | |
3479 | struct ecore_config_rss_params *p) | |
3480 | { | |
3481 | struct ecore_rss_config_obj *o = p->rss_obj; | |
3482 | struct ecore_raw_obj *r = &o->raw; | |
3483 | struct eth_rss_update_ramrod_data *data = | |
3484 | (struct eth_rss_update_ramrod_data *)(r->rdata); | |
3485 | uint8_t rss_mode = 0; | |
3486 | int rc; | |
3487 | ||
3488 | ECORE_MEMSET(data, 0, sizeof(*data)); | |
3489 | ||
9f95a23c | 3490 | ECORE_MSG(sc, "Configuring RSS"); |
7c673cae FG |
3491 | |
3492 | /* Set an echo field */ | |
3493 | data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | | |
3494 | (r->state << ECORE_SWCID_SHIFT)); | |
3495 | ||
3496 | /* RSS mode */ | |
3497 | if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags)) | |
3498 | rss_mode = ETH_RSS_MODE_DISABLED; | |
3499 | else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags)) | |
3500 | rss_mode = ETH_RSS_MODE_REGULAR; | |
3501 | ||
3502 | data->rss_mode = rss_mode; | |
3503 | ||
9f95a23c | 3504 | ECORE_MSG(sc, "rss_mode=%d", rss_mode); |
7c673cae FG |
3505 | |
3506 | /* RSS capabilities */ | |
3507 | if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags)) | |
3508 | data->capabilities |= | |
3509 | ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; | |
3510 | ||
3511 | if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags)) | |
3512 | data->capabilities |= | |
3513 | ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; | |
3514 | ||
3515 | if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags)) | |
3516 | data->capabilities |= | |
3517 | ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; | |
3518 | ||
3519 | if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags)) | |
3520 | data->capabilities |= | |
3521 | ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; | |
3522 | ||
3523 | if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags)) | |
3524 | data->capabilities |= | |
3525 | ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; | |
3526 | ||
3527 | if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags)) | |
3528 | data->capabilities |= | |
3529 | ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; | |
3530 | ||
3531 | if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) { | |
3532 | data->udp_4tuple_dst_port_mask = | |
3533 | ECORE_CPU_TO_LE16(p->tunnel_mask); | |
3534 | data->udp_4tuple_dst_port_value = | |
3535 | ECORE_CPU_TO_LE16(p->tunnel_value); | |
3536 | } | |
3537 | ||
3538 | /* Hashing mask */ | |
3539 | data->rss_result_mask = p->rss_result_mask; | |
3540 | ||
3541 | /* RSS engine ID */ | |
3542 | data->rss_engine_id = o->engine_id; | |
3543 | ||
9f95a23c | 3544 | ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id); |
7c673cae FG |
3545 | |
3546 | /* Indirection table */ | |
3547 | ECORE_MEMCPY(data->indirection_table, p->ind_table, | |
3548 | T_ETH_INDIRECTION_TABLE_SIZE); | |
3549 | ||
3550 | /* Remember the last configuration */ | |
3551 | ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); | |
3552 | ||
3553 | /* RSS keys */ | |
3554 | if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) { | |
3555 | ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0], | |
3556 | sizeof(data->rss_key)); | |
3557 | data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; | |
3558 | } | |
3559 | ||
3560 | /* No need for an explicit memory barrier here as long as we | |
3561 | * ensure the ordering of writing to the SPQ element | |
3562 | * and updating of the SPQ producer which involves a memory | |
3563 | * read. If the memory read is removed we will have to put a | |
3564 | * full memory barrier there (inside ecore_sp_post()). | |
3565 | */ | |
3566 | ||
3567 | /* Send a ramrod */ | |
3568 | rc = ecore_sp_post(sc, | |
3569 | RAMROD_CMD_ID_ETH_RSS_UPDATE, | |
3570 | r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE); | |
3571 | ||
3572 | if (rc < 0) | |
3573 | return rc; | |
3574 | ||
3575 | return ECORE_PENDING; | |
3576 | } | |
3577 | ||
3578 | int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p) | |
3579 | { | |
3580 | int rc; | |
3581 | struct ecore_rss_config_obj *o = p->rss_obj; | |
3582 | struct ecore_raw_obj *r = &o->raw; | |
3583 | ||
3584 | /* Do nothing if only driver cleanup was requested */ | |
3585 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) | |
3586 | return ECORE_SUCCESS; | |
3587 | ||
3588 | r->set_pending(r); | |
3589 | ||
3590 | rc = o->config_rss(sc, p); | |
3591 | if (rc < 0) { | |
3592 | r->clear_pending(r); | |
3593 | return rc; | |
3594 | } | |
3595 | ||
3596 | if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) | |
3597 | rc = r->wait_comp(sc, r); | |
3598 | ||
3599 | return rc; | |
3600 | } | |
3601 | ||
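/* Illustrative usage sketch (assumptions noted inline): with an RSS object
 * initialized via ecore_init_rss_config_obj() and an indirection table
 * prepared by the caller, a regular RSS configuration could be requested
 * roughly as follows:
 *
 *	struct ecore_config_rss_params params = { 0 };
 *	int rc;
 *
 *	params.rss_obj = &sc->rss_conf_obj;	// field name assumed
 *	params.rss_result_mask = 0x7f;		// example mask value
 *	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
 *	ECORE_MEMCPY(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *
 *	rc = ecore_config_rss(sc, &params);
 */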
3602 | void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj, | |
3603 | uint8_t cl_id, uint32_t cid, uint8_t func_id, | |
3604 | uint8_t engine_id, void *rdata, | |
3605 | ecore_dma_addr_t rdata_mapping, int state, | |
3606 | unsigned long *pstate, ecore_obj_type type) | |
3607 | { | |
3608 | ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, | |
3609 | rdata_mapping, state, pstate, type); | |
3610 | ||
3611 | rss_obj->engine_id = engine_id; | |
3612 | rss_obj->config_rss = ecore_setup_rss; | |
3613 | } | |
3614 | ||
3615 | /********************** Queue state object ***********************************/ | |
3616 | ||
3617 | /** | |
3618 | * ecore_queue_state_change - perform Queue state change transition | |
3619 | * | |
3620 | * @sc: device handle | |
3621 | * @params: parameters to perform the transition | |
3622 | * | |
3623 | * returns 0 in case of successfully completed transition, negative error | |
3624 | * code in case of failure, positive (EBUSY) value if there is a completion | |
3625 | * that is still pending (possible only if RAMROD_COMP_WAIT is | |
3626 | * not set in params->ramrod_flags for asynchronous commands). | |
3627 | * | |
3628 | */ | |
3629 | int ecore_queue_state_change(struct bnx2x_softc *sc, | |
3630 | struct ecore_queue_state_params *params) | |
3631 | { | |
3632 | struct ecore_queue_sp_obj *o = params->q_obj; | |
3633 | int rc, pending_bit; | |
3634 | unsigned long *pending = &o->pending; | |
3635 | ||
3636 | /* Check that the requested transition is legal */ | |
3637 | rc = o->check_transition(sc, o, params); | |
3638 | if (rc) { | |
9f95a23c | 3639 | PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d", |
7c673cae FG |
3640 | rc); |
3641 | return ECORE_INVAL; | |
3642 | } | |
3643 | ||
3644 | /* Set "pending" bit */ | |
9f95a23c | 3645 | ECORE_MSG(sc, "pending bit was=%lx", o->pending); |
7c673cae | 3646 | pending_bit = o->set_pending(o, params); |
9f95a23c | 3647 | ECORE_MSG(sc, "pending bit now=%lx", o->pending); |
7c673cae FG |
3648 | |
3649 | /* Don't send a command if only driver cleanup was requested */ | |
3650 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) | |
3651 | o->complete_cmd(sc, o, pending_bit); | |
3652 | else { | |
3653 | /* Send a ramrod */ | |
3654 | rc = o->send_cmd(sc, params); | |
3655 | if (rc) { | |
3656 | o->next_state = ECORE_Q_STATE_MAX; | |
3657 | ECORE_CLEAR_BIT(pending_bit, pending); | |
3658 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
3659 | return rc; | |
3660 | } | |
3661 | ||
3662 | if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { | |
3663 | rc = o->wait_comp(sc, o, pending_bit); | |
3664 | if (rc) | |
3665 | return rc; | |
3666 | ||
3667 | return ECORE_SUCCESS; | |
3668 | } | |
3669 | } | |
3670 | ||
3671 | return ECORE_RET_PENDING(pending_bit, pending); | |
3672 | } | |
3673 | ||
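/* Illustrative flow sketch (not a verbatim driver excerpt): a queue is
 * normally walked through the state machine below by issuing one command
 * at a time through ecore_queue_state_change(), e.g. for the INIT step:
 *
 *	struct ecore_queue_state_params q_params = { 0 };
 *	int rc;
 *
 *	q_params.q_obj = &fp->q_obj;		// per-queue object, name assumed
 *	q_params.cmd = ECORE_Q_CMD_INIT;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	// q_params.params.init filled in by the caller (HC rates, contexts, ...)
 *
 *	rc = ecore_queue_state_change(sc, &q_params);
 *
 * SETUP, UPDATE, HALT, TERMINATE and CFC_DEL follow the same pattern with
 * q_params.cmd and the matching member of q_params.params filled in.
 */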
3674 | static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj, | |
3675 | struct ecore_queue_state_params *params) | |
3676 | { | |
3677 | enum ecore_queue_cmd cmd = params->cmd, bit; | |
3678 | ||
3679 | /* ACTIVATE and DEACTIVATE commands are implemented on top of | |
3680 | * UPDATE command. | |
3681 | */ | |
3682 | if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE)) | |
3683 | bit = ECORE_Q_CMD_UPDATE; | |
3684 | else | |
3685 | bit = cmd; | |
3686 | ||
3687 | ECORE_SET_BIT(bit, &obj->pending); | |
3688 | return bit; | |
3689 | } | |
3690 | ||
3691 | static int ecore_queue_wait_comp(struct bnx2x_softc *sc, | |
3692 | struct ecore_queue_sp_obj *o, | |
3693 | enum ecore_queue_cmd cmd) | |
3694 | { | |
3695 | return ecore_state_wait(sc, cmd, &o->pending); | |
3696 | } | |
3697 | ||
3698 | /** | |
3699 | * ecore_queue_comp_cmd - complete the state change command. | |
3700 | * | |
3701 | * @sc: device handle | |
3702 | * @o: | |
3703 | * @cmd: | |
3704 | * | |
3705 | * Checks that the arrived completion is expected. | |
3706 | */ | |
3707 | static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused, | |
3708 | struct ecore_queue_sp_obj *o, | |
3709 | enum ecore_queue_cmd cmd) | |
3710 | { | |
3711 | unsigned long cur_pending = o->pending; | |
3712 | ||
3713 | if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { | |
9f95a23c | 3714 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
3715 | "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d", |
3716 | cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state, | |
3717 | cur_pending, o->next_state); | |
3718 | return ECORE_INVAL; | |
3719 | } | |
3720 | ||
3721 | if (o->next_tx_only >= o->max_cos) | |
3722 | /* >= because tx only must always be smaller than cos since the | |
3723 | * primary connection supports COS 0 | |
3724 | */ | |
9f95a23c | 3725 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
3726 | "illegal value for next tx_only: %d. max cos was %d", |
3727 | o->next_tx_only, o->max_cos); | |
3728 | ||
9f95a23c | 3729 | ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d", |
7c673cae FG |
3730 | cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state); |
3731 | ||
3732 | if (o->next_tx_only) /* print num tx-only if any exist */ | |
9f95a23c | 3733 | ECORE_MSG(sc, "primary cid %d: num tx-only cons %d", |
7c673cae FG |
3734 | o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only); |
3735 | ||
3736 | o->state = o->next_state; | |
3737 | o->num_tx_only = o->next_tx_only; | |
3738 | o->next_state = ECORE_Q_STATE_MAX; | |
3739 | ||
3740 | /* It's important that o->state and o->next_state are | |
3741 | * updated before o->pending. | |
3742 | */ | |
3743 | wmb(); | |
3744 | ||
3745 | ECORE_CLEAR_BIT(cmd, &o->pending); | |
3746 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
3747 | ||
3748 | return ECORE_SUCCESS; | |
3749 | } | |
3750 | ||
3751 | static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params | |
3752 | *cmd_params, | |
3753 | struct client_init_ramrod_data *data) | |
3754 | { | |
3755 | struct ecore_queue_setup_params *params = &cmd_params->params.setup; | |
3756 | ||
3757 | /* Rx data */ | |
3758 | ||
3759 | /* IPv6 TPA supported for E2 and above only */ | |
3760 | data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6, | |
3761 | ¶ms->flags) * | |
3762 | CLIENT_INIT_RX_DATA_TPA_EN_IPV6; | |
3763 | } | |
3764 | ||
3765 | static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused, | |
3766 | struct ecore_queue_sp_obj *o, | |
3767 | struct ecore_general_setup_params | |
3768 | *params, struct client_init_general_data | |
3769 | *gen_data, unsigned long *flags) | |
3770 | { | |
3771 | gen_data->client_id = o->cl_id; | |
3772 | ||
3773 | if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) { | |
3774 | gen_data->statistics_counter_id = params->stat_id; | |
3775 | gen_data->statistics_en_flg = 1; | |
3776 | gen_data->statistics_zero_flg = | |
3777 | ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags); | |
3778 | } else | |
3779 | gen_data->statistics_counter_id = | |
3780 | DISABLE_STATISTIC_COUNTER_ID_VALUE; | |
3781 | ||
3782 | gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags); | |
3783 | gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags); | |
3784 | gen_data->sp_client_id = params->spcl_id; | |
3785 | gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu); | |
3786 | gen_data->func_id = o->func_id; | |
3787 | ||
3788 | gen_data->cos = params->cos; | |
3789 | ||
3790 | gen_data->traffic_type = | |
3791 | ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ? | |
3792 | LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; | |
3793 | ||
9f95a23c | 3794 | ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d", |
7c673cae FG |
3795 | gen_data->activate_flg, gen_data->cos, |
3796 | gen_data->statistics_en_flg); | |
3797 | } | |
3798 | ||
3799 | static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params, | |
3800 | struct client_init_tx_data *tx_data, | |
3801 | unsigned long *flags) | |
3802 | { | |
3803 | tx_data->enforce_security_flg = | |
3804 | ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags); | |
3805 | tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan); | |
3806 | tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags); | |
3807 | tx_data->tx_switching_flg = | |
3808 | ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags); | |
3809 | tx_data->anti_spoofing_flg = | |
3810 | ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags); | |
3811 | tx_data->force_default_pri_flg = | |
3812 | ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags); | |
3813 | tx_data->refuse_outband_vlan_flg = | |
3814 | ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags); | |
3815 | tx_data->tunnel_non_lso_pcsum_location = | |
3816 | ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : | |
3817 | CSUM_ON_BD; | |
3818 | ||
3819 | tx_data->tx_status_block_id = params->fw_sb_id; | |
3820 | tx_data->tx_sb_index_number = params->sb_cq_index; | |
3821 | tx_data->tss_leading_client_id = params->tss_leading_cl_id; | |
3822 | ||
3823 | tx_data->tx_bd_page_base.lo = | |
3824 | ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); | |
3825 | tx_data->tx_bd_page_base.hi = | |
3826 | ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); | |
3827 | ||
3828 | /* Don't configure any Tx switching mode during queue SETUP */ | |
3829 | tx_data->state = 0; | |
3830 | } | |
3831 | ||
3832 | static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params, | |
3833 | struct client_init_rx_data *rx_data) | |
3834 | { | |
3835 | /* flow control data */ | |
3836 | rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo); | |
3837 | rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi); | |
3838 | rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo); | |
3839 | rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi); | |
3840 | rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo); | |
3841 | rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi); | |
3842 | rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map); | |
3843 | } | |
3844 | ||
3845 | static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params, | |
3846 | struct client_init_rx_data *rx_data, | |
3847 | unsigned long *flags) | |
3848 | { | |
3849 | rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) * | |
3850 | CLIENT_INIT_RX_DATA_TPA_EN_IPV4; | |
3851 | rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) * | |
3852 | CLIENT_INIT_RX_DATA_TPA_MODE; | |
3853 | rx_data->vmqueue_mode_en_flg = 0; | |
3854 | ||
3855 | rx_data->extra_data_over_sgl_en_flg = | |
3856 | ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags); | |
3857 | rx_data->cache_line_alignment_log_size = params->cache_line_log; | |
3858 | rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags); | |
3859 | rx_data->client_qzone_id = params->cl_qzone_id; | |
3860 | rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz); | |
3861 | ||
3862 | /* Always start in DROP_ALL mode */ | |
3863 | rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | | |
3864 | CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); | |
3865 | ||
3866 | /* We don't set drop flags */ | |
3867 | rx_data->drop_ip_cs_err_flg = 0; | |
3868 | rx_data->drop_tcp_cs_err_flg = 0; | |
3869 | rx_data->drop_ttl0_flg = 0; | |
3870 | rx_data->drop_udp_cs_err_flg = 0; | |
3871 | rx_data->inner_vlan_removal_enable_flg = | |
3872 | ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags); | |
3873 | rx_data->outer_vlan_removal_enable_flg = | |
3874 | ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags); | |
3875 | rx_data->status_block_id = params->fw_sb_id; | |
3876 | rx_data->rx_sb_index_number = params->sb_cq_index; | |
3877 | rx_data->max_tpa_queues = params->max_tpa_queues; | |
3878 | rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz); | |
3879 | rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); | |
3880 | rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); | |
3881 | rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map)); | |
3882 | rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map)); | |
3883 | rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS, | |
3884 | flags); | |
3885 | ||
3886 | if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) { | |
3887 | rx_data->approx_mcast_engine_id = params->mcast_engine_id; | |
3888 | rx_data->is_approx_mcast = 1; | |
3889 | } | |
3890 | ||
3891 | rx_data->rss_engine_id = params->rss_engine_id; | |
3892 | ||
3893 | /* silent vlan removal */ | |
3894 | rx_data->silent_vlan_removal_flg = | |
3895 | ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags); | |
3896 | rx_data->silent_vlan_value = | |
3897 | ECORE_CPU_TO_LE16(params->silent_removal_value); | |
3898 | rx_data->silent_vlan_mask = | |
3899 | ECORE_CPU_TO_LE16(params->silent_removal_mask); | |
3900 | } | |
3901 | ||
3902 | /* initialize the general, tx and rx parts of a queue object */ | |
3903 | static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params | |
3904 | *cmd_params, | |
3905 | struct client_init_ramrod_data *data) | |
3906 | { | |
3907 | ecore_q_fill_init_general_data(sc, cmd_params->q_obj, | |
3908 | &cmd_params->params.setup.gen_params, | |
3909 | &data->general, | |
3910 | &cmd_params->params.setup.flags); | |
3911 | ||
3912 | ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params, | |
3913 | &data->tx, &cmd_params->params.setup.flags); | |
3914 | ||
3915 | ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params, | |
3916 | &data->rx, &cmd_params->params.setup.flags); | |
3917 | ||
3918 | ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params, | |
3919 | &data->rx); | |
3920 | } | |
3921 | ||
3922 | /* initialize the general and tx parts of a tx-only queue object */ | |
3923 | static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params | |
3924 | *cmd_params, | |
3925 | struct tx_queue_init_ramrod_data *data) | |
3926 | { | |
3927 | ecore_q_fill_init_general_data(sc, cmd_params->q_obj, | |
3928 | &cmd_params->params.tx_only.gen_params, | |
3929 | &data->general, | |
3930 | &cmd_params->params.tx_only.flags); | |
3931 | ||
3932 | ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params, | |
3933 | &data->tx, &cmd_params->params.tx_only.flags); | |
3934 | ||
9f95a23c | 3935 | ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x", |
7c673cae FG |
3936 | cmd_params->q_obj->cids[0], |
3937 | data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); | |
3938 | } | |
3939 | ||
3940 | /** | |
3941 | * ecore_q_init - init HW/FW queue | |
3942 | * | |
3943 | * @sc: device handle | |
3944 | * @params: | |
3945 | * | |
3946 | * HW/FW initial Queue configuration: | |
3947 | * - HC: Rx and Tx | |
3948 | * - CDU context validation | |
3949 | * | |
3950 | */ | |
3951 | static int ecore_q_init(struct bnx2x_softc *sc, | |
3952 | struct ecore_queue_state_params *params) | |
3953 | { | |
3954 | struct ecore_queue_sp_obj *o = params->q_obj; | |
3955 | struct ecore_queue_init_params *init = ¶ms->params.init; | |
3956 | uint16_t hc_usec; | |
3957 | uint8_t cos; | |
3958 | ||
3959 | /* Tx HC configuration */ | |
3960 | if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) && | |
3961 | ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) { | |
3962 | hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; | |
3963 | ||
3964 | ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id, | |
3965 | init->tx.sb_cq_index, | |
3966 | !ECORE_TEST_BIT | |
3967 | (ECORE_Q_FLG_HC_EN, | |
3968 | &init->tx.flags), hc_usec); | |
3969 | } | |
3970 | ||
3971 | /* Rx HC configuration */ | |
3972 | if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) && | |
3973 | ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) { | |
3974 | hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; | |
3975 | ||
3976 | ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id, | |
3977 | init->rx.sb_cq_index, | |
3978 | !ECORE_TEST_BIT | |
3979 | (ECORE_Q_FLG_HC_EN, | |
3980 | &init->rx.flags), hc_usec); | |
3981 | } | |
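/* Note on the conversions above: hc_rate is an interrupt rate in
 * interrupts per second, so e.g. a requested hc_rate of 8000 translates
 * to 1000000 / 8000 = 125 microseconds written to the status block
 * (figures are illustrative, not a recommended setting).
 */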
3982 | ||
3983 | /* Set CDU context validation values */ | |
3984 | for (cos = 0; cos < o->max_cos; cos++) { | |
9f95a23c | 3985 | ECORE_MSG(sc, "setting context validation. cid %d, cos %d", |
7c673cae | 3986 | o->cids[cos], cos); |
9f95a23c | 3987 | ECORE_MSG(sc, "context pointer %p", init->cxts[cos]); |
7c673cae FG |
3988 | ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]); |
3989 | } | |
3990 | ||
3991 | /* As no ramrod is sent, complete the command immediately */ | |
3992 | o->complete_cmd(sc, o, ECORE_Q_CMD_INIT); | |
3993 | ||
3994 | ECORE_MMIOWB(); | |
3995 | ECORE_SMP_MB(); | |
3996 | ||
3997 | return ECORE_SUCCESS; | |
3998 | } | |
3999 | ||
4000 | static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params | |
4001 | *params) | |
4002 | { | |
4003 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4004 | struct client_init_ramrod_data *rdata = | |
4005 | (struct client_init_ramrod_data *)o->rdata; | |
4006 | ecore_dma_addr_t data_mapping = o->rdata_mapping; | |
4007 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | |
4008 | ||
4009 | /* Clear the ramrod data */ | |
4010 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
4011 | ||
4012 | /* Fill the ramrod data */ | |
4013 | ecore_q_fill_setup_data_cmn(sc, params, rdata); | |
4014 | ||
4015 | /* No need for an explicit memory barrier here as long as we | |
4016 | * ensure the ordering of writing to the SPQ element | |
4017 | * and updating of the SPQ producer which involves a memory | |
4018 | * read. If the memory read is removed we will have to put a | |
4019 | * full memory barrier there (inside ecore_sp_post()). | |
4020 | */ | |
4021 | ||
4022 | return ecore_sp_post(sc, | |
4023 | ramrod, | |
4024 | o->cids[ECORE_PRIMARY_CID_INDEX], | |
4025 | data_mapping, ETH_CONNECTION_TYPE); | |
4026 | } | |
4027 | ||
4028 | static int ecore_q_send_setup_e2(struct bnx2x_softc *sc, | |
4029 | struct ecore_queue_state_params *params) | |
4030 | { | |
4031 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4032 | struct client_init_ramrod_data *rdata = | |
4033 | (struct client_init_ramrod_data *)o->rdata; | |
4034 | ecore_dma_addr_t data_mapping = o->rdata_mapping; | |
4035 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | |
4036 | ||
4037 | /* Clear the ramrod data */ | |
4038 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
4039 | ||
4040 | /* Fill the ramrod data */ | |
4041 | ecore_q_fill_setup_data_cmn(sc, params, rdata); | |
4042 | ecore_q_fill_setup_data_e2(params, rdata); | |
4043 | ||
4044 | /* No need for an explicit memory barrier here as long as we | |
4045 | * ensure the ordering of writing to the SPQ element | |
4046 | * and updating of the SPQ producer which involves a memory | |
4047 | * read. If the memory read is removed we will have to put a | |
4048 | * full memory barrier there (inside ecore_sp_post()). | |
4049 | */ | |
4050 | ||
4051 | return ecore_sp_post(sc, | |
4052 | ramrod, | |
4053 | o->cids[ECORE_PRIMARY_CID_INDEX], | |
4054 | data_mapping, ETH_CONNECTION_TYPE); | |
4055 | } | |
4056 | ||
4057 | static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params | |
4058 | *params) | |
4059 | { | |
4060 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4061 | struct tx_queue_init_ramrod_data *rdata = | |
4062 | (struct tx_queue_init_ramrod_data *)o->rdata; | |
4063 | ecore_dma_addr_t data_mapping = o->rdata_mapping; | |
4064 | int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; | |
4065 | struct ecore_queue_setup_tx_only_params *tx_only_params = | |
4066 | ¶ms->params.tx_only; | |
4067 | uint8_t cid_index = tx_only_params->cid_index; | |
4068 | ||
4069 | if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) { | |
4070 | ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP; | |
4071 | ECORE_MSG(sc, "sending forward tx-only ramrod"); | |
4072 | } | |
4073 | if (cid_index >= o->max_cos) { | |
9f95a23c | 4074 | PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", |
7c673cae FG |
4075 | o->cl_id, cid_index); |
4076 | return ECORE_INVAL; | |
4077 | } | |
4078 | ||
9f95a23c | 4079 | ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d", |
7c673cae FG |
4080 | tx_only_params->gen_params.cos, |
4081 | tx_only_params->gen_params.spcl_id); | |
4082 | ||
4083 | /* Clear the ramrod data */ | |
4084 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
4085 | ||
4086 | /* Fill the ramrod data */ | |
4087 | ecore_q_fill_setup_tx_only(sc, params, rdata); | |
4088 | ||
4089 | ECORE_MSG(sc, | |
4090 | "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d", | |
4091 | o->cids[cid_index], rdata->general.client_id, | |
4092 | rdata->general.sp_client_id, rdata->general.cos); | |
4093 | ||
4094 | /* No need for an explicit memory barrier here as long as we | |
4095 | * ensure the ordering of writing to the SPQ element | |
4096 | * and updating of the SPQ producer which involves a memory | |
4097 | * read. If the memory read is removed we will have to put a | |
4098 | * full memory barrier there (inside ecore_sp_post()). | |
4099 | */ | |
4100 | ||
4101 | return ecore_sp_post(sc, ramrod, o->cids[cid_index], | |
4102 | data_mapping, ETH_CONNECTION_TYPE); | |
4103 | } | |
4104 | ||
4105 | static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj, | |
4106 | struct ecore_queue_update_params *params, | |
4107 | struct client_update_ramrod_data *data) | |
4108 | { | |
4109 | /* Client ID of the client to update */ | |
4110 | data->client_id = obj->cl_id; | |
4111 | ||
4112 | /* Function ID of the client to update */ | |
4113 | data->func_id = obj->func_id; | |
4114 | ||
4115 | /* Default VLAN value */ | |
4116 | data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan); | |
4117 | ||
4118 | /* Inner VLAN stripping */ | |
4119 | data->inner_vlan_removal_enable_flg = | |
4120 | ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); | |
4121 | data->inner_vlan_removal_change_flg = | |
4122 | ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, | |
4123 | ¶ms->update_flags); | |
4124 | ||
4125 | /* Outer VLAN stripping */ | |
4126 | data->outer_vlan_removal_enable_flg = | |
4127 | ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); | |
4128 | data->outer_vlan_removal_change_flg = | |
4129 | ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, | |
4130 | ¶ms->update_flags); | |
4131 | ||
4132 | /* Drop packets that have source MAC that doesn't belong to this | |
4133 | * Queue. | |
4134 | */ | |
4135 | data->anti_spoofing_enable_flg = | |
4136 | ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); | |
4137 | data->anti_spoofing_change_flg = | |
4138 | ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, | |
4139 | ¶ms->update_flags); | |
4140 | ||
4141 | /* Activate/Deactivate */ | |
4142 | data->activate_flg = | |
4143 | ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, ¶ms->update_flags); | |
4144 | data->activate_change_flg = | |
4145 | ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); | |
4146 | ||
4147 | /* Enable default VLAN */ | |
4148 | data->default_vlan_enable_flg = | |
4149 | ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); | |
4150 | data->default_vlan_change_flg = | |
4151 | ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, | |
4152 | ¶ms->update_flags); | |
4153 | ||
4154 | /* silent vlan removal */ | |
4155 | data->silent_vlan_change_flg = | |
4156 | ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, | |
4157 | ¶ms->update_flags); | |
4158 | data->silent_vlan_removal_flg = | |
4159 | ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM, | |
4160 | ¶ms->update_flags); | |
4161 | data->silent_vlan_value = | |
4162 | ECORE_CPU_TO_LE16(params->silent_removal_value); | |
4163 | data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask); | |
4164 | ||
4165 | /* tx switching */ | |
4166 | data->tx_switching_flg = | |
4167 | ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, ¶ms->update_flags); | |
4168 | data->tx_switching_change_flg = | |
4169 | ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG, | |
4170 | ¶ms->update_flags); | |
4171 | } | |
4172 | ||
4173 | static int ecore_q_send_update(struct bnx2x_softc *sc, | |
4174 | struct ecore_queue_state_params *params) | |
4175 | { | |
4176 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4177 | struct client_update_ramrod_data *rdata = | |
4178 | (struct client_update_ramrod_data *)o->rdata; | |
4179 | ecore_dma_addr_t data_mapping = o->rdata_mapping; | |
4180 | struct ecore_queue_update_params *update_params = | |
4181 | ¶ms->params.update; | |
4182 | uint8_t cid_index = update_params->cid_index; | |
4183 | ||
4184 | if (cid_index >= o->max_cos) { | |
9f95a23c | 4185 | PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", |
7c673cae FG |
4186 | o->cl_id, cid_index); |
4187 | return ECORE_INVAL; | |
4188 | } | |
4189 | ||
4190 | /* Clear the ramrod data */ | |
4191 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
4192 | ||
4193 | /* Fill the ramrod data */ | |
4194 | ecore_q_fill_update_data(o, update_params, rdata); | |
4195 | ||
4196 | /* No need for an explicit memory barrier here as long as we | |
4197 | * ensure the ordering of writing to the SPQ element | |
4198 | * and updating of the SPQ producer which involves a memory | |
4199 | * read. If the memory read is removed we will have to put a | |
4200 | * full memory barrier there (inside ecore_sp_post()). | |
4201 | */ | |
4202 | ||
4203 | return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, | |
4204 | o->cids[cid_index], data_mapping, | |
4205 | ETH_CONNECTION_TYPE); | |
4206 | } | |
4207 | ||
4208 | /** | |
4209 | * ecore_q_send_deactivate - send DEACTIVATE command | |
4210 | * | |
4211 | * @sc: device handle | |
4212 | * @params: | |
4213 | * | |
4214 | * implemented using the UPDATE command. | |
4215 | */ | |
4216 | static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params | |
4217 | *params) | |
4218 | { | |
4219 | struct ecore_queue_update_params *update = ¶ms->params.update; | |
4220 | ||
4221 | ECORE_MEMSET(update, 0, sizeof(*update)); | |
4222 | ||
4223 | ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); | |
4224 | ||
4225 | return ecore_q_send_update(sc, params); | |
4226 | } | |
4227 | ||
4228 | /** | |
4229 | * ecore_q_send_activate - send ACTIVATE command | |
4230 | * | |
4231 | * @sc: device handle | |
4232 | * @params: | |
4233 | * | |
4234 | * implemented using the UPDATE command. | |
4235 | */ | |
4236 | static int ecore_q_send_activate(struct bnx2x_softc *sc, | |
4237 | struct ecore_queue_state_params *params) | |
4238 | { | |
4239 | struct ecore_queue_update_params *update = ¶ms->params.update; | |
4240 | ||
4241 | ECORE_MEMSET(update, 0, sizeof(*update)); | |
4242 | ||
4243 | ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags); | |
4244 | ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); | |
4245 | ||
4246 | return ecore_q_send_update(sc, params); | |
4247 | } | |
4248 | ||
4249 | static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc, | |
4250 | __rte_unused struct | |
4251 | ecore_queue_state_params *params) | |
4252 | { | |
4253 | /* Not implemented yet. */ | |
4254 | return -1; | |
4255 | } | |
4256 | ||
4257 | static int ecore_q_send_halt(struct bnx2x_softc *sc, | |
4258 | struct ecore_queue_state_params *params) | |
4259 | { | |
4260 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4261 | ||
4262 | /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */ | |
4263 | ecore_dma_addr_t data_mapping = 0; | |
4264 | data_mapping = (ecore_dma_addr_t) o->cl_id; | |
4265 | ||
4266 | return ecore_sp_post(sc, | |
4267 | RAMROD_CMD_ID_ETH_HALT, | |
4268 | o->cids[ECORE_PRIMARY_CID_INDEX], | |
4269 | data_mapping, ETH_CONNECTION_TYPE); | |
4270 | } | |
4271 | ||
4272 | static int ecore_q_send_cfc_del(struct bnx2x_softc *sc, | |
4273 | struct ecore_queue_state_params *params) | |
4274 | { | |
4275 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4276 | uint8_t cid_idx = params->params.cfc_del.cid_index; | |
4277 | ||
4278 | if (cid_idx >= o->max_cos) { | |
9f95a23c | 4279 | PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", |
7c673cae FG |
4280 | o->cl_id, cid_idx); |
4281 | return ECORE_INVAL; | |
4282 | } | |
4283 | ||
4284 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL, | |
4285 | o->cids[cid_idx], 0, NONE_CONNECTION_TYPE); | |
4286 | } | |
4287 | ||
4288 | static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params | |
4289 | *params) | |
4290 | { | |
4291 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4292 | uint8_t cid_index = params->params.terminate.cid_index; | |
4293 | ||
4294 | if (cid_index >= o->max_cos) { | |
9f95a23c | 4295 | PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", |
7c673cae FG |
4296 | o->cl_id, cid_index); |
4297 | return ECORE_INVAL; | |
4298 | } | |
4299 | ||
4300 | return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE, | |
4301 | o->cids[cid_index], 0, ETH_CONNECTION_TYPE); | |
4302 | } | |
4303 | ||
4304 | static int ecore_q_send_empty(struct bnx2x_softc *sc, | |
4305 | struct ecore_queue_state_params *params) | |
4306 | { | |
4307 | struct ecore_queue_sp_obj *o = params->q_obj; | |
4308 | ||
4309 | return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY, | |
4310 | o->cids[ECORE_PRIMARY_CID_INDEX], 0, | |
4311 | ETH_CONNECTION_TYPE); | |
4312 | } | |
4313 | ||
4314 | static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params | |
4315 | *params) | |
4316 | { | |
4317 | switch (params->cmd) { | |
4318 | case ECORE_Q_CMD_INIT: | |
4319 | return ecore_q_init(sc, params); | |
4320 | case ECORE_Q_CMD_SETUP_TX_ONLY: | |
4321 | return ecore_q_send_setup_tx_only(sc, params); | |
4322 | case ECORE_Q_CMD_DEACTIVATE: | |
4323 | return ecore_q_send_deactivate(sc, params); | |
4324 | case ECORE_Q_CMD_ACTIVATE: | |
4325 | return ecore_q_send_activate(sc, params); | |
4326 | case ECORE_Q_CMD_UPDATE: | |
4327 | return ecore_q_send_update(sc, params); | |
4328 | case ECORE_Q_CMD_UPDATE_TPA: | |
4329 | return ecore_q_send_update_tpa(sc, params); | |
4330 | case ECORE_Q_CMD_HALT: | |
4331 | return ecore_q_send_halt(sc, params); | |
4332 | case ECORE_Q_CMD_CFC_DEL: | |
4333 | return ecore_q_send_cfc_del(sc, params); | |
4334 | case ECORE_Q_CMD_TERMINATE: | |
4335 | return ecore_q_send_terminate(sc, params); | |
4336 | case ECORE_Q_CMD_EMPTY: | |
4337 | return ecore_q_send_empty(sc, params); | |
4338 | default: | |
9f95a23c | 4339 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); |
7c673cae FG |
4340 | return ECORE_INVAL; |
4341 | } | |
4342 | } | |
4343 | ||
4344 | static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc, | |
4345 | struct ecore_queue_state_params *params) | |
4346 | { | |
4347 | switch (params->cmd) { | |
4348 | case ECORE_Q_CMD_SETUP: | |
4349 | return ecore_q_send_setup_e1x(sc, params); | |
4350 | case ECORE_Q_CMD_INIT: | |
4351 | case ECORE_Q_CMD_SETUP_TX_ONLY: | |
4352 | case ECORE_Q_CMD_DEACTIVATE: | |
4353 | case ECORE_Q_CMD_ACTIVATE: | |
4354 | case ECORE_Q_CMD_UPDATE: | |
4355 | case ECORE_Q_CMD_UPDATE_TPA: | |
4356 | case ECORE_Q_CMD_HALT: | |
4357 | case ECORE_Q_CMD_CFC_DEL: | |
4358 | case ECORE_Q_CMD_TERMINATE: | |
4359 | case ECORE_Q_CMD_EMPTY: | |
4360 | return ecore_queue_send_cmd_cmn(sc, params); | |
4361 | default: | |
9f95a23c | 4362 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); |
7c673cae FG |
4363 | return ECORE_INVAL; |
4364 | } | |
4365 | } | |
4366 | ||
4367 | static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc, | |
4368 | struct ecore_queue_state_params *params) | |
4369 | { | |
4370 | switch (params->cmd) { | |
4371 | case ECORE_Q_CMD_SETUP: | |
4372 | return ecore_q_send_setup_e2(sc, params); | |
4373 | case ECORE_Q_CMD_INIT: | |
4374 | case ECORE_Q_CMD_SETUP_TX_ONLY: | |
4375 | case ECORE_Q_CMD_DEACTIVATE: | |
4376 | case ECORE_Q_CMD_ACTIVATE: | |
4377 | case ECORE_Q_CMD_UPDATE: | |
4378 | case ECORE_Q_CMD_UPDATE_TPA: | |
4379 | case ECORE_Q_CMD_HALT: | |
4380 | case ECORE_Q_CMD_CFC_DEL: | |
4381 | case ECORE_Q_CMD_TERMINATE: | |
4382 | case ECORE_Q_CMD_EMPTY: | |
4383 | return ecore_queue_send_cmd_cmn(sc, params); | |
4384 | default: | |
9f95a23c | 4385 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); |
7c673cae FG |
4386 | return ECORE_INVAL; |
4387 | } | |
4388 | } | |
4389 | ||
4390 | /** | |
4391 | * ecore_queue_chk_transition - check state machine of a regular Queue | |
4392 | * | |
4393 | * @sc: device handle | |
4394 | * @o: | |
4395 | * @params: | |
4396 | * | |
4397 | * (not Forwarding) | |
4398 | * It both checks if the requested command is legal in a current | |
4399 | * state and, if it's legal, sets a `next_state' in the object | |
4400 | * that will be used in the completion flow to set the `state' | |
4401 | * of the object. | |
4402 | * | |
4403 | * returns 0 if a requested command is a legal transition, | |
4404 | * ECORE_INVAL otherwise. | |
4405 | */ | |
4406 | static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused, | |
4407 | struct ecore_queue_sp_obj *o, | |
4408 | struct ecore_queue_state_params *params) | |
4409 | { | |
4410 | enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; | |
4411 | enum ecore_queue_cmd cmd = params->cmd; | |
4412 | struct ecore_queue_update_params *update_params = | |
4413 | ¶ms->params.update; | |
4414 | uint8_t next_tx_only = o->num_tx_only; | |
4415 | ||
4416 | /* Forget all pending-for-completion commands if a driver-only state | |
4417 | * transition has been requested. | |
4418 | */ | |
4419 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { | |
4420 | o->pending = 0; | |
4421 | o->next_state = ECORE_Q_STATE_MAX; | |
4422 | } | |
4423 | ||
4424 | /* Don't allow a next state transition if we are in the middle of | |
4425 | * the previous one. | |
4426 | */ | |
4427 | if (o->pending) { | |
9f95a23c | 4428 | PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx", |
7c673cae FG |
4429 | o->pending); |
4430 | return ECORE_BUSY; | |
4431 | } | |
4432 | ||
4433 | switch (state) { | |
4434 | case ECORE_Q_STATE_RESET: | |
4435 | if (cmd == ECORE_Q_CMD_INIT) | |
4436 | next_state = ECORE_Q_STATE_INITIALIZED; | |
4437 | ||
4438 | break; | |
4439 | case ECORE_Q_STATE_INITIALIZED: | |
4440 | if (cmd == ECORE_Q_CMD_SETUP) { | |
4441 | if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, | |
4442 | ¶ms->params.setup.flags)) | |
4443 | next_state = ECORE_Q_STATE_ACTIVE; | |
4444 | else | |
4445 | next_state = ECORE_Q_STATE_INACTIVE; | |
4446 | } | |
4447 | ||
4448 | break; | |
4449 | case ECORE_Q_STATE_ACTIVE: | |
4450 | if (cmd == ECORE_Q_CMD_DEACTIVATE) | |
4451 | next_state = ECORE_Q_STATE_INACTIVE; | |
4452 | ||
4453 | else if ((cmd == ECORE_Q_CMD_EMPTY) || | |
4454 | (cmd == ECORE_Q_CMD_UPDATE_TPA)) | |
4455 | next_state = ECORE_Q_STATE_ACTIVE; | |
4456 | ||
4457 | else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { | |
4458 | next_state = ECORE_Q_STATE_MULTI_COS; | |
4459 | next_tx_only = 1; | |
4460 | } | |
4461 | ||
4462 | else if (cmd == ECORE_Q_CMD_HALT) | |
4463 | next_state = ECORE_Q_STATE_STOPPED; | |
4464 | ||
4465 | else if (cmd == ECORE_Q_CMD_UPDATE) { | |
4466 | /* If "active" state change is requested, update the | |
4467 | * state accordingly. | |
4468 | */ | |
4469 | if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, | |
4470 | &update_params->update_flags) && | |
4471 | !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, | |
4472 | &update_params->update_flags)) | |
4473 | next_state = ECORE_Q_STATE_INACTIVE; | |
4474 | else | |
4475 | next_state = ECORE_Q_STATE_ACTIVE; | |
4476 | } | |
4477 | ||
4478 | break; | |
4479 | case ECORE_Q_STATE_MULTI_COS: | |
4480 | if (cmd == ECORE_Q_CMD_TERMINATE) | |
4481 | next_state = ECORE_Q_STATE_MCOS_TERMINATED; | |
4482 | ||
4483 | else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { | |
4484 | next_state = ECORE_Q_STATE_MULTI_COS; | |
4485 | next_tx_only = o->num_tx_only + 1; | |
4486 | } | |
4487 | ||
4488 | else if ((cmd == ECORE_Q_CMD_EMPTY) || | |
4489 | (cmd == ECORE_Q_CMD_UPDATE_TPA)) | |
4490 | next_state = ECORE_Q_STATE_MULTI_COS; | |
4491 | ||
4492 | else if (cmd == ECORE_Q_CMD_UPDATE) { | |
4493 | /* If "active" state change is requested, update the | |
4494 | * state accordingly. | |
4495 | */ | |
4496 | if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, | |
4497 | &update_params->update_flags) && | |
4498 | !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, | |
4499 | &update_params->update_flags)) | |
4500 | next_state = ECORE_Q_STATE_INACTIVE; | |
4501 | else | |
4502 | next_state = ECORE_Q_STATE_MULTI_COS; | |
4503 | } | |
4504 | ||
4505 | break; | |
4506 | case ECORE_Q_STATE_MCOS_TERMINATED: | |
4507 | if (cmd == ECORE_Q_CMD_CFC_DEL) { | |
4508 | next_tx_only = o->num_tx_only - 1; | |
4509 | if (next_tx_only == 0) | |
4510 | next_state = ECORE_Q_STATE_ACTIVE; | |
4511 | else | |
4512 | next_state = ECORE_Q_STATE_MULTI_COS; | |
4513 | } | |
4514 | ||
4515 | break; | |
4516 | case ECORE_Q_STATE_INACTIVE: | |
4517 | if (cmd == ECORE_Q_CMD_ACTIVATE) | |
4518 | next_state = ECORE_Q_STATE_ACTIVE; | |
4519 | ||
4520 | else if ((cmd == ECORE_Q_CMD_EMPTY) || | |
4521 | (cmd == ECORE_Q_CMD_UPDATE_TPA)) | |
4522 | next_state = ECORE_Q_STATE_INACTIVE; | |
4523 | ||
4524 | else if (cmd == ECORE_Q_CMD_HALT) | |
4525 | next_state = ECORE_Q_STATE_STOPPED; | |
4526 | ||
4527 | else if (cmd == ECORE_Q_CMD_UPDATE) { | |
4528 | /* If "active" state change is requested, update the | |
4529 | * state accordingly. | |
4530 | */ | |
4531 | if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, | |
4532 | &update_params->update_flags) && | |
4533 | ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, | |
4534 | &update_params->update_flags)) { | |
4535 | if (o->num_tx_only == 0) | |
4536 | next_state = ECORE_Q_STATE_ACTIVE; | |
4537 | else /* tx only queues exist for this queue */ | |
4538 | next_state = ECORE_Q_STATE_MULTI_COS; | |
4539 | } else | |
4540 | next_state = ECORE_Q_STATE_INACTIVE; | |
4541 | } | |
4542 | ||
4543 | break; | |
4544 | case ECORE_Q_STATE_STOPPED: | |
4545 | if (cmd == ECORE_Q_CMD_TERMINATE) | |
4546 | next_state = ECORE_Q_STATE_TERMINATED; | |
4547 | ||
4548 | break; | |
4549 | case ECORE_Q_STATE_TERMINATED: | |
4550 | if (cmd == ECORE_Q_CMD_CFC_DEL) | |
4551 | next_state = ECORE_Q_STATE_RESET; | |
4552 | ||
4553 | break; | |
4554 | default: | |
9f95a23c | 4555 | PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state); |
7c673cae FG |
4556 | } |
4557 | ||
4558 | /* Transition is assured */ | |
4559 | if (next_state != ECORE_Q_STATE_MAX) { | |
9f95a23c | 4560 | ECORE_MSG(sc, "Good state transition: %d(%d)->%d", |
7c673cae FG |
4561 | state, cmd, next_state); |
4562 | o->next_state = next_state; | |
4563 | o->next_tx_only = next_tx_only; | |
4564 | return ECORE_SUCCESS; | |
4565 | } | |
4566 | ||
9f95a23c | 4567 | ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd); |
7c673cae FG |
4568 | |
4569 | return ECORE_INVAL; | |
4570 | } | |
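/*
 * Illustrative sketch, not part of the driver: how the transition check
 * above is typically exercised.  A caller fills an ecore_queue_state_params
 * structure and submits it through ecore_queue_state_change(); the
 * check_transition callback only validates the request and records
 * `next_state', which the completion handler applies later.  The helper
 * name below is hypothetical.
 */
#if 0
static int example_halt_queue(struct bnx2x_softc *sc,
			      struct ecore_queue_sp_obj *q_obj)
{
	struct ecore_queue_state_params q_params = { 0 };

	q_params.q_obj = q_obj;
	q_params.cmd = ECORE_Q_CMD_HALT;
	/* Block until the ramrod completion arrives. */
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Legal only from the ACTIVE/INACTIVE states (see the switch above). */
	return ecore_queue_state_change(sc, &q_params);
}
#endif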
4571 | ||
4572 | /** | |
4573 | * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue. | |
4574 | * | |
4575 | * @sc: device handle | |
4576 | * @o: queue state object | |
4577 | * @params: queue state change parameters | |
4578 | * | |
4579 | * It both checks if the requested command is legal in a current | |
4580 | * state and, if it's legal, sets a `next_state' in the object | |
4581 | * that will be used in the completion flow to set the `state' | |
4582 | * of the object. | |
4583 | * | |
4584 | * returns 0 if a requested command is a legal transition, | |
4585 | * ECORE_INVAL otherwise. | |
4586 | */ | |
4587 | static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused, | |
4588 | struct ecore_queue_sp_obj *o, | |
4589 | struct ecore_queue_state_params | |
4590 | *params) | |
4591 | { | |
4592 | enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; | |
4593 | enum ecore_queue_cmd cmd = params->cmd; | |
4594 | ||
4595 | switch (state) { | |
4596 | case ECORE_Q_STATE_RESET: | |
4597 | if (cmd == ECORE_Q_CMD_INIT) | |
4598 | next_state = ECORE_Q_STATE_INITIALIZED; | |
4599 | ||
4600 | break; | |
4601 | case ECORE_Q_STATE_INITIALIZED: | |
4602 | if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { | |
4603 | if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, | |
4604 | ¶ms->params.tx_only.flags)) | |
4605 | next_state = ECORE_Q_STATE_ACTIVE; | |
4606 | else | |
4607 | next_state = ECORE_Q_STATE_INACTIVE; | |
4608 | } | |
4609 | ||
4610 | break; | |
4611 | case ECORE_Q_STATE_ACTIVE: | |
4612 | case ECORE_Q_STATE_INACTIVE: | |
4613 | if (cmd == ECORE_Q_CMD_CFC_DEL) | |
4614 | next_state = ECORE_Q_STATE_RESET; | |
4615 | ||
4616 | break; | |
4617 | default: | |
9f95a23c | 4618 | PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state); |
7c673cae FG |
4619 | } |
4620 | ||
4621 | /* Transition is assured */ | |
4622 | if (next_state != ECORE_Q_STATE_MAX) { | |
9f95a23c | 4623 | ECORE_MSG(sc, "Good state transition: %d(%d)->%d", |
7c673cae FG |
4624 | state, cmd, next_state); |
4625 | o->next_state = next_state; | |
4626 | return ECORE_SUCCESS; | |
4627 | } | |
4628 | ||
9f95a23c | 4629 | ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd); |
7c673cae FG |
4630 | return ECORE_INVAL; |
4631 | } | |
4632 | ||
4633 | void ecore_init_queue_obj(struct bnx2x_softc *sc, | |
4634 | struct ecore_queue_sp_obj *obj, | |
4635 | uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt, | |
4636 | uint8_t func_id, void *rdata, | |
4637 | ecore_dma_addr_t rdata_mapping, unsigned long type) | |
4638 | { | |
4639 | ECORE_MEMSET(obj, 0, sizeof(*obj)); | |
4640 | ||
4641 | /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */ | |
4642 | ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt); | |
4643 | ||
4644 | rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); | |
4645 | obj->max_cos = cid_cnt; | |
4646 | obj->cl_id = cl_id; | |
4647 | obj->func_id = func_id; | |
4648 | obj->rdata = rdata; | |
4649 | obj->rdata_mapping = rdata_mapping; | |
4650 | obj->type = type; | |
4651 | obj->next_state = ECORE_Q_STATE_MAX; | |
4652 | ||
4653 | if (CHIP_IS_E1x(sc)) | |
4654 | obj->send_cmd = ecore_queue_send_cmd_e1x; | |
4655 | else | |
4656 | obj->send_cmd = ecore_queue_send_cmd_e2; | |
4657 | ||
4658 | if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type)) | |
4659 | obj->check_transition = ecore_queue_chk_fwd_transition; | |
4660 | else | |
4661 | obj->check_transition = ecore_queue_chk_transition; | |
4662 | ||
4663 | obj->complete_cmd = ecore_queue_comp_cmd; | |
4664 | obj->wait_comp = ecore_queue_wait_comp; | |
4665 | obj->set_pending = ecore_queue_set_pending; | |
4666 | } | |
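/*
 * Illustrative sketch, not part of the driver: initializing a queue state
 * object for a single-CoS L2 queue.  The cid/cl_id/func_id values and the
 * ECORE_Q_TYPE_HAS_RX/TX flags are placeholders chosen for the example;
 * the real driver takes them from its per-queue fast-path structures.
 */
#if 0
static void example_init_q_obj(struct bnx2x_softc *sc,
			       struct ecore_queue_sp_obj *q_obj,
			       void *rdata, ecore_dma_addr_t rdata_mapping)
{
	uint32_t cid = 17;		/* hypothetical connection id */
	unsigned long q_type = 0;

	ECORE_SET_BIT(ECORE_Q_TYPE_HAS_RX, &q_type);
	ECORE_SET_BIT(ECORE_Q_TYPE_HAS_TX, &q_type);

	ecore_init_queue_obj(sc, q_obj, /* cl_id */ 0, &cid, /* cid_cnt */ 1,
			     /* func_id */ 0, rdata, rdata_mapping, q_type);
}
#endif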
4667 | ||
4668 | /********************** Function state object *********************************/ | |
4669 | enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc, | |
4670 | struct ecore_func_sp_obj *o) | |
4671 | { | |
4672 | /* in the middle of a transaction: return INVALID state */ | |
4673 | if (o->pending) | |
4674 | return ECORE_F_STATE_MAX; | |
4675 | ||
4676 | /* ensure the order of reading of o->pending and o->state: | |
4677 | * o->pending should be read first | |
4678 | */ | |
4679 | rmb(); | |
4680 | ||
4681 | return o->state; | |
4682 | } | |
4683 | ||
4684 | static int ecore_func_wait_comp(struct bnx2x_softc *sc, | |
4685 | struct ecore_func_sp_obj *o, | |
4686 | enum ecore_func_cmd cmd) | |
4687 | { | |
4688 | return ecore_state_wait(sc, cmd, &o->pending); | |
4689 | } | |
4690 | ||
4691 | /** | |
4692 | * ecore_func_state_change_comp - complete the state machine transition | |
4693 | * | |
4694 | * @sc: device handle | |
4695 | * @o: function state object | |
4696 | * @cmd: command that has completed | |
4697 | * | |
4698 | * Called on state change transition. Completes the state | |
4699 | * machine transition only - no HW interaction. | |
4700 | */ | |
4701 | static int | |
4702 | ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused, | |
4703 | struct ecore_func_sp_obj *o, | |
4704 | enum ecore_func_cmd cmd) | |
4705 | { | |
4706 | unsigned long cur_pending = o->pending; | |
4707 | ||
4708 | if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { | |
9f95a23c | 4709 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
4710 | "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d", |
4711 | cmd, ECORE_FUNC_ID(sc), o->state, cur_pending, | |
4712 | o->next_state); | |
4713 | return ECORE_INVAL; | |
4714 | } | |
4715 | ||
9f95a23c | 4716 | ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d", |
7c673cae FG |
4717 | cmd, ECORE_FUNC_ID(sc), o->next_state); |
4718 | ||
4719 | o->state = o->next_state; | |
4720 | o->next_state = ECORE_F_STATE_MAX; | |
4721 | ||
4722 | /* It's important that o->state and o->next_state are | |
4723 | * updated before o->pending. | |
4724 | */ | |
4725 | wmb(); | |
4726 | ||
4727 | ECORE_CLEAR_BIT(cmd, &o->pending); | |
4728 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
4729 | ||
4730 | return ECORE_SUCCESS; | |
4731 | } | |
4732 | ||
4733 | /** | |
4734 | * ecore_func_comp_cmd - complete the state change command | |
4735 | * | |
4736 | * @sc: device handle | |
4737 | * @o: function state object | |
4738 | * @cmd: completed command to acknowledge | |
4739 | * | |
4740 | * Checks that the arrived completion is expected. | |
4741 | */ | |
4742 | static int ecore_func_comp_cmd(struct bnx2x_softc *sc, | |
4743 | struct ecore_func_sp_obj *o, | |
4744 | enum ecore_func_cmd cmd) | |
4745 | { | |
4746 | /* Complete the state machine part first, check if it's a | |
4747 | * legal completion. | |
4748 | */ | |
4749 | int rc = ecore_func_state_change_comp(sc, o, cmd); | |
4750 | return rc; | |
4751 | } | |
4752 | ||
4753 | /** | |
4754 | * ecore_func_chk_transition - perform function state machine transition | |
4755 | * | |
4756 | * @sc: device handle | |
4757 | * @o: function state object | |
4758 | * @params: function state change parameters | |
4759 | * | |
4760 | * It both checks if the requested command is legal in a current | |
4761 | * state and, if it's legal, sets a `next_state' in the object | |
4762 | * that will be used in the completion flow to set the `state' | |
4763 | * of the object. | |
4764 | * | |
4765 | * returns 0 if a requested command is a legal transition, | |
4766 | * ECORE_INVAL otherwise. | |
4767 | */ | |
4768 | static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused, | |
4769 | struct ecore_func_sp_obj *o, | |
4770 | struct ecore_func_state_params *params) | |
4771 | { | |
4772 | enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX; | |
4773 | enum ecore_func_cmd cmd = params->cmd; | |
4774 | ||
4775 | /* Forget all commands pending completion if a driver-only state | |
4776 | * transition has been requested. | |
4777 | */ | |
4778 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { | |
4779 | o->pending = 0; | |
4780 | o->next_state = ECORE_F_STATE_MAX; | |
4781 | } | |
4782 | ||
4783 | /* Don't allow a next state transition if we are in the middle of | |
4784 | * the previous one. | |
4785 | */ | |
4786 | if (o->pending) | |
4787 | return ECORE_BUSY; | |
4788 | ||
4789 | switch (state) { | |
4790 | case ECORE_F_STATE_RESET: | |
4791 | if (cmd == ECORE_F_CMD_HW_INIT) | |
4792 | next_state = ECORE_F_STATE_INITIALIZED; | |
4793 | ||
4794 | break; | |
4795 | case ECORE_F_STATE_INITIALIZED: | |
4796 | if (cmd == ECORE_F_CMD_START) | |
4797 | next_state = ECORE_F_STATE_STARTED; | |
4798 | ||
4799 | else if (cmd == ECORE_F_CMD_HW_RESET) | |
4800 | next_state = ECORE_F_STATE_RESET; | |
4801 | ||
4802 | break; | |
4803 | case ECORE_F_STATE_STARTED: | |
4804 | if (cmd == ECORE_F_CMD_STOP) | |
4805 | next_state = ECORE_F_STATE_INITIALIZED; | |
4806 | /* AFEX ramrods can be sent only in the STARTED state, and only | |
4807 | * if a function_stop ramrod completion is not pending; | |
4808 | * for these events the next state remains STARTED. | |
4809 | */ | |
4810 | else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) && | |
4811 | (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) | |
4812 | next_state = ECORE_F_STATE_STARTED; | |
4813 | ||
4814 | else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) && | |
4815 | (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) | |
4816 | next_state = ECORE_F_STATE_STARTED; | |
4817 | ||
4818 | /* Switch_update ramrod can be sent in either started or | |
4819 | * tx_stopped state, and it doesn't change the state. | |
4820 | */ | |
4821 | else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && | |
4822 | (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) | |
4823 | next_state = ECORE_F_STATE_STARTED; | |
4824 | ||
4825 | else if (cmd == ECORE_F_CMD_TX_STOP) | |
4826 | next_state = ECORE_F_STATE_TX_STOPPED; | |
4827 | ||
4828 | break; | |
4829 | case ECORE_F_STATE_TX_STOPPED: | |
4830 | if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && | |
4831 | (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) | |
4832 | next_state = ECORE_F_STATE_TX_STOPPED; | |
4833 | ||
4834 | else if (cmd == ECORE_F_CMD_TX_START) | |
4835 | next_state = ECORE_F_STATE_STARTED; | |
4836 | ||
4837 | break; | |
4838 | default: | |
9f95a23c | 4839 | PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state); |
7c673cae FG |
4840 | } |
4841 | ||
4842 | /* Transition is assured */ | |
4843 | if (next_state != ECORE_F_STATE_MAX) { | |
9f95a23c | 4844 | ECORE_MSG(sc, "Good function state transition: %d(%d)->%d", |
7c673cae FG |
4845 | state, cmd, next_state); |
4846 | o->next_state = next_state; | |
4847 | return ECORE_SUCCESS; | |
4848 | } | |
4849 | ||
9f95a23c TL |
4850 | ECORE_MSG(sc, |
4851 | "Bad function state transition request: %d %d", state, cmd); | |
7c673cae FG |
4852 | |
4853 | return ECORE_INVAL; | |
4854 | } | |
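/*
 * Summary of the function state machine checked above (derived from the
 * switch cases; added for readability):
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED --TX_STOP--> TX_STOPPED
 *     ^                    |   ^                 |  ^                  |
 *     +-----HW_RESET-------+   +------STOP-------+  +-----TX_START-----+
 *
 * AFEX_UPDATE, AFEX_VIFLISTS and SWITCH_UPDATE leave the state unchanged
 * and are refused while a STOP completion is still pending.
 */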
4855 | ||
4856 | /** | |
4857 | * ecore_func_init_func - performs HW init at function stage | |
4858 | * | |
4859 | * @sc: device handle | |
4860 | * @drv: driver-specific HW init callbacks | |
4861 | * | |
4862 | * Init HW when the current phase is | |
4863 | * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only | |
4864 | * HW blocks. | |
4865 | */ | |
4866 | static int ecore_func_init_func(struct bnx2x_softc *sc, | |
4867 | const struct ecore_func_sp_drv_ops *drv) | |
4868 | { | |
4869 | return drv->init_hw_func(sc); | |
4870 | } | |
4871 | ||
4872 | /** | |
4873 | * ecore_func_init_port - performs HW init at port stage | |
4874 | * | |
4875 | * @sc: device handle | |
4876 | * @drv: driver-specific HW init callbacks | |
4877 | * | |
4878 | * Init HW when the current phase is | |
4879 | * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and | |
4880 | * FUNCTION-only HW blocks. | |
4881 | * | |
4882 | */ | |
4883 | static int ecore_func_init_port(struct bnx2x_softc *sc, | |
4884 | const struct ecore_func_sp_drv_ops *drv) | |
4885 | { | |
4886 | int rc = drv->init_hw_port(sc); | |
4887 | if (rc) | |
4888 | return rc; | |
4889 | ||
4890 | return ecore_func_init_func(sc, drv); | |
4891 | } | |
4892 | ||
4893 | /** | |
4894 | * ecore_func_init_cmn_chip - performs HW init at chip-common stage | |
4895 | * | |
4896 | * @sc: device handle | |
4897 | * @drv: driver-specific HW init callbacks | |
4898 | * | |
4899 | * Init HW when the current phase is | |
4900 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, | |
4901 | * PORT-only and FUNCTION-only HW blocks. | |
4902 | */ | |
4903 | static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops | |
4904 | *drv) | |
4905 | { | |
4906 | int rc = drv->init_hw_cmn_chip(sc); | |
4907 | if (rc) | |
4908 | return rc; | |
4909 | ||
4910 | return ecore_func_init_port(sc, drv); | |
4911 | } | |
4912 | ||
4913 | /** | |
4914 | * ecore_func_init_cmn - performs HW init at common stage | |
4915 | * | |
4916 | * @sc: device handle | |
4917 | * @drv: driver-specific HW init callbacks | |
4918 | * | |
4919 | * Init HW when the current phase is | |
4920 | * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON, | |
4921 | * PORT-only and FUNCTION-only HW blocks. | |
4922 | */ | |
4923 | static int ecore_func_init_cmn(struct bnx2x_softc *sc, | |
4924 | const struct ecore_func_sp_drv_ops *drv) | |
4925 | { | |
4926 | int rc = drv->init_hw_cmn(sc); | |
4927 | if (rc) | |
4928 | return rc; | |
4929 | ||
4930 | return ecore_func_init_port(sc, drv); | |
4931 | } | |
4932 | ||
4933 | static int ecore_func_hw_init(struct bnx2x_softc *sc, | |
4934 | struct ecore_func_state_params *params) | |
4935 | { | |
4936 | uint32_t load_code = params->params.hw_init.load_phase; | |
4937 | struct ecore_func_sp_obj *o = params->f_obj; | |
4938 | const struct ecore_func_sp_drv_ops *drv = o->drv; | |
4939 | int rc = 0; | |
4940 | ||
9f95a23c | 4941 | ECORE_MSG(sc, "function %d load_code %x", |
7c673cae FG |
4942 | ECORE_ABS_FUNC_ID(sc), load_code); |
4943 | ||
4944 | /* Prepare FW */ | |
4945 | rc = drv->init_fw(sc); | |
4946 | if (rc) { | |
9f95a23c | 4947 | PMD_DRV_LOG(ERR, sc, "Error loading firmware"); |
7c673cae FG |
4948 | goto init_err; |
4949 | } | |
4950 | ||
4951 | /* Handle the beginning of COMMON_XXX phases separately... */ | |
4952 | switch (load_code) { | |
4953 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: | |
4954 | rc = ecore_func_init_cmn_chip(sc, drv); | |
4955 | if (rc) | |
4956 | goto init_err; | |
4957 | ||
4958 | break; | |
4959 | case FW_MSG_CODE_DRV_LOAD_COMMON: | |
4960 | rc = ecore_func_init_cmn(sc, drv); | |
4961 | if (rc) | |
4962 | goto init_err; | |
4963 | ||
4964 | break; | |
4965 | case FW_MSG_CODE_DRV_LOAD_PORT: | |
4966 | rc = ecore_func_init_port(sc, drv); | |
4967 | if (rc) | |
4968 | goto init_err; | |
4969 | ||
4970 | break; | |
4971 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | |
4972 | rc = ecore_func_init_func(sc, drv); | |
4973 | if (rc) | |
4974 | goto init_err; | |
4975 | ||
4976 | break; | |
4977 | default: | |
9f95a23c | 4978 | PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP", |
7c673cae FG |
4979 | load_code); |
4980 | rc = ECORE_INVAL; | |
4981 | } | |
4982 | ||
4983 | init_err: | |
4984 | /* In case of success, complete the command immediately: no ramrods | |
4985 | * have been sent. | |
4986 | */ | |
4987 | if (!rc) | |
4988 | o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT); | |
4989 | ||
4990 | return rc; | |
4991 | } | |
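/*
 * Illustrative sketch, not part of the driver: requesting the HW_INIT
 * transition through the function state machine.  The load phase normally
 * comes from the MCP LOAD_REQ response; FW_MSG_CODE_DRV_LOAD_PORT is used
 * here purely as an example, and the helper name is hypothetical.
 */
#if 0
static int example_func_hw_init(struct bnx2x_softc *sc,
				struct ecore_func_sp_obj *f_obj)
{
	struct ecore_func_state_params func_params = { 0 };

	func_params.f_obj = f_obj;
	func_params.cmd = ECORE_F_CMD_HW_INIT;
	func_params.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return ecore_func_state_change(sc, &func_params);
}
#endif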
4992 | ||
4993 | /** | |
4994 | * ecore_func_reset_func - reset HW at function stage | |
4995 | * | |
4996 | * @sc: device handle | |
4997 | * @drv: driver-specific HW reset callbacks | |
4998 | * | |
4999 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only | |
5000 | * FUNCTION-only HW blocks. | |
5001 | */ | |
5002 | static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops | |
5003 | *drv) | |
5004 | { | |
5005 | drv->reset_hw_func(sc); | |
5006 | } | |
5007 | ||
5008 | /** | |
5009 | * ecore_func_reset_port - reset HW at port stage | |
5010 | * | |
5011 | * @sc: device handle | |
5012 | * @drv: driver-specific HW reset callbacks | |
5013 | * | |
5014 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset | |
5015 | * FUNCTION-only and PORT-only HW blocks. | |
5016 | * | |
5017 | * !!!IMPORTANT!!! | |
5018 | * | |
5019 | * It's important to call reset_port() before reset_func(): the last thing | |
5020 | * reset_func() does is pf_disable(), which disables PGLUE_B and thus | |
5021 | * makes any further DMAE transactions impossible. | |
5022 | */ | |
5023 | static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops | |
5024 | *drv) | |
5025 | { | |
5026 | drv->reset_hw_port(sc); | |
5027 | ecore_func_reset_func(sc, drv); | |
5028 | } | |
5029 | ||
5030 | /** | |
5031 | * ecore_func_reset_cmn - reset HW at common stage | |
5032 | * | |
5033 | * @sc: device handle | |
5034 | * @drv: driver-specific HW reset callbacks | |
5035 | * | |
5036 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and | |
5037 | * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, | |
5038 | * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. | |
5039 | */ | |
5040 | static void ecore_func_reset_cmn(struct bnx2x_softc *sc, | |
5041 | const struct ecore_func_sp_drv_ops *drv) | |
5042 | { | |
5043 | ecore_func_reset_port(sc, drv); | |
5044 | drv->reset_hw_cmn(sc); | |
5045 | } | |
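/*
 * Summary (added for readability): the reset helpers cascade according to
 * the MCP unload phase handled in ecore_func_hw_reset() below:
 *
 *   FW_MSG_CODE_DRV_UNLOAD_COMMON   -> reset_hw_port(), reset_hw_func(), reset_hw_cmn()
 *   FW_MSG_CODE_DRV_UNLOAD_PORT     -> reset_hw_port(), reset_hw_func()
 *   FW_MSG_CODE_DRV_UNLOAD_FUNCTION -> reset_hw_func()
 *
 * reset_hw_port() always runs before reset_hw_func() for the PGLUE_B/DMAE
 * reason described above.
 */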
5046 | ||
5047 | static int ecore_func_hw_reset(struct bnx2x_softc *sc, | |
5048 | struct ecore_func_state_params *params) | |
5049 | { | |
5050 | uint32_t reset_phase = params->params.hw_reset.reset_phase; | |
5051 | struct ecore_func_sp_obj *o = params->f_obj; | |
5052 | const struct ecore_func_sp_drv_ops *drv = o->drv; | |
5053 | ||
9f95a23c | 5054 | ECORE_MSG(sc, "function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc), |
7c673cae FG |
5055 | reset_phase); |
5056 | ||
5057 | switch (reset_phase) { | |
5058 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: | |
5059 | ecore_func_reset_cmn(sc, drv); | |
5060 | break; | |
5061 | case FW_MSG_CODE_DRV_UNLOAD_PORT: | |
5062 | ecore_func_reset_port(sc, drv); | |
5063 | break; | |
5064 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: | |
5065 | ecore_func_reset_func(sc, drv); | |
5066 | break; | |
5067 | default: | |
9f95a23c | 5068 | PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP", |
7c673cae FG |
5069 | reset_phase); |
5070 | break; | |
5071 | } | |
5072 | ||
5073 | /* Complete the command immediately: no ramrods have been sent. */ | |
5074 | o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET); | |
5075 | ||
5076 | return ECORE_SUCCESS; | |
5077 | } | |
5078 | ||
5079 | static int ecore_func_send_start(struct bnx2x_softc *sc, | |
5080 | struct ecore_func_state_params *params) | |
5081 | { | |
5082 | struct ecore_func_sp_obj *o = params->f_obj; | |
5083 | struct function_start_data *rdata = | |
5084 | (struct function_start_data *)o->rdata; | |
5085 | ecore_dma_addr_t data_mapping = o->rdata_mapping; | |
5086 | struct ecore_func_start_params *start_params = ¶ms->params.start; | |
5087 | ||
5088 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
5089 | ||
5090 | /* Fill the ramrod data with provided parameters */ | |
5091 | rdata->function_mode = (uint8_t) start_params->mf_mode; | |
5092 | rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag); | |
5093 | rdata->path_id = ECORE_PATH_ID(sc); | |
5094 | rdata->network_cos_mode = start_params->network_cos_mode; | |
5095 | rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; | |
5096 | rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; | |
5097 | ||
5098 | /* | |
5099 | * No explicit memory barrier is needed here: the write to the | |
5100 | * SPQ element must in any case be ordered before the SPQ | |
5101 | * producer update, which involves a memory read, and a full | |
5102 | * memory barrier is already placed there (inside | |
5103 | * ecore_sp_post()). | |
5104 | */ | |
5105 | ||
5106 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, | |
5107 | data_mapping, NONE_CONNECTION_TYPE); | |
5108 | } | |
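/*
 * Illustrative sketch, not part of the driver: the parameters consumed by
 * ecore_func_send_start() above.  All values are placeholders; the real
 * driver derives them from its multi-function and tunnel/CoS configuration.
 */
#if 0
static void example_fill_start_params(struct ecore_func_state_params *p)
{
	p->cmd = ECORE_F_CMD_START;
	p->params.start.mf_mode = 0;		/* placeholder mf mode */
	p->params.start.sd_vlan_tag = 0;	/* no switch-dependent VLAN */
	p->params.start.network_cos_mode = 0;	/* placeholder CoS mode */
	p->params.start.gre_tunnel_mode = 0;
	p->params.start.gre_tunnel_rss = 0;
}
#endif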
5109 | ||
5110 | static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params | |
5111 | *params) | |
5112 | { | |
5113 | struct ecore_func_sp_obj *o = params->f_obj; | |
5114 | struct function_update_data *rdata = | |
5115 | (struct function_update_data *)o->rdata; | |
5116 | ecore_dma_addr_t data_mapping = o->rdata_mapping; | |
5117 | struct ecore_func_switch_update_params *switch_update_params = | |
5118 | ¶ms->params.switch_update; | |
5119 | ||
5120 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
5121 | ||
5122 | /* Fill the ramrod data with provided parameters */ | |
5123 | rdata->tx_switch_suspend_change_flg = 1; | |
5124 | rdata->tx_switch_suspend = switch_update_params->suspend; | |
5125 | rdata->echo = SWITCH_UPDATE; | |
5126 | ||
5127 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, | |
5128 | data_mapping, NONE_CONNECTION_TYPE); | |
5129 | } | |
5130 | ||
5131 | static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params | |
5132 | *params) | |
5133 | { | |
5134 | struct ecore_func_sp_obj *o = params->f_obj; | |
5135 | struct function_update_data *rdata = | |
5136 | (struct function_update_data *)o->afex_rdata; | |
5137 | ecore_dma_addr_t data_mapping = o->afex_rdata_mapping; | |
5138 | struct ecore_func_afex_update_params *afex_update_params = | |
5139 | ¶ms->params.afex_update; | |
5140 | ||
5141 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
5142 | ||
5143 | /* Fill the ramrod data with provided parameters */ | |
5144 | rdata->vif_id_change_flg = 1; | |
5145 | rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id); | |
5146 | rdata->afex_default_vlan_change_flg = 1; | |
5147 | rdata->afex_default_vlan = | |
5148 | ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan); | |
5149 | rdata->allowed_priorities_change_flg = 1; | |
5150 | rdata->allowed_priorities = afex_update_params->allowed_priorities; | |
5151 | rdata->echo = AFEX_UPDATE; | |
5152 | ||
5153 | /* No explicit memory barrier is needed here: the write to the | |
5154 | * SPQ element must in any case be ordered before the SPQ | |
5155 | * producer update, which involves a memory read, and a full | |
5156 | * memory barrier is already placed there (inside | |
5157 | * ecore_sp_post()). | |
5158 | */ | |
9f95a23c | 5159 | ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x", |
7c673cae FG |
5160 | rdata->vif_id, |
5161 | rdata->afex_default_vlan, rdata->allowed_priorities); | |
5162 | ||
5163 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, | |
5164 | data_mapping, NONE_CONNECTION_TYPE); | |
5165 | } | |
5166 | ||
5167 | static | |
5168 | inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc, | |
5169 | struct ecore_func_state_params *params) | |
5170 | { | |
5171 | struct ecore_func_sp_obj *o = params->f_obj; | |
5172 | struct afex_vif_list_ramrod_data *rdata = | |
5173 | (struct afex_vif_list_ramrod_data *)o->afex_rdata; | |
5174 | struct ecore_func_afex_viflists_params *afex_vif_params = | |
5175 | ¶ms->params.afex_viflists; | |
5176 | uint64_t *p_rdata = (uint64_t *) rdata; | |
5177 | ||
5178 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
5179 | ||
5180 | /* Fill the ramrod data with provided parameters */ | |
5181 | rdata->vif_list_index = | |
5182 | ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index); | |
5183 | rdata->func_bit_map = afex_vif_params->func_bit_map; | |
5184 | rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; | |
5185 | rdata->func_to_clear = afex_vif_params->func_to_clear; | |
5186 | ||
5187 | /* pass the sub-command type in the echo field */ | |
5188 | rdata->echo = afex_vif_params->afex_vif_list_command; | |
5189 | ||
5190 | /* No explicit memory barrier is needed here: the write to the | |
5191 | * SPQ element must in any case be ordered before the SPQ | |
5192 | * producer update, which involves a memory read, and a full | |
5193 | * memory barrier is already placed there (inside | |
5194 | * ecore_sp_post()). | |
5195 | */ | |
5196 | ||
9f95a23c TL |
5197 | ECORE_MSG |
5198 | (sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x", | |
7c673cae FG |
5199 | rdata->afex_vif_list_command, rdata->vif_list_index, |
5200 | rdata->func_bit_map, rdata->func_to_clear); | |
5201 | ||
5202 | /* this ramrod sends data directly and not through DMA mapping */ | |
5203 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, | |
5204 | *p_rdata, NONE_CONNECTION_TYPE); | |
5205 | } | |
5206 | ||
5207 | static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct | |
5208 | ecore_func_state_params *params) | |
5209 | { | |
5210 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, | |
5211 | NONE_CONNECTION_TYPE); | |
5212 | } | |
5213 | ||
5214 | static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct | |
5215 | ecore_func_state_params *params) | |
5216 | { | |
5217 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, | |
5218 | NONE_CONNECTION_TYPE); | |
5219 | } | |
5220 | ||
5221 | static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params | |
5222 | *params) | |
5223 | { | |
5224 | struct ecore_func_sp_obj *o = params->f_obj; | |
5225 | struct flow_control_configuration *rdata = | |
5226 | (struct flow_control_configuration *)o->rdata; | |
5227 | ecore_dma_addr_t data_mapping = o->rdata_mapping; | |
5228 | struct ecore_func_tx_start_params *tx_start_params = | |
5229 | ¶ms->params.tx_start; | |
5230 | uint32_t i; | |
5231 | ||
5232 | ECORE_MEMSET(rdata, 0, sizeof(*rdata)); | |
5233 | ||
5234 | rdata->dcb_enabled = tx_start_params->dcb_enabled; | |
5235 | rdata->dcb_version = tx_start_params->dcb_version; | |
5236 | rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0; | |
5237 | ||
5238 | for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) | |
5239 | rdata->traffic_type_to_priority_cos[i] = | |
5240 | tx_start_params->traffic_type_to_priority_cos[i]; | |
5241 | ||
5242 | return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, | |
5243 | data_mapping, NONE_CONNECTION_TYPE); | |
5244 | } | |
5245 | ||
5246 | static int ecore_func_send_cmd(struct bnx2x_softc *sc, | |
5247 | struct ecore_func_state_params *params) | |
5248 | { | |
5249 | switch (params->cmd) { | |
5250 | case ECORE_F_CMD_HW_INIT: | |
5251 | return ecore_func_hw_init(sc, params); | |
5252 | case ECORE_F_CMD_START: | |
5253 | return ecore_func_send_start(sc, params); | |
5254 | case ECORE_F_CMD_STOP: | |
5255 | return ecore_func_send_stop(sc, params); | |
5256 | case ECORE_F_CMD_HW_RESET: | |
5257 | return ecore_func_hw_reset(sc, params); | |
5258 | case ECORE_F_CMD_AFEX_UPDATE: | |
5259 | return ecore_func_send_afex_update(sc, params); | |
5260 | case ECORE_F_CMD_AFEX_VIFLISTS: | |
5261 | return ecore_func_send_afex_viflists(sc, params); | |
5262 | case ECORE_F_CMD_TX_STOP: | |
5263 | return ecore_func_send_tx_stop(sc, params); | |
5264 | case ECORE_F_CMD_TX_START: | |
5265 | return ecore_func_send_tx_start(sc, params); | |
5266 | case ECORE_F_CMD_SWITCH_UPDATE: | |
5267 | return ecore_func_send_switch_update(sc, params); | |
5268 | default: | |
9f95a23c | 5269 | PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); |
7c673cae FG |
5270 | return ECORE_INVAL; |
5271 | } | |
5272 | } | |
5273 | ||
5274 | void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc, | |
5275 | struct ecore_func_sp_obj *obj, | |
5276 | void *rdata, ecore_dma_addr_t rdata_mapping, | |
5277 | void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, | |
5278 | struct ecore_func_sp_drv_ops *drv_iface) | |
5279 | { | |
5280 | ECORE_MEMSET(obj, 0, sizeof(*obj)); | |
5281 | ||
5282 | ECORE_MUTEX_INIT(&obj->one_pending_mutex); | |
5283 | ||
5284 | obj->rdata = rdata; | |
5285 | obj->rdata_mapping = rdata_mapping; | |
5286 | obj->afex_rdata = afex_rdata; | |
5287 | obj->afex_rdata_mapping = afex_rdata_mapping; | |
5288 | obj->send_cmd = ecore_func_send_cmd; | |
5289 | obj->check_transition = ecore_func_chk_transition; | |
5290 | obj->complete_cmd = ecore_func_comp_cmd; | |
5291 | obj->wait_comp = ecore_func_wait_comp; | |
5292 | obj->drv = drv_iface; | |
5293 | } | |
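/*
 * Illustrative sketch, not part of the driver: binding a function state
 * object to a driver-provided ops table.  The my_* callbacks are
 * hypothetical placeholders for the routines invoked by the staged
 * init/reset helpers above.
 */
#if 0
static struct ecore_func_sp_drv_ops my_drv_ops = {
	.init_fw          = my_init_fw,
	.init_hw_cmn_chip = my_init_hw_cmn_chip,
	.init_hw_cmn      = my_init_hw_cmn,
	.init_hw_port     = my_init_hw_port,
	.init_hw_func     = my_init_hw_func,
	.reset_hw_cmn     = my_reset_hw_cmn,
	.reset_hw_port    = my_reset_hw_port,
	.reset_hw_func    = my_reset_hw_func,
};

static void example_bind_func_obj(struct bnx2x_softc *sc,
				  struct ecore_func_sp_obj *f_obj,
				  void *rdata, ecore_dma_addr_t rdata_map,
				  void *afex_rdata,
				  ecore_dma_addr_t afex_rdata_map)
{
	ecore_init_func_obj(sc, f_obj, rdata, rdata_map,
			    afex_rdata, afex_rdata_map, &my_drv_ops);
}
#endif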
5294 | ||
5295 | /** | |
5296 | * ecore_func_state_change - perform Function state change transition | |
5297 | * | |
5298 | * @sc: device handle | |
5299 | * @params: parameters to perform the transaction | |
5300 | * | |
5301 | * returns 0 in case of successfully completed transition, | |
5302 | * negative error code in case of failure, positive | |
5303 | * (EBUSY) value if a completion for that command is | |
5304 | * still pending (possible only if RAMROD_COMP_WAIT is | |
5305 | * not set in params->ramrod_flags for asynchronous | |
5306 | * commands). | |
5307 | */ | |
5308 | int ecore_func_state_change(struct bnx2x_softc *sc, | |
5309 | struct ecore_func_state_params *params) | |
5310 | { | |
5311 | struct ecore_func_sp_obj *o = params->f_obj; | |
5312 | int rc, cnt = 300; | |
5313 | enum ecore_func_cmd cmd = params->cmd; | |
5314 | unsigned long *pending = &o->pending; | |
5315 | ||
5316 | ECORE_MUTEX_LOCK(&o->one_pending_mutex); | |
5317 | ||
5318 | /* Check that the requested transition is legal */ | |
5319 | rc = o->check_transition(sc, o, params); | |
5320 | if ((rc == ECORE_BUSY) && | |
5321 | (ECORE_TEST_BIT(RAMROD_RETRY, ¶ms->ramrod_flags))) { | |
5322 | while ((rc == ECORE_BUSY) && (--cnt > 0)) { | |
5323 | ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); | |
5324 | ECORE_MSLEEP(10); | |
5325 | ECORE_MUTEX_LOCK(&o->one_pending_mutex); | |
5326 | rc = o->check_transition(sc, o, params); | |
5327 | } | |
5328 | if (rc == ECORE_BUSY) { | |
5329 | ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); | |
9f95a23c | 5330 | PMD_DRV_LOG(ERR, sc, |
7c673cae FG |
5331 | "timeout waiting for previous ramrod completion"); |
5332 | return rc; | |
5333 | } | |
5334 | } else if (rc) { | |
5335 | ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); | |
5336 | return rc; | |
5337 | } | |
5338 | ||
5339 | /* Set "pending" bit */ | |
5340 | ECORE_SET_BIT(cmd, pending); | |
5341 | ||
5342 | /* Don't send a command if only driver cleanup was requested */ | |
5343 | if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { | |
5344 | ecore_func_state_change_comp(sc, o, cmd); | |
5345 | ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); | |
5346 | } else { | |
5347 | /* Send a ramrod */ | |
5348 | rc = o->send_cmd(sc, params); | |
5349 | ||
5350 | ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); | |
5351 | ||
5352 | if (rc) { | |
5353 | o->next_state = ECORE_F_STATE_MAX; | |
5354 | ECORE_CLEAR_BIT(cmd, pending); | |
5355 | ECORE_SMP_MB_AFTER_CLEAR_BIT(); | |
5356 | return rc; | |
5357 | } | |
5358 | ||
5359 | if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { | |
5360 | rc = o->wait_comp(sc, o, cmd); | |
5361 | if (rc) | |
5362 | return rc; | |
5363 | ||
5364 | return ECORE_SUCCESS; | |
5365 | } | |
5366 | } | |
5367 | ||
5368 | return ECORE_RET_PENDING(cmd, pending); | |
5369 | } | |
5370 | ||
5371 | /****************************************************************************** | |
5372 | * Description: | |
5373 | * Calculates crc 8 on a word value: polynomial 0-1-2-8 | |
5374 | * Code was translated from Verilog. | |
5375 | * Return: the resulting 8-bit CRC | |
5376 | *****************************************************************************/ | |
5377 | uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc) | |
5378 | { | |
5379 | uint8_t D[32]; | |
5380 | uint8_t NewCRC[8]; | |
5381 | uint8_t C[8]; | |
5382 | uint8_t crc_res; | |
5383 | uint8_t i; | |
5384 | ||
5385 | /* split the data into 32 bits */ | |
5386 | for (i = 0; i < 32; i++) { | |
5387 | D[i] = (uint8_t) (data & 1); | |
5388 | data = data >> 1; | |
5389 | } | |
5390 | ||
5391 | /* split the crc into 8 bits */ | |
5392 | for (i = 0; i < 8; i++) { | |
5393 | C[i] = crc & 1; | |
5394 | crc = crc >> 1; | |
5395 | } | |
5396 | ||
5397 | NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^ | |
5398 | D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^ | |
5399 | C[6] ^ C[7]; | |
5400 | NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^ | |
5401 | D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ | |
5402 | D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6]; | |
5403 | NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^ | |
5404 | D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^ | |
5405 | C[0] ^ C[1] ^ C[4] ^ C[5]; | |
5406 | NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^ | |
5407 | D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^ | |
5408 | C[1] ^ C[2] ^ C[5] ^ C[6]; | |
5409 | NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^ | |
5410 | D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^ | |
5411 | C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7]; | |
5412 | NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^ | |
5413 | D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^ | |
5414 | C[3] ^ C[4] ^ C[7]; | |
5415 | NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^ | |
5416 | D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5]; | |
5417 | NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^ | |
5418 | D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6]; | |
5419 | ||
5420 | crc_res = 0; | |
5421 | for (i = 0; i < 8; i++) { | |
5422 | crc_res |= (NewCRC[i] << i); | |
5423 | } | |
5424 | ||
5425 | return crc_res; | |
5426 | } | |
5427 | ||
5428 | uint32_t | |
5429 | ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic) | |
5430 | { | |
5431 | int i; | |
5432 | while (len--) { | |
5433 | crc ^= *p++; | |
5434 | for (i = 0; i < 8; i++) | |
5435 | crc = (crc >> 1) ^ ((crc & 1) ? magic : 0); | |
5436 | } | |
5437 | return crc; | |
5438 | } |
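/*
 * Illustrative sketch, not part of the driver: ecore_calc_crc32() is a
 * generic bit-reflected CRC that shifts right and XORs with `magic' for
 * each set bit.  The all-ones seed and the polynomial 0xedb88320 (standard
 * reflected CRC-32) below are example values only; callers pass whatever
 * polynomial they need via `magic'.
 */
#if 0
static uint32_t example_mac_crc32(const uint8_t mac[6])
{
	return ecore_calc_crc32(0xffffffff, mac, 6, 0xedb88320);
}
#endif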