2 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
9 * Copyright (c) 2015 QLogic Corporation.
10 * All rights reserved.
13 * See LICENSE.bnx2x_pmd for copyright and licensing details.
17 #include "ecore_init.h"
19 /**** Exe Queue interfaces ****/
22 * ecore_exe_queue_init - init the Exe Queue object
24 * @o: pointer to the object
26 * @owner: pointer to the owner
27 * @validate: validate function pointer
28 * @optimize: optimize function pointer
29 * @exec: execute function pointer
30 * @get: get function pointer
33 ecore_exe_queue_init(struct bnx2x_softc
*sc __rte_unused
,
34 struct ecore_exe_queue_obj
*o
,
36 union ecore_qable_obj
*owner
,
37 exe_q_validate validate
,
39 exe_q_optimize optimize
, exe_q_execute exec
, exe_q_get get
)
41 ECORE_MEMSET(o
, 0, sizeof(*o
));
43 ECORE_LIST_INIT(&o
->exe_queue
);
44 ECORE_LIST_INIT(&o
->pending_comp
);
46 ECORE_SPIN_LOCK_INIT(&o
->lock
, sc
);
48 o
->exe_chunk_len
= exe_len
;
51 /* Owner specific callbacks */
52 o
->validate
= validate
;
54 o
->optimize
= optimize
;
58 ECORE_MSG("Setup the execution queue with the chunk length of %d",
62 static void ecore_exe_queue_free_elem(struct bnx2x_softc
*sc __rte_unused
,
63 struct ecore_exeq_elem
*elem
)
65 ECORE_MSG("Deleting an exe_queue element");
66 ECORE_FREE(sc
, elem
, sizeof(*elem
));
69 static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj
*o
)
71 struct ecore_exeq_elem
*elem
;
74 ECORE_SPIN_LOCK_BH(&o
->lock
);
76 ECORE_LIST_FOR_EACH_ENTRY(elem
, &o
->exe_queue
, link
,
77 struct ecore_exeq_elem
) cnt
++;
79 ECORE_SPIN_UNLOCK_BH(&o
->lock
);
85 * ecore_exe_queue_add - add a new element to the execution queue
89 * @cmd: new command to add
90 * @restore: true - do not optimize the command
92 * If the element is optimized or is illegal, frees it.
94 static int ecore_exe_queue_add(struct bnx2x_softc
*sc
,
95 struct ecore_exe_queue_obj
*o
,
96 struct ecore_exeq_elem
*elem
, int restore
)
100 ECORE_SPIN_LOCK_BH(&o
->lock
);
103 /* Try to cancel this element queue */
104 rc
= o
->optimize(sc
, o
->owner
, elem
);
108 /* Check if this request is ok */
109 rc
= o
->validate(sc
, o
->owner
, elem
);
111 ECORE_MSG("Preamble failed: %d", rc
);
116 /* If so, add it to the execution queue */
117 ECORE_LIST_PUSH_TAIL(&elem
->link
, &o
->exe_queue
);
119 ECORE_SPIN_UNLOCK_BH(&o
->lock
);
121 return ECORE_SUCCESS
;
124 ecore_exe_queue_free_elem(sc
, elem
);
126 ECORE_SPIN_UNLOCK_BH(&o
->lock
);
131 static void __ecore_exe_queue_reset_pending(struct bnx2x_softc
*sc
, struct ecore_exe_queue_obj
134 struct ecore_exeq_elem
*elem
;
136 while (!ECORE_LIST_IS_EMPTY(&o
->pending_comp
)) {
137 elem
= ECORE_LIST_FIRST_ENTRY(&o
->pending_comp
,
138 struct ecore_exeq_elem
, link
);
140 ECORE_LIST_REMOVE_ENTRY(&elem
->link
, &o
->pending_comp
);
141 ecore_exe_queue_free_elem(sc
, elem
);
145 static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc
*sc
,
146 struct ecore_exe_queue_obj
*o
)
148 ECORE_SPIN_LOCK_BH(&o
->lock
);
150 __ecore_exe_queue_reset_pending(sc
, o
);
152 ECORE_SPIN_UNLOCK_BH(&o
->lock
);
156 * ecore_exe_queue_step - execute one execution chunk atomically
160 * @ramrod_flags: flags
162 * (Should be called while holding the exe_queue->lock).
164 static int ecore_exe_queue_step(struct bnx2x_softc
*sc
,
165 struct ecore_exe_queue_obj
*o
,
166 unsigned long *ramrod_flags
)
168 struct ecore_exeq_elem
*elem
, spacer
;
171 ECORE_MEMSET(&spacer
, 0, sizeof(spacer
));
173 /* Next step should not be performed until the current is finished,
174 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
175 * properly clear object internals without sending any command to the FW
176 * which also implies there won't be any completion to clear the
179 if (!ECORE_LIST_IS_EMPTY(&o
->pending_comp
)) {
180 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, ramrod_flags
)) {
182 ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
183 __ecore_exe_queue_reset_pending(sc
, o
);
185 return ECORE_PENDING
;
189 /* Run through the pending commands list and create a next
192 while (!ECORE_LIST_IS_EMPTY(&o
->exe_queue
)) {
193 elem
= ECORE_LIST_FIRST_ENTRY(&o
->exe_queue
,
194 struct ecore_exeq_elem
, link
);
195 ECORE_DBG_BREAK_IF(!elem
->cmd_len
);
197 if (cur_len
+ elem
->cmd_len
<= o
->exe_chunk_len
) {
198 cur_len
+= elem
->cmd_len
;
199 /* Prevent from both lists being empty when moving an
200 * element. This will allow the call of
201 * ecore_exe_queue_empty() without locking.
203 ECORE_LIST_PUSH_TAIL(&spacer
.link
, &o
->pending_comp
);
205 ECORE_LIST_REMOVE_ENTRY(&elem
->link
, &o
->exe_queue
);
206 ECORE_LIST_PUSH_TAIL(&elem
->link
, &o
->pending_comp
);
207 ECORE_LIST_REMOVE_ENTRY(&spacer
.link
, &o
->pending_comp
);
214 return ECORE_SUCCESS
;
216 rc
= o
->execute(sc
, o
->owner
, &o
->pending_comp
, ramrod_flags
);
218 /* In case of an error return the commands back to the queue
219 * and reset the pending_comp.
221 ECORE_LIST_SPLICE_INIT(&o
->pending_comp
, &o
->exe_queue
);
223 /* If zero is returned, means there are no outstanding pending
224 * completions and we may dismiss the pending list.
226 __ecore_exe_queue_reset_pending(sc
, o
);
231 static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj
*o
)
233 int empty
= ECORE_LIST_IS_EMPTY(&o
->exe_queue
);
235 /* Don't reorder!!! */
238 return empty
&& ECORE_LIST_IS_EMPTY(&o
->pending_comp
);
241 static struct ecore_exeq_elem
*ecore_exe_queue_alloc_elem(struct
245 ECORE_MSG("Allocating a new exe_queue element");
246 return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem
), GFP_ATOMIC
, sc
);
249 /************************ raw_obj functions ***********************************/
250 static int ecore_raw_check_pending(struct ecore_raw_obj
*o
)
253 * !! converts the value returned by ECORE_TEST_BIT such that it
254 * is guaranteed not to be truncated regardless of int definition.
256 * Note we cannot simply define the function's return value type
257 * to match the type returned by ECORE_TEST_BIT, as it varies by
258 * platform/implementation.
261 return ! !ECORE_TEST_BIT(o
->state
, o
->pstate
);
264 static void ecore_raw_clear_pending(struct ecore_raw_obj
*o
)
266 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
267 ECORE_CLEAR_BIT(o
->state
, o
->pstate
);
268 ECORE_SMP_MB_AFTER_CLEAR_BIT();
271 static void ecore_raw_set_pending(struct ecore_raw_obj
*o
)
273 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
274 ECORE_SET_BIT(o
->state
, o
->pstate
);
275 ECORE_SMP_MB_AFTER_CLEAR_BIT();
279 * ecore_state_wait - wait until the given bit(state) is cleared
282 * @state: state which is to be cleared
283 * @state_p: state buffer
286 static int ecore_state_wait(struct bnx2x_softc
*sc
, int state
,
287 unsigned long *pstate
)
289 /* can take a while if any port is running */
292 if (CHIP_REV_IS_EMUL(sc
))
295 ECORE_MSG("waiting for state to become %d", state
);
299 bnx2x_intr_legacy(sc
, 1);
300 if (!ECORE_TEST_BIT(state
, pstate
)) {
301 #ifdef ECORE_STOP_ON_ERROR
302 ECORE_MSG("exit (cnt %d)", 5000 - cnt
);
304 return ECORE_SUCCESS
;
307 ECORE_WAIT(sc
, delay_us
);
314 PMD_DRV_LOG(ERR
, "timeout waiting for state %d", state
);
315 #ifdef ECORE_STOP_ON_ERROR
319 return ECORE_TIMEOUT
;
322 static int ecore_raw_wait(struct bnx2x_softc
*sc
, struct ecore_raw_obj
*raw
)
324 return ecore_state_wait(sc
, raw
->state
, raw
->pstate
);
327 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
328 /* credit handling callbacks */
329 static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj
*o
, int *offset
)
331 struct ecore_credit_pool_obj
*mp
= o
->macs_pool
;
333 ECORE_DBG_BREAK_IF(!mp
);
335 return mp
->get_entry(mp
, offset
);
338 static int ecore_get_credit_mac(struct ecore_vlan_mac_obj
*o
)
340 struct ecore_credit_pool_obj
*mp
= o
->macs_pool
;
342 ECORE_DBG_BREAK_IF(!mp
);
344 return mp
->get(mp
, 1);
347 static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj
*o
, int offset
)
349 struct ecore_credit_pool_obj
*mp
= o
->macs_pool
;
351 return mp
->put_entry(mp
, offset
);
354 static int ecore_put_credit_mac(struct ecore_vlan_mac_obj
*o
)
356 struct ecore_credit_pool_obj
*mp
= o
->macs_pool
;
358 return mp
->put(mp
, 1);
362 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
366 * @o: vlan_mac object
368 * @details: Non-blocking implementation; should be called under execution
371 static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc
*sc __rte_unused
,
372 struct ecore_vlan_mac_obj
*o
)
374 if (o
->head_reader
) {
375 ECORE_MSG("vlan_mac_lock writer - There are readers; Busy");
379 ECORE_MSG("vlan_mac_lock writer - Taken");
380 return ECORE_SUCCESS
;
384 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
385 * which wasn't able to run due to a taken lock on vlan mac head list.
388 * @o: vlan_mac object
390 * @details Should be called under execution queue lock; notice it might release
391 * and reclaim it during its run.
393 static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc
*sc
,
394 struct ecore_vlan_mac_obj
*o
)
397 unsigned long ramrod_flags
= o
->saved_ramrod_flags
;
399 ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu",
401 o
->head_exe_request
= FALSE
;
402 o
->saved_ramrod_flags
= 0;
403 rc
= ecore_exe_queue_step(sc
, &o
->exe_queue
, &ramrod_flags
);
404 if (rc
!= ECORE_SUCCESS
) {
406 "execution of pending commands failed with rc %d",
408 #ifdef ECORE_STOP_ON_ERROR
415 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
416 * called due to vlan mac head list lock being taken.
419 * @o: vlan_mac object
420 * @ramrod_flags: ramrod flags of missed execution
422 * @details Should be called under execution queue lock.
424 static void __ecore_vlan_mac_h_pend(struct bnx2x_softc
*sc __rte_unused
,
425 struct ecore_vlan_mac_obj
*o
,
426 unsigned long ramrod_flags
)
428 o
->head_exe_request
= TRUE
;
429 o
->saved_ramrod_flags
= ramrod_flags
;
430 ECORE_MSG("Placing pending execution with ramrod flags %lu",
435 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
438 * @o: vlan_mac object
440 * @details Should be called under execution queue lock. Notice if a pending
441 * execution exists, it would perform it - possibly releasing and
442 * reclaiming the execution queue lock.
444 static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc
*sc
,
445 struct ecore_vlan_mac_obj
*o
)
447 /* It's possible a new pending execution was added since this writer
448 * executed. If so, execute again. [Ad infinitum]
450 while (o
->head_exe_request
) {
452 ("vlan_mac_lock - writer release encountered a pending request");
453 __ecore_vlan_mac_h_exec_pending(sc
, o
);
458 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
461 * @o: vlan_mac object
463 * @details Notice if a pending execution exists, it would perform it -
464 * possibly releasing and reclaiming the execution queue lock.
466 void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc
*sc
,
467 struct ecore_vlan_mac_obj
*o
)
469 ECORE_SPIN_LOCK_BH(&o
->exe_queue
.lock
);
470 __ecore_vlan_mac_h_write_unlock(sc
, o
);
471 ECORE_SPIN_UNLOCK_BH(&o
->exe_queue
.lock
);
475 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
478 * @o: vlan_mac object
480 * @details Should be called under the execution queue lock. May sleep. May
481 * release and reclaim execution queue lock during its run.
483 static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc
*sc __rte_unused
,
484 struct ecore_vlan_mac_obj
*o
)
486 /* If we got here, we're holding lock --> no WRITER exists */
488 ECORE_MSG("vlan_mac_lock - locked reader - number %d", o
->head_reader
);
490 return ECORE_SUCCESS
;
494 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
497 * @o: vlan_mac object
499 * @details May sleep. Claims and releases execution queue lock during its run.
501 static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc
*sc
,
502 struct ecore_vlan_mac_obj
*o
)
506 ECORE_SPIN_LOCK_BH(&o
->exe_queue
.lock
);
507 rc
= __ecore_vlan_mac_h_read_lock(sc
, o
);
508 ECORE_SPIN_UNLOCK_BH(&o
->exe_queue
.lock
);
514 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
517 * @o: vlan_mac object
519 * @details Should be called under execution queue lock. Notice if a pending
520 * execution exists, it would be performed if this was the last
521 * reader. possibly releasing and reclaiming the execution queue lock.
523 static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc
*sc
,
524 struct ecore_vlan_mac_obj
*o
)
526 if (!o
->head_reader
) {
528 "Need to release vlan mac reader lock, but lock isn't taken");
529 #ifdef ECORE_STOP_ON_ERROR
535 "vlan_mac_lock - decreased readers to %d",
539 /* It's possible a new pending execution was added, and that this reader
540 * was last - if so we need to execute the command.
542 if (!o
->head_reader
&& o
->head_exe_request
) {
544 "vlan_mac_lock - reader release encountered a pending request");
546 /* Writer release will do the trick */
547 __ecore_vlan_mac_h_write_unlock(sc
, o
);
552 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
555 * @o: vlan_mac object
557 * @details Notice if a pending execution exists, it would be performed if this
558 * was the last reader. Claims and releases the execution queue lock
561 void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc
*sc
,
562 struct ecore_vlan_mac_obj
*o
)
564 ECORE_SPIN_LOCK_BH(&o
->exe_queue
.lock
);
565 __ecore_vlan_mac_h_read_unlock(sc
, o
);
566 ECORE_SPIN_UNLOCK_BH(&o
->exe_queue
.lock
);
570 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
573 * @o: vlan_mac object
574 * @n: number of elements to get
575 * @base: base address for element placement
576 * @stride: stride between elements (in bytes)
578 static int ecore_get_n_elements(struct bnx2x_softc
*sc
,
579 struct ecore_vlan_mac_obj
*o
, int n
,
580 uint8_t * base
, uint8_t stride
, uint8_t size
)
582 struct ecore_vlan_mac_registry_elem
*pos
;
583 uint8_t *next
= base
;
584 int counter
= 0, read_lock
;
586 ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)");
587 read_lock
= ecore_vlan_mac_h_read_lock(sc
, o
);
588 if (read_lock
!= ECORE_SUCCESS
)
590 "get_n_elements failed to get vlan mac reader lock; Access without lock");
593 ECORE_LIST_FOR_EACH_ENTRY(pos
, &o
->head
, link
,
594 struct ecore_vlan_mac_registry_elem
) {
596 ECORE_MEMCPY(next
, &pos
->u
, size
);
599 ("copied element number %d to address %p element was:",
601 next
+= stride
+ size
;
605 if (read_lock
== ECORE_SUCCESS
) {
606 ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)");
607 ecore_vlan_mac_h_read_unlock(sc
, o
);
610 return counter
* ETH_ALEN
;
613 /* check_add() callbacks */
614 static int ecore_check_mac_add(struct bnx2x_softc
*sc __rte_unused
,
615 struct ecore_vlan_mac_obj
*o
,
616 union ecore_classification_ramrod_data
*data
)
618 struct ecore_vlan_mac_registry_elem
*pos
;
620 ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
621 data
->mac
.mac
[0], data
->mac
.mac
[1], data
->mac
.mac
[2],
622 data
->mac
.mac
[3], data
->mac
.mac
[4], data
->mac
.mac
[5]);
624 if (!ECORE_IS_VALID_ETHER_ADDR(data
->mac
.mac
))
627 /* Check if a requested MAC already exists */
628 ECORE_LIST_FOR_EACH_ENTRY(pos
, &o
->head
, link
,
629 struct ecore_vlan_mac_registry_elem
)
630 if (!ECORE_MEMCMP(data
->mac
.mac
, pos
->u
.mac
.mac
, ETH_ALEN
) &&
631 (data
->mac
.is_inner_mac
== pos
->u
.mac
.is_inner_mac
))
634 return ECORE_SUCCESS
;
637 /* check_del() callbacks */
638 static struct ecore_vlan_mac_registry_elem
*ecore_check_mac_del(struct bnx2x_softc
644 ecore_classification_ramrod_data
647 struct ecore_vlan_mac_registry_elem
*pos
;
649 ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
650 data
->mac
.mac
[0], data
->mac
.mac
[1], data
->mac
.mac
[2],
651 data
->mac
.mac
[3], data
->mac
.mac
[4], data
->mac
.mac
[5]);
653 ECORE_LIST_FOR_EACH_ENTRY(pos
, &o
->head
, link
,
654 struct ecore_vlan_mac_registry_elem
)
655 if ((!ECORE_MEMCMP(data
->mac
.mac
, pos
->u
.mac
.mac
, ETH_ALEN
)) &&
656 (data
->mac
.is_inner_mac
== pos
->u
.mac
.is_inner_mac
))
662 /* check_move() callback */
663 static int ecore_check_move(struct bnx2x_softc
*sc
,
664 struct ecore_vlan_mac_obj
*src_o
,
665 struct ecore_vlan_mac_obj
*dst_o
,
666 union ecore_classification_ramrod_data
*data
)
668 struct ecore_vlan_mac_registry_elem
*pos
;
671 /* Check if we can delete the requested configuration from the first
674 pos
= src_o
->check_del(sc
, src_o
, data
);
676 /* check if configuration can be added */
677 rc
= dst_o
->check_add(sc
, dst_o
, data
);
679 /* If this classification can not be added (is already set)
680 * or can't be deleted - return an error.
688 static int ecore_check_move_always_err(__rte_unused
struct bnx2x_softc
*sc
,
689 __rte_unused
struct ecore_vlan_mac_obj
690 *src_o
, __rte_unused
struct ecore_vlan_mac_obj
691 *dst_o
, __rte_unused
union
692 ecore_classification_ramrod_data
*data
)
697 static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj
700 struct ecore_raw_obj
*raw
= &o
->raw
;
701 uint8_t rx_tx_flag
= 0;
703 if ((raw
->obj_type
== ECORE_OBJ_TYPE_TX
) ||
704 (raw
->obj_type
== ECORE_OBJ_TYPE_RX_TX
))
705 rx_tx_flag
|= ETH_CLASSIFY_CMD_HEADER_TX_CMD
;
707 if ((raw
->obj_type
== ECORE_OBJ_TYPE_RX
) ||
708 (raw
->obj_type
== ECORE_OBJ_TYPE_RX_TX
))
709 rx_tx_flag
|= ETH_CLASSIFY_CMD_HEADER_RX_CMD
;
714 static void ecore_set_mac_in_nig(struct bnx2x_softc
*sc
,
715 int add
, unsigned char *dev_addr
, int index
)
718 uint32_t reg_offset
= ECORE_PORT_ID(sc
) ? NIG_REG_LLH1_FUNC_MEM
:
719 NIG_REG_LLH0_FUNC_MEM
;
721 if (!ECORE_IS_MF_SI_MODE(sc
) && !IS_MF_AFEX(sc
))
724 if (index
> ECORE_LLH_CAM_MAX_PF_LINE
)
727 ECORE_MSG("Going to %s LLH configuration at entry %d",
728 (add
? "ADD" : "DELETE"), index
);
731 /* LLH_FUNC_MEM is a uint64_t WB register */
732 reg_offset
+= 8 * index
;
734 wb_data
[0] = ((dev_addr
[2] << 24) | (dev_addr
[3] << 16) |
735 (dev_addr
[4] << 8) | dev_addr
[5]);
736 wb_data
[1] = ((dev_addr
[0] << 8) | dev_addr
[1]);
738 ECORE_REG_WR_DMAE_LEN(sc
, reg_offset
, wb_data
, 2);
741 REG_WR(sc
, (ECORE_PORT_ID(sc
) ? NIG_REG_LLH1_FUNC_MEM_ENABLE
:
742 NIG_REG_LLH0_FUNC_MEM_ENABLE
) + 4 * index
, add
);
746 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
749 * @o: queue for which we want to configure this rule
750 * @add: if TRUE the command is an ADD command, DEL otherwise
751 * @opcode: CLASSIFY_RULE_OPCODE_XXX
752 * @hdr: pointer to a header to setup
755 static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj
*o
,
757 struct eth_classify_cmd_header
760 struct ecore_raw_obj
*raw
= &o
->raw
;
762 hdr
->client_id
= raw
->cl_id
;
763 hdr
->func_id
= raw
->func_id
;
765 /* Rx or/and Tx (internal switching) configuration ? */
766 hdr
->cmd_general_data
|= ecore_vlan_mac_get_rx_tx_flag(o
);
769 hdr
->cmd_general_data
|= ETH_CLASSIFY_CMD_HEADER_IS_ADD
;
771 hdr
->cmd_general_data
|=
772 (opcode
<< ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT
);
776 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
778 * @cid: connection id
779 * @type: ECORE_FILTER_XXX_PENDING
780 * @hdr: pointer to header to setup
783 * currently we always configure one rule and echo field to contain a CID and an
786 static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid
, int type
, struct eth_classify_header
789 hdr
->echo
= ECORE_CPU_TO_LE32((cid
& ECORE_SWCID_MASK
) |
790 (type
<< ECORE_SWCID_SHIFT
));
791 hdr
->rule_cnt
= (uint8_t) rule_cnt
;
794 /* hw_config() callbacks */
795 static void ecore_set_one_mac_e2(struct bnx2x_softc
*sc
,
796 struct ecore_vlan_mac_obj
*o
,
797 struct ecore_exeq_elem
*elem
, int rule_idx
,
798 __rte_unused
int cam_offset
)
800 struct ecore_raw_obj
*raw
= &o
->raw
;
801 struct eth_classify_rules_ramrod_data
*data
=
802 (struct eth_classify_rules_ramrod_data
*)(raw
->rdata
);
803 int rule_cnt
= rule_idx
+ 1, cmd
= elem
->cmd_data
.vlan_mac
.cmd
;
804 union eth_classify_rule_cmd
*rule_entry
= &data
->rules
[rule_idx
];
805 int add
= (cmd
== ECORE_VLAN_MAC_ADD
) ? TRUE
: FALSE
;
806 unsigned long *vlan_mac_flags
= &elem
->cmd_data
.vlan_mac
.vlan_mac_flags
;
807 uint8_t *mac
= elem
->cmd_data
.vlan_mac
.u
.mac
.mac
;
809 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
810 * relevant. In addition, current implementation is tuned for a
813 * When multiple unicast ETH MACs PF configuration in switch
814 * independent mode is required (NetQ, multiple netdev MACs,
815 * etc.), consider better utilisation of 8 per function MAC
816 * entries in the LLH register. There is also
817 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
818 * total number of CAM entries to 16.
820 * Currently we won't configure NIG for MACs other than a primary ETH
821 * MAC and iSCSI L2 MAC.
823 * If this MAC is moving from one Queue to another, no need to change
826 if (cmd
!= ECORE_VLAN_MAC_MOVE
) {
827 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC
, vlan_mac_flags
))
828 ecore_set_mac_in_nig(sc
, add
, mac
,
829 ECORE_LLH_CAM_ISCSI_ETH_LINE
);
830 else if (ECORE_TEST_BIT(ECORE_ETH_MAC
, vlan_mac_flags
))
831 ecore_set_mac_in_nig(sc
, add
, mac
,
832 ECORE_LLH_CAM_ETH_LINE
);
835 /* Reset the ramrod data buffer for the first rule */
837 ECORE_MEMSET(data
, 0, sizeof(*data
));
839 /* Setup a command header */
840 ecore_vlan_mac_set_cmd_hdr_e2(o
, add
, CLASSIFY_RULE_OPCODE_MAC
,
841 &rule_entry
->mac
.header
);
843 ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
844 (add
? "add" : "delete"), mac
[0], mac
[1], mac
[2], mac
[3],
845 mac
[4], mac
[5], raw
->cl_id
);
847 /* Set a MAC itself */
848 ecore_set_fw_mac_addr(&rule_entry
->mac
.mac_msb
,
849 &rule_entry
->mac
.mac_mid
,
850 &rule_entry
->mac
.mac_lsb
, mac
);
851 rule_entry
->mac
.inner_mac
= elem
->cmd_data
.vlan_mac
.u
.mac
.is_inner_mac
;
853 /* MOVE: Add a rule that will add this MAC to the target Queue */
854 if (cmd
== ECORE_VLAN_MAC_MOVE
) {
858 /* Setup ramrod data */
859 ecore_vlan_mac_set_cmd_hdr_e2(elem
->cmd_data
.
860 vlan_mac
.target_obj
, TRUE
,
861 CLASSIFY_RULE_OPCODE_MAC
,
862 &rule_entry
->mac
.header
);
864 /* Set a MAC itself */
865 ecore_set_fw_mac_addr(&rule_entry
->mac
.mac_msb
,
866 &rule_entry
->mac
.mac_mid
,
867 &rule_entry
->mac
.mac_lsb
, mac
);
868 rule_entry
->mac
.inner_mac
=
869 elem
->cmd_data
.vlan_mac
.u
.mac
.is_inner_mac
;
872 /* Set the ramrod data header */
873 ecore_vlan_mac_set_rdata_hdr_e2(raw
->cid
, raw
->state
, &data
->header
,
878 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
883 * @cam_offset: offset in cam memory
884 * @hdr: pointer to a header to setup
888 static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj
889 *o
, int type
, int cam_offset
, struct mac_configuration_hdr
892 struct ecore_raw_obj
*r
= &o
->raw
;
895 hdr
->offset
= (uint8_t) cam_offset
;
896 hdr
->client_id
= ECORE_CPU_TO_LE16(0xff);
897 hdr
->echo
= ECORE_CPU_TO_LE32((r
->cid
& ECORE_SWCID_MASK
) |
898 (type
<< ECORE_SWCID_SHIFT
));
901 static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj
902 *o
, int add
, int opcode
,
904 uint16_t vlan_id
, struct
905 mac_configuration_entry
908 struct ecore_raw_obj
*r
= &o
->raw
;
909 uint32_t cl_bit_vec
= (1 << r
->cl_id
);
911 cfg_entry
->clients_bit_vector
= ECORE_CPU_TO_LE32(cl_bit_vec
);
912 cfg_entry
->pf_id
= r
->func_id
;
913 cfg_entry
->vlan_id
= ECORE_CPU_TO_LE16(vlan_id
);
916 ECORE_SET_FLAG(cfg_entry
->flags
,
917 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
918 T_ETH_MAC_COMMAND_SET
);
919 ECORE_SET_FLAG(cfg_entry
->flags
,
920 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE
,
923 /* Set a MAC in a ramrod data */
924 ecore_set_fw_mac_addr(&cfg_entry
->msb_mac_addr
,
925 &cfg_entry
->middle_mac_addr
,
926 &cfg_entry
->lsb_mac_addr
, mac
);
928 ECORE_SET_FLAG(cfg_entry
->flags
,
929 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
930 T_ETH_MAC_COMMAND_INVALIDATE
);
933 static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc
*sc
935 struct ecore_vlan_mac_obj
*o
,
936 int type
, int cam_offset
,
937 int add
, uint8_t * mac
,
938 uint16_t vlan_id
, int opcode
,
939 struct mac_configuration_cmd
942 struct mac_configuration_entry
*cfg_entry
= &config
->config_table
[0];
944 ecore_vlan_mac_set_rdata_hdr_e1x(o
, type
, cam_offset
, &config
->hdr
);
945 ecore_vlan_mac_set_cfg_entry_e1x(o
, add
, opcode
, mac
, vlan_id
,
948 ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
949 (add
? "setting" : "clearing"),
950 mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5],
951 o
->raw
.cl_id
, cam_offset
);
955 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
958 * @o: ecore_vlan_mac_obj
959 * @elem: ecore_exeq_elem
960 * @rule_idx: rule_idx
961 * @cam_offset: cam_offset
963 static void ecore_set_one_mac_e1x(struct bnx2x_softc
*sc
,
964 struct ecore_vlan_mac_obj
*o
,
965 struct ecore_exeq_elem
*elem
,
966 __rte_unused
int rule_idx
, int cam_offset
)
968 struct ecore_raw_obj
*raw
= &o
->raw
;
969 struct mac_configuration_cmd
*config
=
970 (struct mac_configuration_cmd
*)(raw
->rdata
);
971 /* 57711 do not support MOVE command,
972 * so it's either ADD or DEL
974 int add
= (elem
->cmd_data
.vlan_mac
.cmd
== ECORE_VLAN_MAC_ADD
) ?
977 /* Reset the ramrod data buffer */
978 ECORE_MEMSET(config
, 0, sizeof(*config
));
980 ecore_vlan_mac_set_rdata_e1x(sc
, o
, raw
->state
,
982 elem
->cmd_data
.vlan_mac
.u
.mac
.mac
, 0,
983 ETH_VLAN_FILTER_ANY_VLAN
, config
);
987 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
990 * @p: command parameters
991 * @ppos: pointer to the cookie
993 * reconfigure next MAC/VLAN/VLAN-MAC element from the
994 * previously configured elements list.
996 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
999 * pointer to the cookie - that should be given back in the next call to make
1000 * function handle the next element. If *ppos is set to NULL it will restart the
1001 * iterator. If returned *ppos == NULL this means that the last element has been
1005 static int ecore_vlan_mac_restore(struct bnx2x_softc
*sc
,
1006 struct ecore_vlan_mac_ramrod_params
*p
,
1007 struct ecore_vlan_mac_registry_elem
**ppos
)
1009 struct ecore_vlan_mac_registry_elem
*pos
;
1010 struct ecore_vlan_mac_obj
*o
= p
->vlan_mac_obj
;
1012 /* If list is empty - there is nothing to do here */
1013 if (ECORE_LIST_IS_EMPTY(&o
->head
)) {
1018 /* make a step... */
1020 *ppos
= ECORE_LIST_FIRST_ENTRY(&o
->head
, struct
1021 ecore_vlan_mac_registry_elem
,
1024 *ppos
= ECORE_LIST_NEXT(*ppos
, link
,
1025 struct ecore_vlan_mac_registry_elem
);
1029 /* If it's the last step - return NULL */
1030 if (ECORE_LIST_IS_LAST(&pos
->link
, &o
->head
))
1033 /* Prepare a 'user_req' */
1034 ECORE_MEMCPY(&p
->user_req
.u
, &pos
->u
, sizeof(pos
->u
));
1036 /* Set the command */
1037 p
->user_req
.cmd
= ECORE_VLAN_MAC_ADD
;
1039 /* Set vlan_mac_flags */
1040 p
->user_req
.vlan_mac_flags
= pos
->vlan_mac_flags
;
1042 /* Set a restore bit */
1043 ECORE_SET_BIT_NA(RAMROD_RESTORE
, &p
->ramrod_flags
);
1045 return ecore_config_vlan_mac(sc
, p
);
1048 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1049 * pointer to an element with a specific criteria and NULL if such an element
1050 * hasn't been found.
1052 static struct ecore_exeq_elem
*ecore_exeq_get_mac(struct ecore_exe_queue_obj
*o
,
1053 struct ecore_exeq_elem
*elem
)
1055 struct ecore_exeq_elem
*pos
;
1056 struct ecore_mac_ramrod_data
*data
= &elem
->cmd_data
.vlan_mac
.u
.mac
;
1058 /* Check pending for execution commands */
1059 ECORE_LIST_FOR_EACH_ENTRY(pos
, &o
->exe_queue
, link
,
1060 struct ecore_exeq_elem
)
1061 if (!ECORE_MEMCMP(&pos
->cmd_data
.vlan_mac
.u
.mac
, data
,
1063 (pos
->cmd_data
.vlan_mac
.cmd
== elem
->cmd_data
.vlan_mac
.cmd
))
1070 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1072 * @sc: device handle
1073 * @qo: ecore_qable_obj
1074 * @elem: ecore_exeq_elem
1076 * Checks that the requested configuration can be added. If yes and if
1077 * requested, consume CAM credit.
1079 * The 'validate' is run after the 'optimize'.
1082 static int ecore_validate_vlan_mac_add(struct bnx2x_softc
*sc
,
1083 union ecore_qable_obj
*qo
,
1084 struct ecore_exeq_elem
*elem
)
1086 struct ecore_vlan_mac_obj
*o
= &qo
->vlan_mac
;
1087 struct ecore_exe_queue_obj
*exeq
= &o
->exe_queue
;
1090 /* Check the registry */
1091 rc
= o
->check_add(sc
, o
, &elem
->cmd_data
.vlan_mac
.u
);
1094 ("ADD command is not allowed considering current registry state.");
1098 /* Check if there is a pending ADD command for this
1099 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1101 if (exeq
->get(exeq
, elem
)) {
1102 ECORE_MSG("There is a pending ADD command already");
1103 return ECORE_EXISTS
;
1106 /* Consume the credit if not requested not to */
1107 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT
,
1108 &elem
->cmd_data
.vlan_mac
.vlan_mac_flags
) ||
1112 return ECORE_SUCCESS
;
1116 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1118 * @sc: device handle
1119 * @qo: quable object to check
1120 * @elem: element that needs to be deleted
1122 * Checks that the requested configuration can be deleted. If yes and if
1123 * requested, returns a CAM credit.
1125 * The 'validate' is run after the 'optimize'.
1127 static int ecore_validate_vlan_mac_del(struct bnx2x_softc
*sc
,
1128 union ecore_qable_obj
*qo
,
1129 struct ecore_exeq_elem
*elem
)
1131 struct ecore_vlan_mac_obj
*o
= &qo
->vlan_mac
;
1132 struct ecore_vlan_mac_registry_elem
*pos
;
1133 struct ecore_exe_queue_obj
*exeq
= &o
->exe_queue
;
1134 struct ecore_exeq_elem query_elem
;
1136 /* If this classification can not be deleted (doesn't exist)
1137 * - return a ECORE_EXIST.
1139 pos
= o
->check_del(sc
, o
, &elem
->cmd_data
.vlan_mac
.u
);
1142 ("DEL command is not allowed considering current registry state");
1143 return ECORE_EXISTS
;
1146 /* Check if there are pending DEL or MOVE commands for this
1147 * MAC/VLAN/VLAN-MAC. Return an error if so.
1149 ECORE_MEMCPY(&query_elem
, elem
, sizeof(query_elem
));
1151 /* Check for MOVE commands */
1152 query_elem
.cmd_data
.vlan_mac
.cmd
= ECORE_VLAN_MAC_MOVE
;
1153 if (exeq
->get(exeq
, &query_elem
)) {
1154 PMD_DRV_LOG(ERR
, "There is a pending MOVE command already");
1158 /* Check for DEL commands */
1159 if (exeq
->get(exeq
, elem
)) {
1160 ECORE_MSG("There is a pending DEL command already");
1161 return ECORE_EXISTS
;
1164 /* Return the credit to the credit pool if not requested not to */
1165 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT
,
1166 &elem
->cmd_data
.vlan_mac
.vlan_mac_flags
) ||
1167 o
->put_credit(o
))) {
1168 PMD_DRV_LOG(ERR
, "Failed to return a credit");
1172 return ECORE_SUCCESS
;
1176 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1178 * @sc: device handle
1179 * @qo: quable object to check (source)
1180 * @elem: element that needs to be moved
1182 * Checks that the requested configuration can be moved. If yes and if
1183 * requested, returns a CAM credit.
1185 * The 'validate' is run after the 'optimize'.
1187 static int ecore_validate_vlan_mac_move(struct bnx2x_softc
*sc
,
1188 union ecore_qable_obj
*qo
,
1189 struct ecore_exeq_elem
*elem
)
1191 struct ecore_vlan_mac_obj
*src_o
= &qo
->vlan_mac
;
1192 struct ecore_vlan_mac_obj
*dest_o
= elem
->cmd_data
.vlan_mac
.target_obj
;
1193 struct ecore_exeq_elem query_elem
;
1194 struct ecore_exe_queue_obj
*src_exeq
= &src_o
->exe_queue
;
1195 struct ecore_exe_queue_obj
*dest_exeq
= &dest_o
->exe_queue
;
1197 /* Check if we can perform this operation based on the current registry
1200 if (!src_o
->check_move(sc
, src_o
, dest_o
, &elem
->cmd_data
.vlan_mac
.u
)) {
1202 ("MOVE command is not allowed considering current registry state");
1206 /* Check if there is an already pending DEL or MOVE command for the
1207 * source object or ADD command for a destination object. Return an
1210 ECORE_MEMCPY(&query_elem
, elem
, sizeof(query_elem
));
1212 /* Check DEL on source */
1213 query_elem
.cmd_data
.vlan_mac
.cmd
= ECORE_VLAN_MAC_DEL
;
1214 if (src_exeq
->get(src_exeq
, &query_elem
)) {
1216 "There is a pending DEL command on the source queue already");
1220 /* Check MOVE on source */
1221 if (src_exeq
->get(src_exeq
, elem
)) {
1222 ECORE_MSG("There is a pending MOVE command already");
1223 return ECORE_EXISTS
;
1226 /* Check ADD on destination */
1227 query_elem
.cmd_data
.vlan_mac
.cmd
= ECORE_VLAN_MAC_ADD
;
1228 if (dest_exeq
->get(dest_exeq
, &query_elem
)) {
1230 "There is a pending ADD command on the destination queue already");
1234 /* Consume the credit if not requested not to */
1235 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST
,
1236 &elem
->cmd_data
.vlan_mac
.vlan_mac_flags
) ||
1237 dest_o
->get_credit(dest_o
)))
1240 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT
,
1241 &elem
->cmd_data
.vlan_mac
.vlan_mac_flags
) ||
1242 src_o
->put_credit(src_o
))) {
1243 /* return the credit taken from dest... */
1244 dest_o
->put_credit(dest_o
);
1248 return ECORE_SUCCESS
;
1251 static int ecore_validate_vlan_mac(struct bnx2x_softc
*sc
,
1252 union ecore_qable_obj
*qo
,
1253 struct ecore_exeq_elem
*elem
)
1255 switch (elem
->cmd_data
.vlan_mac
.cmd
) {
1256 case ECORE_VLAN_MAC_ADD
:
1257 return ecore_validate_vlan_mac_add(sc
, qo
, elem
);
1258 case ECORE_VLAN_MAC_DEL
:
1259 return ecore_validate_vlan_mac_del(sc
, qo
, elem
);
1260 case ECORE_VLAN_MAC_MOVE
:
1261 return ecore_validate_vlan_mac_move(sc
, qo
, elem
);
1267 static int ecore_remove_vlan_mac(__rte_unused
struct bnx2x_softc
*sc
,
1268 union ecore_qable_obj
*qo
,
1269 struct ecore_exeq_elem
*elem
)
1273 /* If consumption wasn't required, nothing to do */
1274 if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT
,
1275 &elem
->cmd_data
.vlan_mac
.vlan_mac_flags
))
1276 return ECORE_SUCCESS
;
1278 switch (elem
->cmd_data
.vlan_mac
.cmd
) {
1279 case ECORE_VLAN_MAC_ADD
:
1280 case ECORE_VLAN_MAC_MOVE
:
1281 rc
= qo
->vlan_mac
.put_credit(&qo
->vlan_mac
);
1283 case ECORE_VLAN_MAC_DEL
:
1284 rc
= qo
->vlan_mac
.get_credit(&qo
->vlan_mac
);
1293 return ECORE_SUCCESS
;
1297 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1299 * @sc: device handle
1300 * @o: ecore_vlan_mac_obj
1303 static int ecore_wait_vlan_mac(struct bnx2x_softc
*sc
,
1304 struct ecore_vlan_mac_obj
*o
)
1307 struct ecore_exe_queue_obj
*exeq
= &o
->exe_queue
;
1308 struct ecore_raw_obj
*raw
= &o
->raw
;
1311 /* Wait for the current command to complete */
1312 rc
= raw
->wait_comp(sc
, raw
);
1316 /* Wait until there are no pending commands */
1317 if (!ecore_exe_queue_empty(exeq
))
1318 ECORE_WAIT(sc
, 1000);
1320 return ECORE_SUCCESS
;
1323 return ECORE_TIMEOUT
;
1326 static int __ecore_vlan_mac_execute_step(struct bnx2x_softc
*sc
,
1327 struct ecore_vlan_mac_obj
*o
,
1328 unsigned long *ramrod_flags
)
1330 int rc
= ECORE_SUCCESS
;
1332 ECORE_SPIN_LOCK_BH(&o
->exe_queue
.lock
);
1334 ECORE_MSG("vlan_mac_execute_step - trying to take writer lock");
1335 rc
= __ecore_vlan_mac_h_write_trylock(sc
, o
);
1337 if (rc
!= ECORE_SUCCESS
) {
1338 __ecore_vlan_mac_h_pend(sc
, o
, *ramrod_flags
);
1340 /** Calling function should not diffrentiate between this case
1341 * and the case in which there is already a pending ramrod
1345 rc
= ecore_exe_queue_step(sc
, &o
->exe_queue
, ramrod_flags
);
1347 ECORE_SPIN_UNLOCK_BH(&o
->exe_queue
.lock
);
1353 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1355 * @sc: device handle
1356 * @o: ecore_vlan_mac_obj
1358 * @cont: if TRUE schedule next execution chunk
1361 static int ecore_complete_vlan_mac(struct bnx2x_softc
*sc
,
1362 struct ecore_vlan_mac_obj
*o
,
1363 union event_ring_elem
*cqe
,
1364 unsigned long *ramrod_flags
)
1366 struct ecore_raw_obj
*r
= &o
->raw
;
1369 /* Reset pending list */
1370 ecore_exe_queue_reset_pending(sc
, &o
->exe_queue
);
1373 r
->clear_pending(r
);
1375 /* If ramrod failed this is most likely a SW bug */
1376 if (cqe
->message
.error
)
1379 /* Run the next bulk of pending commands if requested */
1380 if (ECORE_TEST_BIT(RAMROD_CONT
, ramrod_flags
)) {
1381 rc
= __ecore_vlan_mac_execute_step(sc
, o
, ramrod_flags
);
1386 /* If there is more work to do return PENDING */
1387 if (!ecore_exe_queue_empty(&o
->exe_queue
))
1388 return ECORE_PENDING
;
1390 return ECORE_SUCCESS
;
1394 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1396 * @sc: device handle
1397 * @o: ecore_qable_obj
1398 * @elem: ecore_exeq_elem
1400 static int ecore_optimize_vlan_mac(struct bnx2x_softc
*sc
,
1401 union ecore_qable_obj
*qo
,
1402 struct ecore_exeq_elem
*elem
)
1404 struct ecore_exeq_elem query
, *pos
;
1405 struct ecore_vlan_mac_obj
*o
= &qo
->vlan_mac
;
1406 struct ecore_exe_queue_obj
*exeq
= &o
->exe_queue
;
1408 ECORE_MEMCPY(&query
, elem
, sizeof(query
));
1410 switch (elem
->cmd_data
.vlan_mac
.cmd
) {
1411 case ECORE_VLAN_MAC_ADD
:
1412 query
.cmd_data
.vlan_mac
.cmd
= ECORE_VLAN_MAC_DEL
;
1414 case ECORE_VLAN_MAC_DEL
:
1415 query
.cmd_data
.vlan_mac
.cmd
= ECORE_VLAN_MAC_ADD
;
1418 /* Don't handle anything other than ADD or DEL */
1422 /* If we found the appropriate element - delete it */
1423 pos
= exeq
->get(exeq
, &query
);
1426 /* Return the credit of the optimized command */
1427 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT
,
1428 &pos
->cmd_data
.vlan_mac
.vlan_mac_flags
)) {
1429 if ((query
.cmd_data
.vlan_mac
.cmd
==
1430 ECORE_VLAN_MAC_ADD
) && !o
->put_credit(o
)) {
1432 "Failed to return the credit for the optimized ADD command");
1434 } else if (!o
->get_credit(o
)) { /* VLAN_MAC_DEL */
1436 "Failed to recover the credit from the optimized DEL command");
1441 ECORE_MSG("Optimizing %s command",
1442 (elem
->cmd_data
.vlan_mac
.cmd
== ECORE_VLAN_MAC_ADD
) ?
1445 ECORE_LIST_REMOVE_ENTRY(&pos
->link
, &exeq
->exe_queue
);
1446 ecore_exe_queue_free_elem(sc
, pos
);
1454 * ecore_vlan_mac_get_registry_elem - prepare a registry element
1456 * @sc: device handle
1462 * prepare a registry element according to the current command request.
1464 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc
*sc
,
1465 struct ecore_vlan_mac_obj
*o
,
1466 struct ecore_exeq_elem
*elem
,
1468 ecore_vlan_mac_registry_elem
1471 enum ecore_vlan_mac_cmd cmd
= elem
->cmd_data
.vlan_mac
.cmd
;
1472 struct ecore_vlan_mac_registry_elem
*reg_elem
;
1474 /* Allocate a new registry element if needed. */
1476 ((cmd
== ECORE_VLAN_MAC_ADD
) || (cmd
== ECORE_VLAN_MAC_MOVE
))) {
1477 reg_elem
= ECORE_ZALLOC(sizeof(*reg_elem
), GFP_ATOMIC
, sc
);
1481 /* Get a new CAM offset */
1482 if (!o
->get_cam_offset(o
, ®_elem
->cam_offset
)) {
1483 /* This shall never happen, because we have checked the
1484 * CAM availability in the 'validate'.
1486 ECORE_DBG_BREAK_IF(1);
1487 ECORE_FREE(sc
, reg_elem
, sizeof(*reg_elem
));
1491 ECORE_MSG("Got cam offset %d", reg_elem
->cam_offset
);
1493 /* Set a VLAN-MAC data */
1494 ECORE_MEMCPY(®_elem
->u
, &elem
->cmd_data
.vlan_mac
.u
,
1495 sizeof(reg_elem
->u
));
1497 /* Copy the flags (needed for DEL and RESTORE flows) */
1498 reg_elem
->vlan_mac_flags
=
1499 elem
->cmd_data
.vlan_mac
.vlan_mac_flags
;
1500 } else /* DEL, RESTORE */
1501 reg_elem
= o
->check_del(sc
, o
, &elem
->cmd_data
.vlan_mac
.u
);
1504 return ECORE_SUCCESS
;
1508 * ecore_execute_vlan_mac - execute vlan mac command
1510 * @sc: device handle
1515 * go and send a ramrod!
1517 static int ecore_execute_vlan_mac(struct bnx2x_softc
*sc
,
1518 union ecore_qable_obj
*qo
,
1519 ecore_list_t
* exe_chunk
,
1520 unsigned long *ramrod_flags
)
1522 struct ecore_exeq_elem
*elem
;
1523 struct ecore_vlan_mac_obj
*o
= &qo
->vlan_mac
, *cam_obj
;
1524 struct ecore_raw_obj
*r
= &o
->raw
;
1526 int restore
= ECORE_TEST_BIT(RAMROD_RESTORE
, ramrod_flags
);
1527 int drv_only
= ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, ramrod_flags
);
1528 struct ecore_vlan_mac_registry_elem
*reg_elem
;
1529 enum ecore_vlan_mac_cmd cmd
;
1531 /* If DRIVER_ONLY execution is requested, cleanup a registry
1532 * and exit. Otherwise send a ramrod to FW.
1539 /* Fill the ramrod data */
1540 ECORE_LIST_FOR_EACH_ENTRY(elem
, exe_chunk
, link
,
1541 struct ecore_exeq_elem
) {
1542 cmd
= elem
->cmd_data
.vlan_mac
.cmd
;
1543 /* We will add to the target object in MOVE command, so
1544 * change the object for a CAM search.
1546 if (cmd
== ECORE_VLAN_MAC_MOVE
)
1547 cam_obj
= elem
->cmd_data
.vlan_mac
.target_obj
;
1551 rc
= ecore_vlan_mac_get_registry_elem(sc
, cam_obj
,
1557 ECORE_DBG_BREAK_IF(!reg_elem
);
1559 /* Push a new entry into the registry */
1561 ((cmd
== ECORE_VLAN_MAC_ADD
) ||
1562 (cmd
== ECORE_VLAN_MAC_MOVE
)))
1563 ECORE_LIST_PUSH_HEAD(®_elem
->link
,
1566 /* Configure a single command in a ramrod data buffer */
1567 o
->set_one_rule(sc
, o
, elem
, idx
, reg_elem
->cam_offset
);
1569 /* MOVE command consumes 2 entries in the ramrod data */
1570 if (cmd
== ECORE_VLAN_MAC_MOVE
)
1577 * No need for an explicit memory barrier here as long we would
1578 * need to ensure the ordering of writing to the SPQ element
1579 * and updating of the SPQ producer which involves a memory
1580 * read and we will have to put a full memory barrier there
1581 * (inside ecore_sp_post()).
1584 rc
= ecore_sp_post(sc
, o
->ramrod_cmd
, r
->cid
,
1585 r
->rdata_mapping
, ETH_CONNECTION_TYPE
);
1590 /* Now, when we are done with the ramrod - clean up the registry */
1591 ECORE_LIST_FOR_EACH_ENTRY(elem
, exe_chunk
, link
, struct ecore_exeq_elem
) {
1592 cmd
= elem
->cmd_data
.vlan_mac
.cmd
;
1593 if ((cmd
== ECORE_VLAN_MAC_DEL
) || (cmd
== ECORE_VLAN_MAC_MOVE
)) {
1594 reg_elem
= o
->check_del(sc
, o
,
1595 &elem
->cmd_data
.vlan_mac
.u
);
1597 ECORE_DBG_BREAK_IF(!reg_elem
);
1599 o
->put_cam_offset(o
, reg_elem
->cam_offset
);
1600 ECORE_LIST_REMOVE_ENTRY(®_elem
->link
, &o
->head
);
1601 ECORE_FREE(sc
, reg_elem
, sizeof(*reg_elem
));
1606 return ECORE_PENDING
;
1608 return ECORE_SUCCESS
;
1611 r
->clear_pending(r
);
1613 /* Cleanup a registry in case of a failure */
1614 ECORE_LIST_FOR_EACH_ENTRY(elem
, exe_chunk
, link
, struct ecore_exeq_elem
) {
1615 cmd
= elem
->cmd_data
.vlan_mac
.cmd
;
1617 if (cmd
== ECORE_VLAN_MAC_MOVE
)
1618 cam_obj
= elem
->cmd_data
.vlan_mac
.target_obj
;
1622 /* Delete all newly added above entries */
1624 ((cmd
== ECORE_VLAN_MAC_ADD
) ||
1625 (cmd
== ECORE_VLAN_MAC_MOVE
))) {
1626 reg_elem
= o
->check_del(sc
, cam_obj
,
1627 &elem
->cmd_data
.vlan_mac
.u
);
1629 ECORE_LIST_REMOVE_ENTRY(®_elem
->link
,
1631 ECORE_FREE(sc
, reg_elem
, sizeof(*reg_elem
));
1639 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc
*sc
, struct
1640 ecore_vlan_mac_ramrod_params
*p
)
1642 struct ecore_exeq_elem
*elem
;
1643 struct ecore_vlan_mac_obj
*o
= p
->vlan_mac_obj
;
1644 int restore
= ECORE_TEST_BIT(RAMROD_RESTORE
, &p
->ramrod_flags
);
1646 /* Allocate the execution queue element */
1647 elem
= ecore_exe_queue_alloc_elem(sc
);
1651 /* Set the command 'length' */
1652 switch (p
->user_req
.cmd
) {
1653 case ECORE_VLAN_MAC_MOVE
:
1660 /* Fill the object specific info */
1661 ECORE_MEMCPY(&elem
->cmd_data
.vlan_mac
, &p
->user_req
,
1662 sizeof(p
->user_req
));
1664 /* Try to add a new command to the pending list */
1665 return ecore_exe_queue_add(sc
, &o
->exe_queue
, elem
, restore
);
1669 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1671 * @sc: device handle
1675 int ecore_config_vlan_mac(struct bnx2x_softc
*sc
,
1676 struct ecore_vlan_mac_ramrod_params
*p
)
1678 int rc
= ECORE_SUCCESS
;
1679 struct ecore_vlan_mac_obj
*o
= p
->vlan_mac_obj
;
1680 unsigned long *ramrod_flags
= &p
->ramrod_flags
;
1681 int cont
= ECORE_TEST_BIT(RAMROD_CONT
, ramrod_flags
);
1682 struct ecore_raw_obj
*raw
= &o
->raw
;
1685 * Add new elements to the execution list for commands that require it.
1688 rc
= ecore_vlan_mac_push_new_cmd(sc
, p
);
1693 /* If nothing will be executed further in this iteration we want to
1694 * return PENDING if there are pending commands
1696 if (!ecore_exe_queue_empty(&o
->exe_queue
))
1699 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, ramrod_flags
)) {
1701 ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1702 raw
->clear_pending(raw
);
1705 /* Execute commands if required */
1706 if (cont
|| ECORE_TEST_BIT(RAMROD_EXEC
, ramrod_flags
) ||
1707 ECORE_TEST_BIT(RAMROD_COMP_WAIT
, ramrod_flags
)) {
1708 rc
= __ecore_vlan_mac_execute_step(sc
, p
->vlan_mac_obj
,
1714 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1715 * then user want to wait until the last command is done.
1717 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT
, &p
->ramrod_flags
)) {
1718 /* Wait maximum for the current exe_queue length iterations plus
1719 * one (for the current pending command).
1721 int max_iterations
= ecore_exe_queue_length(&o
->exe_queue
) + 1;
1723 while (!ecore_exe_queue_empty(&o
->exe_queue
) &&
1726 /* Wait for the current command to complete */
1727 rc
= raw
->wait_comp(sc
, raw
);
1731 /* Make a next step */
1732 rc
= __ecore_vlan_mac_execute_step(sc
,
1739 return ECORE_SUCCESS
;
1746 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1748 * @sc: device handle
1751 * @ramrod_flags: execution flags to be used for this deletion
1753 * if the last operation has completed successfully and there are no
1754 * more elements left, positive value if the last operation has completed
1755 * successfully and there are more previously configured elements, negative
1756 * value is current operation has failed.
1758 static int ecore_vlan_mac_del_all(struct bnx2x_softc
*sc
,
1759 struct ecore_vlan_mac_obj
*o
,
1760 unsigned long *vlan_mac_flags
,
1761 unsigned long *ramrod_flags
)
1763 struct ecore_vlan_mac_registry_elem
*pos
= NULL
;
1764 int rc
= 0, read_lock
;
1765 struct ecore_vlan_mac_ramrod_params p
;
1766 struct ecore_exe_queue_obj
*exeq
= &o
->exe_queue
;
1767 struct ecore_exeq_elem
*exeq_pos
, *exeq_pos_n
;
1769 /* Clear pending commands first */
1771 ECORE_SPIN_LOCK_BH(&exeq
->lock
);
1773 ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos
, exeq_pos_n
,
1774 &exeq
->exe_queue
, link
,
1775 struct ecore_exeq_elem
) {
1776 if (exeq_pos
->cmd_data
.vlan_mac
.vlan_mac_flags
==
1778 rc
= exeq
->remove(sc
, exeq
->owner
, exeq_pos
);
1780 PMD_DRV_LOG(ERR
, "Failed to remove command");
1781 ECORE_SPIN_UNLOCK_BH(&exeq
->lock
);
1784 ECORE_LIST_REMOVE_ENTRY(&exeq_pos
->link
,
1786 ecore_exe_queue_free_elem(sc
, exeq_pos
);
1790 ECORE_SPIN_UNLOCK_BH(&exeq
->lock
);
1792 /* Prepare a command request */
1793 ECORE_MEMSET(&p
, 0, sizeof(p
));
1795 p
.ramrod_flags
= *ramrod_flags
;
1796 p
.user_req
.cmd
= ECORE_VLAN_MAC_DEL
;
1798 /* Add all but the last VLAN-MAC to the execution queue without actually
1799 * execution anything.
1801 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT
, &p
.ramrod_flags
);
1802 ECORE_CLEAR_BIT_NA(RAMROD_EXEC
, &p
.ramrod_flags
);
1803 ECORE_CLEAR_BIT_NA(RAMROD_CONT
, &p
.ramrod_flags
);
1805 ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1806 read_lock
= ecore_vlan_mac_h_read_lock(sc
, o
);
1807 if (read_lock
!= ECORE_SUCCESS
)
1810 ECORE_LIST_FOR_EACH_ENTRY(pos
, &o
->head
, link
,
1811 struct ecore_vlan_mac_registry_elem
) {
1812 if (pos
->vlan_mac_flags
== *vlan_mac_flags
) {
1813 p
.user_req
.vlan_mac_flags
= pos
->vlan_mac_flags
;
1814 ECORE_MEMCPY(&p
.user_req
.u
, &pos
->u
, sizeof(pos
->u
));
1815 rc
= ecore_config_vlan_mac(sc
, &p
);
1818 "Failed to add a new DEL command");
1819 ecore_vlan_mac_h_read_unlock(sc
, o
);
1825 ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1826 ecore_vlan_mac_h_read_unlock(sc
, o
);
1828 p
.ramrod_flags
= *ramrod_flags
;
1829 ECORE_SET_BIT_NA(RAMROD_CONT
, &p
.ramrod_flags
);
1831 return ecore_config_vlan_mac(sc
, &p
);
1834 static void ecore_init_raw_obj(struct ecore_raw_obj
*raw
, uint8_t cl_id
,
1835 uint32_t cid
, uint8_t func_id
,
1837 ecore_dma_addr_t rdata_mapping
, int state
,
1838 unsigned long *pstate
, ecore_obj_type type
)
1840 raw
->func_id
= func_id
;
1844 raw
->rdata_mapping
= rdata_mapping
;
1846 raw
->pstate
= pstate
;
1847 raw
->obj_type
= type
;
1848 raw
->check_pending
= ecore_raw_check_pending
;
1849 raw
->clear_pending
= ecore_raw_clear_pending
;
1850 raw
->set_pending
= ecore_raw_set_pending
;
1851 raw
->wait_comp
= ecore_raw_wait
;
1854 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj
*o
,
1855 uint8_t cl_id
, uint32_t cid
,
1856 uint8_t func_id
, void *rdata
,
1857 ecore_dma_addr_t rdata_mapping
,
1858 int state
, unsigned long *pstate
,
1859 ecore_obj_type type
,
1860 struct ecore_credit_pool_obj
1861 *macs_pool
, struct ecore_credit_pool_obj
1864 ECORE_LIST_INIT(&o
->head
);
1866 o
->head_exe_request
= FALSE
;
1867 o
->saved_ramrod_flags
= 0;
1869 o
->macs_pool
= macs_pool
;
1870 o
->vlans_pool
= vlans_pool
;
1872 o
->delete_all
= ecore_vlan_mac_del_all
;
1873 o
->restore
= ecore_vlan_mac_restore
;
1874 o
->complete
= ecore_complete_vlan_mac
;
1875 o
->wait
= ecore_wait_vlan_mac
;
1877 ecore_init_raw_obj(&o
->raw
, cl_id
, cid
, func_id
, rdata
, rdata_mapping
,
1878 state
, pstate
, type
);
1881 void ecore_init_mac_obj(struct bnx2x_softc
*sc
,
1882 struct ecore_vlan_mac_obj
*mac_obj
,
1883 uint8_t cl_id
, uint32_t cid
, uint8_t func_id
,
1884 void *rdata
, ecore_dma_addr_t rdata_mapping
, int state
,
1885 unsigned long *pstate
, ecore_obj_type type
,
1886 struct ecore_credit_pool_obj
*macs_pool
)
1888 union ecore_qable_obj
*qable_obj
= (union ecore_qable_obj
*)mac_obj
;
1890 ecore_init_vlan_mac_common(mac_obj
, cl_id
, cid
, func_id
, rdata
,
1891 rdata_mapping
, state
, pstate
, type
,
1894 /* CAM credit pool handling */
1895 mac_obj
->get_credit
= ecore_get_credit_mac
;
1896 mac_obj
->put_credit
= ecore_put_credit_mac
;
1897 mac_obj
->get_cam_offset
= ecore_get_cam_offset_mac
;
1898 mac_obj
->put_cam_offset
= ecore_put_cam_offset_mac
;
1900 if (CHIP_IS_E1x(sc
)) {
1901 mac_obj
->set_one_rule
= ecore_set_one_mac_e1x
;
1902 mac_obj
->check_del
= ecore_check_mac_del
;
1903 mac_obj
->check_add
= ecore_check_mac_add
;
1904 mac_obj
->check_move
= ecore_check_move_always_err
;
1905 mac_obj
->ramrod_cmd
= RAMROD_CMD_ID_ETH_SET_MAC
;
1908 ecore_exe_queue_init(sc
,
1909 &mac_obj
->exe_queue
, 1, qable_obj
,
1910 ecore_validate_vlan_mac
,
1911 ecore_remove_vlan_mac
,
1912 ecore_optimize_vlan_mac
,
1913 ecore_execute_vlan_mac
,
1914 ecore_exeq_get_mac
);
1916 mac_obj
->set_one_rule
= ecore_set_one_mac_e2
;
1917 mac_obj
->check_del
= ecore_check_mac_del
;
1918 mac_obj
->check_add
= ecore_check_mac_add
;
1919 mac_obj
->check_move
= ecore_check_move
;
1920 mac_obj
->ramrod_cmd
= RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES
;
1921 mac_obj
->get_n_elements
= ecore_get_n_elements
;
1924 ecore_exe_queue_init(sc
,
1925 &mac_obj
->exe_queue
, CLASSIFY_RULES_COUNT
,
1926 qable_obj
, ecore_validate_vlan_mac
,
1927 ecore_remove_vlan_mac
,
1928 ecore_optimize_vlan_mac
,
1929 ecore_execute_vlan_mac
,
1930 ecore_exeq_get_mac
);
1934 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1935 static void __storm_memset_mac_filters(struct bnx2x_softc
*sc
, struct
1936 tstorm_eth_mac_filter_config
1937 *mac_filters
, uint16_t pf_id
)
1939 size_t size
= sizeof(struct tstorm_eth_mac_filter_config
);
1941 uint32_t addr
= BAR_TSTRORM_INTMEM
+
1942 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id
);
1944 ecore_storm_memset_struct(sc
, addr
, size
, (uint32_t *) mac_filters
);
1947 static int ecore_set_rx_mode_e1x(struct bnx2x_softc
*sc
,
1948 struct ecore_rx_mode_ramrod_params
*p
)
1950 /* update the sc MAC filter structure */
1951 uint32_t mask
= (1 << p
->cl_id
);
1953 struct tstorm_eth_mac_filter_config
*mac_filters
=
1954 (struct tstorm_eth_mac_filter_config
*)p
->rdata
;
1956 /* initial setting is drop-all */
1957 uint8_t drop_all_ucast
= 1, drop_all_mcast
= 1;
1958 uint8_t accp_all_ucast
= 0, accp_all_bcast
= 0, accp_all_mcast
= 0;
1959 uint8_t unmatched_unicast
= 0;
1961 /* In e1x there we only take into account rx accept flag since tx switching
1963 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST
, &p
->rx_accept_flags
))
1964 /* accept matched ucast */
1967 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST
, &p
->rx_accept_flags
))
1968 /* accept matched mcast */
1971 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST
, &p
->rx_accept_flags
)) {
1972 /* accept all mcast */
1976 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST
, &p
->rx_accept_flags
)) {
1977 /* accept all mcast */
1981 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST
, &p
->rx_accept_flags
))
1982 /* accept (all) bcast */
1984 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED
, &p
->rx_accept_flags
))
1985 /* accept unmatched unicasts */
1986 unmatched_unicast
= 1;
1988 mac_filters
->ucast_drop_all
= drop_all_ucast
?
1989 mac_filters
->ucast_drop_all
| mask
:
1990 mac_filters
->ucast_drop_all
& ~mask
;
1992 mac_filters
->mcast_drop_all
= drop_all_mcast
?
1993 mac_filters
->mcast_drop_all
| mask
:
1994 mac_filters
->mcast_drop_all
& ~mask
;
1996 mac_filters
->ucast_accept_all
= accp_all_ucast
?
1997 mac_filters
->ucast_accept_all
| mask
:
1998 mac_filters
->ucast_accept_all
& ~mask
;
2000 mac_filters
->mcast_accept_all
= accp_all_mcast
?
2001 mac_filters
->mcast_accept_all
| mask
:
2002 mac_filters
->mcast_accept_all
& ~mask
;
2004 mac_filters
->bcast_accept_all
= accp_all_bcast
?
2005 mac_filters
->bcast_accept_all
| mask
:
2006 mac_filters
->bcast_accept_all
& ~mask
;
2008 mac_filters
->unmatched_unicast
= unmatched_unicast
?
2009 mac_filters
->unmatched_unicast
| mask
:
2010 mac_filters
->unmatched_unicast
& ~mask
;
2012 ECORE_MSG("drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x"
2013 "accp_mcast 0x%xaccp_bcast 0x%x",
2014 mac_filters
->ucast_drop_all
, mac_filters
->mcast_drop_all
,
2015 mac_filters
->ucast_accept_all
, mac_filters
->mcast_accept_all
,
2016 mac_filters
->bcast_accept_all
);
2018 /* write the MAC filter structure */
2019 __storm_memset_mac_filters(sc
, mac_filters
, p
->func_id
);
2021 /* The operation is completed */
2022 ECORE_CLEAR_BIT(p
->state
, p
->pstate
);
2023 ECORE_SMP_MB_AFTER_CLEAR_BIT();
2025 return ECORE_SUCCESS
;
2028 /* Setup ramrod data */
2029 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid
, struct eth_classify_header
2030 *hdr
, uint8_t rule_cnt
)
2032 hdr
->echo
= ECORE_CPU_TO_LE32(cid
);
2033 hdr
->rule_cnt
= rule_cnt
;
2036 static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags
, struct eth_filter_rules_cmd
2037 *cmd
, int clear_accept_all
)
2041 /* start with 'drop-all' */
2042 state
= ETH_FILTER_RULES_CMD_UCAST_DROP_ALL
|
2043 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL
;
2045 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST
, accept_flags
))
2046 state
&= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL
;
2048 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST
, accept_flags
))
2049 state
&= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL
;
2051 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST
, accept_flags
)) {
2052 state
&= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL
;
2053 state
|= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL
;
2056 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST
, accept_flags
)) {
2057 state
|= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL
;
2058 state
&= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL
;
2060 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST
, accept_flags
))
2061 state
|= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL
;
2063 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED
, accept_flags
)) {
2064 state
&= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL
;
2065 state
|= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED
;
2067 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN
, accept_flags
))
2068 state
|= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN
;
2070 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2071 if (clear_accept_all
) {
2072 state
&= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL
;
2073 state
&= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL
;
2074 state
&= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL
;
2075 state
&= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED
;
2078 cmd
->state
= ECORE_CPU_TO_LE16(state
);
2081 static int ecore_set_rx_mode_e2(struct bnx2x_softc
*sc
,
2082 struct ecore_rx_mode_ramrod_params
*p
)
2084 struct eth_filter_rules_ramrod_data
*data
= p
->rdata
;
2086 uint8_t rule_idx
= 0;
2088 /* Reset the ramrod data buffer */
2089 ECORE_MEMSET(data
, 0, sizeof(*data
));
2091 /* Setup ramrod data */
2093 /* Tx (internal switching) */
2094 if (ECORE_TEST_BIT(RAMROD_TX
, &p
->ramrod_flags
)) {
2095 data
->rules
[rule_idx
].client_id
= p
->cl_id
;
2096 data
->rules
[rule_idx
].func_id
= p
->func_id
;
2098 data
->rules
[rule_idx
].cmd_general_data
=
2099 ETH_FILTER_RULES_CMD_TX_CMD
;
2101 ecore_rx_mode_set_cmd_state_e2(&p
->tx_accept_flags
,
2102 &(data
->rules
[rule_idx
++]),
2107 if (ECORE_TEST_BIT(RAMROD_RX
, &p
->ramrod_flags
)) {
2108 data
->rules
[rule_idx
].client_id
= p
->cl_id
;
2109 data
->rules
[rule_idx
].func_id
= p
->func_id
;
2111 data
->rules
[rule_idx
].cmd_general_data
=
2112 ETH_FILTER_RULES_CMD_RX_CMD
;
2114 ecore_rx_mode_set_cmd_state_e2(&p
->rx_accept_flags
,
2115 &(data
->rules
[rule_idx
++]),
2119 /* If FCoE Queue configuration has been requested configure the Rx and
2120 * internal switching modes for this queue in separate rules.
2122 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2123 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2125 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH
, &p
->rx_mode_flags
)) {
2126 /* Tx (internal switching) */
2127 if (ECORE_TEST_BIT(RAMROD_TX
, &p
->ramrod_flags
)) {
2128 data
->rules
[rule_idx
].client_id
= ECORE_FCOE_CID(sc
);
2129 data
->rules
[rule_idx
].func_id
= p
->func_id
;
2131 data
->rules
[rule_idx
].cmd_general_data
=
2132 ETH_FILTER_RULES_CMD_TX_CMD
;
2134 ecore_rx_mode_set_cmd_state_e2(&p
->tx_accept_flags
,
2136 [rule_idx
++]), TRUE
);
2140 if (ECORE_TEST_BIT(RAMROD_RX
, &p
->ramrod_flags
)) {
2141 data
->rules
[rule_idx
].client_id
= ECORE_FCOE_CID(sc
);
2142 data
->rules
[rule_idx
].func_id
= p
->func_id
;
2144 data
->rules
[rule_idx
].cmd_general_data
=
2145 ETH_FILTER_RULES_CMD_RX_CMD
;
2147 ecore_rx_mode_set_cmd_state_e2(&p
->rx_accept_flags
,
2149 [rule_idx
++]), TRUE
);
2153 /* Set the ramrod header (most importantly - number of rules to
2156 ecore_rx_mode_set_rdata_hdr_e2(p
->cid
, &data
->header
, rule_idx
);
2159 ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2160 data
->header
.rule_cnt
, p
->rx_accept_flags
, p
->tx_accept_flags
);
2162 /* No need for an explicit memory barrier here as long we would
2163 * need to ensure the ordering of writing to the SPQ element
2164 * and updating of the SPQ producer which involves a memory
2165 * read and we will have to put a full memory barrier there
2166 * (inside ecore_sp_post()).
2170 rc
= ecore_sp_post(sc
,
2171 RAMROD_CMD_ID_ETH_FILTER_RULES
,
2172 p
->cid
, p
->rdata_mapping
, ETH_CONNECTION_TYPE
);
2176 /* Ramrod completion is pending */
2177 return ECORE_PENDING
;
2180 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc
*sc
,
2181 struct ecore_rx_mode_ramrod_params
*p
)
2183 return ecore_state_wait(sc
, p
->state
, p
->pstate
);
2186 static int ecore_empty_rx_mode_wait(__rte_unused
struct bnx2x_softc
*sc
,
2188 ecore_rx_mode_ramrod_params
*p
)
2191 return ECORE_SUCCESS
;
2194 int ecore_config_rx_mode(struct bnx2x_softc
*sc
,
2195 struct ecore_rx_mode_ramrod_params
*p
)
2199 /* Configure the new classification in the chip */
2200 if (p
->rx_mode_obj
->config_rx_mode
) {
2201 rc
= p
->rx_mode_obj
->config_rx_mode(sc
, p
);
2205 /* Wait for a ramrod completion if was requested */
2206 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT
, &p
->ramrod_flags
)) {
2207 rc
= p
->rx_mode_obj
->wait_comp(sc
, p
);
2212 ECORE_MSG("ERROR: config_rx_mode is NULL");
2219 void ecore_init_rx_mode_obj(struct bnx2x_softc
*sc
, struct ecore_rx_mode_obj
*o
)
2221 if (CHIP_IS_E1x(sc
)) {
2222 o
->wait_comp
= ecore_empty_rx_mode_wait
;
2223 o
->config_rx_mode
= ecore_set_rx_mode_e1x
;
2225 o
->wait_comp
= ecore_wait_rx_mode_comp_e2
;
2226 o
->config_rx_mode
= ecore_set_rx_mode_e2
;
2230 /********************* Multicast verbs: SET, CLEAR ****************************/
2231 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac
)
2233 return (ECORE_CRC32_LE(0, mac
, ETH_ALEN
) >> 24) & 0xff;
2236 struct ecore_mcast_mac_elem
{
2237 ecore_list_entry_t link
;
2238 uint8_t mac
[ETH_ALEN
];
2239 uint8_t pad
[2]; /* For a natural alignment of the following buffer */
2242 struct ecore_pending_mcast_cmd
{
2243 ecore_list_entry_t link
;
2244 int type
; /* ECORE_MCAST_CMD_X */
2246 ecore_list_t macs_head
;
2247 uint32_t macs_num
; /* Needed for DEL command */
2248 int next_bin
; /* Needed for RESTORE flow with aprox match */
2251 int done
; /* set to TRUE, when the command has been handled,
2252 * practically used in 57712 handling only, where one pending
2253 * command may be handled in a few operations. As long as for
2254 * other chips every operation handling is completed in a
2255 * single ramrod, there is no need to utilize this field.
2259 static int ecore_mcast_wait(struct bnx2x_softc
*sc
, struct ecore_mcast_obj
*o
)
2261 if (ecore_state_wait(sc
, o
->sched_state
, o
->raw
.pstate
) ||
2262 o
->raw
.wait_comp(sc
, &o
->raw
))
2263 return ECORE_TIMEOUT
;
2265 return ECORE_SUCCESS
;
2268 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc
*sc __rte_unused
,
2269 struct ecore_mcast_obj
*o
,
2270 struct ecore_mcast_ramrod_params
*p
,
2271 enum ecore_mcast_cmd cmd
)
2274 struct ecore_pending_mcast_cmd
*new_cmd
;
2275 struct ecore_mcast_mac_elem
*cur_mac
= NULL
;
2276 struct ecore_mcast_list_elem
*pos
;
2277 int macs_list_len
= ((cmd
== ECORE_MCAST_CMD_ADD
) ?
2278 p
->mcast_list_len
: 0);
2280 /* If the command is empty ("handle pending commands only"), break */
2281 if (!p
->mcast_list_len
)
2282 return ECORE_SUCCESS
;
2284 total_sz
= sizeof(*new_cmd
) +
2285 macs_list_len
* sizeof(struct ecore_mcast_mac_elem
);
2287 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2288 new_cmd
= ECORE_ZALLOC(total_sz
, GFP_ATOMIC
, sc
);
2293 ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d",
2294 cmd
, macs_list_len
);
2296 ECORE_LIST_INIT(&new_cmd
->data
.macs_head
);
2298 new_cmd
->type
= cmd
;
2299 new_cmd
->done
= FALSE
;
2302 case ECORE_MCAST_CMD_ADD
:
2303 cur_mac
= (struct ecore_mcast_mac_elem
*)
2304 ((uint8_t *) new_cmd
+ sizeof(*new_cmd
));
2306 /* Push the MACs of the current command into the pending command
2309 ECORE_LIST_FOR_EACH_ENTRY(pos
, &p
->mcast_list
, link
,
2310 struct ecore_mcast_list_elem
) {
2311 ECORE_MEMCPY(cur_mac
->mac
, pos
->mac
, ETH_ALEN
);
2312 ECORE_LIST_PUSH_TAIL(&cur_mac
->link
,
2313 &new_cmd
->data
.macs_head
);
2319 case ECORE_MCAST_CMD_DEL
:
2320 new_cmd
->data
.macs_num
= p
->mcast_list_len
;
2323 case ECORE_MCAST_CMD_RESTORE
:
2324 new_cmd
->data
.next_bin
= 0;
2328 ECORE_FREE(sc
, new_cmd
, total_sz
);
2329 PMD_DRV_LOG(ERR
, "Unknown command: %d", cmd
);
2333 /* Push the new pending command to the tail of the pending list: FIFO */
2334 ECORE_LIST_PUSH_TAIL(&new_cmd
->link
, &o
->pending_cmds_head
);
2338 return ECORE_PENDING
;
2342 * ecore_mcast_get_next_bin - get the next set bin (index)
2345 * @last: index to start looking from (including)
2347 * returns the next found (set) bin or a negative value if none is found.
2349 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj
*o
, int last
)
2351 int i
, j
, inner_start
= last
% BIT_VEC64_ELEM_SZ
;
2353 for (i
= last
/ BIT_VEC64_ELEM_SZ
; i
< ECORE_MCAST_VEC_SZ
; i
++) {
2354 if (o
->registry
.aprox_match
.vec
[i
])
2355 for (j
= inner_start
; j
< BIT_VEC64_ELEM_SZ
; j
++) {
2356 int cur_bit
= j
+ BIT_VEC64_ELEM_SZ
* i
;
2357 if (BIT_VEC64_TEST_BIT
2358 (o
->registry
.aprox_match
.vec
, cur_bit
)) {
2370 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2374 * returns the index of the found bin or -1 if none is found
2376 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj
*o
)
2378 int cur_bit
= ecore_mcast_get_next_bin(o
, 0);
2381 BIT_VEC64_CLEAR_BIT(o
->registry
.aprox_match
.vec
, cur_bit
);
2386 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj
*o
)
2388 struct ecore_raw_obj
*raw
= &o
->raw
;
2389 uint8_t rx_tx_flag
= 0;
2391 if ((raw
->obj_type
== ECORE_OBJ_TYPE_TX
) ||
2392 (raw
->obj_type
== ECORE_OBJ_TYPE_RX_TX
))
2393 rx_tx_flag
|= ETH_MULTICAST_RULES_CMD_TX_CMD
;
2395 if ((raw
->obj_type
== ECORE_OBJ_TYPE_RX
) ||
2396 (raw
->obj_type
== ECORE_OBJ_TYPE_RX_TX
))
2397 rx_tx_flag
|= ETH_MULTICAST_RULES_CMD_RX_CMD
;
2402 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc
*sc __rte_unused
,
2403 struct ecore_mcast_obj
*o
, int idx
,
2404 union ecore_mcast_config_data
*cfg_data
,
2405 enum ecore_mcast_cmd cmd
)
2407 struct ecore_raw_obj
*r
= &o
->raw
;
2408 struct eth_multicast_rules_ramrod_data
*data
=
2409 (struct eth_multicast_rules_ramrod_data
*)(r
->rdata
);
2410 uint8_t func_id
= r
->func_id
;
2411 uint8_t rx_tx_add_flag
= ecore_mcast_get_rx_tx_flag(o
);
2414 if ((cmd
== ECORE_MCAST_CMD_ADD
) || (cmd
== ECORE_MCAST_CMD_RESTORE
))
2415 rx_tx_add_flag
|= ETH_MULTICAST_RULES_CMD_IS_ADD
;
2417 data
->rules
[idx
].cmd_general_data
|= rx_tx_add_flag
;
2419 /* Get a bin and update a bins' vector */
2421 case ECORE_MCAST_CMD_ADD
:
2422 bin
= ecore_mcast_bin_from_mac(cfg_data
->mac
);
2423 BIT_VEC64_SET_BIT(o
->registry
.aprox_match
.vec
, bin
);
2426 case ECORE_MCAST_CMD_DEL
:
2427 /* If there were no more bins to clear
2428 * (ecore_mcast_clear_first_bin() returns -1) then we would
2429 * clear any (0xff) bin.
2430 * See ecore_mcast_validate_e2() for explanation when it may
2433 bin
= ecore_mcast_clear_first_bin(o
);
2436 case ECORE_MCAST_CMD_RESTORE
:
2437 bin
= cfg_data
->bin
;
2441 PMD_DRV_LOG(ERR
, "Unknown command: %d", cmd
);
2445 ECORE_MSG("%s bin %d",
2446 ((rx_tx_add_flag
& ETH_MULTICAST_RULES_CMD_IS_ADD
) ?
2447 "Setting" : "Clearing"), bin
);
2449 data
->rules
[idx
].bin_id
= (uint8_t) bin
;
2450 data
->rules
[idx
].func_id
= func_id
;
2451 data
->rules
[idx
].engine_id
= o
->engine_id
;
2455 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2457 * @sc: device handle
2459 * @start_bin: index in the registry to start from (including)
2460 * @rdata_idx: index in the ramrod data to start from
2462 * returns last handled bin index or -1 if all bins have been handled
2464 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc
*sc
,
2465 struct ecore_mcast_obj
*o
,
2466 int start_bin
, int *rdata_idx
)
2468 int cur_bin
, cnt
= *rdata_idx
;
2469 union ecore_mcast_config_data cfg_data
= { NULL
};
2471 /* go through the registry and configure the bins from it */
2472 for (cur_bin
= ecore_mcast_get_next_bin(o
, start_bin
); cur_bin
>= 0;
2473 cur_bin
= ecore_mcast_get_next_bin(o
, cur_bin
+ 1)) {
2475 cfg_data
.bin
= (uint8_t) cur_bin
;
2476 o
->set_one_rule(sc
, o
, cnt
, &cfg_data
, ECORE_MCAST_CMD_RESTORE
);
2480 ECORE_MSG("About to configure a bin %d", cur_bin
);
2482 /* Break if we reached the maximum number
2485 if (cnt
>= o
->max_cmd_len
)
2494 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc
*sc
,
2495 struct ecore_mcast_obj
*o
,
2496 struct ecore_pending_mcast_cmd
2497 *cmd_pos
, int *line_idx
)
2499 struct ecore_mcast_mac_elem
*pmac_pos
, *pmac_pos_n
;
2500 int cnt
= *line_idx
;
2501 union ecore_mcast_config_data cfg_data
= { NULL
};
2503 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos
, pmac_pos_n
,
2504 &cmd_pos
->data
.macs_head
, link
,
2505 struct ecore_mcast_mac_elem
) {
2507 cfg_data
.mac
= &pmac_pos
->mac
[0];
2508 o
->set_one_rule(sc
, o
, cnt
, &cfg_data
, cmd_pos
->type
);
2513 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2514 pmac_pos
->mac
[0], pmac_pos
->mac
[1], pmac_pos
->mac
[2],
2515 pmac_pos
->mac
[3], pmac_pos
->mac
[4], pmac_pos
->mac
[5]);
2517 ECORE_LIST_REMOVE_ENTRY(&pmac_pos
->link
,
2518 &cmd_pos
->data
.macs_head
);
2520 /* Break if we reached the maximum number
2523 if (cnt
>= o
->max_cmd_len
)
2529 /* if no more MACs to configure - we are done */
2530 if (ECORE_LIST_IS_EMPTY(&cmd_pos
->data
.macs_head
))
2531 cmd_pos
->done
= TRUE
;
2534 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc
*sc
,
2535 struct ecore_mcast_obj
*o
,
2536 struct ecore_pending_mcast_cmd
2537 *cmd_pos
, int *line_idx
)
2539 int cnt
= *line_idx
;
2541 while (cmd_pos
->data
.macs_num
) {
2542 o
->set_one_rule(sc
, o
, cnt
, NULL
, cmd_pos
->type
);
2546 cmd_pos
->data
.macs_num
--;
2548 ECORE_MSG("Deleting MAC. %d left,cnt is %d",
2549 cmd_pos
->data
.macs_num
, cnt
);
2551 /* Break if we reached the maximum
2554 if (cnt
>= o
->max_cmd_len
)
2560 /* If we cleared all bins - we are done */
2561 if (!cmd_pos
->data
.macs_num
)
2562 cmd_pos
->done
= TRUE
;
2565 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc
*sc
,
2566 struct ecore_mcast_obj
*o
, struct
2567 ecore_pending_mcast_cmd
2568 *cmd_pos
, int *line_idx
)
2570 cmd_pos
->data
.next_bin
= o
->hdl_restore(sc
, o
, cmd_pos
->data
.next_bin
,
2573 if (cmd_pos
->data
.next_bin
< 0)
2574 /* If o->set_restore returned -1 we are done */
2575 cmd_pos
->done
= TRUE
;
2577 /* Start from the next bin next time */
2578 cmd_pos
->data
.next_bin
++;
2581 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc
*sc
, struct
2582 ecore_mcast_ramrod_params
2585 struct ecore_pending_mcast_cmd
*cmd_pos
, *cmd_pos_n
;
2587 struct ecore_mcast_obj
*o
= p
->mcast_obj
;
2589 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos
, cmd_pos_n
,
2590 &o
->pending_cmds_head
, link
,
2591 struct ecore_pending_mcast_cmd
) {
2592 switch (cmd_pos
->type
) {
2593 case ECORE_MCAST_CMD_ADD
:
2594 ecore_mcast_hdl_pending_add_e2(sc
, o
, cmd_pos
, &cnt
);
2597 case ECORE_MCAST_CMD_DEL
:
2598 ecore_mcast_hdl_pending_del_e2(sc
, o
, cmd_pos
, &cnt
);
2601 case ECORE_MCAST_CMD_RESTORE
:
2602 ecore_mcast_hdl_pending_restore_e2(sc
, o
, cmd_pos
,
2607 PMD_DRV_LOG(ERR
, "Unknown command: %d", cmd_pos
->type
);
2611 /* If the command has been completed - remove it from the list
2612 * and free the memory
2614 if (cmd_pos
->done
) {
2615 ECORE_LIST_REMOVE_ENTRY(&cmd_pos
->link
,
2616 &o
->pending_cmds_head
);
2617 ECORE_FREE(sc
, cmd_pos
, cmd_pos
->alloc_len
);
2620 /* Break if we reached the maximum number of rules */
2621 if (cnt
>= o
->max_cmd_len
)
2628 static void ecore_mcast_hdl_add(struct bnx2x_softc
*sc
,
2629 struct ecore_mcast_obj
*o
,
2630 struct ecore_mcast_ramrod_params
*p
,
2633 struct ecore_mcast_list_elem
*mlist_pos
;
2634 union ecore_mcast_config_data cfg_data
= { NULL
};
2635 int cnt
= *line_idx
;
2637 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos
, &p
->mcast_list
, link
,
2638 struct ecore_mcast_list_elem
) {
2639 cfg_data
.mac
= mlist_pos
->mac
;
2640 o
->set_one_rule(sc
, o
, cnt
, &cfg_data
, ECORE_MCAST_CMD_ADD
);
2645 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2646 mlist_pos
->mac
[0], mlist_pos
->mac
[1], mlist_pos
->mac
[2],
2647 mlist_pos
->mac
[3], mlist_pos
->mac
[4], mlist_pos
->mac
[5]);
2653 static void ecore_mcast_hdl_del(struct bnx2x_softc
*sc
,
2654 struct ecore_mcast_obj
*o
,
2655 struct ecore_mcast_ramrod_params
*p
,
2658 int cnt
= *line_idx
, i
;
2660 for (i
= 0; i
< p
->mcast_list_len
; i
++) {
2661 o
->set_one_rule(sc
, o
, cnt
, NULL
, ECORE_MCAST_CMD_DEL
);
2665 ECORE_MSG("Deleting MAC. %d left", p
->mcast_list_len
- i
- 1);
2672 * ecore_mcast_handle_current_cmd -
2674 * @sc: device handle
2677 * @start_cnt: first line in the ramrod data that may be used
2679 * This function is called iff there is enough place for the current command in
2681 * Returns number of lines filled in the ramrod data in total.
2683 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc
*sc
, struct
2684 ecore_mcast_ramrod_params
*p
,
2685 enum ecore_mcast_cmd cmd
,
2688 struct ecore_mcast_obj
*o
= p
->mcast_obj
;
2689 int cnt
= start_cnt
;
2691 ECORE_MSG("p->mcast_list_len=%d", p
->mcast_list_len
);
2694 case ECORE_MCAST_CMD_ADD
:
2695 ecore_mcast_hdl_add(sc
, o
, p
, &cnt
);
2698 case ECORE_MCAST_CMD_DEL
:
2699 ecore_mcast_hdl_del(sc
, o
, p
, &cnt
);
2702 case ECORE_MCAST_CMD_RESTORE
:
2703 o
->hdl_restore(sc
, o
, 0, &cnt
);
2707 PMD_DRV_LOG(ERR
, "Unknown command: %d", cmd
);
2711 /* The current command has been handled */
2712 p
->mcast_list_len
= 0;
2717 static int ecore_mcast_validate_e2(__rte_unused
struct bnx2x_softc
*sc
,
2718 struct ecore_mcast_ramrod_params
*p
,
2719 enum ecore_mcast_cmd cmd
)
2721 struct ecore_mcast_obj
*o
= p
->mcast_obj
;
2722 int reg_sz
= o
->get_registry_size(o
);
2725 /* DEL command deletes all currently configured MACs */
2726 case ECORE_MCAST_CMD_DEL
:
2727 o
->set_registry_size(o
, 0);
2730 /* RESTORE command will restore the entire multicast configuration */
2731 case ECORE_MCAST_CMD_RESTORE
:
2732 /* Here we set the approximate amount of work to do, which in
2733 * fact may be only less as some MACs in postponed ADD
2734 * command(s) scheduled before this command may fall into
2735 * the same bin and the actual number of bins set in the
2736 * registry would be less than we estimated here. See
2737 * ecore_mcast_set_one_rule_e2() for further details.
2739 p
->mcast_list_len
= reg_sz
;
2742 case ECORE_MCAST_CMD_ADD
:
2743 case ECORE_MCAST_CMD_CONT
:
2744 /* Here we assume that all new MACs will fall into new bins.
2745 * However we will correct the real registry size after we
2746 * handle all pending commands.
2748 o
->set_registry_size(o
, reg_sz
+ p
->mcast_list_len
);
2752 PMD_DRV_LOG(ERR
, "Unknown command: %d", cmd
);
2756 /* Increase the total number of MACs pending to be configured */
2757 o
->total_pending_num
+= p
->mcast_list_len
;
2759 return ECORE_SUCCESS
;
2762 static void ecore_mcast_revert_e2(__rte_unused
struct bnx2x_softc
*sc
,
2763 struct ecore_mcast_ramrod_params
*p
,
2766 struct ecore_mcast_obj
*o
= p
->mcast_obj
;
2768 o
->set_registry_size(o
, old_num_bins
);
2769 o
->total_pending_num
-= p
->mcast_list_len
;
2773 * ecore_mcast_set_rdata_hdr_e2 - sets a header values
2775 * @sc: device handle
2777 * @len: number of rules to handle
2779 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused
struct bnx2x_softc
2780 *sc
, struct ecore_mcast_ramrod_params
2783 struct ecore_raw_obj
*r
= &p
->mcast_obj
->raw
;
2784 struct eth_multicast_rules_ramrod_data
*data
=
2785 (struct eth_multicast_rules_ramrod_data
*)(r
->rdata
);
2787 data
->header
.echo
= ECORE_CPU_TO_LE32((r
->cid
& ECORE_SWCID_MASK
) |
2788 (ECORE_FILTER_MCAST_PENDING
<<
2789 ECORE_SWCID_SHIFT
));
2790 data
->header
.rule_cnt
= len
;
2794 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2796 * @sc: device handle
2799 * Recalculate the actual number of set bins in the registry using Brian
2800 * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2802 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj
*o
)
2807 for (i
= 0; i
< ECORE_MCAST_VEC_SZ
; i
++) {
2808 elem
= o
->registry
.aprox_match
.vec
[i
];
2813 o
->set_registry_size(o
, cnt
);
2815 return ECORE_SUCCESS
;
2818 static int ecore_mcast_setup_e2(struct bnx2x_softc
*sc
,
2819 struct ecore_mcast_ramrod_params
*p
,
2820 enum ecore_mcast_cmd cmd
)
2822 struct ecore_raw_obj
*raw
= &p
->mcast_obj
->raw
;
2823 struct ecore_mcast_obj
*o
= p
->mcast_obj
;
2824 struct eth_multicast_rules_ramrod_data
*data
=
2825 (struct eth_multicast_rules_ramrod_data
*)(raw
->rdata
);
2828 /* Reset the ramrod data buffer */
2829 ECORE_MEMSET(data
, 0, sizeof(*data
));
2831 cnt
= ecore_mcast_handle_pending_cmds_e2(sc
, p
);
2833 /* If there are no more pending commands - clear SCHEDULED state */
2834 if (ECORE_LIST_IS_EMPTY(&o
->pending_cmds_head
))
2837 /* The below may be TRUE iff there was enough room in ramrod
2838 * data for all pending commands and for the current
2839 * command. Otherwise the current command would have been added
2840 * to the pending commands and p->mcast_list_len would have been
2843 if (p
->mcast_list_len
> 0)
2844 cnt
= ecore_mcast_handle_current_cmd(sc
, p
, cmd
, cnt
);
2846 /* We've pulled out some MACs - update the total number of
2849 o
->total_pending_num
-= cnt
;
2852 ECORE_DBG_BREAK_IF(o
->total_pending_num
< 0);
2853 ECORE_DBG_BREAK_IF(cnt
> o
->max_cmd_len
);
2855 ecore_mcast_set_rdata_hdr_e2(sc
, p
, (uint8_t) cnt
);
2857 /* Update a registry size if there are no more pending operations.
2859 * We don't want to change the value of the registry size if there are
2860 * pending operations because we want it to always be equal to the
2861 * exact or the approximate number (see ecore_mcast_validate_e2()) of
2862 * set bins after the last requested operation in order to properly
2863 * evaluate the size of the next DEL/RESTORE operation.
2865 * Note that we update the registry itself during command(s) handling
2866 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2867 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2868 * with a limited amount of update commands (per MAC/bin) and we don't
2869 * know in this scope what the actual state of bins configuration is
2870 * going to be after this ramrod.
2872 if (!o
->total_pending_num
)
2873 ecore_mcast_refresh_registry_e2(o
);
2875 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2876 * RAMROD_PENDING status immediately.
2878 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, &p
->ramrod_flags
)) {
2879 raw
->clear_pending(raw
);
2880 return ECORE_SUCCESS
;
2882 /* No need for an explicit memory barrier here as long we would
2883 * need to ensure the ordering of writing to the SPQ element
2884 * and updating of the SPQ producer which involves a memory
2885 * read and we will have to put a full memory barrier there
2886 * (inside ecore_sp_post()).
2890 rc
= ecore_sp_post(sc
,
2891 RAMROD_CMD_ID_ETH_MULTICAST_RULES
,
2893 raw
->rdata_mapping
, ETH_CONNECTION_TYPE
);
2897 /* Ramrod completion is pending */
2898 return ECORE_PENDING
;
2902 static int ecore_mcast_validate_e1h(__rte_unused
struct bnx2x_softc
*sc
,
2903 struct ecore_mcast_ramrod_params
*p
,
2904 enum ecore_mcast_cmd cmd
)
2906 /* Mark, that there is a work to do */
2907 if ((cmd
== ECORE_MCAST_CMD_DEL
) || (cmd
== ECORE_MCAST_CMD_RESTORE
))
2908 p
->mcast_list_len
= 1;
2910 return ECORE_SUCCESS
;
2913 static void ecore_mcast_revert_e1h(__rte_unused
struct bnx2x_softc
*sc
,
2914 __rte_unused
struct ecore_mcast_ramrod_params
2915 *p
, __rte_unused
int old_num_bins
)
/* Set bit @bit in the 57711 32-bit-word multicast hash filter @filter.
 * Use an unsigned constant for the shift: "1 << 31" would left-shift into
 * the sign bit of a signed int, which is undefined behavior in C.
 */
#define ECORE_57711_SET_MC_FILTER(filter, bit) \
	do { \
		(filter)[(bit) >> 5] |= (1U << ((bit) & 0x1f)); \
	} while (0)
2925 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc
*sc __rte_unused
,
2926 struct ecore_mcast_obj
*o
,
2927 struct ecore_mcast_ramrod_params
*p
,
2928 uint32_t * mc_filter
)
2930 struct ecore_mcast_list_elem
*mlist_pos
;
2933 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos
, &p
->mcast_list
, link
,
2934 struct ecore_mcast_list_elem
) {
2935 bit
= ecore_mcast_bin_from_mac(mlist_pos
->mac
);
2936 ECORE_57711_SET_MC_FILTER(mc_filter
, bit
);
2939 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2940 mlist_pos
->mac
[0], mlist_pos
->mac
[1], mlist_pos
->mac
[2],
2941 mlist_pos
->mac
[3], mlist_pos
->mac
[4], mlist_pos
->mac
[5],
2944 /* bookkeeping... */
2945 BIT_VEC64_SET_BIT(o
->registry
.aprox_match
.vec
, bit
);
2949 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc
*sc
2951 struct ecore_mcast_obj
*o
,
2952 uint32_t * mc_filter
)
2956 for (bit
= ecore_mcast_get_next_bin(o
, 0);
2957 bit
>= 0; bit
= ecore_mcast_get_next_bin(o
, bit
+ 1)) {
2958 ECORE_57711_SET_MC_FILTER(mc_filter
, bit
);
2959 ECORE_MSG("About to set bin %d", bit
);
2963 /* On 57711 we write the multicast MACs' approximate match
2964 * table by directly into the TSTORM's internal RAM. So we don't
2965 * really need to handle any tricks to make it work.
2967 static int ecore_mcast_setup_e1h(struct bnx2x_softc
*sc
,
2968 struct ecore_mcast_ramrod_params
*p
,
2969 enum ecore_mcast_cmd cmd
)
2972 struct ecore_mcast_obj
*o
= p
->mcast_obj
;
2973 struct ecore_raw_obj
*r
= &o
->raw
;
2975 /* If CLEAR_ONLY has been requested - clear the registry
2976 * and clear a pending bit.
2978 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, &p
->ramrod_flags
)) {
2979 uint32_t mc_filter
[ECORE_MC_HASH_SIZE
] = { 0 };
2981 /* Set the multicast filter bits before writing it into
2982 * the internal memory.
2985 case ECORE_MCAST_CMD_ADD
:
2986 ecore_mcast_hdl_add_e1h(sc
, o
, p
, mc_filter
);
2989 case ECORE_MCAST_CMD_DEL
:
2990 ECORE_MSG("Invalidating multicast MACs configuration");
2992 /* clear the registry */
2993 ECORE_MEMSET(o
->registry
.aprox_match
.vec
, 0,
2994 sizeof(o
->registry
.aprox_match
.vec
));
2997 case ECORE_MCAST_CMD_RESTORE
:
2998 ecore_mcast_hdl_restore_e1h(sc
, o
, mc_filter
);
3002 PMD_DRV_LOG(ERR
, "Unknown command: %d", cmd
);
3006 /* Set the mcast filter in the internal memory */
3007 for (i
= 0; i
< ECORE_MC_HASH_SIZE
; i
++)
3008 REG_WR(sc
, ECORE_MC_HASH_OFFSET(sc
, i
), mc_filter
[i
]);
3010 /* clear the registry */
3011 ECORE_MEMSET(o
->registry
.aprox_match
.vec
, 0,
3012 sizeof(o
->registry
.aprox_match
.vec
));
3015 r
->clear_pending(r
);
3017 return ECORE_SUCCESS
;
3020 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj
*o
)
3022 return o
->registry
.aprox_match
.num_bins_set
;
3025 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj
*o
,
3028 o
->registry
.aprox_match
.num_bins_set
= n
;
3031 int ecore_config_mcast(struct bnx2x_softc
*sc
,
3032 struct ecore_mcast_ramrod_params
*p
,
3033 enum ecore_mcast_cmd cmd
)
3035 struct ecore_mcast_obj
*o
= p
->mcast_obj
;
3036 struct ecore_raw_obj
*r
= &o
->raw
;
3037 int rc
= 0, old_reg_size
;
3039 /* This is needed to recover number of currently configured mcast macs
3040 * in case of failure.
3042 old_reg_size
= o
->get_registry_size(o
);
3044 /* Do some calculations and checks */
3045 rc
= o
->validate(sc
, p
, cmd
);
3049 /* Return if there is no work to do */
3050 if ((!p
->mcast_list_len
) && (!o
->check_sched(o
)))
3051 return ECORE_SUCCESS
;
3054 ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3055 o
->total_pending_num
, p
->mcast_list_len
, o
->max_cmd_len
);
3057 /* Enqueue the current command to the pending list if we can't complete
3058 * it in the current iteration
3060 if (r
->check_pending(r
) ||
3061 ((o
->max_cmd_len
> 0) && (o
->total_pending_num
> o
->max_cmd_len
))) {
3062 rc
= o
->enqueue_cmd(sc
, p
->mcast_obj
, p
, cmd
);
3066 /* As long as the current command is in a command list we
3067 * don't need to handle it separately.
3069 p
->mcast_list_len
= 0;
3072 if (!r
->check_pending(r
)) {
3074 /* Set 'pending' state */
3077 /* Configure the new classification in the chip */
3078 rc
= o
->config_mcast(sc
, p
, cmd
);
3082 /* Wait for a ramrod completion if was requested */
3083 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT
, &p
->ramrod_flags
))
3084 rc
= o
->wait_comp(sc
, o
);
3090 r
->clear_pending(r
);
3093 o
->revert(sc
, p
, old_reg_size
);
3098 static void ecore_mcast_clear_sched(struct ecore_mcast_obj
*o
)
3100 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3101 ECORE_CLEAR_BIT(o
->sched_state
, o
->raw
.pstate
);
3102 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3105 static void ecore_mcast_set_sched(struct ecore_mcast_obj
*o
)
3107 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3108 ECORE_SET_BIT(o
->sched_state
, o
->raw
.pstate
);
3109 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3112 static int ecore_mcast_check_sched(struct ecore_mcast_obj
*o
)
3114 return ! !ECORE_TEST_BIT(o
->sched_state
, o
->raw
.pstate
);
3117 static int ecore_mcast_check_pending(struct ecore_mcast_obj
*o
)
3119 return o
->raw
.check_pending(&o
->raw
) || o
->check_sched(o
);
3122 void ecore_init_mcast_obj(struct bnx2x_softc
*sc
,
3123 struct ecore_mcast_obj
*mcast_obj
,
3124 uint8_t mcast_cl_id
, uint32_t mcast_cid
,
3125 uint8_t func_id
, uint8_t engine_id
, void *rdata
,
3126 ecore_dma_addr_t rdata_mapping
, int state
,
3127 unsigned long *pstate
, ecore_obj_type type
)
3129 ECORE_MEMSET(mcast_obj
, 0, sizeof(*mcast_obj
));
3131 ecore_init_raw_obj(&mcast_obj
->raw
, mcast_cl_id
, mcast_cid
, func_id
,
3132 rdata
, rdata_mapping
, state
, pstate
, type
);
3134 mcast_obj
->engine_id
= engine_id
;
3136 ECORE_LIST_INIT(&mcast_obj
->pending_cmds_head
);
3138 mcast_obj
->sched_state
= ECORE_FILTER_MCAST_SCHED
;
3139 mcast_obj
->check_sched
= ecore_mcast_check_sched
;
3140 mcast_obj
->set_sched
= ecore_mcast_set_sched
;
3141 mcast_obj
->clear_sched
= ecore_mcast_clear_sched
;
3143 if (CHIP_IS_E1H(sc
)) {
3144 mcast_obj
->config_mcast
= ecore_mcast_setup_e1h
;
3145 mcast_obj
->enqueue_cmd
= NULL
;
3146 mcast_obj
->hdl_restore
= NULL
;
3147 mcast_obj
->check_pending
= ecore_mcast_check_pending
;
3149 /* 57711 doesn't send a ramrod, so it has unlimited credit
3152 mcast_obj
->max_cmd_len
= -1;
3153 mcast_obj
->wait_comp
= ecore_mcast_wait
;
3154 mcast_obj
->set_one_rule
= NULL
;
3155 mcast_obj
->validate
= ecore_mcast_validate_e1h
;
3156 mcast_obj
->revert
= ecore_mcast_revert_e1h
;
3157 mcast_obj
->get_registry_size
=
3158 ecore_mcast_get_registry_size_aprox
;
3159 mcast_obj
->set_registry_size
=
3160 ecore_mcast_set_registry_size_aprox
;
3162 mcast_obj
->config_mcast
= ecore_mcast_setup_e2
;
3163 mcast_obj
->enqueue_cmd
= ecore_mcast_enqueue_cmd
;
3164 mcast_obj
->hdl_restore
= ecore_mcast_handle_restore_cmd_e2
;
3165 mcast_obj
->check_pending
= ecore_mcast_check_pending
;
3166 mcast_obj
->max_cmd_len
= 16;
3167 mcast_obj
->wait_comp
= ecore_mcast_wait
;
3168 mcast_obj
->set_one_rule
= ecore_mcast_set_one_rule_e2
;
3169 mcast_obj
->validate
= ecore_mcast_validate_e2
;
3170 mcast_obj
->revert
= ecore_mcast_revert_e2
;
3171 mcast_obj
->get_registry_size
=
3172 ecore_mcast_get_registry_size_aprox
;
3173 mcast_obj
->set_registry_size
=
3174 ecore_mcast_set_registry_size_aprox
;
3178 /*************************** Credit handling **********************************/
3181 * atomic_add_ifless - add if the result is less than a given value.
3183 * @v: pointer of type ecore_atomic_t
3184 * @a: the amount to add to v...
3185 * @u: ...if (v + a) is less than u.
3187 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3190 static int __atomic_add_ifless(ecore_atomic_t
* v
, int a
, int u
)
3194 c
= ECORE_ATOMIC_READ(v
);
3196 if (ECORE_UNLIKELY(c
+ a
>= u
))
3199 old
= ECORE_ATOMIC_CMPXCHG((v
), c
, c
+ a
);
3200 if (ECORE_LIKELY(old
== c
))
3209 * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3211 * @v: pointer of type ecore_atomic_t
3212 * @a: the amount to dec from v...
3213 * @u: ...if (v - a) is more or equal than u.
3215 * returns TRUE if (v - a) was more or equal than u, and FALSE
3218 static int __atomic_dec_ifmoe(ecore_atomic_t
* v
, int a
, int u
)
3222 c
= ECORE_ATOMIC_READ(v
);
3224 if (ECORE_UNLIKELY(c
- a
< u
))
3227 old
= ECORE_ATOMIC_CMPXCHG((v
), c
, c
- a
);
3228 if (ECORE_LIKELY(old
== c
))
3236 static int ecore_credit_pool_get(struct ecore_credit_pool_obj
*o
, int cnt
)
3241 rc
= __atomic_dec_ifmoe(&o
->credit
, cnt
, 0);
3247 static int ecore_credit_pool_put(struct ecore_credit_pool_obj
*o
, int cnt
)
3253 /* Don't let to refill if credit + cnt > pool_sz */
3254 rc
= __atomic_add_ifless(&o
->credit
, cnt
, o
->pool_sz
+ 1);
3261 static int ecore_credit_pool_check(struct ecore_credit_pool_obj
*o
)
3266 cur_credit
= ECORE_ATOMIC_READ(&o
->credit
);
3271 static int ecore_credit_pool_always_TRUE(__rte_unused
struct
3272 ecore_credit_pool_obj
*o
,
3273 __rte_unused
int cnt
)
3278 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj
*o
,
3285 /* Find "internal cam-offset" then add to base for this object... */
3286 for (vec
= 0; vec
< ECORE_POOL_VEC_SIZE
; vec
++) {
3288 /* Skip the current vector if there are no free entries in it */
3289 if (!o
->pool_mirror
[vec
])
3292 /* If we've got here we are going to find a free entry */
3293 for (idx
= vec
* BIT_VEC64_ELEM_SZ
, i
= 0;
3294 i
< BIT_VEC64_ELEM_SZ
; idx
++, i
++)
3296 if (BIT_VEC64_TEST_BIT(o
->pool_mirror
, idx
)) {
3298 BIT_VEC64_CLEAR_BIT(o
->pool_mirror
, idx
);
3299 *offset
= o
->base_pool_offset
+ idx
;
3307 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj
*o
,
3310 if (offset
< o
->base_pool_offset
)
3313 offset
-= o
->base_pool_offset
;
3315 if (offset
>= o
->pool_sz
)
3318 /* Return the entry to the pool */
3319 BIT_VEC64_SET_BIT(o
->pool_mirror
, offset
);
3324 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused
struct
3325 ecore_credit_pool_obj
*o
,
3326 __rte_unused
int offset
)
3331 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused
struct
3332 ecore_credit_pool_obj
*o
,
3333 __rte_unused
int *offset
)
3340 * ecore_init_credit_pool - initialize credit pool internals.
3343 * @base: Base entry in the CAM to use.
3344 * @credit: pool size.
3346 * If base is negative no CAM entries handling will be performed.
3347 * If credit is negative pool operations will always succeed (unlimited pool).
3350 static void ecore_init_credit_pool(struct ecore_credit_pool_obj
*p
,
3351 int base
, int credit
)
3353 /* Zero the object first */
3354 ECORE_MEMSET(p
, 0, sizeof(*p
));
3356 /* Set the table to all 1s */
3357 ECORE_MEMSET(&p
->pool_mirror
, 0xff, sizeof(p
->pool_mirror
));
3359 /* Init a pool as full */
3360 ECORE_ATOMIC_SET(&p
->credit
, credit
);
3362 /* The total poll size */
3363 p
->pool_sz
= credit
;
3365 p
->base_pool_offset
= base
;
3367 /* Commit the change */
3370 p
->check
= ecore_credit_pool_check
;
3372 /* if pool credit is negative - disable the checks */
3374 p
->put
= ecore_credit_pool_put
;
3375 p
->get
= ecore_credit_pool_get
;
3376 p
->put_entry
= ecore_credit_pool_put_entry
;
3377 p
->get_entry
= ecore_credit_pool_get_entry
;
3379 p
->put
= ecore_credit_pool_always_TRUE
;
3380 p
->get
= ecore_credit_pool_always_TRUE
;
3381 p
->put_entry
= ecore_credit_pool_put_entry_always_TRUE
;
3382 p
->get_entry
= ecore_credit_pool_get_entry_always_TRUE
;
3385 /* If base is negative - disable entries handling */
3387 p
->put_entry
= ecore_credit_pool_put_entry_always_TRUE
;
3388 p
->get_entry
= ecore_credit_pool_get_entry_always_TRUE
;
3392 void ecore_init_mac_credit_pool(struct bnx2x_softc
*sc
,
3393 struct ecore_credit_pool_obj
*p
,
3394 uint8_t func_id
, uint8_t func_num
)
3397 #define ECORE_CAM_SIZE_EMUL 5
3401 if (CHIP_IS_E1H(sc
)) {
3402 /* CAM credit is equally divided between all active functions
3405 if ((func_num
> 0)) {
3406 if (!CHIP_REV_IS_SLOW(sc
))
3407 cam_sz
= (MAX_MAC_CREDIT_E1H
/ (2 * func_num
));
3409 cam_sz
= ECORE_CAM_SIZE_EMUL
;
3410 ecore_init_credit_pool(p
, func_id
* cam_sz
, cam_sz
);
3412 /* this should never happen! Block MAC operations. */
3413 ecore_init_credit_pool(p
, 0, 0);
3419 * CAM credit is equaly divided between all active functions
3422 if ((func_num
> 0)) {
3423 if (!CHIP_REV_IS_SLOW(sc
))
3424 cam_sz
= (MAX_MAC_CREDIT_E2
/ func_num
);
3426 cam_sz
= ECORE_CAM_SIZE_EMUL
;
3428 /* No need for CAM entries handling for 57712 and
3431 ecore_init_credit_pool(p
, -1, cam_sz
);
3433 /* this should never happen! Block MAC operations. */
3434 ecore_init_credit_pool(p
, 0, 0);
3439 void ecore_init_vlan_credit_pool(struct bnx2x_softc
*sc
,
3440 struct ecore_credit_pool_obj
*p
,
3441 uint8_t func_id
, uint8_t func_num
)
3443 if (CHIP_IS_E1x(sc
)) {
3444 /* There is no VLAN credit in HW on 57711 only
3445 * MAC / MAC-VLAN can be set
3447 ecore_init_credit_pool(p
, 0, -1);
3449 /* CAM credit is equally divided between all active functions
3453 int credit
= MAX_VLAN_CREDIT_E2
/ func_num
;
3454 ecore_init_credit_pool(p
, func_id
* credit
, credit
);
3456 /* this should never happen! Block VLAN operations. */
3457 ecore_init_credit_pool(p
, 0, 0);
3461 /****************** RSS Configuration ******************/
3464 * ecore_setup_rss - configure RSS
3466 * @sc: device handle
3467 * @p: rss configuration
3469 * sends on UPDATE ramrod for that matter.
3471 static int ecore_setup_rss(struct bnx2x_softc
*sc
,
3472 struct ecore_config_rss_params
*p
)
3474 struct ecore_rss_config_obj
*o
= p
->rss_obj
;
3475 struct ecore_raw_obj
*r
= &o
->raw
;
3476 struct eth_rss_update_ramrod_data
*data
=
3477 (struct eth_rss_update_ramrod_data
*)(r
->rdata
);
3478 uint8_t rss_mode
= 0;
3481 ECORE_MEMSET(data
, 0, sizeof(*data
));
3483 ECORE_MSG("Configuring RSS");
3485 /* Set an echo field */
3486 data
->echo
= ECORE_CPU_TO_LE32((r
->cid
& ECORE_SWCID_MASK
) |
3487 (r
->state
<< ECORE_SWCID_SHIFT
));
3490 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED
, &p
->rss_flags
))
3491 rss_mode
= ETH_RSS_MODE_DISABLED
;
3492 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR
, &p
->rss_flags
))
3493 rss_mode
= ETH_RSS_MODE_REGULAR
;
3495 data
->rss_mode
= rss_mode
;
3497 ECORE_MSG("rss_mode=%d", rss_mode
);
3499 /* RSS capabilities */
3500 if (ECORE_TEST_BIT(ECORE_RSS_IPV4
, &p
->rss_flags
))
3501 data
->capabilities
|=
3502 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY
;
3504 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP
, &p
->rss_flags
))
3505 data
->capabilities
|=
3506 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY
;
3508 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP
, &p
->rss_flags
))
3509 data
->capabilities
|=
3510 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY
;
3512 if (ECORE_TEST_BIT(ECORE_RSS_IPV6
, &p
->rss_flags
))
3513 data
->capabilities
|=
3514 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY
;
3516 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP
, &p
->rss_flags
))
3517 data
->capabilities
|=
3518 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY
;
3520 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP
, &p
->rss_flags
))
3521 data
->capabilities
|=
3522 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY
;
3524 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING
, &p
->rss_flags
)) {
3525 data
->udp_4tuple_dst_port_mask
=
3526 ECORE_CPU_TO_LE16(p
->tunnel_mask
);
3527 data
->udp_4tuple_dst_port_value
=
3528 ECORE_CPU_TO_LE16(p
->tunnel_value
);
3532 data
->rss_result_mask
= p
->rss_result_mask
;
3535 data
->rss_engine_id
= o
->engine_id
;
3537 ECORE_MSG("rss_engine_id=%d", data
->rss_engine_id
);
3539 /* Indirection table */
3540 ECORE_MEMCPY(data
->indirection_table
, p
->ind_table
,
3541 T_ETH_INDIRECTION_TABLE_SIZE
);
3543 /* Remember the last configuration */
3544 ECORE_MEMCPY(o
->ind_table
, p
->ind_table
, T_ETH_INDIRECTION_TABLE_SIZE
);
3547 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH
, &p
->rss_flags
)) {
3548 ECORE_MEMCPY(&data
->rss_key
[0], &p
->rss_key
[0],
3549 sizeof(data
->rss_key
));
3550 data
->capabilities
|= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY
;
3553 /* No need for an explicit memory barrier here as long we would
3554 * need to ensure the ordering of writing to the SPQ element
3555 * and updating of the SPQ producer which involves a memory
3556 * read and we will have to put a full memory barrier there
3557 * (inside ecore_sp_post()).
3561 rc
= ecore_sp_post(sc
,
3562 RAMROD_CMD_ID_ETH_RSS_UPDATE
,
3563 r
->cid
, r
->rdata_mapping
, ETH_CONNECTION_TYPE
);
3568 return ECORE_PENDING
;
3571 int ecore_config_rss(struct bnx2x_softc
*sc
, struct ecore_config_rss_params
*p
)
3574 struct ecore_rss_config_obj
*o
= p
->rss_obj
;
3575 struct ecore_raw_obj
*r
= &o
->raw
;
3577 /* Do nothing if only driver cleanup was requested */
3578 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, &p
->ramrod_flags
))
3579 return ECORE_SUCCESS
;
3583 rc
= o
->config_rss(sc
, p
);
3585 r
->clear_pending(r
);
3589 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT
, &p
->ramrod_flags
))
3590 rc
= r
->wait_comp(sc
, r
);
3595 void ecore_init_rss_config_obj(struct ecore_rss_config_obj
*rss_obj
,
3596 uint8_t cl_id
, uint32_t cid
, uint8_t func_id
,
3597 uint8_t engine_id
, void *rdata
,
3598 ecore_dma_addr_t rdata_mapping
, int state
,
3599 unsigned long *pstate
, ecore_obj_type type
)
3601 ecore_init_raw_obj(&rss_obj
->raw
, cl_id
, cid
, func_id
, rdata
,
3602 rdata_mapping
, state
, pstate
, type
);
3604 rss_obj
->engine_id
= engine_id
;
3605 rss_obj
->config_rss
= ecore_setup_rss
;
3608 /********************** Queue state object ***********************************/
3611 * ecore_queue_state_change - perform Queue state change transition
3613 * @sc: device handle
3614 * @params: parameters to perform the transition
3616 * returns 0 in case of successfully completed transition, negative error
3617 * code in case of failure, positive (EBUSY) value if there is a completion
3618 * to that is still pending (possible only if RAMROD_COMP_WAIT is
3619 * not set in params->ramrod_flags for asynchronous commands).
3622 int ecore_queue_state_change(struct bnx2x_softc
*sc
,
3623 struct ecore_queue_state_params
*params
)
3625 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
3626 int rc
, pending_bit
;
3627 unsigned long *pending
= &o
->pending
;
3629 /* Check that the requested transition is legal */
3630 rc
= o
->check_transition(sc
, o
, params
);
3632 PMD_DRV_LOG(ERR
, "check transition returned an error. rc %d",
3637 /* Set "pending" bit */
3638 ECORE_MSG("pending bit was=%lx", o
->pending
);
3639 pending_bit
= o
->set_pending(o
, params
);
3640 ECORE_MSG("pending bit now=%lx", o
->pending
);
3642 /* Don't send a command if only driver cleanup was requested */
3643 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, ¶ms
->ramrod_flags
))
3644 o
->complete_cmd(sc
, o
, pending_bit
);
3647 rc
= o
->send_cmd(sc
, params
);
3649 o
->next_state
= ECORE_Q_STATE_MAX
;
3650 ECORE_CLEAR_BIT(pending_bit
, pending
);
3651 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3655 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT
, ¶ms
->ramrod_flags
)) {
3656 rc
= o
->wait_comp(sc
, o
, pending_bit
);
3660 return ECORE_SUCCESS
;
3664 return ECORE_RET_PENDING(pending_bit
, pending
);
3667 static int ecore_queue_set_pending(struct ecore_queue_sp_obj
*obj
,
3668 struct ecore_queue_state_params
*params
)
3670 enum ecore_queue_cmd cmd
= params
->cmd
, bit
;
3672 /* ACTIVATE and DEACTIVATE commands are implemented on top of
3675 if ((cmd
== ECORE_Q_CMD_ACTIVATE
) || (cmd
== ECORE_Q_CMD_DEACTIVATE
))
3676 bit
= ECORE_Q_CMD_UPDATE
;
3680 ECORE_SET_BIT(bit
, &obj
->pending
);
3684 static int ecore_queue_wait_comp(struct bnx2x_softc
*sc
,
3685 struct ecore_queue_sp_obj
*o
,
3686 enum ecore_queue_cmd cmd
)
3688 return ecore_state_wait(sc
, cmd
, &o
->pending
);
3692 * ecore_queue_comp_cmd - complete the state change command.
3694 * @sc: device handle
3698 * Checks that the arrived completion is expected.
3700 static int ecore_queue_comp_cmd(struct bnx2x_softc
*sc __rte_unused
,
3701 struct ecore_queue_sp_obj
*o
,
3702 enum ecore_queue_cmd cmd
)
3704 unsigned long cur_pending
= o
->pending
;
3706 if (!ECORE_TEST_AND_CLEAR_BIT(cmd
, &cur_pending
)) {
3708 "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3709 cmd
, o
->cids
[ECORE_PRIMARY_CID_INDEX
], o
->state
,
3710 cur_pending
, o
->next_state
);
3714 if (o
->next_tx_only
>= o
->max_cos
)
3715 /* >= because tx only must always be smaller than cos since the
3716 * primary connection supports COS 0
3719 "illegal value for next tx_only: %d. max cos was %d",
3720 o
->next_tx_only
, o
->max_cos
);
3722 ECORE_MSG("Completing command %d for queue %d, setting state to %d",
3723 cmd
, o
->cids
[ECORE_PRIMARY_CID_INDEX
], o
->next_state
);
3725 if (o
->next_tx_only
) /* print num tx-only if any exist */
3726 ECORE_MSG("primary cid %d: num tx-only cons %d",
3727 o
->cids
[ECORE_PRIMARY_CID_INDEX
], o
->next_tx_only
);
3729 o
->state
= o
->next_state
;
3730 o
->num_tx_only
= o
->next_tx_only
;
3731 o
->next_state
= ECORE_Q_STATE_MAX
;
3733 /* It's important that o->state and o->next_state are
3734 * updated before o->pending.
3738 ECORE_CLEAR_BIT(cmd
, &o
->pending
);
3739 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3741 return ECORE_SUCCESS
;
3744 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3746 struct client_init_ramrod_data
*data
)
3748 struct ecore_queue_setup_params
*params
= &cmd_params
->params
.setup
;
3752 /* IPv6 TPA supported for E2 and above only */
3753 data
->rx
.tpa_en
|= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6
,
3755 CLIENT_INIT_RX_DATA_TPA_EN_IPV6
;
3758 static void ecore_q_fill_init_general_data(struct bnx2x_softc
*sc __rte_unused
,
3759 struct ecore_queue_sp_obj
*o
,
3760 struct ecore_general_setup_params
3761 *params
, struct client_init_general_data
3762 *gen_data
, unsigned long *flags
)
3764 gen_data
->client_id
= o
->cl_id
;
3766 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS
, flags
)) {
3767 gen_data
->statistics_counter_id
= params
->stat_id
;
3768 gen_data
->statistics_en_flg
= 1;
3769 gen_data
->statistics_zero_flg
=
3770 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS
, flags
);
3772 gen_data
->statistics_counter_id
=
3773 DISABLE_STATISTIC_COUNTER_ID_VALUE
;
3775 gen_data
->is_fcoe_flg
= ECORE_TEST_BIT(ECORE_Q_FLG_FCOE
, flags
);
3776 gen_data
->activate_flg
= ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE
, flags
);
3777 gen_data
->sp_client_id
= params
->spcl_id
;
3778 gen_data
->mtu
= ECORE_CPU_TO_LE16(params
->mtu
);
3779 gen_data
->func_id
= o
->func_id
;
3781 gen_data
->cos
= params
->cos
;
3783 gen_data
->traffic_type
=
3784 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE
, flags
) ?
3785 LLFC_TRAFFIC_TYPE_FCOE
: LLFC_TRAFFIC_TYPE_NW
;
3787 ECORE_MSG("flags: active %d, cos %d, stats en %d",
3788 gen_data
->activate_flg
, gen_data
->cos
,
3789 gen_data
->statistics_en_flg
);
3792 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params
*params
,
3793 struct client_init_tx_data
*tx_data
,
3794 unsigned long *flags
)
3796 tx_data
->enforce_security_flg
=
3797 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC
, flags
);
3798 tx_data
->default_vlan
= ECORE_CPU_TO_LE16(params
->default_vlan
);
3799 tx_data
->default_vlan_flg
= ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN
, flags
);
3800 tx_data
->tx_switching_flg
=
3801 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH
, flags
);
3802 tx_data
->anti_spoofing_flg
=
3803 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF
, flags
);
3804 tx_data
->force_default_pri_flg
=
3805 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI
, flags
);
3806 tx_data
->refuse_outband_vlan_flg
=
3807 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN
, flags
);
3808 tx_data
->tunnel_non_lso_pcsum_location
=
3809 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT
, flags
) ? CSUM_ON_PKT
:
3812 tx_data
->tx_status_block_id
= params
->fw_sb_id
;
3813 tx_data
->tx_sb_index_number
= params
->sb_cq_index
;
3814 tx_data
->tss_leading_client_id
= params
->tss_leading_cl_id
;
3816 tx_data
->tx_bd_page_base
.lo
=
3817 ECORE_CPU_TO_LE32(U64_LO(params
->dscr_map
));
3818 tx_data
->tx_bd_page_base
.hi
=
3819 ECORE_CPU_TO_LE32(U64_HI(params
->dscr_map
));
3821 /* Don't configure any Tx switching mode during queue SETUP */
3825 static void ecore_q_fill_init_pause_data(struct rxq_pause_params
*params
,
3826 struct client_init_rx_data
*rx_data
)
3828 /* flow control data */
3829 rx_data
->cqe_pause_thr_low
= ECORE_CPU_TO_LE16(params
->rcq_th_lo
);
3830 rx_data
->cqe_pause_thr_high
= ECORE_CPU_TO_LE16(params
->rcq_th_hi
);
3831 rx_data
->bd_pause_thr_low
= ECORE_CPU_TO_LE16(params
->bd_th_lo
);
3832 rx_data
->bd_pause_thr_high
= ECORE_CPU_TO_LE16(params
->bd_th_hi
);
3833 rx_data
->sge_pause_thr_low
= ECORE_CPU_TO_LE16(params
->sge_th_lo
);
3834 rx_data
->sge_pause_thr_high
= ECORE_CPU_TO_LE16(params
->sge_th_hi
);
3835 rx_data
->rx_cos_mask
= ECORE_CPU_TO_LE16(params
->pri_map
);
3838 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params
*params
,
3839 struct client_init_rx_data
*rx_data
,
3840 unsigned long *flags
)
3842 rx_data
->tpa_en
= ECORE_TEST_BIT(ECORE_Q_FLG_TPA
, flags
) *
3843 CLIENT_INIT_RX_DATA_TPA_EN_IPV4
;
3844 rx_data
->tpa_en
|= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO
, flags
) *
3845 CLIENT_INIT_RX_DATA_TPA_MODE
;
3846 rx_data
->vmqueue_mode_en_flg
= 0;
3848 rx_data
->extra_data_over_sgl_en_flg
=
3849 ECORE_TEST_BIT(ECORE_Q_FLG_OOO
, flags
);
3850 rx_data
->cache_line_alignment_log_size
= params
->cache_line_log
;
3851 rx_data
->enable_dynamic_hc
= ECORE_TEST_BIT(ECORE_Q_FLG_DHC
, flags
);
3852 rx_data
->client_qzone_id
= params
->cl_qzone_id
;
3853 rx_data
->max_agg_size
= ECORE_CPU_TO_LE16(params
->tpa_agg_sz
);
3855 /* Always start in DROP_ALL mode */
3856 rx_data
->state
= ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL
|
3857 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL
);
3859 /* We don't set drop flags */
3860 rx_data
->drop_ip_cs_err_flg
= 0;
3861 rx_data
->drop_tcp_cs_err_flg
= 0;
3862 rx_data
->drop_ttl0_flg
= 0;
3863 rx_data
->drop_udp_cs_err_flg
= 0;
3864 rx_data
->inner_vlan_removal_enable_flg
=
3865 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN
, flags
);
3866 rx_data
->outer_vlan_removal_enable_flg
=
3867 ECORE_TEST_BIT(ECORE_Q_FLG_OV
, flags
);
3868 rx_data
->status_block_id
= params
->fw_sb_id
;
3869 rx_data
->rx_sb_index_number
= params
->sb_cq_index
;
3870 rx_data
->max_tpa_queues
= params
->max_tpa_queues
;
3871 rx_data
->max_bytes_on_bd
= ECORE_CPU_TO_LE16(params
->buf_sz
);
3872 rx_data
->bd_page_base
.lo
= ECORE_CPU_TO_LE32(U64_LO(params
->dscr_map
));
3873 rx_data
->bd_page_base
.hi
= ECORE_CPU_TO_LE32(U64_HI(params
->dscr_map
));
3874 rx_data
->cqe_page_base
.lo
= ECORE_CPU_TO_LE32(U64_LO(params
->rcq_map
));
3875 rx_data
->cqe_page_base
.hi
= ECORE_CPU_TO_LE32(U64_HI(params
->rcq_map
));
3876 rx_data
->is_leading_rss
= ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS
,
3879 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST
, flags
)) {
3880 rx_data
->approx_mcast_engine_id
= params
->mcast_engine_id
;
3881 rx_data
->is_approx_mcast
= 1;
3884 rx_data
->rss_engine_id
= params
->rss_engine_id
;
3886 /* silent vlan removal */
3887 rx_data
->silent_vlan_removal_flg
=
3888 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM
, flags
);
3889 rx_data
->silent_vlan_value
=
3890 ECORE_CPU_TO_LE16(params
->silent_removal_value
);
3891 rx_data
->silent_vlan_mask
=
3892 ECORE_CPU_TO_LE16(params
->silent_removal_mask
);
3895 /* initialize the general, tx and rx parts of a queue object */
3896 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc
*sc
, struct ecore_queue_state_params
3898 struct client_init_ramrod_data
*data
)
3900 ecore_q_fill_init_general_data(sc
, cmd_params
->q_obj
,
3901 &cmd_params
->params
.setup
.gen_params
,
3903 &cmd_params
->params
.setup
.flags
);
3905 ecore_q_fill_init_tx_data(&cmd_params
->params
.setup
.txq_params
,
3906 &data
->tx
, &cmd_params
->params
.setup
.flags
);
3908 ecore_q_fill_init_rx_data(&cmd_params
->params
.setup
.rxq_params
,
3909 &data
->rx
, &cmd_params
->params
.setup
.flags
);
3911 ecore_q_fill_init_pause_data(&cmd_params
->params
.setup
.pause_params
,
3915 /* initialize the general and tx parts of a tx-only queue object */
3916 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc
*sc
, struct ecore_queue_state_params
3918 struct tx_queue_init_ramrod_data
*data
)
3920 ecore_q_fill_init_general_data(sc
, cmd_params
->q_obj
,
3921 &cmd_params
->params
.tx_only
.gen_params
,
3923 &cmd_params
->params
.tx_only
.flags
);
3925 ecore_q_fill_init_tx_data(&cmd_params
->params
.tx_only
.txq_params
,
3926 &data
->tx
, &cmd_params
->params
.tx_only
.flags
);
3928 ECORE_MSG("cid %d, tx bd page lo %x hi %x",
3929 cmd_params
->q_obj
->cids
[0],
3930 data
->tx
.tx_bd_page_base
.lo
, data
->tx
.tx_bd_page_base
.hi
);
3934 * ecore_q_init - init HW/FW queue
3936 * @sc: device handle
3939 * HW/FW initial Queue configuration:
3941 * - CDU context validation
3944 static int ecore_q_init(struct bnx2x_softc
*sc
,
3945 struct ecore_queue_state_params
*params
)
3947 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
3948 struct ecore_queue_init_params
*init
= ¶ms
->params
.init
;
3952 /* Tx HC configuration */
3953 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX
, &o
->type
) &&
3954 ECORE_TEST_BIT(ECORE_Q_FLG_HC
, &init
->tx
.flags
)) {
3955 hc_usec
= init
->tx
.hc_rate
? 1000000 / init
->tx
.hc_rate
: 0;
3957 ECORE_UPDATE_COALESCE_SB_INDEX(sc
, init
->tx
.fw_sb_id
,
3958 init
->tx
.sb_cq_index
,
3961 &init
->tx
.flags
), hc_usec
);
3964 /* Rx HC configuration */
3965 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX
, &o
->type
) &&
3966 ECORE_TEST_BIT(ECORE_Q_FLG_HC
, &init
->rx
.flags
)) {
3967 hc_usec
= init
->rx
.hc_rate
? 1000000 / init
->rx
.hc_rate
: 0;
3969 ECORE_UPDATE_COALESCE_SB_INDEX(sc
, init
->rx
.fw_sb_id
,
3970 init
->rx
.sb_cq_index
,
3973 &init
->rx
.flags
), hc_usec
);
3976 /* Set CDU context validation values */
3977 for (cos
= 0; cos
< o
->max_cos
; cos
++) {
3978 ECORE_MSG("setting context validation. cid %d, cos %d",
3980 ECORE_MSG("context pointer %p", init
->cxts
[cos
]);
3981 ECORE_SET_CTX_VALIDATION(sc
, init
->cxts
[cos
], o
->cids
[cos
]);
3984 /* As no ramrod is sent, complete the command immediately */
3985 o
->complete_cmd(sc
, o
, ECORE_Q_CMD_INIT
);
3990 return ECORE_SUCCESS
;
3993 static int ecore_q_send_setup_e1x(struct bnx2x_softc
*sc
, struct ecore_queue_state_params
3996 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
3997 struct client_init_ramrod_data
*rdata
=
3998 (struct client_init_ramrod_data
*)o
->rdata
;
3999 ecore_dma_addr_t data_mapping
= o
->rdata_mapping
;
4000 int ramrod
= RAMROD_CMD_ID_ETH_CLIENT_SETUP
;
4002 /* Clear the ramrod data */
4003 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
4005 /* Fill the ramrod data */
4006 ecore_q_fill_setup_data_cmn(sc
, params
, rdata
);
4008 /* No need for an explicit memory barrier here as long we would
4009 * need to ensure the ordering of writing to the SPQ element
4010 * and updating of the SPQ producer which involves a memory
4011 * read and we will have to put a full memory barrier there
4012 * (inside ecore_sp_post()).
4015 return ecore_sp_post(sc
,
4017 o
->cids
[ECORE_PRIMARY_CID_INDEX
],
4018 data_mapping
, ETH_CONNECTION_TYPE
);
4021 static int ecore_q_send_setup_e2(struct bnx2x_softc
*sc
,
4022 struct ecore_queue_state_params
*params
)
4024 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
4025 struct client_init_ramrod_data
*rdata
=
4026 (struct client_init_ramrod_data
*)o
->rdata
;
4027 ecore_dma_addr_t data_mapping
= o
->rdata_mapping
;
4028 int ramrod
= RAMROD_CMD_ID_ETH_CLIENT_SETUP
;
4030 /* Clear the ramrod data */
4031 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
4033 /* Fill the ramrod data */
4034 ecore_q_fill_setup_data_cmn(sc
, params
, rdata
);
4035 ecore_q_fill_setup_data_e2(params
, rdata
);
4037 /* No need for an explicit memory barrier here as long we would
4038 * need to ensure the ordering of writing to the SPQ element
4039 * and updating of the SPQ producer which involves a memory
4040 * read and we will have to put a full memory barrier there
4041 * (inside ecore_sp_post()).
4044 return ecore_sp_post(sc
,
4046 o
->cids
[ECORE_PRIMARY_CID_INDEX
],
4047 data_mapping
, ETH_CONNECTION_TYPE
);
4050 static int ecore_q_send_setup_tx_only(struct bnx2x_softc
*sc
, struct ecore_queue_state_params
4053 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
4054 struct tx_queue_init_ramrod_data
*rdata
=
4055 (struct tx_queue_init_ramrod_data
*)o
->rdata
;
4056 ecore_dma_addr_t data_mapping
= o
->rdata_mapping
;
4057 int ramrod
= RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP
;
4058 struct ecore_queue_setup_tx_only_params
*tx_only_params
=
4059 ¶ms
->params
.tx_only
;
4060 uint8_t cid_index
= tx_only_params
->cid_index
;
4062 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD
, &o
->type
))
4063 ramrod
= RAMROD_CMD_ID_ETH_FORWARD_SETUP
;
4064 ECORE_MSG("sending forward tx-only ramrod");
4066 if (cid_index
>= o
->max_cos
) {
4067 PMD_DRV_LOG(ERR
, "queue[%d]: cid_index (%d) is out of range",
4068 o
->cl_id
, cid_index
);
4072 ECORE_MSG("parameters received: cos: %d sp-id: %d",
4073 tx_only_params
->gen_params
.cos
,
4074 tx_only_params
->gen_params
.spcl_id
);
4076 /* Clear the ramrod data */
4077 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
4079 /* Fill the ramrod data */
4080 ecore_q_fill_setup_tx_only(sc
, params
, rdata
);
4083 ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4084 o
->cids
[cid_index
], rdata
->general
.client_id
,
4085 rdata
->general
.sp_client_id
, rdata
->general
.cos
);
4087 /* No need for an explicit memory barrier here as long we would
4088 * need to ensure the ordering of writing to the SPQ element
4089 * and updating of the SPQ producer which involves a memory
4090 * read and we will have to put a full memory barrier there
4091 * (inside ecore_sp_post()).
4094 return ecore_sp_post(sc
, ramrod
, o
->cids
[cid_index
],
4095 data_mapping
, ETH_CONNECTION_TYPE
);
4098 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj
*obj
,
4099 struct ecore_queue_update_params
*params
,
4100 struct client_update_ramrod_data
*data
)
4102 /* Client ID of the client to update */
4103 data
->client_id
= obj
->cl_id
;
4105 /* Function ID of the client to update */
4106 data
->func_id
= obj
->func_id
;
4108 /* Default VLAN value */
4109 data
->default_vlan
= ECORE_CPU_TO_LE16(params
->def_vlan
);
4111 /* Inner VLAN stripping */
4112 data
->inner_vlan_removal_enable_flg
=
4113 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM
, ¶ms
->update_flags
);
4114 data
->inner_vlan_removal_change_flg
=
4115 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG
,
4116 ¶ms
->update_flags
);
4118 /* Outer VLAN stripping */
4119 data
->outer_vlan_removal_enable_flg
=
4120 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM
, ¶ms
->update_flags
);
4121 data
->outer_vlan_removal_change_flg
=
4122 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG
,
4123 ¶ms
->update_flags
);
4125 /* Drop packets that have source MAC that doesn't belong to this
4128 data
->anti_spoofing_enable_flg
=
4129 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF
, ¶ms
->update_flags
);
4130 data
->anti_spoofing_change_flg
=
4131 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG
,
4132 ¶ms
->update_flags
);
4134 /* Activate/Deactivate */
4135 data
->activate_flg
=
4136 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE
, ¶ms
->update_flags
);
4137 data
->activate_change_flg
=
4138 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG
, ¶ms
->update_flags
);
4140 /* Enable default VLAN */
4141 data
->default_vlan_enable_flg
=
4142 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN
, ¶ms
->update_flags
);
4143 data
->default_vlan_change_flg
=
4144 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG
,
4145 ¶ms
->update_flags
);
4147 /* silent vlan removal */
4148 data
->silent_vlan_change_flg
=
4149 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG
,
4150 ¶ms
->update_flags
);
4151 data
->silent_vlan_removal_flg
=
4152 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM
,
4153 ¶ms
->update_flags
);
4154 data
->silent_vlan_value
=
4155 ECORE_CPU_TO_LE16(params
->silent_removal_value
);
4156 data
->silent_vlan_mask
= ECORE_CPU_TO_LE16(params
->silent_removal_mask
);
4159 data
->tx_switching_flg
=
4160 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING
, ¶ms
->update_flags
);
4161 data
->tx_switching_change_flg
=
4162 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG
,
4163 ¶ms
->update_flags
);
4166 static int ecore_q_send_update(struct bnx2x_softc
*sc
,
4167 struct ecore_queue_state_params
*params
)
4169 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
4170 struct client_update_ramrod_data
*rdata
=
4171 (struct client_update_ramrod_data
*)o
->rdata
;
4172 ecore_dma_addr_t data_mapping
= o
->rdata_mapping
;
4173 struct ecore_queue_update_params
*update_params
=
4174 ¶ms
->params
.update
;
4175 uint8_t cid_index
= update_params
->cid_index
;
4177 if (cid_index
>= o
->max_cos
) {
4178 PMD_DRV_LOG(ERR
, "queue[%d]: cid_index (%d) is out of range",
4179 o
->cl_id
, cid_index
);
4183 /* Clear the ramrod data */
4184 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
4186 /* Fill the ramrod data */
4187 ecore_q_fill_update_data(o
, update_params
, rdata
);
4189 /* No need for an explicit memory barrier here as long we would
4190 * need to ensure the ordering of writing to the SPQ element
4191 * and updating of the SPQ producer which involves a memory
4192 * read and we will have to put a full memory barrier there
4193 * (inside ecore_sp_post()).
4196 return ecore_sp_post(sc
, RAMROD_CMD_ID_ETH_CLIENT_UPDATE
,
4197 o
->cids
[cid_index
], data_mapping
,
4198 ETH_CONNECTION_TYPE
);
4202 * ecore_q_send_deactivate - send DEACTIVATE command
4204 * @sc: device handle
4207 * implemented using the UPDATE command.
4209 static int ecore_q_send_deactivate(struct bnx2x_softc
*sc
, struct ecore_queue_state_params
4212 struct ecore_queue_update_params
*update
= ¶ms
->params
.update
;
4214 ECORE_MEMSET(update
, 0, sizeof(*update
));
4216 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG
, &update
->update_flags
);
4218 return ecore_q_send_update(sc
, params
);
4222 * ecore_q_send_activate - send ACTIVATE command
4224 * @sc: device handle
4227 * implemented using the UPDATE command.
4229 static int ecore_q_send_activate(struct bnx2x_softc
*sc
,
4230 struct ecore_queue_state_params
*params
)
4232 struct ecore_queue_update_params
*update
= ¶ms
->params
.update
;
4234 ECORE_MEMSET(update
, 0, sizeof(*update
));
4236 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE
, &update
->update_flags
);
4237 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG
, &update
->update_flags
);
4239 return ecore_q_send_update(sc
, params
);
4242 static int ecore_q_send_update_tpa(__rte_unused
struct bnx2x_softc
*sc
,
4244 ecore_queue_state_params
*params
)
4246 /* Not implemented yet. */
4250 static int ecore_q_send_halt(struct bnx2x_softc
*sc
,
4251 struct ecore_queue_state_params
*params
)
4253 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
4255 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
4256 ecore_dma_addr_t data_mapping
= 0;
4257 data_mapping
= (ecore_dma_addr_t
) o
->cl_id
;
4259 return ecore_sp_post(sc
,
4260 RAMROD_CMD_ID_ETH_HALT
,
4261 o
->cids
[ECORE_PRIMARY_CID_INDEX
],
4262 data_mapping
, ETH_CONNECTION_TYPE
);
4265 static int ecore_q_send_cfc_del(struct bnx2x_softc
*sc
,
4266 struct ecore_queue_state_params
*params
)
4268 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
4269 uint8_t cid_idx
= params
->params
.cfc_del
.cid_index
;
4271 if (cid_idx
>= o
->max_cos
) {
4272 PMD_DRV_LOG(ERR
, "queue[%d]: cid_index (%d) is out of range",
4277 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_CFC_DEL
,
4278 o
->cids
[cid_idx
], 0, NONE_CONNECTION_TYPE
);
4281 static int ecore_q_send_terminate(struct bnx2x_softc
*sc
, struct ecore_queue_state_params
4284 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
4285 uint8_t cid_index
= params
->params
.terminate
.cid_index
;
4287 if (cid_index
>= o
->max_cos
) {
4288 PMD_DRV_LOG(ERR
, "queue[%d]: cid_index (%d) is out of range",
4289 o
->cl_id
, cid_index
);
4293 return ecore_sp_post(sc
, RAMROD_CMD_ID_ETH_TERMINATE
,
4294 o
->cids
[cid_index
], 0, ETH_CONNECTION_TYPE
);
4297 static int ecore_q_send_empty(struct bnx2x_softc
*sc
,
4298 struct ecore_queue_state_params
*params
)
4300 struct ecore_queue_sp_obj
*o
= params
->q_obj
;
4302 return ecore_sp_post(sc
, RAMROD_CMD_ID_ETH_EMPTY
,
4303 o
->cids
[ECORE_PRIMARY_CID_INDEX
], 0,
4304 ETH_CONNECTION_TYPE
);
4307 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc
*sc
, struct ecore_queue_state_params
4310 switch (params
->cmd
) {
4311 case ECORE_Q_CMD_INIT
:
4312 return ecore_q_init(sc
, params
);
4313 case ECORE_Q_CMD_SETUP_TX_ONLY
:
4314 return ecore_q_send_setup_tx_only(sc
, params
);
4315 case ECORE_Q_CMD_DEACTIVATE
:
4316 return ecore_q_send_deactivate(sc
, params
);
4317 case ECORE_Q_CMD_ACTIVATE
:
4318 return ecore_q_send_activate(sc
, params
);
4319 case ECORE_Q_CMD_UPDATE
:
4320 return ecore_q_send_update(sc
, params
);
4321 case ECORE_Q_CMD_UPDATE_TPA
:
4322 return ecore_q_send_update_tpa(sc
, params
);
4323 case ECORE_Q_CMD_HALT
:
4324 return ecore_q_send_halt(sc
, params
);
4325 case ECORE_Q_CMD_CFC_DEL
:
4326 return ecore_q_send_cfc_del(sc
, params
);
4327 case ECORE_Q_CMD_TERMINATE
:
4328 return ecore_q_send_terminate(sc
, params
);
4329 case ECORE_Q_CMD_EMPTY
:
4330 return ecore_q_send_empty(sc
, params
);
4332 PMD_DRV_LOG(ERR
, "Unknown command: %d", params
->cmd
);
4337 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc
*sc
,
4338 struct ecore_queue_state_params
*params
)
4340 switch (params
->cmd
) {
4341 case ECORE_Q_CMD_SETUP
:
4342 return ecore_q_send_setup_e1x(sc
, params
);
4343 case ECORE_Q_CMD_INIT
:
4344 case ECORE_Q_CMD_SETUP_TX_ONLY
:
4345 case ECORE_Q_CMD_DEACTIVATE
:
4346 case ECORE_Q_CMD_ACTIVATE
:
4347 case ECORE_Q_CMD_UPDATE
:
4348 case ECORE_Q_CMD_UPDATE_TPA
:
4349 case ECORE_Q_CMD_HALT
:
4350 case ECORE_Q_CMD_CFC_DEL
:
4351 case ECORE_Q_CMD_TERMINATE
:
4352 case ECORE_Q_CMD_EMPTY
:
4353 return ecore_queue_send_cmd_cmn(sc
, params
);
4355 PMD_DRV_LOG(ERR
, "Unknown command: %d", params
->cmd
);
4360 static int ecore_queue_send_cmd_e2(struct bnx2x_softc
*sc
,
4361 struct ecore_queue_state_params
*params
)
4363 switch (params
->cmd
) {
4364 case ECORE_Q_CMD_SETUP
:
4365 return ecore_q_send_setup_e2(sc
, params
);
4366 case ECORE_Q_CMD_INIT
:
4367 case ECORE_Q_CMD_SETUP_TX_ONLY
:
4368 case ECORE_Q_CMD_DEACTIVATE
:
4369 case ECORE_Q_CMD_ACTIVATE
:
4370 case ECORE_Q_CMD_UPDATE
:
4371 case ECORE_Q_CMD_UPDATE_TPA
:
4372 case ECORE_Q_CMD_HALT
:
4373 case ECORE_Q_CMD_CFC_DEL
:
4374 case ECORE_Q_CMD_TERMINATE
:
4375 case ECORE_Q_CMD_EMPTY
:
4376 return ecore_queue_send_cmd_cmn(sc
, params
);
4378 PMD_DRV_LOG(ERR
, "Unknown command: %d", params
->cmd
);
4384 * ecore_queue_chk_transition - check state machine of a regular Queue
4386 * @sc: device handle
4391 * It both checks if the requested command is legal in a current
4392 * state and, if it's legal, sets a `next_state' in the object
4393 * that will be used in the completion flow to set the `state'
4396 * returns 0 if a requested command is a legal transition,
4397 * ECORE_INVAL otherwise.
4399 static int ecore_queue_chk_transition(struct bnx2x_softc
*sc __rte_unused
,
4400 struct ecore_queue_sp_obj
*o
,
4401 struct ecore_queue_state_params
*params
)
4403 enum ecore_q_state state
= o
->state
, next_state
= ECORE_Q_STATE_MAX
;
4404 enum ecore_queue_cmd cmd
= params
->cmd
;
4405 struct ecore_queue_update_params
*update_params
=
4406 ¶ms
->params
.update
;
4407 uint8_t next_tx_only
= o
->num_tx_only
;
4409 /* Forget all pending for completion commands if a driver only state
4410 * transition has been requested.
4412 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, ¶ms
->ramrod_flags
)) {
4414 o
->next_state
= ECORE_Q_STATE_MAX
;
4417 /* Don't allow a next state transition if we are in the middle of
4421 PMD_DRV_LOG(ERR
, "Blocking transition since pending was %lx",
4427 case ECORE_Q_STATE_RESET
:
4428 if (cmd
== ECORE_Q_CMD_INIT
)
4429 next_state
= ECORE_Q_STATE_INITIALIZED
;
4432 case ECORE_Q_STATE_INITIALIZED
:
4433 if (cmd
== ECORE_Q_CMD_SETUP
) {
4434 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE
,
4435 ¶ms
->params
.setup
.flags
))
4436 next_state
= ECORE_Q_STATE_ACTIVE
;
4438 next_state
= ECORE_Q_STATE_INACTIVE
;
4442 case ECORE_Q_STATE_ACTIVE
:
4443 if (cmd
== ECORE_Q_CMD_DEACTIVATE
)
4444 next_state
= ECORE_Q_STATE_INACTIVE
;
4446 else if ((cmd
== ECORE_Q_CMD_EMPTY
) ||
4447 (cmd
== ECORE_Q_CMD_UPDATE_TPA
))
4448 next_state
= ECORE_Q_STATE_ACTIVE
;
4450 else if (cmd
== ECORE_Q_CMD_SETUP_TX_ONLY
) {
4451 next_state
= ECORE_Q_STATE_MULTI_COS
;
4455 else if (cmd
== ECORE_Q_CMD_HALT
)
4456 next_state
= ECORE_Q_STATE_STOPPED
;
4458 else if (cmd
== ECORE_Q_CMD_UPDATE
) {
4459 /* If "active" state change is requested, update the
4460 * state accordingly.
4462 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG
,
4463 &update_params
->update_flags
) &&
4464 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE
,
4465 &update_params
->update_flags
))
4466 next_state
= ECORE_Q_STATE_INACTIVE
;
4468 next_state
= ECORE_Q_STATE_ACTIVE
;
4472 case ECORE_Q_STATE_MULTI_COS
:
4473 if (cmd
== ECORE_Q_CMD_TERMINATE
)
4474 next_state
= ECORE_Q_STATE_MCOS_TERMINATED
;
4476 else if (cmd
== ECORE_Q_CMD_SETUP_TX_ONLY
) {
4477 next_state
= ECORE_Q_STATE_MULTI_COS
;
4478 next_tx_only
= o
->num_tx_only
+ 1;
4481 else if ((cmd
== ECORE_Q_CMD_EMPTY
) ||
4482 (cmd
== ECORE_Q_CMD_UPDATE_TPA
))
4483 next_state
= ECORE_Q_STATE_MULTI_COS
;
4485 else if (cmd
== ECORE_Q_CMD_UPDATE
) {
4486 /* If "active" state change is requested, update the
4487 * state accordingly.
4489 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG
,
4490 &update_params
->update_flags
) &&
4491 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE
,
4492 &update_params
->update_flags
))
4493 next_state
= ECORE_Q_STATE_INACTIVE
;
4495 next_state
= ECORE_Q_STATE_MULTI_COS
;
4499 case ECORE_Q_STATE_MCOS_TERMINATED
:
4500 if (cmd
== ECORE_Q_CMD_CFC_DEL
) {
4501 next_tx_only
= o
->num_tx_only
- 1;
4502 if (next_tx_only
== 0)
4503 next_state
= ECORE_Q_STATE_ACTIVE
;
4505 next_state
= ECORE_Q_STATE_MULTI_COS
;
4509 case ECORE_Q_STATE_INACTIVE
:
4510 if (cmd
== ECORE_Q_CMD_ACTIVATE
)
4511 next_state
= ECORE_Q_STATE_ACTIVE
;
4513 else if ((cmd
== ECORE_Q_CMD_EMPTY
) ||
4514 (cmd
== ECORE_Q_CMD_UPDATE_TPA
))
4515 next_state
= ECORE_Q_STATE_INACTIVE
;
4517 else if (cmd
== ECORE_Q_CMD_HALT
)
4518 next_state
= ECORE_Q_STATE_STOPPED
;
4520 else if (cmd
== ECORE_Q_CMD_UPDATE
) {
4521 /* If "active" state change is requested, update the
4522 * state accordingly.
4524 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG
,
4525 &update_params
->update_flags
) &&
4526 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE
,
4527 &update_params
->update_flags
)) {
4528 if (o
->num_tx_only
== 0)
4529 next_state
= ECORE_Q_STATE_ACTIVE
;
4530 else /* tx only queues exist for this queue */
4531 next_state
= ECORE_Q_STATE_MULTI_COS
;
4533 next_state
= ECORE_Q_STATE_INACTIVE
;
4537 case ECORE_Q_STATE_STOPPED
:
4538 if (cmd
== ECORE_Q_CMD_TERMINATE
)
4539 next_state
= ECORE_Q_STATE_TERMINATED
;
4542 case ECORE_Q_STATE_TERMINATED
:
4543 if (cmd
== ECORE_Q_CMD_CFC_DEL
)
4544 next_state
= ECORE_Q_STATE_RESET
;
4548 PMD_DRV_LOG(ERR
, "Illegal state: %d", state
);
4551 /* Transition is assured */
4552 if (next_state
!= ECORE_Q_STATE_MAX
) {
4553 ECORE_MSG("Good state transition: %d(%d)->%d",
4554 state
, cmd
, next_state
);
4555 o
->next_state
= next_state
;
4556 o
->next_tx_only
= next_tx_only
;
4557 return ECORE_SUCCESS
;
4560 ECORE_MSG("Bad state transition request: %d %d", state
, cmd
);
4566 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4568 * @sc: device handle
4572 * It both checks if the requested command is legal in a current
4573 * state and, if it's legal, sets a `next_state' in the object
4574 * that will be used in the completion flow to set the `state'
4577 * returns 0 if a requested command is a legal transition,
4578 * ECORE_INVAL otherwise.
4580 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc
*sc __rte_unused
,
4581 struct ecore_queue_sp_obj
*o
,
4582 struct ecore_queue_state_params
4585 enum ecore_q_state state
= o
->state
, next_state
= ECORE_Q_STATE_MAX
;
4586 enum ecore_queue_cmd cmd
= params
->cmd
;
4589 case ECORE_Q_STATE_RESET
:
4590 if (cmd
== ECORE_Q_CMD_INIT
)
4591 next_state
= ECORE_Q_STATE_INITIALIZED
;
4594 case ECORE_Q_STATE_INITIALIZED
:
4595 if (cmd
== ECORE_Q_CMD_SETUP_TX_ONLY
) {
4596 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE
,
4597 ¶ms
->params
.tx_only
.flags
))
4598 next_state
= ECORE_Q_STATE_ACTIVE
;
4600 next_state
= ECORE_Q_STATE_INACTIVE
;
4604 case ECORE_Q_STATE_ACTIVE
:
4605 case ECORE_Q_STATE_INACTIVE
:
4606 if (cmd
== ECORE_Q_CMD_CFC_DEL
)
4607 next_state
= ECORE_Q_STATE_RESET
;
4611 PMD_DRV_LOG(ERR
, "Illegal state: %d", state
);
4614 /* Transition is assured */
4615 if (next_state
!= ECORE_Q_STATE_MAX
) {
4616 ECORE_MSG("Good state transition: %d(%d)->%d",
4617 state
, cmd
, next_state
);
4618 o
->next_state
= next_state
;
4619 return ECORE_SUCCESS
;
4622 ECORE_MSG("Bad state transition request: %d %d", state
, cmd
);
4626 void ecore_init_queue_obj(struct bnx2x_softc
*sc
,
4627 struct ecore_queue_sp_obj
*obj
,
4628 uint8_t cl_id
, uint32_t * cids
, uint8_t cid_cnt
,
4629 uint8_t func_id
, void *rdata
,
4630 ecore_dma_addr_t rdata_mapping
, unsigned long type
)
4632 ECORE_MEMSET(obj
, 0, sizeof(*obj
));
4634 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
4635 ECORE_BUG_ON(ECORE_MULTI_TX_COS
< cid_cnt
);
4637 rte_memcpy(obj
->cids
, cids
, sizeof(obj
->cids
[0]) * cid_cnt
);
4638 obj
->max_cos
= cid_cnt
;
4640 obj
->func_id
= func_id
;
4642 obj
->rdata_mapping
= rdata_mapping
;
4644 obj
->next_state
= ECORE_Q_STATE_MAX
;
4646 if (CHIP_IS_E1x(sc
))
4647 obj
->send_cmd
= ecore_queue_send_cmd_e1x
;
4649 obj
->send_cmd
= ecore_queue_send_cmd_e2
;
4651 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD
, &type
))
4652 obj
->check_transition
= ecore_queue_chk_fwd_transition
;
4654 obj
->check_transition
= ecore_queue_chk_transition
;
4656 obj
->complete_cmd
= ecore_queue_comp_cmd
;
4657 obj
->wait_comp
= ecore_queue_wait_comp
;
4658 obj
->set_pending
= ecore_queue_set_pending
;
4661 /********************** Function state object *********************************/
4662 enum ecore_func_state
ecore_func_get_state(__rte_unused
struct bnx2x_softc
*sc
,
4663 struct ecore_func_sp_obj
*o
)
4665 /* in the middle of transaction - return INVALID state */
4667 return ECORE_F_STATE_MAX
;
4669 /* unsure the order of reading of o->pending and o->state
4670 * o->pending should be read first
4677 static int ecore_func_wait_comp(struct bnx2x_softc
*sc
,
4678 struct ecore_func_sp_obj
*o
,
4679 enum ecore_func_cmd cmd
)
4681 return ecore_state_wait(sc
, cmd
, &o
->pending
);
4685 * ecore_func_state_change_comp - complete the state machine transition
4687 * @sc: device handle
4691 * Called on state change transition. Completes the state
4692 * machine transition only - no HW interaction.
4695 ecore_func_state_change_comp(struct bnx2x_softc
*sc __rte_unused
,
4696 struct ecore_func_sp_obj
*o
,
4697 enum ecore_func_cmd cmd
)
4699 unsigned long cur_pending
= o
->pending
;
4701 if (!ECORE_TEST_AND_CLEAR_BIT(cmd
, &cur_pending
)) {
4703 "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4704 cmd
, ECORE_FUNC_ID(sc
), o
->state
, cur_pending
,
4709 ECORE_MSG("Completing command %d for func %d, setting state to %d",
4710 cmd
, ECORE_FUNC_ID(sc
), o
->next_state
);
4712 o
->state
= o
->next_state
;
4713 o
->next_state
= ECORE_F_STATE_MAX
;
4715 /* It's important that o->state and o->next_state are
4716 * updated before o->pending.
4720 ECORE_CLEAR_BIT(cmd
, &o
->pending
);
4721 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4723 return ECORE_SUCCESS
;
4727 * ecore_func_comp_cmd - complete the state change command
4729 * @sc: device handle
4733 * Checks that the arrived completion is expected.
4735 static int ecore_func_comp_cmd(struct bnx2x_softc
*sc
,
4736 struct ecore_func_sp_obj
*o
,
4737 enum ecore_func_cmd cmd
)
4739 /* Complete the state machine part first, check if it's a
4742 int rc
= ecore_func_state_change_comp(sc
, o
, cmd
);
4747 * ecore_func_chk_transition - perform function state machine transition
4749 * @sc: device handle
4753 * It both checks if the requested command is legal in a current
4754 * state and, if it's legal, sets a `next_state' in the object
4755 * that will be used in the completion flow to set the `state'
4758 * returns 0 if a requested command is a legal transition,
4759 * ECORE_INVAL otherwise.
4761 static int ecore_func_chk_transition(struct bnx2x_softc
*sc __rte_unused
,
4762 struct ecore_func_sp_obj
*o
,
4763 struct ecore_func_state_params
*params
)
4765 enum ecore_func_state state
= o
->state
, next_state
= ECORE_F_STATE_MAX
;
4766 enum ecore_func_cmd cmd
= params
->cmd
;
4768 /* Forget all pending for completion commands if a driver only state
4769 * transition has been requested.
4771 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, ¶ms
->ramrod_flags
)) {
4773 o
->next_state
= ECORE_F_STATE_MAX
;
4776 /* Don't allow a next state transition if we are in the middle of
4783 case ECORE_F_STATE_RESET
:
4784 if (cmd
== ECORE_F_CMD_HW_INIT
)
4785 next_state
= ECORE_F_STATE_INITIALIZED
;
4788 case ECORE_F_STATE_INITIALIZED
:
4789 if (cmd
== ECORE_F_CMD_START
)
4790 next_state
= ECORE_F_STATE_STARTED
;
4792 else if (cmd
== ECORE_F_CMD_HW_RESET
)
4793 next_state
= ECORE_F_STATE_RESET
;
4796 case ECORE_F_STATE_STARTED
:
4797 if (cmd
== ECORE_F_CMD_STOP
)
4798 next_state
= ECORE_F_STATE_INITIALIZED
;
4799 /* afex ramrods can be sent only in started mode, and only
4800 * if not pending for function_stop ramrod completion
4801 * for these events - next state remained STARTED.
4803 else if ((cmd
== ECORE_F_CMD_AFEX_UPDATE
) &&
4804 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP
, &o
->pending
)))
4805 next_state
= ECORE_F_STATE_STARTED
;
4807 else if ((cmd
== ECORE_F_CMD_AFEX_VIFLISTS
) &&
4808 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP
, &o
->pending
)))
4809 next_state
= ECORE_F_STATE_STARTED
;
4811 /* Switch_update ramrod can be sent in either started or
4812 * tx_stopped state, and it doesn't change the state.
4814 else if ((cmd
== ECORE_F_CMD_SWITCH_UPDATE
) &&
4815 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP
, &o
->pending
)))
4816 next_state
= ECORE_F_STATE_STARTED
;
4818 else if (cmd
== ECORE_F_CMD_TX_STOP
)
4819 next_state
= ECORE_F_STATE_TX_STOPPED
;
4822 case ECORE_F_STATE_TX_STOPPED
:
4823 if ((cmd
== ECORE_F_CMD_SWITCH_UPDATE
) &&
4824 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP
, &o
->pending
)))
4825 next_state
= ECORE_F_STATE_TX_STOPPED
;
4827 else if (cmd
== ECORE_F_CMD_TX_START
)
4828 next_state
= ECORE_F_STATE_STARTED
;
4832 PMD_DRV_LOG(ERR
, "Unknown state: %d", state
);
4835 /* Transition is assured */
4836 if (next_state
!= ECORE_F_STATE_MAX
) {
4837 ECORE_MSG("Good function state transition: %d(%d)->%d",
4838 state
, cmd
, next_state
);
4839 o
->next_state
= next_state
;
4840 return ECORE_SUCCESS
;
4843 ECORE_MSG("Bad function state transition request: %d %d", state
, cmd
);
4849 * ecore_func_init_func - performs HW init at function stage
4851 * @sc: device handle
4854 * Init HW when the current phase is
4855 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4858 static int ecore_func_init_func(struct bnx2x_softc
*sc
,
4859 const struct ecore_func_sp_drv_ops
*drv
)
4861 return drv
->init_hw_func(sc
);
4865 * ecore_func_init_port - performs HW init at port stage
4867 * @sc: device handle
4870 * Init HW when the current phase is
4871 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4872 * FUNCTION-only HW blocks.
4875 static int ecore_func_init_port(struct bnx2x_softc
*sc
,
4876 const struct ecore_func_sp_drv_ops
*drv
)
4878 int rc
= drv
->init_hw_port(sc
);
4882 return ecore_func_init_func(sc
, drv
);
4886 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4888 * @sc: device handle
4891 * Init HW when the current phase is
4892 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4893 * PORT-only and FUNCTION-only HW blocks.
4895 static int ecore_func_init_cmn_chip(struct bnx2x_softc
*sc
, const struct ecore_func_sp_drv_ops
4898 int rc
= drv
->init_hw_cmn_chip(sc
);
4902 return ecore_func_init_port(sc
, drv
);
4906 * ecore_func_init_cmn - performs HW init at common stage
4908 * @sc: device handle
4911 * Init HW when the current phase is
4912 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
4913 * PORT-only and FUNCTION-only HW blocks.
4915 static int ecore_func_init_cmn(struct bnx2x_softc
*sc
,
4916 const struct ecore_func_sp_drv_ops
*drv
)
4918 int rc
= drv
->init_hw_cmn(sc
);
4922 return ecore_func_init_port(sc
, drv
);
4925 static int ecore_func_hw_init(struct bnx2x_softc
*sc
,
4926 struct ecore_func_state_params
*params
)
4928 uint32_t load_code
= params
->params
.hw_init
.load_phase
;
4929 struct ecore_func_sp_obj
*o
= params
->f_obj
;
4930 const struct ecore_func_sp_drv_ops
*drv
= o
->drv
;
4933 ECORE_MSG("function %d load_code %x",
4934 ECORE_ABS_FUNC_ID(sc
), load_code
);
4937 rc
= drv
->init_fw(sc
);
4939 PMD_DRV_LOG(ERR
, "Error loading firmware");
4943 /* Handle the beginning of COMMON_XXX pases separately... */
4944 switch (load_code
) {
4945 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
:
4946 rc
= ecore_func_init_cmn_chip(sc
, drv
);
4951 case FW_MSG_CODE_DRV_LOAD_COMMON
:
4952 rc
= ecore_func_init_cmn(sc
, drv
);
4957 case FW_MSG_CODE_DRV_LOAD_PORT
:
4958 rc
= ecore_func_init_port(sc
, drv
);
4963 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
4964 rc
= ecore_func_init_func(sc
, drv
);
4970 PMD_DRV_LOG(ERR
, "Unknown load_code (0x%x) from MCP",
4976 /* In case of success, complete the command immediately: no ramrods
4980 o
->complete_cmd(sc
, o
, ECORE_F_CMD_HW_INIT
);
4986 * ecore_func_reset_func - reset HW at function stage
4988 * @sc: device handle
4991 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4992 * FUNCTION-only HW blocks.
4994 static void ecore_func_reset_func(struct bnx2x_softc
*sc
, const struct ecore_func_sp_drv_ops
4997 drv
->reset_hw_func(sc
);
5001 * ecore_func_reset_port - reser HW at port stage
5003 * @sc: device handle
5006 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5007 * FUNCTION-only and PORT-only HW blocks.
5011 * It's important to call reset_port before reset_func() as the last thing
5012 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5013 * makes impossible any DMAE transactions.
5015 static void ecore_func_reset_port(struct bnx2x_softc
*sc
, const struct ecore_func_sp_drv_ops
5018 drv
->reset_hw_port(sc
);
5019 ecore_func_reset_func(sc
, drv
);
5023 * ecore_func_reset_cmn - reser HW at common stage
5025 * @sc: device handle
5028 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5029 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5030 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5032 static void ecore_func_reset_cmn(struct bnx2x_softc
*sc
,
5033 const struct ecore_func_sp_drv_ops
*drv
)
5035 ecore_func_reset_port(sc
, drv
);
5036 drv
->reset_hw_cmn(sc
);
5039 static int ecore_func_hw_reset(struct bnx2x_softc
*sc
,
5040 struct ecore_func_state_params
*params
)
5042 uint32_t reset_phase
= params
->params
.hw_reset
.reset_phase
;
5043 struct ecore_func_sp_obj
*o
= params
->f_obj
;
5044 const struct ecore_func_sp_drv_ops
*drv
= o
->drv
;
5046 ECORE_MSG("function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc
),
5049 switch (reset_phase
) {
5050 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
5051 ecore_func_reset_cmn(sc
, drv
);
5053 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
5054 ecore_func_reset_port(sc
, drv
);
5056 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
5057 ecore_func_reset_func(sc
, drv
);
5060 PMD_DRV_LOG(ERR
, "Unknown reset_phase (0x%x) from MCP",
5065 /* Complete the command immediately: no ramrods have been sent. */
5066 o
->complete_cmd(sc
, o
, ECORE_F_CMD_HW_RESET
);
5068 return ECORE_SUCCESS
;
5071 static int ecore_func_send_start(struct bnx2x_softc
*sc
,
5072 struct ecore_func_state_params
*params
)
5074 struct ecore_func_sp_obj
*o
= params
->f_obj
;
5075 struct function_start_data
*rdata
=
5076 (struct function_start_data
*)o
->rdata
;
5077 ecore_dma_addr_t data_mapping
= o
->rdata_mapping
;
5078 struct ecore_func_start_params
*start_params
= ¶ms
->params
.start
;
5080 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
5082 /* Fill the ramrod data with provided parameters */
5083 rdata
->function_mode
= (uint8_t) start_params
->mf_mode
;
5084 rdata
->sd_vlan_tag
= ECORE_CPU_TO_LE16(start_params
->sd_vlan_tag
);
5085 rdata
->path_id
= ECORE_PATH_ID(sc
);
5086 rdata
->network_cos_mode
= start_params
->network_cos_mode
;
5087 rdata
->gre_tunnel_mode
= start_params
->gre_tunnel_mode
;
5088 rdata
->gre_tunnel_rss
= start_params
->gre_tunnel_rss
;
5091 * No need for an explicit memory barrier here as long we would
5092 * need to ensure the ordering of writing to the SPQ element
5093 * and updating of the SPQ producer which involves a memory
5094 * read and we will have to put a full memory barrier there
5095 * (inside ecore_sp_post()).
5098 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_FUNCTION_START
, 0,
5099 data_mapping
, NONE_CONNECTION_TYPE
);
5102 static int ecore_func_send_switch_update(struct bnx2x_softc
*sc
, struct ecore_func_state_params
5105 struct ecore_func_sp_obj
*o
= params
->f_obj
;
5106 struct function_update_data
*rdata
=
5107 (struct function_update_data
*)o
->rdata
;
5108 ecore_dma_addr_t data_mapping
= o
->rdata_mapping
;
5109 struct ecore_func_switch_update_params
*switch_update_params
=
5110 ¶ms
->params
.switch_update
;
5112 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
5114 /* Fill the ramrod data with provided parameters */
5115 rdata
->tx_switch_suspend_change_flg
= 1;
5116 rdata
->tx_switch_suspend
= switch_update_params
->suspend
;
5117 rdata
->echo
= SWITCH_UPDATE
;
5119 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE
, 0,
5120 data_mapping
, NONE_CONNECTION_TYPE
);
5123 static int ecore_func_send_afex_update(struct bnx2x_softc
*sc
, struct ecore_func_state_params
5126 struct ecore_func_sp_obj
*o
= params
->f_obj
;
5127 struct function_update_data
*rdata
=
5128 (struct function_update_data
*)o
->afex_rdata
;
5129 ecore_dma_addr_t data_mapping
= o
->afex_rdata_mapping
;
5130 struct ecore_func_afex_update_params
*afex_update_params
=
5131 ¶ms
->params
.afex_update
;
5133 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
5135 /* Fill the ramrod data with provided parameters */
5136 rdata
->vif_id_change_flg
= 1;
5137 rdata
->vif_id
= ECORE_CPU_TO_LE16(afex_update_params
->vif_id
);
5138 rdata
->afex_default_vlan_change_flg
= 1;
5139 rdata
->afex_default_vlan
=
5140 ECORE_CPU_TO_LE16(afex_update_params
->afex_default_vlan
);
5141 rdata
->allowed_priorities_change_flg
= 1;
5142 rdata
->allowed_priorities
= afex_update_params
->allowed_priorities
;
5143 rdata
->echo
= AFEX_UPDATE
;
5145 /* No need for an explicit memory barrier here as long we would
5146 * need to ensure the ordering of writing to the SPQ element
5147 * and updating of the SPQ producer which involves a memory
5148 * read and we will have to put a full memory barrier there
5149 * (inside ecore_sp_post()).
5151 ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5153 rdata
->afex_default_vlan
, rdata
->allowed_priorities
);
5155 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE
, 0,
5156 data_mapping
, NONE_CONNECTION_TYPE
);
5160 inline int ecore_func_send_afex_viflists(struct bnx2x_softc
*sc
,
5161 struct ecore_func_state_params
*params
)
5163 struct ecore_func_sp_obj
*o
= params
->f_obj
;
5164 struct afex_vif_list_ramrod_data
*rdata
=
5165 (struct afex_vif_list_ramrod_data
*)o
->afex_rdata
;
5166 struct ecore_func_afex_viflists_params
*afex_vif_params
=
5167 ¶ms
->params
.afex_viflists
;
5168 uint64_t *p_rdata
= (uint64_t *) rdata
;
5170 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
5172 /* Fill the ramrod data with provided parameters */
5173 rdata
->vif_list_index
=
5174 ECORE_CPU_TO_LE16(afex_vif_params
->vif_list_index
);
5175 rdata
->func_bit_map
= afex_vif_params
->func_bit_map
;
5176 rdata
->afex_vif_list_command
= afex_vif_params
->afex_vif_list_command
;
5177 rdata
->func_to_clear
= afex_vif_params
->func_to_clear
;
5179 /* send in echo type of sub command */
5180 rdata
->echo
= afex_vif_params
->afex_vif_list_command
;
5182 /* No need for an explicit memory barrier here as long we would
5183 * need to ensure the ordering of writing to the SPQ element
5184 * and updating of the SPQ producer which involves a memory
5185 * read and we will have to put a full memory barrier there
5186 * (inside ecore_sp_post()).
5190 ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5191 rdata
->afex_vif_list_command
, rdata
->vif_list_index
,
5192 rdata
->func_bit_map
, rdata
->func_to_clear
);
5194 /* this ramrod sends data directly and not through DMA mapping */
5195 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS
, 0,
5196 *p_rdata
, NONE_CONNECTION_TYPE
);
5199 static int ecore_func_send_stop(struct bnx2x_softc
*sc
, __rte_unused
struct
5200 ecore_func_state_params
*params
)
5202 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_FUNCTION_STOP
, 0, 0,
5203 NONE_CONNECTION_TYPE
);
5206 static int ecore_func_send_tx_stop(struct bnx2x_softc
*sc
, __rte_unused
struct
5207 ecore_func_state_params
*params
)
5209 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC
, 0, 0,
5210 NONE_CONNECTION_TYPE
);
5213 static int ecore_func_send_tx_start(struct bnx2x_softc
*sc
, struct ecore_func_state_params
5216 struct ecore_func_sp_obj
*o
= params
->f_obj
;
5217 struct flow_control_configuration
*rdata
=
5218 (struct flow_control_configuration
*)o
->rdata
;
5219 ecore_dma_addr_t data_mapping
= o
->rdata_mapping
;
5220 struct ecore_func_tx_start_params
*tx_start_params
=
5221 ¶ms
->params
.tx_start
;
5224 ECORE_MEMSET(rdata
, 0, sizeof(*rdata
));
5226 rdata
->dcb_enabled
= tx_start_params
->dcb_enabled
;
5227 rdata
->dcb_version
= tx_start_params
->dcb_version
;
5228 rdata
->dont_add_pri_0
= tx_start_params
->dont_add_pri_0
;
5230 for (i
= 0; i
< ARRAY_SIZE(rdata
->traffic_type_to_priority_cos
); i
++)
5231 rdata
->traffic_type_to_priority_cos
[i
] =
5232 tx_start_params
->traffic_type_to_priority_cos
[i
];
5234 return ecore_sp_post(sc
, RAMROD_CMD_ID_COMMON_START_TRAFFIC
, 0,
5235 data_mapping
, NONE_CONNECTION_TYPE
);
5238 static int ecore_func_send_cmd(struct bnx2x_softc
*sc
,
5239 struct ecore_func_state_params
*params
)
5241 switch (params
->cmd
) {
5242 case ECORE_F_CMD_HW_INIT
:
5243 return ecore_func_hw_init(sc
, params
);
5244 case ECORE_F_CMD_START
:
5245 return ecore_func_send_start(sc
, params
);
5246 case ECORE_F_CMD_STOP
:
5247 return ecore_func_send_stop(sc
, params
);
5248 case ECORE_F_CMD_HW_RESET
:
5249 return ecore_func_hw_reset(sc
, params
);
5250 case ECORE_F_CMD_AFEX_UPDATE
:
5251 return ecore_func_send_afex_update(sc
, params
);
5252 case ECORE_F_CMD_AFEX_VIFLISTS
:
5253 return ecore_func_send_afex_viflists(sc
, params
);
5254 case ECORE_F_CMD_TX_STOP
:
5255 return ecore_func_send_tx_stop(sc
, params
);
5256 case ECORE_F_CMD_TX_START
:
5257 return ecore_func_send_tx_start(sc
, params
);
5258 case ECORE_F_CMD_SWITCH_UPDATE
:
5259 return ecore_func_send_switch_update(sc
, params
);
5261 PMD_DRV_LOG(ERR
, "Unknown command: %d", params
->cmd
);
5266 void ecore_init_func_obj(__rte_unused
struct bnx2x_softc
*sc
,
5267 struct ecore_func_sp_obj
*obj
,
5268 void *rdata
, ecore_dma_addr_t rdata_mapping
,
5269 void *afex_rdata
, ecore_dma_addr_t afex_rdata_mapping
,
5270 struct ecore_func_sp_drv_ops
*drv_iface
)
5272 ECORE_MEMSET(obj
, 0, sizeof(*obj
));
5274 ECORE_MUTEX_INIT(&obj
->one_pending_mutex
);
5277 obj
->rdata_mapping
= rdata_mapping
;
5278 obj
->afex_rdata
= afex_rdata
;
5279 obj
->afex_rdata_mapping
= afex_rdata_mapping
;
5280 obj
->send_cmd
= ecore_func_send_cmd
;
5281 obj
->check_transition
= ecore_func_chk_transition
;
5282 obj
->complete_cmd
= ecore_func_comp_cmd
;
5283 obj
->wait_comp
= ecore_func_wait_comp
;
5284 obj
->drv
= drv_iface
;
5288 * ecore_func_state_change - perform Function state change transition
5290 * @sc: device handle
5291 * @params: parameters to perform the transaction
5293 * returns 0 in case of successfully completed transition,
5294 * negative error code in case of failure, positive
5295 * (EBUSY) value if there is a completion to that is
5296 * still pending (possible only if RAMROD_COMP_WAIT is
5297 * not set in params->ramrod_flags for asynchronous
5300 int ecore_func_state_change(struct bnx2x_softc
*sc
,
5301 struct ecore_func_state_params
*params
)
5303 struct ecore_func_sp_obj
*o
= params
->f_obj
;
5305 enum ecore_func_cmd cmd
= params
->cmd
;
5306 unsigned long *pending
= &o
->pending
;
5308 ECORE_MUTEX_LOCK(&o
->one_pending_mutex
);
5310 /* Check that the requested transition is legal */
5311 rc
= o
->check_transition(sc
, o
, params
);
5312 if ((rc
== ECORE_BUSY
) &&
5313 (ECORE_TEST_BIT(RAMROD_RETRY
, ¶ms
->ramrod_flags
))) {
5314 while ((rc
== ECORE_BUSY
) && (--cnt
> 0)) {
5315 ECORE_MUTEX_UNLOCK(&o
->one_pending_mutex
);
5317 ECORE_MUTEX_LOCK(&o
->one_pending_mutex
);
5318 rc
= o
->check_transition(sc
, o
, params
);
5320 if (rc
== ECORE_BUSY
) {
5321 ECORE_MUTEX_UNLOCK(&o
->one_pending_mutex
);
5323 "timeout waiting for previous ramrod completion");
5327 ECORE_MUTEX_UNLOCK(&o
->one_pending_mutex
);
5331 /* Set "pending" bit */
5332 ECORE_SET_BIT(cmd
, pending
);
5334 /* Don't send a command if only driver cleanup was requested */
5335 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY
, ¶ms
->ramrod_flags
)) {
5336 ecore_func_state_change_comp(sc
, o
, cmd
);
5337 ECORE_MUTEX_UNLOCK(&o
->one_pending_mutex
);
5340 rc
= o
->send_cmd(sc
, params
);
5342 ECORE_MUTEX_UNLOCK(&o
->one_pending_mutex
);
5345 o
->next_state
= ECORE_F_STATE_MAX
;
5346 ECORE_CLEAR_BIT(cmd
, pending
);
5347 ECORE_SMP_MB_AFTER_CLEAR_BIT();
5351 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT
, ¶ms
->ramrod_flags
)) {
5352 rc
= o
->wait_comp(sc
, o
, cmd
);
5356 return ECORE_SUCCESS
;
5360 return ECORE_RET_PENDING(cmd
, pending
);
5363 /******************************************************************************
5365 * Calculates crc 8 on a word value: polynomial 0-1-2-8
5366 * Code was translated from Verilog.
5368 *****************************************************************************/
/**
 * ecore_calc_crc8 - calculate crc8 on a 32-bit word
 *
 * @data: 32-bit input word (consumed LSB first)
 * @crc:  previous crc value to fold in
 *
 * Calculates crc 8 on a word value: polynomial 0-1-2-8 (x^8 + x^2 + x + 1).
 * Code was translated from Verilog: the word and the crc are split into
 * individual bits and the new crc bits are computed as fixed XOR networks.
 */
uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
{
	uint8_t D[32];
	uint8_t NewCRC[8];
	uint8_t C[8];
	uint8_t crc_res;
	uint8_t i;

	/* split the data into 32 bits */
	for (i = 0; i < 32; i++) {
		D[i] = (uint8_t) (data & 1);
		data = data >> 1;
	}

	/* split the crc into 8 bits */
	for (i = 0; i < 8; i++) {
		C[i] = crc & 1;
		crc = crc >> 1;
	}

	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
	    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
	    C[6] ^ C[7];
	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
	    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
	    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
	    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
	    C[0] ^ C[1] ^ C[4] ^ C[5];
	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
	    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
	    C[1] ^ C[2] ^ C[5] ^ C[6];
	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
	    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
	    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
	    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
	    C[3] ^ C[4] ^ C[7];
	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
	    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
	    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];

	crc_res = 0;

	/* convert the bits to a byte */
	for (i = 0; i < 8; i++) {
		crc_res |= (uint8_t)(NewCRC[i] << i);
	}

	return crc_res;
}
5421 ecore_calc_crc32(uint32_t crc
, uint8_t const *p
, uint32_t len
, uint32_t magic
)
5426 for (i
= 0; i
< 8; i
++)
5427 crc
= (crc
>> 1) ^ ((crc
& 1) ? magic
: 0);