/*
 * Copyright (c) 2016 QLogic Corporation.
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
11 #include "ecore_gtt_reg_addr.h"
12 #include "ecore_hsi_common.h"
14 #include "ecore_sp_api.h"
15 #include "ecore_spq.h"
16 #include "ecore_iro.h"
17 #include "ecore_init_fw_funcs.h"
18 #include "ecore_cxt.h"
19 #include "ecore_int.h"
20 #include "ecore_dev_api.h"
21 #include "ecore_mcp.h"
23 #include "ecore_sriov.h"
25 /***************************************************************************
26 * Structures & Definitions
27 ***************************************************************************/
29 #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
31 #define SPQ_BLOCK_DELAY_MAX_ITER (10)
32 #define SPQ_BLOCK_DELAY_US (10)
33 #define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
34 #define SPQ_BLOCK_SLEEP_MS (5)
36 /***************************************************************************
37 * Blocking Imp. (BLOCK/EBLOCK mode)
38 ***************************************************************************/
39 static void ecore_spq_blocking_cb(struct ecore_hwfn
*p_hwfn
,
41 union event_ring_data
*data
,
44 struct ecore_spq_comp_done
*comp_done
;
46 comp_done
= (struct ecore_spq_comp_done
*)cookie
;
48 comp_done
->done
= 0x1;
49 comp_done
->fw_return_code
= fw_return_code
;
51 /* make update visible to waiting thread */
52 OSAL_SMP_WMB(p_hwfn
->p_dev
);
55 static enum _ecore_status_t
__ecore_spq_block(struct ecore_hwfn
*p_hwfn
,
56 struct ecore_spq_entry
*p_ent
,
58 bool sleep_between_iter
)
60 struct ecore_spq_comp_done
*comp_done
;
63 comp_done
= (struct ecore_spq_comp_done
*)p_ent
->comp_cb
.cookie
;
64 iter_cnt
= sleep_between_iter
? SPQ_BLOCK_SLEEP_MAX_ITER
65 : SPQ_BLOCK_DELAY_MAX_ITER
;
68 OSAL_POLL_MODE_DPC(p_hwfn
);
69 OSAL_SMP_RMB(p_hwfn
->p_dev
);
70 if (comp_done
->done
== 1) {
72 *p_fw_ret
= comp_done
->fw_return_code
;
76 if (sleep_between_iter
)
77 OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS
);
79 OSAL_UDELAY(SPQ_BLOCK_DELAY_US
);
85 static enum _ecore_status_t
ecore_spq_block(struct ecore_hwfn
*p_hwfn
,
86 struct ecore_spq_entry
*p_ent
,
87 u8
*p_fw_ret
, bool skip_quick_poll
)
89 struct ecore_spq_comp_done
*comp_done
;
90 enum _ecore_status_t rc
;
92 /* A relatively short polling period w/o sleeping, to allow the FW to
93 * complete the ramrod and thus possibly to avoid the following sleeps.
95 if (!skip_quick_poll
) {
96 rc
= __ecore_spq_block(p_hwfn
, p_ent
, p_fw_ret
, false);
97 if (rc
== ECORE_SUCCESS
)
101 /* Move to polling with a sleeping period between iterations */
102 rc
= __ecore_spq_block(p_hwfn
, p_ent
, p_fw_ret
, true);
103 if (rc
== ECORE_SUCCESS
)
104 return ECORE_SUCCESS
;
106 DP_INFO(p_hwfn
, "Ramrod is stuck, requesting MCP drain\n");
107 rc
= ecore_mcp_drain(p_hwfn
, p_hwfn
->p_main_ptt
);
108 if (rc
!= ECORE_SUCCESS
) {
109 DP_NOTICE(p_hwfn
, true, "MCP drain failed\n");
113 /* Retry after drain */
114 rc
= __ecore_spq_block(p_hwfn
, p_ent
, p_fw_ret
, true);
115 if (rc
== ECORE_SUCCESS
)
116 return ECORE_SUCCESS
;
118 comp_done
= (struct ecore_spq_comp_done
*)p_ent
->comp_cb
.cookie
;
119 if (comp_done
->done
== 1) {
121 *p_fw_ret
= comp_done
->fw_return_code
;
122 return ECORE_SUCCESS
;
125 DP_NOTICE(p_hwfn
, true,
126 "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
127 OSAL_LE32_TO_CPU(p_ent
->elem
.hdr
.cid
),
128 p_ent
->elem
.hdr
.cmd_id
, p_ent
->elem
.hdr
.protocol_id
,
129 OSAL_LE16_TO_CPU(p_ent
->elem
.hdr
.echo
));
131 ecore_hw_err_notify(p_hwfn
, ECORE_HW_ERR_RAMROD_FAIL
);
136 /***************************************************************************
137 * SPQ entries inner API
138 ***************************************************************************/
139 static enum _ecore_status_t
140 ecore_spq_fill_entry(struct ecore_hwfn
*p_hwfn
, struct ecore_spq_entry
*p_ent
)
144 switch (p_ent
->comp_mode
) {
145 case ECORE_SPQ_MODE_EBLOCK
:
146 case ECORE_SPQ_MODE_BLOCK
:
147 p_ent
->comp_cb
.function
= ecore_spq_blocking_cb
;
149 case ECORE_SPQ_MODE_CB
:
152 DP_NOTICE(p_hwfn
, true, "Unknown SPQE completion mode %d\n",
157 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
,
158 "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
159 " Data pointer: [%08x:%08x] Completion Mode: %s\n",
160 p_ent
->elem
.hdr
.cid
, p_ent
->elem
.hdr
.cmd_id
,
161 p_ent
->elem
.hdr
.protocol_id
,
162 p_ent
->elem
.data_ptr
.hi
, p_ent
->elem
.data_ptr
.lo
,
163 D_TRINE(p_ent
->comp_mode
, ECORE_SPQ_MODE_EBLOCK
,
164 ECORE_SPQ_MODE_BLOCK
, "MODE_EBLOCK", "MODE_BLOCK",
167 return ECORE_SUCCESS
;
170 /***************************************************************************
172 ***************************************************************************/
173 static void ecore_spq_hw_initialize(struct ecore_hwfn
*p_hwfn
,
174 struct ecore_spq
*p_spq
)
176 struct ecore_cxt_info cxt_info
;
177 struct core_conn_context
*p_cxt
;
178 enum _ecore_status_t rc
;
181 cxt_info
.iid
= p_spq
->cid
;
183 rc
= ecore_cxt_get_cid_info(p_hwfn
, &cxt_info
);
186 DP_NOTICE(p_hwfn
, true, "Cannot find context info for cid=%d\n",
191 p_cxt
= cxt_info
.p_cxt
;
193 /* @@@TBD we zero the context until we have ilt_reset implemented. */
194 OSAL_MEM_ZERO(p_cxt
, sizeof(*p_cxt
));
196 if (ECORE_IS_BB(p_hwfn
->p_dev
) || ECORE_IS_AH(p_hwfn
->p_dev
)) {
197 SET_FIELD(p_cxt
->xstorm_ag_context
.flags10
,
198 E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN
, 1);
199 SET_FIELD(p_cxt
->xstorm_ag_context
.flags1
,
200 E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE
, 1);
201 /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
202 * E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
204 SET_FIELD(p_cxt
->xstorm_ag_context
.flags9
,
205 E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN
, 1);
208 /* CDU validation - FIXME currently disabled */
210 /* QM physical queue */
211 physical_q
= ecore_get_cm_pq_idx(p_hwfn
, PQ_FLAGS_LB
);
212 p_cxt
->xstorm_ag_context
.physical_q0
= OSAL_CPU_TO_LE16(physical_q
);
214 p_cxt
->xstorm_st_context
.spq_base_lo
=
215 DMA_LO_LE(p_spq
->chain
.p_phys_addr
);
216 p_cxt
->xstorm_st_context
.spq_base_hi
=
217 DMA_HI_LE(p_spq
->chain
.p_phys_addr
);
219 DMA_REGPAIR_LE(p_cxt
->xstorm_st_context
.consolid_base_addr
,
220 p_hwfn
->p_consq
->chain
.p_phys_addr
);
223 static enum _ecore_status_t
ecore_spq_hw_post(struct ecore_hwfn
*p_hwfn
,
224 struct ecore_spq
*p_spq
,
225 struct ecore_spq_entry
*p_ent
)
227 struct ecore_chain
*p_chain
= &p_hwfn
->p_spq
->chain
;
228 u16 echo
= ecore_chain_get_prod_idx(p_chain
);
229 struct slow_path_element
*elem
;
230 struct core_db_data db
;
232 p_ent
->elem
.hdr
.echo
= OSAL_CPU_TO_LE16(echo
);
233 elem
= ecore_chain_produce(p_chain
);
235 DP_NOTICE(p_hwfn
, true, "Failed to produce from SPQ chain\n");
239 *elem
= p_ent
->elem
; /* struct assignment */
241 /* send a doorbell on the slow hwfn session */
242 OSAL_MEMSET(&db
, 0, sizeof(db
));
243 SET_FIELD(db
.params
, CORE_DB_DATA_DEST
, DB_DEST_XCM
);
244 SET_FIELD(db
.params
, CORE_DB_DATA_AGG_CMD
, DB_AGG_CMD_SET
);
245 SET_FIELD(db
.params
, CORE_DB_DATA_AGG_VAL_SEL
,
246 DQ_XCM_CORE_SPQ_PROD_CMD
);
247 db
.agg_flags
= DQ_XCM_CORE_DQ_CF_CMD
;
248 db
.spq_prod
= OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain
));
250 /* make sure the SPQE is updated before the doorbell */
251 OSAL_WMB(p_hwfn
->p_dev
);
253 DOORBELL(p_hwfn
, DB_ADDR(p_spq
->cid
, DQ_DEMS_LEGACY
),
256 /* make sure doorbell is rang */
257 OSAL_WMB(p_hwfn
->p_dev
);
259 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
,
260 "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
261 " agg_params: %02x, prod: %04x\n",
262 DB_ADDR(p_spq
->cid
, DQ_DEMS_LEGACY
), p_spq
->cid
, db
.params
,
263 db
.agg_flags
, ecore_chain_get_prod_idx(p_chain
));
265 return ECORE_SUCCESS
;
268 /***************************************************************************
269 * Asynchronous events
270 ***************************************************************************/
272 static enum _ecore_status_t
273 ecore_async_event_completion(struct ecore_hwfn
*p_hwfn
,
274 struct event_ring_entry
*p_eqe
)
276 switch (p_eqe
->protocol_id
) {
277 case PROTOCOLID_COMMON
:
278 return ecore_sriov_eqe_event(p_hwfn
,
280 p_eqe
->echo
, &p_eqe
->data
);
283 true, "Unknown Async completion for protocol: %d\n",
289 /***************************************************************************
291 ***************************************************************************/
292 void ecore_eq_prod_update(struct ecore_hwfn
*p_hwfn
, u16 prod
)
294 u32 addr
= GTT_BAR0_MAP_REG_USDM_RAM
+
295 USTORM_EQE_CONS_OFFSET(p_hwfn
->rel_pf_id
);
297 REG_WR16(p_hwfn
, addr
, prod
);
299 /* keep prod updates ordered */
300 OSAL_MMIOWB(p_hwfn
->p_dev
);
303 enum _ecore_status_t
ecore_eq_completion(struct ecore_hwfn
*p_hwfn
,
306 struct ecore_eq
*p_eq
= cookie
;
307 struct ecore_chain
*p_chain
= &p_eq
->chain
;
308 enum _ecore_status_t rc
= 0;
310 /* take a snapshot of the FW consumer */
311 u16 fw_cons_idx
= OSAL_LE16_TO_CPU(*p_eq
->p_fw_cons
);
313 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
, "fw_cons_idx %x\n", fw_cons_idx
);
315 /* Need to guarantee the fw_cons index we use points to a usuable
316 * element (to comply with our chain), so our macros would comply
318 if ((fw_cons_idx
& ecore_chain_get_usable_per_page(p_chain
)) ==
319 ecore_chain_get_usable_per_page(p_chain
)) {
320 fw_cons_idx
+= ecore_chain_get_unusable_per_page(p_chain
);
323 /* Complete current segment of eq entries */
324 while (fw_cons_idx
!= ecore_chain_get_cons_idx(p_chain
)) {
325 struct event_ring_entry
*p_eqe
= ecore_chain_consume(p_chain
);
331 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
,
332 "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
333 p_eqe
->opcode
, /* Event Opcode */
334 p_eqe
->protocol_id
, /* Event Protocol ID */
335 p_eqe
->reserved0
, /* Reserved */
336 /* Echo value from ramrod data on the host */
337 OSAL_LE16_TO_CPU(p_eqe
->echo
),
338 p_eqe
->fw_return_code
, /* FW return code for SP
343 if (GET_FIELD(p_eqe
->flags
, EVENT_RING_ENTRY_ASYNC
)) {
344 if (ecore_async_event_completion(p_hwfn
, p_eqe
))
346 } else if (ecore_spq_completion(p_hwfn
,
348 p_eqe
->fw_return_code
,
353 ecore_chain_recycle_consumed(p_chain
);
356 ecore_eq_prod_update(p_hwfn
, ecore_chain_get_prod_idx(p_chain
));
361 enum _ecore_status_t
ecore_eq_alloc(struct ecore_hwfn
*p_hwfn
, u16 num_elem
)
363 struct ecore_eq
*p_eq
;
365 /* Allocate EQ struct */
366 p_eq
= OSAL_ZALLOC(p_hwfn
->p_dev
, GFP_KERNEL
, sizeof(*p_eq
));
368 DP_NOTICE(p_hwfn
, true,
369 "Failed to allocate `struct ecore_eq'\n");
373 /* Allocate and initialize EQ chain*/
374 if (ecore_chain_alloc(p_hwfn
->p_dev
,
375 ECORE_CHAIN_USE_TO_PRODUCE
,
376 ECORE_CHAIN_MODE_PBL
,
377 ECORE_CHAIN_CNT_TYPE_U16
,
379 sizeof(union event_ring_element
),
380 &p_eq
->chain
, OSAL_NULL
) != ECORE_SUCCESS
) {
381 DP_NOTICE(p_hwfn
, true, "Failed to allocate eq chain\n");
382 goto eq_allocate_fail
;
385 /* register EQ completion on the SP SB */
386 ecore_int_register_cb(p_hwfn
, ecore_eq_completion
,
387 p_eq
, &p_eq
->eq_sb_index
, &p_eq
->p_fw_cons
);
390 return ECORE_SUCCESS
;
393 OSAL_FREE(p_hwfn
->p_dev
, p_eq
);
397 void ecore_eq_setup(struct ecore_hwfn
*p_hwfn
)
399 ecore_chain_reset(&p_hwfn
->p_eq
->chain
);
402 void ecore_eq_free(struct ecore_hwfn
*p_hwfn
)
407 ecore_chain_free(p_hwfn
->p_dev
, &p_hwfn
->p_eq
->chain
);
409 OSAL_FREE(p_hwfn
->p_dev
, p_hwfn
->p_eq
);
410 p_hwfn
->p_eq
= OSAL_NULL
;
413 /***************************************************************************
414 * CQE API - manipulate EQ functionality
415 ***************************************************************************/
416 static enum _ecore_status_t
ecore_cqe_completion(struct ecore_hwfn
*p_hwfn
,
417 struct eth_slow_path_rx_cqe
419 enum protocol_type protocol
)
421 if (IS_VF(p_hwfn
->p_dev
))
422 return OSAL_VF_CQE_COMPLETION(p_hwfn
, cqe
, protocol
);
424 /* @@@tmp - it's possible we'll eventually want to handle some
425 * actual commands that can arrive here, but for now this is only
426 * used to complete the ramrod using the echo value on the cqe
428 return ecore_spq_completion(p_hwfn
, cqe
->echo
, 0, OSAL_NULL
);
431 enum _ecore_status_t
ecore_eth_cqe_completion(struct ecore_hwfn
*p_hwfn
,
432 struct eth_slow_path_rx_cqe
*cqe
)
434 enum _ecore_status_t rc
;
436 rc
= ecore_cqe_completion(p_hwfn
, cqe
, PROTOCOLID_ETH
);
438 DP_NOTICE(p_hwfn
, true,
439 "Failed to handle RXQ CQE [cmd 0x%02x]\n",
446 /***************************************************************************
447 * Slow hwfn Queue (spq)
448 ***************************************************************************/
449 void ecore_spq_setup(struct ecore_hwfn
*p_hwfn
)
451 struct ecore_spq
*p_spq
= p_hwfn
->p_spq
;
452 struct ecore_spq_entry
*p_virt
= OSAL_NULL
;
453 dma_addr_t p_phys
= 0;
456 OSAL_LIST_INIT(&p_spq
->pending
);
457 OSAL_LIST_INIT(&p_spq
->completion_pending
);
458 OSAL_LIST_INIT(&p_spq
->free_pool
);
459 OSAL_LIST_INIT(&p_spq
->unlimited_pending
);
460 OSAL_SPIN_LOCK_INIT(&p_spq
->lock
);
463 p_phys
= p_spq
->p_phys
+ OFFSETOF(struct ecore_spq_entry
, ramrod
);
464 p_virt
= p_spq
->p_virt
;
466 capacity
= ecore_chain_get_capacity(&p_spq
->chain
);
467 for (i
= 0; i
< capacity
; i
++) {
468 DMA_REGPAIR_LE(p_virt
->elem
.data_ptr
, p_phys
);
470 OSAL_LIST_PUSH_TAIL(&p_virt
->list
, &p_spq
->free_pool
);
473 p_phys
+= sizeof(struct ecore_spq_entry
);
477 p_spq
->normal_count
= 0;
478 p_spq
->comp_count
= 0;
479 p_spq
->comp_sent_count
= 0;
480 p_spq
->unlimited_pending_count
= 0;
482 OSAL_MEM_ZERO(p_spq
->p_comp_bitmap
,
483 SPQ_COMP_BMAP_SIZE
* sizeof(unsigned long));
484 p_spq
->comp_bitmap_idx
= 0;
486 /* SPQ cid, cannot fail */
487 ecore_cxt_acquire_cid(p_hwfn
, PROTOCOLID_CORE
, &p_spq
->cid
);
488 ecore_spq_hw_initialize(p_hwfn
, p_spq
);
490 /* reset the chain itself */
491 ecore_chain_reset(&p_spq
->chain
);
494 enum _ecore_status_t
ecore_spq_alloc(struct ecore_hwfn
*p_hwfn
)
496 struct ecore_spq_entry
*p_virt
= OSAL_NULL
;
497 struct ecore_spq
*p_spq
= OSAL_NULL
;
498 dma_addr_t p_phys
= 0;
503 OSAL_ZALLOC(p_hwfn
->p_dev
, GFP_KERNEL
, sizeof(struct ecore_spq
));
505 DP_NOTICE(p_hwfn
, true,
506 "Failed to allocate `struct ecore_spq'\n");
511 if (ecore_chain_alloc(p_hwfn
->p_dev
,
512 ECORE_CHAIN_USE_TO_PRODUCE
,
513 ECORE_CHAIN_MODE_SINGLE
,
514 ECORE_CHAIN_CNT_TYPE_U16
,
515 0, /* N/A when the mode is SINGLE */
516 sizeof(struct slow_path_element
),
517 &p_spq
->chain
, OSAL_NULL
)) {
518 DP_NOTICE(p_hwfn
, true, "Failed to allocate spq chain\n");
519 goto spq_allocate_fail
;
522 /* allocate and fill the SPQ elements (incl. ramrod data list) */
523 capacity
= ecore_chain_get_capacity(&p_spq
->chain
);
524 p_virt
= OSAL_DMA_ALLOC_COHERENT(p_hwfn
->p_dev
, &p_phys
,
526 sizeof(struct ecore_spq_entry
));
528 goto spq_allocate_fail
;
530 p_spq
->p_virt
= p_virt
;
531 p_spq
->p_phys
= p_phys
;
533 OSAL_SPIN_LOCK_ALLOC(p_hwfn
, &p_spq
->lock
);
535 p_hwfn
->p_spq
= p_spq
;
536 return ECORE_SUCCESS
;
539 ecore_chain_free(p_hwfn
->p_dev
, &p_spq
->chain
);
540 OSAL_FREE(p_hwfn
->p_dev
, p_spq
);
544 void ecore_spq_free(struct ecore_hwfn
*p_hwfn
)
546 struct ecore_spq
*p_spq
= p_hwfn
->p_spq
;
553 capacity
= ecore_chain_get_capacity(&p_spq
->chain
);
554 OSAL_DMA_FREE_COHERENT(p_hwfn
->p_dev
,
558 sizeof(struct ecore_spq_entry
));
561 ecore_chain_free(p_hwfn
->p_dev
, &p_spq
->chain
);
562 OSAL_SPIN_LOCK_DEALLOC(&p_spq
->lock
);
563 OSAL_FREE(p_hwfn
->p_dev
, p_spq
);
567 ecore_spq_get_entry(struct ecore_hwfn
*p_hwfn
, struct ecore_spq_entry
**pp_ent
)
569 struct ecore_spq
*p_spq
= p_hwfn
->p_spq
;
570 struct ecore_spq_entry
*p_ent
= OSAL_NULL
;
571 enum _ecore_status_t rc
= ECORE_SUCCESS
;
573 OSAL_SPIN_LOCK(&p_spq
->lock
);
575 if (OSAL_LIST_IS_EMPTY(&p_spq
->free_pool
)) {
576 p_ent
= OSAL_ZALLOC(p_hwfn
->p_dev
, GFP_ATOMIC
, sizeof(*p_ent
));
578 DP_NOTICE(p_hwfn
, true,
579 "Failed to allocate an SPQ entry for a pending"
584 p_ent
->queue
= &p_spq
->unlimited_pending
;
586 p_ent
= OSAL_LIST_FIRST_ENTRY(&p_spq
->free_pool
,
587 struct ecore_spq_entry
, list
);
588 OSAL_LIST_REMOVE_ENTRY(&p_ent
->list
, &p_spq
->free_pool
);
589 p_ent
->queue
= &p_spq
->pending
;
595 OSAL_SPIN_UNLOCK(&p_spq
->lock
);
599 /* Locked variant; Should be called while the SPQ lock is taken */
600 static void __ecore_spq_return_entry(struct ecore_hwfn
*p_hwfn
,
601 struct ecore_spq_entry
*p_ent
)
603 OSAL_LIST_PUSH_TAIL(&p_ent
->list
, &p_hwfn
->p_spq
->free_pool
);
606 void ecore_spq_return_entry(struct ecore_hwfn
*p_hwfn
,
607 struct ecore_spq_entry
*p_ent
)
609 OSAL_SPIN_LOCK(&p_hwfn
->p_spq
->lock
);
610 __ecore_spq_return_entry(p_hwfn
, p_ent
);
611 OSAL_SPIN_UNLOCK(&p_hwfn
->p_spq
->lock
);
/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @return enum _ecore_status_t
 */
628 static enum _ecore_status_t
629 ecore_spq_add_entry(struct ecore_hwfn
*p_hwfn
,
630 struct ecore_spq_entry
*p_ent
, enum spq_priority priority
)
632 struct ecore_spq
*p_spq
= p_hwfn
->p_spq
;
634 if (p_ent
->queue
== &p_spq
->unlimited_pending
) {
635 if (OSAL_LIST_IS_EMPTY(&p_spq
->free_pool
)) {
636 OSAL_LIST_PUSH_TAIL(&p_ent
->list
,
637 &p_spq
->unlimited_pending
);
638 p_spq
->unlimited_pending_count
++;
640 return ECORE_SUCCESS
;
643 struct ecore_spq_entry
*p_en2
;
645 p_en2
= OSAL_LIST_FIRST_ENTRY(&p_spq
->free_pool
,
646 struct ecore_spq_entry
,
648 OSAL_LIST_REMOVE_ENTRY(&p_en2
->list
, &p_spq
->free_pool
);
650 /* Copy the ring element physical pointer to the new
651 * entry, since we are about to override the entire ring
652 * entry and don't want to lose the pointer.
654 p_ent
->elem
.data_ptr
= p_en2
->elem
.data_ptr
;
658 /* EBLOCK responsible to free the allocated p_ent */
659 if (p_ent
->comp_mode
!= ECORE_SPQ_MODE_EBLOCK
)
660 OSAL_FREE(p_hwfn
->p_dev
, p_ent
);
666 /* entry is to be placed in 'pending' queue */
668 case ECORE_SPQ_PRIORITY_NORMAL
:
669 OSAL_LIST_PUSH_TAIL(&p_ent
->list
, &p_spq
->pending
);
670 p_spq
->normal_count
++;
672 case ECORE_SPQ_PRIORITY_HIGH
:
673 OSAL_LIST_PUSH_HEAD(&p_ent
->list
, &p_spq
->pending
);
680 return ECORE_SUCCESS
;
683 /***************************************************************************
685 ***************************************************************************/
687 u32
ecore_spq_get_cid(struct ecore_hwfn
*p_hwfn
)
690 return 0xffffffff; /* illegal */
691 return p_hwfn
->p_spq
->cid
;
694 /***************************************************************************
695 * Posting new Ramrods
696 ***************************************************************************/
698 static enum _ecore_status_t
ecore_spq_post_list(struct ecore_hwfn
*p_hwfn
,
702 struct ecore_spq
*p_spq
= p_hwfn
->p_spq
;
703 enum _ecore_status_t rc
;
705 /* TODO - implementation might be wasteful; will always keep room
706 * for an additional high priority ramrod (even if one is already
709 while (ecore_chain_get_elem_left(&p_spq
->chain
) > keep_reserve
&&
710 !OSAL_LIST_IS_EMPTY(head
)) {
711 struct ecore_spq_entry
*p_ent
=
712 OSAL_LIST_FIRST_ENTRY(head
, struct ecore_spq_entry
, list
);
713 if (p_ent
!= OSAL_NULL
) {
715 #pragma warning(suppress : 6011 28182)
717 OSAL_LIST_REMOVE_ENTRY(&p_ent
->list
, head
);
718 OSAL_LIST_PUSH_TAIL(&p_ent
->list
,
719 &p_spq
->completion_pending
);
720 p_spq
->comp_sent_count
++;
722 rc
= ecore_spq_hw_post(p_hwfn
, p_spq
, p_ent
);
724 OSAL_LIST_REMOVE_ENTRY(&p_ent
->list
,
725 &p_spq
->completion_pending
);
726 __ecore_spq_return_entry(p_hwfn
, p_ent
);
732 return ECORE_SUCCESS
;
735 static enum _ecore_status_t
ecore_spq_pend_post(struct ecore_hwfn
*p_hwfn
)
737 struct ecore_spq
*p_spq
= p_hwfn
->p_spq
;
738 struct ecore_spq_entry
*p_ent
= OSAL_NULL
;
740 while (!OSAL_LIST_IS_EMPTY(&p_spq
->free_pool
)) {
741 if (OSAL_LIST_IS_EMPTY(&p_spq
->unlimited_pending
))
744 p_ent
= OSAL_LIST_FIRST_ENTRY(&p_spq
->unlimited_pending
,
745 struct ecore_spq_entry
, list
);
750 #pragma warning(suppress : 6011)
752 OSAL_LIST_REMOVE_ENTRY(&p_ent
->list
, &p_spq
->unlimited_pending
);
754 ecore_spq_add_entry(p_hwfn
, p_ent
, p_ent
->priority
);
757 return ecore_spq_post_list(p_hwfn
,
758 &p_spq
->pending
, SPQ_HIGH_PRI_RESERVE_DEFAULT
);
761 enum _ecore_status_t
ecore_spq_post(struct ecore_hwfn
*p_hwfn
,
762 struct ecore_spq_entry
*p_ent
,
765 enum _ecore_status_t rc
= ECORE_SUCCESS
;
766 struct ecore_spq
*p_spq
= p_hwfn
? p_hwfn
->p_spq
: OSAL_NULL
;
767 bool b_ret_ent
= true;
773 DP_NOTICE(p_hwfn
, true, "Got a NULL pointer\n");
777 if (p_hwfn
->p_dev
->recov_in_prog
) {
778 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
,
779 "Recovery is in progress -> skip spq post"
780 " [cmd %02x protocol %02x]\n",
781 p_ent
->elem
.hdr
.cmd_id
, p_ent
->elem
.hdr
.protocol_id
);
782 /* Return success to let the flows to be completed successfully
783 * w/o any error handling.
785 return ECORE_SUCCESS
;
788 OSAL_SPIN_LOCK(&p_spq
->lock
);
790 /* Complete the entry */
791 rc
= ecore_spq_fill_entry(p_hwfn
, p_ent
);
793 /* Check return value after LOCK is taken for cleaner error flow */
797 /* Add the request to the pending queue */
798 rc
= ecore_spq_add_entry(p_hwfn
, p_ent
, p_ent
->priority
);
802 rc
= ecore_spq_pend_post(p_hwfn
);
804 /* Since it's possible that pending failed for a different
805 * entry [although unlikely], the failed entry was already
806 * dealt with; No need to return it here.
812 OSAL_SPIN_UNLOCK(&p_spq
->lock
);
814 if (p_ent
->comp_mode
== ECORE_SPQ_MODE_EBLOCK
) {
815 /* For entries in ECORE BLOCK mode, the completion code cannot
816 * perform the necessary cleanup - if it did, we couldn't
817 * access p_ent here to see whether it's successful or not.
818 * Thus, after gaining the answer perform the cleanup here.
820 rc
= ecore_spq_block(p_hwfn
, p_ent
, fw_return_code
,
821 p_ent
->queue
== &p_spq
->unlimited_pending
);
823 if (p_ent
->queue
== &p_spq
->unlimited_pending
) {
824 /* This is an allocated p_ent which does not need to
827 OSAL_FREE(p_hwfn
->p_dev
, p_ent
);
829 /* TBD: handle error flow and remove p_ent from
839 ecore_spq_return_entry(p_hwfn
, p_ent
);
844 OSAL_SPIN_LOCK(&p_spq
->lock
);
845 OSAL_LIST_REMOVE_ENTRY(&p_ent
->list
, &p_spq
->completion_pending
);
846 ecore_chain_return_produced(&p_spq
->chain
);
849 /* return to the free pool */
851 __ecore_spq_return_entry(p_hwfn
, p_ent
);
852 OSAL_SPIN_UNLOCK(&p_spq
->lock
);
857 enum _ecore_status_t
ecore_spq_completion(struct ecore_hwfn
*p_hwfn
,
860 union event_ring_data
*p_data
)
862 struct ecore_spq
*p_spq
;
863 struct ecore_spq_entry
*p_ent
= OSAL_NULL
;
864 struct ecore_spq_entry
*tmp
;
865 struct ecore_spq_entry
*found
= OSAL_NULL
;
866 enum _ecore_status_t rc
;
871 p_spq
= p_hwfn
->p_spq
;
875 OSAL_SPIN_LOCK(&p_spq
->lock
);
876 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent
,
878 &p_spq
->completion_pending
,
879 list
, struct ecore_spq_entry
) {
880 if (p_ent
->elem
.hdr
.echo
== echo
) {
881 OSAL_LIST_REMOVE_ENTRY(&p_ent
->list
,
882 &p_spq
->completion_pending
);
884 /* Avoid overriding of SPQ entries when getting
885 * out-of-order completions, by marking the completions
886 * in a bitmap and increasing the chain consumer only
887 * for the first successive completed entries.
889 SPQ_COMP_BMAP_SET_BIT(p_spq
, echo
);
890 while (SPQ_COMP_BMAP_TEST_BIT(p_spq
,
891 p_spq
->comp_bitmap_idx
)) {
892 SPQ_COMP_BMAP_CLEAR_BIT(p_spq
,
893 p_spq
->comp_bitmap_idx
);
894 p_spq
->comp_bitmap_idx
++;
895 ecore_chain_return_produced(&p_spq
->chain
);
903 /* This is debug and should be relatively uncommon - depends
904 * on scenarios which have mutliple per-PF sent ramrods.
906 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
,
907 "Got completion for echo %04x - doesn't match"
908 " echo %04x in completion pending list\n",
909 OSAL_LE16_TO_CPU(echo
),
910 OSAL_LE16_TO_CPU(p_ent
->elem
.hdr
.echo
));
913 /* Release lock before callback, as callback may post
914 * an additional ramrod.
916 OSAL_SPIN_UNLOCK(&p_spq
->lock
);
919 DP_NOTICE(p_hwfn
, true,
920 "Failed to find an entry this"
921 " EQE [echo %04x] completes\n",
922 OSAL_LE16_TO_CPU(echo
));
926 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
,
927 "Complete EQE [echo %04x]: func %p cookie %p)\n",
928 OSAL_LE16_TO_CPU(echo
),
929 p_ent
->comp_cb
.function
, p_ent
->comp_cb
.cookie
);
930 if (found
->comp_cb
.function
)
931 found
->comp_cb
.function(p_hwfn
, found
->comp_cb
.cookie
, p_data
,
934 DP_VERBOSE(p_hwfn
, ECORE_MSG_SPQ
,
935 "Got a completion without a callback function\n");
937 if ((found
->comp_mode
!= ECORE_SPQ_MODE_EBLOCK
) ||
938 (found
->queue
== &p_spq
->unlimited_pending
))
939 /* EBLOCK is responsible for returning its own entry into the
940 * free list, unless it originally added the entry into the
941 * unlimited pending list.
943 ecore_spq_return_entry(p_hwfn
, found
);
945 /* Attempt to post pending requests */
946 OSAL_SPIN_LOCK(&p_spq
->lock
);
947 rc
= ecore_spq_pend_post(p_hwfn
);
948 OSAL_SPIN_UNLOCK(&p_spq
->lock
);
953 enum _ecore_status_t
ecore_consq_alloc(struct ecore_hwfn
*p_hwfn
)
955 struct ecore_consq
*p_consq
;
957 /* Allocate ConsQ struct */
959 OSAL_ZALLOC(p_hwfn
->p_dev
, GFP_KERNEL
, sizeof(*p_consq
));
961 DP_NOTICE(p_hwfn
, true,
962 "Failed to allocate `struct ecore_consq'\n");
966 /* Allocate and initialize EQ chain */
967 if (ecore_chain_alloc(p_hwfn
->p_dev
,
968 ECORE_CHAIN_USE_TO_PRODUCE
,
969 ECORE_CHAIN_MODE_PBL
,
970 ECORE_CHAIN_CNT_TYPE_U16
,
971 ECORE_CHAIN_PAGE_SIZE
/ 0x80,
973 &p_consq
->chain
, OSAL_NULL
) != ECORE_SUCCESS
) {
974 DP_NOTICE(p_hwfn
, true, "Failed to allocate consq chain");
975 goto consq_allocate_fail
;
978 p_hwfn
->p_consq
= p_consq
;
979 return ECORE_SUCCESS
;
982 OSAL_FREE(p_hwfn
->p_dev
, p_consq
);
986 void ecore_consq_setup(struct ecore_hwfn
*p_hwfn
)
988 ecore_chain_reset(&p_hwfn
->p_consq
->chain
);
991 void ecore_consq_free(struct ecore_hwfn
*p_hwfn
)
993 if (!p_hwfn
->p_consq
)
996 ecore_chain_free(p_hwfn
->p_dev
, &p_hwfn
->p_consq
->chain
);
997 OSAL_FREE(p_hwfn
->p_dev
, p_hwfn
->p_consq
);