/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

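/* Busy-wait for a ramrod posted in EBLOCK/BLOCK mode to complete.
 * The completion callback above sets comp_done->done; this helper polls
 * that flag, and if the first polling window expires it requests an MCP
 * drain and polls once more before giving up with -EBUSY.
 */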
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
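/* Prepare an SPQ entry before it is posted: pick the completion callback
 * that matches the requested completion mode and dump the ramrod header
 * for debug. Returns -EINVAL for an unknown completion mode.
 */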
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
		   struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
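/* Initialize the HW context of the SPQ connection: set the relevant
 * XSTORM aggregation flags, select the physical queue for the LB_TC
 * traffic class, and program the SPQ and ConsQ base addresses taken from
 * their chains.
 */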
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

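/* Post a single SPQ entry to hardware: stamp the entry with the current
 * producer index as its echo value, copy it into the ring element produced
 * from the chain, and ring the XCM doorbell. The memory barriers keep the
 * ring element update and the doorbell write in order.
 */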
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq,
			   struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell has been rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

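/* Event-queue handler registered on the slow-path status block. Walks the
 * EQ chain up to the firmware consumer index, dispatching async events to
 * qed_async_event_completion() and everything else to qed_spq_completion(),
 * then calls qed_eq_prod_update() so the firmware sees how far the chain
 * has advanced.
 */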
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element of the chain, so the chain macros work correctly.
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

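/* Allocate the EQ control structure and its PBL-based chain of
 * event_ring_element entries, and register qed_eq_completion() as the
 * callback on the slow-path status block.
 */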
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn,
			    qed_eq_completion,
			    p_eq,
			    &p_eq->eq_sb_index,
			    &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
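/* (Re)initialize an already-allocated SPQ: reset the pending lists and
 * statistics, build the free pool by pointing each entry's data_ptr at the
 * DMA address of its ramrod data, acquire the CORE connection CID and
 * program the HW context, then reset the chain itself.
 */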
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	unsigned int i = 0;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	for (i = 0; i < p_spq->chain.capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

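/* Allocate the SPQ control structure, a single-page chain of
 * slow_path_element ring entries and a coherent DMA buffer holding one
 * qed_spq_entry (including its ramrod data) per ring element.
 */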
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	struct qed_spq_entry *p_virt = NULL;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    0, /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_spq->chain.capacity *
				    sizeof(struct qed_spq_entry),
				    &p_phys,
				    GFP_KERNEL);

	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (!p_spq)
		return;

	if (p_spq->p_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_spq->chain.capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt,
				  p_spq->p_phys);

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

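/* Acquire an SPQ entry. Entries normally come from the free pool and are
 * later queued on 'pending'; if the pool is empty an entry is allocated
 * with GFP_ATOMIC and marked for the 'unlimited_pending' list instead.
 */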
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry *p_ent,
		  enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry,
						 list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire
			 * ring entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated
			 * p_ent.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
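/* Post entries from the given list to hardware while the chain still has
 * more than 'keep_reserve' free elements, moving each posted entry to the
 * completion_pending list.
 */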
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head,
			     u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry,
					 list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

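/* Main entry point for posting a ramrod: fill the entry, add it to the
 * pending queue and try to flush pending entries to hardware. For
 * EBLOCK-mode entries, also block until the completion arrives and then
 * return the entry to the pool (or free it, if it came from the
 * unlimited_pending path).
 */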
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

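/* Called from EQ processing when a ramrod completes. Matches the EQE echo
 * against the completion_pending list, uses the completion bitmap to
 * return chain elements in order even for out-of-order completions,
 * invokes the entry's callback and then tries to post more pending
 * ramrods.
 */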
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
				 list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				bitmap_clear(p_spq->p_comp_bitmap,
					     p_spq->comp_bitmap_idx,
					     SPQ_RING_SIZE);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE completes\n");
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	/* EBLOCK is responsible for returning its own entry into the
	 * free list, unless it originally added the entry into the
	 * unlimited pending list.
	 */
	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

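/* Allocate the ConsQ control structure and its PBL-based chain; the chain
 * geometry is derived from the 0x80-byte ConsQ element size.
 */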
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80,
			    &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}