4 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "qbman_portal.h"
31 /* QBMan portal management command codes */
32 #define QBMAN_MC_ACQUIRE 0x30
33 #define QBMAN_WQCHAN_CONFIGURE 0x46
35 /* CINH register offsets */
36 #define QBMAN_CINH_SWP_EQCR_PI 0x800
37 #define QBMAN_CINH_SWP_EQCR_CI 0x840
38 #define QBMAN_CINH_SWP_EQAR 0x8c0
39 #define QBMAN_CINH_SWP_DQPI 0xa00
40 #define QBMAN_CINH_SWP_DCAP 0xac0
41 #define QBMAN_CINH_SWP_SDQCR 0xb00
42 #define QBMAN_CINH_SWP_RAR 0xcc0
43 #define QBMAN_CINH_SWP_ISR 0xe00
44 #define QBMAN_CINH_SWP_IER 0xe40
45 #define QBMAN_CINH_SWP_ISDR 0xe80
46 #define QBMAN_CINH_SWP_IIR 0xec0
48 /* CENA register offsets */
49 #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
50 #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
51 #define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
52 #define QBMAN_CENA_SWP_CR 0x600
53 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
54 #define QBMAN_CENA_SWP_VDQCR 0x780
55 #define QBMAN_CENA_SWP_EQCR_CI 0x840
57 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
58 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
60 /* QBMan FQ management command codes */
61 #define QBMAN_FQ_SCHEDULE 0x48
62 #define QBMAN_FQ_FORCE 0x49
63 #define QBMAN_FQ_XON 0x4d
64 #define QBMAN_FQ_XOFF 0x4e
66 /*******************************/
67 /* Pre-defined attribute codes */
68 /*******************************/
70 struct qb_attr_code code_generic_verb
= QB_CODE(0, 0, 7);
71 struct qb_attr_code code_generic_rslt
= QB_CODE(0, 8, 8);
73 /*************************/
74 /* SDQCR attribute codes */
75 /*************************/
77 /* we put these here because at least some of them are required by
80 struct qb_attr_code code_sdqcr_dct
= QB_CODE(0, 24, 2);
81 struct qb_attr_code code_sdqcr_fc
= QB_CODE(0, 29, 1);
82 struct qb_attr_code code_sdqcr_tok
= QB_CODE(0, 16, 8);
83 static struct qb_attr_code code_eq_dca_idx
;
84 #define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
85 enum qbman_sdqcr_dct
{
86 qbman_sdqcr_dct_null
= 0,
87 qbman_sdqcr_dct_prio_ics
,
88 qbman_sdqcr_dct_active_ics
,
89 qbman_sdqcr_dct_active
93 qbman_sdqcr_fc_one
= 0,
94 qbman_sdqcr_fc_up_to_3
= 1
97 struct qb_attr_code code_sdqcr_dqsrc
= QB_CODE(0, 0, 16);
99 /* We need to keep track of which SWP triggered a pull command
100 * so keep an array of portal IDs and use the token field to
101 * be able to find the proper portal
103 #define MAX_QBMAN_PORTALS 35
104 static struct qbman_swp
*portal_idx_map
[MAX_QBMAN_PORTALS
];
106 uint32_t qman_version
;
108 /*********************************/
109 /* Portal constructor/destructor */
110 /*********************************/
112 /* Software portals should always be in the power-on state when we initialise,
113 * due to the CCSR-based portal reset functionality that MC has.
115 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
116 * valid-bits, so we need to support a workaround where we don't trust
117 * valid-bits when detecting new entries until any stale ring entries have been
118 * overwritten at least once. The idea is that we read PI for the first few
119 * entries, then switch to valid-bit after that. The trick is to clear the
120 * bug-work-around boolean once the PI wraps around the ring for the first time.
122 * Note: this still carries a slight additional cost once the decrementer hits
125 struct qbman_swp
*qbman_swp_init(const struct qbman_swp_desc
*d
)
129 struct qbman_swp
*p
= kmalloc(sizeof(*p
), GFP_KERNEL
);
134 #ifdef QBMAN_CHECKING
135 p
->mc
.check
= swp_mc_can_start
;
137 p
->mc
.valid_bit
= QB_VALID_BIT
;
139 qb_attr_code_encode(&code_sdqcr_dct
, &p
->sdq
, qbman_sdqcr_dct_prio_ics
);
140 qb_attr_code_encode(&code_sdqcr_fc
, &p
->sdq
, qbman_sdqcr_fc_up_to_3
);
141 qb_attr_code_encode(&code_sdqcr_tok
, &p
->sdq
, 0xbb);
142 atomic_set(&p
->vdq
.busy
, 1);
143 p
->vdq
.valid_bit
= QB_VALID_BIT
;
144 p
->dqrr
.next_idx
= 0;
145 p
->dqrr
.valid_bit
= QB_VALID_BIT
;
146 qman_version
= p
->desc
.qman_version
;
147 if ((qman_version
& 0xFFFF0000) < QMAN_REV_4100
) {
148 p
->dqrr
.dqrr_size
= 4;
149 p
->dqrr
.reset_bug
= 1;
150 /* Set size of DQRR to 4, encoded in 2 bits */
151 code_eq_dca_idx
= (struct qb_attr_code
)QB_CODE(0, 8, 2);
153 p
->dqrr
.dqrr_size
= 8;
154 p
->dqrr
.reset_bug
= 0;
155 /* Set size of DQRR to 8, encoded in 3 bits */
156 code_eq_dca_idx
= (struct qb_attr_code
)QB_CODE(0, 8, 3);
159 ret
= qbman_swp_sys_init(&p
->sys
, d
, p
->dqrr
.dqrr_size
);
162 pr_err("qbman_swp_sys_init() failed %d\n", ret
);
165 /* SDQCR needs to be initialized to 0 when no channels are
166 * being dequeued from or else the QMan HW will indicate an
167 * error. The values that were calculated above will be
168 * applied when dequeues from a specific channel are enabled
170 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_SDQCR
, 0);
171 eqcr_pi
= qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_EQCR_PI
);
172 p
->eqcr
.pi
= eqcr_pi
& 0xF;
173 p
->eqcr
.pi_vb
= eqcr_pi
& QB_VALID_BIT
;
174 p
->eqcr
.ci
= qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_EQCR_CI
) & 0xF;
175 p
->eqcr
.available
= QBMAN_EQCR_SIZE
- qm_cyc_diff(QBMAN_EQCR_SIZE
,
176 p
->eqcr
.ci
, p
->eqcr
.pi
);
178 portal_idx_map
[p
->desc
.idx
] = p
;
182 void qbman_swp_finish(struct qbman_swp
*p
)
184 #ifdef QBMAN_CHECKING
185 QBMAN_BUG_ON(p
->mc
.check
!= swp_mc_can_start
);
187 qbman_swp_sys_finish(&p
->sys
);
188 portal_idx_map
[p
->desc
.idx
] = NULL
;
192 const struct qbman_swp_desc
*qbman_swp_get_desc(struct qbman_swp
*p
)
201 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp
*p
)
203 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_ISDR
);
206 void qbman_swp_interrupt_set_vanish(struct qbman_swp
*p
, uint32_t mask
)
208 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_ISDR
, mask
);
211 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp
*p
)
213 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_ISR
);
216 void qbman_swp_interrupt_clear_status(struct qbman_swp
*p
, uint32_t mask
)
218 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_ISR
, mask
);
221 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp
*p
)
223 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_IER
);
226 void qbman_swp_interrupt_set_trigger(struct qbman_swp
*p
, uint32_t mask
)
228 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_IER
, mask
);
231 int qbman_swp_interrupt_get_inhibit(struct qbman_swp
*p
)
233 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_IIR
);
236 void qbman_swp_interrupt_set_inhibit(struct qbman_swp
*p
, int inhibit
)
238 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_IIR
, inhibit
? 0xffffffff : 0);
241 /***********************/
242 /* Management commands */
243 /***********************/
246 * Internal code common to all types of management commands.
249 void *qbman_swp_mc_start(struct qbman_swp
*p
)
252 #ifdef QBMAN_CHECKING
253 QBMAN_BUG_ON(p
->mc
.check
!= swp_mc_can_start
);
255 ret
= qbman_cena_write_start(&p
->sys
, QBMAN_CENA_SWP_CR
);
256 #ifdef QBMAN_CHECKING
258 p
->mc
.check
= swp_mc_can_submit
;
263 void qbman_swp_mc_submit(struct qbman_swp
*p
, void *cmd
, uint32_t cmd_verb
)
266 #ifdef QBMAN_CHECKING
267 QBMAN_BUG_ON(!(p
->mc
.check
!= swp_mc_can_submit
));
269 /* TBD: "|=" is going to hurt performance. Need to move as many fields
270 * out of word zero, and for those that remain, the "OR" needs to occur
271 * at the caller side. This debug check helps to catch cases where the
272 * caller wants to OR but has forgotten to do so.
274 QBMAN_BUG_ON((*v
& cmd_verb
) != *v
);
275 *v
= cmd_verb
| p
->mc
.valid_bit
;
276 qbman_cena_write_complete(&p
->sys
, QBMAN_CENA_SWP_CR
, cmd
);
277 #ifdef QBMAN_CHECKING
278 p
->mc
.check
= swp_mc_can_poll
;
282 void *qbman_swp_mc_result(struct qbman_swp
*p
)
285 #ifdef QBMAN_CHECKING
286 QBMAN_BUG_ON(p
->mc
.check
!= swp_mc_can_poll
);
288 qbman_cena_invalidate_prefetch(&p
->sys
,
289 QBMAN_CENA_SWP_RR(p
->mc
.valid_bit
));
290 ret
= qbman_cena_read(&p
->sys
, QBMAN_CENA_SWP_RR(p
->mc
.valid_bit
));
291 /* Remove the valid-bit - command completed iff the rest is non-zero */
292 verb
= ret
[0] & ~QB_VALID_BIT
;
295 #ifdef QBMAN_CHECKING
296 p
->mc
.check
= swp_mc_can_start
;
298 p
->mc
.valid_bit
^= QB_VALID_BIT
;
306 /* These should be const, eventually */
307 static struct qb_attr_code code_eq_cmd
= QB_CODE(0, 0, 2);
308 static struct qb_attr_code code_eq_eqdi
= QB_CODE(0, 3, 1);
309 static struct qb_attr_code code_eq_dca_en
= QB_CODE(0, 15, 1);
310 static struct qb_attr_code code_eq_dca_pk
= QB_CODE(0, 14, 1);
311 /* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */
312 static struct qb_attr_code code_eq_orp_en
= QB_CODE(0, 2, 1);
313 static struct qb_attr_code code_eq_orp_is_nesn
= QB_CODE(0, 31, 1);
314 static struct qb_attr_code code_eq_orp_nlis
= QB_CODE(0, 30, 1);
315 static struct qb_attr_code code_eq_orp_seqnum
= QB_CODE(0, 16, 14);
316 static struct qb_attr_code code_eq_opr_id
= QB_CODE(1, 0, 16);
317 static struct qb_attr_code code_eq_tgt_id
= QB_CODE(2, 0, 24);
318 /* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
319 static struct qb_attr_code code_eq_qd_en
= QB_CODE(0, 4, 1);
320 static struct qb_attr_code code_eq_qd_bin
= QB_CODE(4, 0, 16);
321 static struct qb_attr_code code_eq_qd_pri
= QB_CODE(4, 16, 4);
322 static struct qb_attr_code code_eq_rsp_stash
= QB_CODE(5, 16, 1);
323 static struct qb_attr_code code_eq_rsp_id
= QB_CODE(5, 24, 8);
324 static struct qb_attr_code code_eq_rsp_lo
= QB_CODE(6, 0, 32);
/* Enqueue-command type, encoded in the 2-bit code_eq_cmd field. */
enum qbman_eq_cmd_e {
	/* No enqueue, primarily for plugging ORP gaps for dropped frames.
	 * NOTE(review): this first member was missing from the reviewed
	 * text but is referenced by qbman_eq_desc_set_orp_hole()/_nesn().
	 */
	qbman_eq_cmd_empty,
	/* DMA an enqueue response once complete */
	qbman_eq_cmd_respond,
	/* DMA an enqueue response only if the enqueue fails */
	qbman_eq_cmd_respond_reject
};
335 void qbman_eq_desc_clear(struct qbman_eq_desc
*d
)
337 memset(d
, 0, sizeof(*d
));
340 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc
*d
, int respond_success
)
342 uint32_t *cl
= qb_cl(d
);
344 qb_attr_code_encode(&code_eq_orp_en
, cl
, 0);
345 qb_attr_code_encode(&code_eq_cmd
, cl
,
346 respond_success
? qbman_eq_cmd_respond
:
347 qbman_eq_cmd_respond_reject
);
350 void qbman_eq_desc_set_orp(struct qbman_eq_desc
*d
, int respond_success
,
351 uint32_t opr_id
, uint32_t seqnum
, int incomplete
)
353 uint32_t *cl
= qb_cl(d
);
355 qb_attr_code_encode(&code_eq_orp_en
, cl
, 1);
356 qb_attr_code_encode(&code_eq_cmd
, cl
,
357 respond_success
? qbman_eq_cmd_respond
:
358 qbman_eq_cmd_respond_reject
);
359 qb_attr_code_encode(&code_eq_opr_id
, cl
, opr_id
);
360 qb_attr_code_encode(&code_eq_orp_seqnum
, cl
, seqnum
);
361 qb_attr_code_encode(&code_eq_orp_nlis
, cl
, !!incomplete
);
364 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc
*d
, uint32_t opr_id
,
367 uint32_t *cl
= qb_cl(d
);
369 qb_attr_code_encode(&code_eq_orp_en
, cl
, 1);
370 qb_attr_code_encode(&code_eq_cmd
, cl
, qbman_eq_cmd_empty
);
371 qb_attr_code_encode(&code_eq_opr_id
, cl
, opr_id
);
372 qb_attr_code_encode(&code_eq_orp_seqnum
, cl
, seqnum
);
373 qb_attr_code_encode(&code_eq_orp_nlis
, cl
, 0);
374 qb_attr_code_encode(&code_eq_orp_is_nesn
, cl
, 0);
377 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc
*d
, uint32_t opr_id
,
380 uint32_t *cl
= qb_cl(d
);
382 qb_attr_code_encode(&code_eq_orp_en
, cl
, 1);
383 qb_attr_code_encode(&code_eq_cmd
, cl
, qbman_eq_cmd_empty
);
384 qb_attr_code_encode(&code_eq_opr_id
, cl
, opr_id
);
385 qb_attr_code_encode(&code_eq_orp_seqnum
, cl
, seqnum
);
386 qb_attr_code_encode(&code_eq_orp_nlis
, cl
, 0);
387 qb_attr_code_encode(&code_eq_orp_is_nesn
, cl
, 1);
390 void qbman_eq_desc_set_response(struct qbman_eq_desc
*d
,
391 dma_addr_t storage_phys
,
394 uint32_t *cl
= qb_cl(d
);
396 qb_attr_code_encode_64(&code_eq_rsp_lo
, (uint64_t *)cl
, storage_phys
);
397 qb_attr_code_encode(&code_eq_rsp_stash
, cl
, !!stash
);
400 void qbman_eq_desc_set_token(struct qbman_eq_desc
*d
, uint8_t token
)
402 uint32_t *cl
= qb_cl(d
);
404 qb_attr_code_encode(&code_eq_rsp_id
, cl
, (uint32_t)token
);
407 void qbman_eq_desc_set_fq(struct qbman_eq_desc
*d
, uint32_t fqid
)
409 uint32_t *cl
= qb_cl(d
);
411 qb_attr_code_encode(&code_eq_qd_en
, cl
, 0);
412 qb_attr_code_encode(&code_eq_tgt_id
, cl
, fqid
);
415 void qbman_eq_desc_set_qd(struct qbman_eq_desc
*d
, uint32_t qdid
,
416 uint32_t qd_bin
, uint32_t qd_prio
)
418 uint32_t *cl
= qb_cl(d
);
420 qb_attr_code_encode(&code_eq_qd_en
, cl
, 1);
421 qb_attr_code_encode(&code_eq_tgt_id
, cl
, qdid
);
422 qb_attr_code_encode(&code_eq_qd_bin
, cl
, qd_bin
);
423 qb_attr_code_encode(&code_eq_qd_pri
, cl
, qd_prio
);
426 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc
*d
, int enable
)
428 uint32_t *cl
= qb_cl(d
);
430 qb_attr_code_encode(&code_eq_eqdi
, cl
, !!enable
);
433 void qbman_eq_desc_set_dca(struct qbman_eq_desc
*d
, int enable
,
434 uint32_t dqrr_idx
, int park
)
436 uint32_t *cl
= qb_cl(d
);
438 qb_attr_code_encode(&code_eq_dca_en
, cl
, !!enable
);
440 qb_attr_code_encode(&code_eq_dca_pk
, cl
, !!park
);
441 qb_attr_code_encode(&code_eq_dca_idx
, cl
, dqrr_idx
);
445 #define EQAR_IDX(eqar) ((eqar) & 0x7)
446 #define EQAR_VB(eqar) ((eqar) & 0x80)
447 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
448 static int qbman_swp_enqueue_array_mode(struct qbman_swp
*s
,
449 const struct qbman_eq_desc
*d
,
450 const struct qbman_fd
*fd
)
453 const uint32_t *cl
= qb_cl(d
);
454 uint32_t eqar
= qbman_cinh_read(&s
->sys
, QBMAN_CINH_SWP_EQAR
);
456 pr_debug("EQAR=%08x\n", eqar
);
457 if (!EQAR_SUCCESS(eqar
))
459 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
460 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar
)));
461 word_copy(&p
[1], &cl
[1], 7);
462 word_copy(&p
[8], fd
, sizeof(*fd
) >> 2);
463 /* Set the verb byte, have to substitute in the valid-bit */
465 p
[0] = cl
[0] | EQAR_VB(eqar
);
466 qbman_cena_write_complete_wo_shadow(&s
->sys
,
467 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar
)));
471 static int qbman_swp_enqueue_ring_mode(struct qbman_swp
*s
,
472 const struct qbman_eq_desc
*d
,
473 const struct qbman_fd
*fd
)
476 const uint32_t *cl
= qb_cl(d
);
480 if (!s
->eqcr
.available
) {
481 eqcr_ci
= s
->eqcr
.ci
;
482 s
->eqcr
.ci
= qbman_cena_read_reg(&s
->sys
,
483 QBMAN_CENA_SWP_EQCR_CI
) & 0xF;
484 diff
= qm_cyc_diff(QBMAN_EQCR_SIZE
,
485 eqcr_ci
, s
->eqcr
.ci
);
486 s
->eqcr
.available
+= diff
;
491 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
492 QBMAN_CENA_SWP_EQCR(s
->eqcr
.pi
& 7));
493 word_copy(&p
[1], &cl
[1], 7);
494 word_copy(&p
[8], fd
, sizeof(*fd
) >> 2);
496 /* Set the verb byte, have to substitute in the valid-bit */
497 p
[0] = cl
[0] | s
->eqcr
.pi_vb
;
498 qbman_cena_write_complete_wo_shadow(&s
->sys
,
499 QBMAN_CENA_SWP_EQCR(s
->eqcr
.pi
& 7));
503 if (!(s
->eqcr
.pi
& 7))
504 s
->eqcr
.pi_vb
^= QB_VALID_BIT
;
508 int qbman_swp_fill_ring(struct qbman_swp
*s
,
509 const struct qbman_eq_desc
*d
,
510 const struct qbman_fd
*fd
,
511 __attribute__((unused
)) uint8_t burst_index
)
514 const uint32_t *cl
= qb_cl(d
);
518 if (!s
->eqcr
.available
) {
519 eqcr_ci
= s
->eqcr
.ci
;
520 s
->eqcr
.ci
= qbman_cena_read_reg(&s
->sys
,
521 QBMAN_CENA_SWP_EQCR_CI
) & 0xF;
522 diff
= qm_cyc_diff(QBMAN_EQCR_SIZE
,
523 eqcr_ci
, s
->eqcr
.ci
);
524 s
->eqcr
.available
+= diff
;
528 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
529 QBMAN_CENA_SWP_EQCR((s
->eqcr
.pi
/* +burst_index */) & 7));
530 /* word_copy(&p[1], &cl[1], 7); */
531 memcpy(&p
[1], &cl
[1], 7 * 4);
532 /* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
533 memcpy(&p
[8], fd
, sizeof(struct qbman_fd
));
536 p
[0] = cl
[0] | s
->eqcr
.pi_vb
;
541 if (!(s
->eqcr
.pi
& 7))
542 s
->eqcr
.pi_vb
^= QB_VALID_BIT
;
547 int qbman_swp_flush_ring(struct qbman_swp
*s
)
549 void *ptr
= s
->sys
.addr_cena
;
552 dcbf((uint64_t)ptr
+ 0x40);
553 dcbf((uint64_t)ptr
+ 0x80);
554 dcbf((uint64_t)ptr
+ 0xc0);
555 dcbf((uint64_t)ptr
+ 0x100);
556 dcbf((uint64_t)ptr
+ 0x140);
557 dcbf((uint64_t)ptr
+ 0x180);
558 dcbf((uint64_t)ptr
+ 0x1c0);
563 void qbman_sync(void)
568 int qbman_swp_enqueue(struct qbman_swp
*s
, const struct qbman_eq_desc
*d
,
569 const struct qbman_fd
*fd
)
571 if (s
->sys
.eqcr_mode
== qman_eqcr_vb_array
)
572 return qbman_swp_enqueue_array_mode(s
, d
, fd
);
573 else /* Use ring mode by default */
574 return qbman_swp_enqueue_ring_mode(s
, d
, fd
);
577 /*************************/
578 /* Static (push) dequeue */
579 /*************************/
581 void qbman_swp_push_get(struct qbman_swp
*s
, uint8_t channel_idx
, int *enabled
)
583 struct qb_attr_code code
= CODE_SDQCR_DQSRC(channel_idx
);
585 QBMAN_BUG_ON(channel_idx
> 15);
586 *enabled
= (int)qb_attr_code_decode(&code
, &s
->sdq
);
589 void qbman_swp_push_set(struct qbman_swp
*s
, uint8_t channel_idx
, int enable
)
592 struct qb_attr_code code
= CODE_SDQCR_DQSRC(channel_idx
);
594 QBMAN_BUG_ON(channel_idx
> 15);
595 qb_attr_code_encode(&code
, &s
->sdq
, !!enable
);
596 /* Read make the complete src map. If no channels are enabled
597 * the SDQCR must be 0 or else QMan will assert errors
599 dqsrc
= (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc
, &s
->sdq
);
601 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_SDQCR
, s
->sdq
);
603 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_SDQCR
, 0);
606 /***************************/
607 /* Volatile (pull) dequeue */
608 /***************************/
610 /* These should be const, eventually */
611 static struct qb_attr_code code_pull_dct
= QB_CODE(0, 0, 2);
612 static struct qb_attr_code code_pull_dt
= QB_CODE(0, 2, 2);
613 static struct qb_attr_code code_pull_rls
= QB_CODE(0, 4, 1);
614 static struct qb_attr_code code_pull_stash
= QB_CODE(0, 5, 1);
615 static struct qb_attr_code code_pull_numframes
= QB_CODE(0, 8, 4);
616 static struct qb_attr_code code_pull_token
= QB_CODE(0, 16, 8);
617 static struct qb_attr_code code_pull_dqsource
= QB_CODE(1, 0, 24);
618 static struct qb_attr_code code_pull_rsp_lo
= QB_CODE(2, 0, 32);
/* Pull dequeue-type selector (code_pull_dt).
 * NOTE(review): the enum header and the qb_pull_dt_channel member were
 * missing from the reviewed text; the latter is referenced by
 * qbman_pull_desc_set_channel(), so both have been restored.
 */
enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};
626 void qbman_pull_desc_clear(struct qbman_pull_desc
*d
)
628 memset(d
, 0, sizeof(*d
));
631 void qbman_pull_desc_set_storage(struct qbman_pull_desc
*d
,
632 struct qbman_result
*storage
,
633 dma_addr_t storage_phys
,
636 uint32_t *cl
= qb_cl(d
);
637 /* Squiggle the pointer 'storage' into the extra 2 words of the
638 * descriptor (which aren't copied to the hw command)
640 *(void **)&cl
[4] = storage
;
642 qb_attr_code_encode(&code_pull_rls
, cl
, 0);
645 qb_attr_code_encode(&code_pull_rls
, cl
, 1);
646 qb_attr_code_encode(&code_pull_stash
, cl
, !!stash
);
647 qb_attr_code_encode_64(&code_pull_rsp_lo
, (uint64_t *)cl
, storage_phys
);
650 void qbman_pull_desc_set_numframes(struct qbman_pull_desc
*d
, uint8_t numframes
)
652 uint32_t *cl
= qb_cl(d
);
654 QBMAN_BUG_ON(!numframes
|| (numframes
> 16));
655 qb_attr_code_encode(&code_pull_numframes
, cl
,
656 (uint32_t)(numframes
- 1));
659 void qbman_pull_desc_set_token(struct qbman_pull_desc
*d
, uint8_t token
)
661 uint32_t *cl
= qb_cl(d
);
663 qb_attr_code_encode(&code_pull_token
, cl
, token
);
666 void qbman_pull_desc_set_fq(struct qbman_pull_desc
*d
, uint32_t fqid
)
668 uint32_t *cl
= qb_cl(d
);
670 qb_attr_code_encode(&code_pull_dct
, cl
, 1);
671 qb_attr_code_encode(&code_pull_dt
, cl
, qb_pull_dt_framequeue
);
672 qb_attr_code_encode(&code_pull_dqsource
, cl
, fqid
);
675 void qbman_pull_desc_set_wq(struct qbman_pull_desc
*d
, uint32_t wqid
,
676 enum qbman_pull_type_e dct
)
678 uint32_t *cl
= qb_cl(d
);
680 qb_attr_code_encode(&code_pull_dct
, cl
, dct
);
681 qb_attr_code_encode(&code_pull_dt
, cl
, qb_pull_dt_workqueue
);
682 qb_attr_code_encode(&code_pull_dqsource
, cl
, wqid
);
685 void qbman_pull_desc_set_channel(struct qbman_pull_desc
*d
, uint32_t chid
,
686 enum qbman_pull_type_e dct
)
688 uint32_t *cl
= qb_cl(d
);
690 qb_attr_code_encode(&code_pull_dct
, cl
, dct
);
691 qb_attr_code_encode(&code_pull_dt
, cl
, qb_pull_dt_channel
);
692 qb_attr_code_encode(&code_pull_dqsource
, cl
, chid
);
695 int qbman_swp_pull(struct qbman_swp
*s
, struct qbman_pull_desc
*d
)
698 uint32_t *cl
= qb_cl(d
);
700 if (!atomic_dec_and_test(&s
->vdq
.busy
)) {
701 atomic_inc(&s
->vdq
.busy
);
704 s
->vdq
.storage
= *(void **)&cl
[4];
705 /* We use portal index +1 as token so that 0 still indicates
706 * that the result isn't valid yet.
708 qb_attr_code_encode(&code_pull_token
, cl
, s
->desc
.idx
+ 1);
709 p
= qbman_cena_write_start_wo_shadow(&s
->sys
, QBMAN_CENA_SWP_VDQCR
);
710 word_copy(&p
[1], &cl
[1], 3);
711 /* Set the verb byte, have to substitute in the valid-bit */
713 p
[0] = cl
[0] | s
->vdq
.valid_bit
;
714 s
->vdq
.valid_bit
^= QB_VALID_BIT
;
715 qbman_cena_write_complete_wo_shadow(&s
->sys
, QBMAN_CENA_SWP_VDQCR
);
723 static struct qb_attr_code code_dqrr_verb
= QB_CODE(0, 0, 8);
724 static struct qb_attr_code code_dqrr_response
= QB_CODE(0, 0, 7);
725 static struct qb_attr_code code_dqrr_stat
= QB_CODE(0, 8, 8);
726 static struct qb_attr_code code_dqrr_seqnum
= QB_CODE(0, 16, 14);
727 static struct qb_attr_code code_dqrr_odpid
= QB_CODE(1, 0, 16);
728 /* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
729 static struct qb_attr_code code_dqrr_fqid
= QB_CODE(2, 0, 24);
730 static struct qb_attr_code code_dqrr_byte_count
= QB_CODE(4, 0, 32);
731 static struct qb_attr_code code_dqrr_frame_count
= QB_CODE(5, 0, 24);
732 static struct qb_attr_code code_dqrr_ctx_lo
= QB_CODE(6, 0, 32);
734 #define QBMAN_RESULT_DQ 0x60
735 #define QBMAN_RESULT_FQRN 0x21
736 #define QBMAN_RESULT_FQRNI 0x22
737 #define QBMAN_RESULT_FQPN 0x24
738 #define QBMAN_RESULT_FQDAN 0x25
739 #define QBMAN_RESULT_CDAN 0x26
740 #define QBMAN_RESULT_CSCN_MEM 0x27
741 #define QBMAN_RESULT_CGCU 0x28
742 #define QBMAN_RESULT_BPSCN 0x29
743 #define QBMAN_RESULT_CSCN_WQ 0x2a
745 static struct qb_attr_code code_dqpi_pi
= QB_CODE(0, 0, 4);
747 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
748 * only once, so repeated calls can return a sequence of DQRR entries, without
749 * requiring they be consumed immediately or in any particular order.
751 const struct qbman_result
*qbman_swp_dqrr_next(struct qbman_swp
*s
)
754 uint32_t response_verb
;
756 const struct qbman_result
*dq
;
759 /* Before using valid-bit to detect if something is there, we have to
760 * handle the case of the DQRR reset bug...
762 if (unlikely(s
->dqrr
.reset_bug
)) {
763 /* We pick up new entries by cache-inhibited producer index,
764 * which means that a non-coherent mapping would require us to
765 * invalidate and read *only* once that PI has indicated that
766 * there's an entry here. The first trip around the DQRR ring
767 * will be much less efficient than all subsequent trips around
770 uint32_t dqpi
= qbman_cinh_read(&s
->sys
, QBMAN_CINH_SWP_DQPI
);
771 uint32_t pi
= qb_attr_code_decode(&code_dqpi_pi
, &dqpi
);
772 /* there are new entries iff pi != next_idx */
773 if (pi
== s
->dqrr
.next_idx
)
775 /* if next_idx is/was the last ring index, and 'pi' is
776 * different, we can disable the workaround as all the ring
777 * entries have now been DMA'd to so valid-bit checking is
778 * repaired. Note: this logic needs to be based on next_idx
779 * (which increments one at a time), rather than on pi (which
780 * can burst and wrap-around between our snapshots of it).
782 QBMAN_BUG_ON((s
->dqrr
.dqrr_size
- 1) < 0);
783 if (s
->dqrr
.next_idx
== (s
->dqrr
.dqrr_size
- 1u)) {
784 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
785 s
->dqrr
.next_idx
, pi
);
786 s
->dqrr
.reset_bug
= 0;
788 qbman_cena_invalidate_prefetch(&s
->sys
,
789 QBMAN_CENA_SWP_DQRR(s
->dqrr
.next_idx
));
791 dq
= qbman_cena_read_wo_shadow(&s
->sys
,
792 QBMAN_CENA_SWP_DQRR(s
->dqrr
.next_idx
));
794 verb
= qb_attr_code_decode(&code_dqrr_verb
, p
);
795 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
796 * in the DQRR reset bug workaround, we shouldn't need to skip these
797 * check, because we've already determined that a new entry is available
798 * and we've invalidated the cacheline before reading it, so the
799 * valid-bit behaviour is repaired and should tell us what we already
800 * knew from reading PI.
802 if ((verb
& QB_VALID_BIT
) != s
->dqrr
.valid_bit
)
805 /* There's something there. Move "next_idx" attention to the next ring
806 * entry (and prefetch it) before returning what we found.
809 if (s
->dqrr
.next_idx
== s
->dqrr
.dqrr_size
) {
810 s
->dqrr
.next_idx
= 0;
811 s
->dqrr
.valid_bit
^= QB_VALID_BIT
;
813 /* If this is the final response to a volatile dequeue command
814 * indicate that the vdq is no longer busy.
816 flags
= qbman_result_DQ_flags(dq
);
817 response_verb
= qb_attr_code_decode(&code_dqrr_response
, &verb
);
818 if ((response_verb
== QBMAN_RESULT_DQ
) &&
819 (flags
& QBMAN_DQ_STAT_VOLATILE
) &&
820 (flags
& QBMAN_DQ_STAT_EXPIRED
))
821 atomic_inc(&s
->vdq
.busy
);
826 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
827 void qbman_swp_dqrr_consume(struct qbman_swp
*s
,
828 const struct qbman_result
*dq
)
830 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_DCAP
, QBMAN_IDX_FROM_DQRR(dq
));
833 /*********************************/
834 /* Polling user-provided storage */
835 /*********************************/
837 int qbman_result_has_new_result(__attribute__((unused
)) struct qbman_swp
*s
,
838 const struct qbman_result
*dq
)
840 /* To avoid converting the little-endian DQ entry to host-endian prior
841 * to us knowing whether there is a valid entry or not (and run the
842 * risk of corrupting the incoming hardware LE write), we detect in
843 * hardware endianness rather than host. This means we need a different
844 * "code" depending on whether we are BE or LE in software, which is
845 * where DQRR_TOK_OFFSET comes in...
847 static struct qb_attr_code code_dqrr_tok_detect
=
848 QB_CODE(0, DQRR_TOK_OFFSET
, 8);
849 /* The user trying to poll for a result treats "dq" as const. It is
850 * however the same address that was provided to us non-const in the
851 * first place, for directing hardware DMA to. So we can cast away the
852 * const because it is mutable from our perspective.
854 uint32_t *p
= (uint32_t *)(unsigned long)qb_cl(dq
);
857 token
= qb_attr_code_decode(&code_dqrr_tok_detect
, &p
[1]);
860 /* Entry is valid - overwrite token back to 0 so
861 * a) If this memory is reused tokesn will be 0
862 * b) If someone calls "has_new_result()" again on this entry it
863 * will not appear to be new
865 qb_attr_code_encode(&code_dqrr_tok_detect
, &p
[1], 0);
867 /* Only now do we convert from hardware to host endianness. Also, as we
868 * are returning success, the user has promised not to call us again, so
869 * there's no risk of us converting the endianness twice...
875 int qbman_check_command_complete(struct qbman_swp
*s
,
876 const struct qbman_result
*dq
)
878 /* To avoid converting the little-endian DQ entry to host-endian prior
879 * to us knowing whether there is a valid entry or not (and run the
880 * risk of corrupting the incoming hardware LE write), we detect in
881 * hardware endianness rather than host. This means we need a different
882 * "code" depending on whether we are BE or LE in software, which is
883 * where DQRR_TOK_OFFSET comes in...
885 static struct qb_attr_code code_dqrr_tok_detect
=
886 QB_CODE(0, DQRR_TOK_OFFSET
, 8);
887 /* The user trying to poll for a result treats "dq" as const. It is
888 * however the same address that was provided to us non-const in the
889 * first place, for directing hardware DMA to. So we can cast away the
890 * const because it is mutable from our perspective.
892 uint32_t *p
= (uint32_t *)(unsigned long)qb_cl(dq
);
895 token
= qb_attr_code_decode(&code_dqrr_tok_detect
, &p
[1]);
898 /* TODO: Remove qbman_swp from parameters and make it a local
899 * once we've tested the reserve portal map change
901 s
= portal_idx_map
[token
- 1];
902 /* When token is set it indicates that VDQ command has been fetched
903 * by qbman and is working on it. It is safe for software to issue
904 * another VDQ command, so incrementing the busy variable.
906 if (s
->vdq
.storage
== dq
) {
907 s
->vdq
.storage
= NULL
;
908 atomic_inc(&s
->vdq
.busy
);
913 /********************************/
914 /* Categorising qbman results */
915 /********************************/
917 static struct qb_attr_code code_result_in_mem
=
918 QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM
, 7);
920 static inline int __qbman_result_is_x(const struct qbman_result
*dq
,
923 const uint32_t *p
= qb_cl(dq
);
924 uint32_t response_verb
= qb_attr_code_decode(&code_dqrr_response
, p
);
926 return (response_verb
== x
);
929 static inline int __qbman_result_is_x_in_mem(const struct qbman_result
*dq
,
932 const uint32_t *p
= qb_cl(dq
);
933 uint32_t response_verb
= qb_attr_code_decode(&code_result_in_mem
, p
);
935 return (response_verb
== x
);
938 int qbman_result_is_DQ(const struct qbman_result
*dq
)
940 return __qbman_result_is_x(dq
, QBMAN_RESULT_DQ
);
943 int qbman_result_is_FQDAN(const struct qbman_result
*dq
)
945 return __qbman_result_is_x(dq
, QBMAN_RESULT_FQDAN
);
948 int qbman_result_is_CDAN(const struct qbman_result
*dq
)
950 return __qbman_result_is_x(dq
, QBMAN_RESULT_CDAN
);
953 int qbman_result_is_CSCN(const struct qbman_result
*dq
)
955 return __qbman_result_is_x_in_mem(dq
, QBMAN_RESULT_CSCN_MEM
) ||
956 __qbman_result_is_x(dq
, QBMAN_RESULT_CSCN_WQ
);
959 int qbman_result_is_BPSCN(const struct qbman_result
*dq
)
961 return __qbman_result_is_x_in_mem(dq
, QBMAN_RESULT_BPSCN
);
964 int qbman_result_is_CGCU(const struct qbman_result
*dq
)
966 return __qbman_result_is_x_in_mem(dq
, QBMAN_RESULT_CGCU
);
969 int qbman_result_is_FQRN(const struct qbman_result
*dq
)
971 return __qbman_result_is_x_in_mem(dq
, QBMAN_RESULT_FQRN
);
974 int qbman_result_is_FQRNI(const struct qbman_result
*dq
)
976 return __qbman_result_is_x_in_mem(dq
, QBMAN_RESULT_FQRNI
);
979 int qbman_result_is_FQPN(const struct qbman_result
*dq
)
981 return __qbman_result_is_x(dq
, QBMAN_RESULT_FQPN
);
984 /*********************************/
985 /* Parsing frame dequeue results */
986 /*********************************/
988 /* These APIs assume qbman_result_is_DQ() is TRUE */
990 uint32_t qbman_result_DQ_flags(const struct qbman_result
*dq
)
992 const uint32_t *p
= qb_cl(dq
);
994 return qb_attr_code_decode(&code_dqrr_stat
, p
);
997 uint16_t qbman_result_DQ_seqnum(const struct qbman_result
*dq
)
999 const uint32_t *p
= qb_cl(dq
);
1001 return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum
, p
);
1004 uint16_t qbman_result_DQ_odpid(const struct qbman_result
*dq
)
1006 const uint32_t *p
= qb_cl(dq
);
1008 return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid
, p
);
1011 uint32_t qbman_result_DQ_fqid(const struct qbman_result
*dq
)
1013 const uint32_t *p
= qb_cl(dq
);
1015 return qb_attr_code_decode(&code_dqrr_fqid
, p
);
1018 uint32_t qbman_result_DQ_byte_count(const struct qbman_result
*dq
)
1020 const uint32_t *p
= qb_cl(dq
);
1022 return qb_attr_code_decode(&code_dqrr_byte_count
, p
);
1025 uint32_t qbman_result_DQ_frame_count(const struct qbman_result
*dq
)
1027 const uint32_t *p
= qb_cl(dq
);
1029 return qb_attr_code_decode(&code_dqrr_frame_count
, p
);
1032 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result
*dq
)
1034 const uint64_t *p
= (const uint64_t *)qb_cl(dq
);
1036 return qb_attr_code_decode_64(&code_dqrr_ctx_lo
, p
);
/* Pointer to the frame descriptor embedded in the dequeue entry
 * (FD starts at 32-bit word 8 of the result).
 */
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (const struct qbman_fd *)&p[8];
}
1046 /**************************************/
1047 /* Parsing state-change notifications */
1048 /**************************************/
1050 static struct qb_attr_code code_scn_state
= QB_CODE(0, 16, 8);
1051 static struct qb_attr_code code_scn_rid
= QB_CODE(1, 0, 24);
1052 static struct qb_attr_code code_scn_state_in_mem
=
1053 QB_CODE(0, SCN_STATE_OFFSET_IN_MEM
, 8);
1054 static struct qb_attr_code code_scn_rid_in_mem
=
1055 QB_CODE(1, SCN_RID_OFFSET_IN_MEM
, 24);
1056 static struct qb_attr_code code_scn_ctx_lo
= QB_CODE(2, 0, 32);
1058 uint8_t qbman_result_SCN_state(const struct qbman_result
*scn
)
1060 const uint32_t *p
= qb_cl(scn
);
1062 return (uint8_t)qb_attr_code_decode(&code_scn_state
, p
);
1065 uint32_t qbman_result_SCN_rid(const struct qbman_result
*scn
)
1067 const uint32_t *p
= qb_cl(scn
);
1069 return qb_attr_code_decode(&code_scn_rid
, p
);
1072 uint64_t qbman_result_SCN_ctx(const struct qbman_result
*scn
)
1074 const uint64_t *p
= (const uint64_t *)qb_cl(scn
);
1076 return qb_attr_code_decode_64(&code_scn_ctx_lo
, p
);
1079 uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result
*scn
)
1081 const uint32_t *p
= qb_cl(scn
);
1083 return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem
, p
);
1086 uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result
*scn
)
1088 const uint32_t *p
= qb_cl(scn
);
1089 uint32_t result_rid
;
1091 result_rid
= qb_attr_code_decode(&code_scn_rid_in_mem
, p
);
1092 return make_le24(result_rid
);
/* Buffer-pool ID carried in a BPSCN (low 14 bits of the RID). */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
}
/* Nonzero when the pool still has free buffers (state bit 0 clear). */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
}
/* Nonzero when the pool is depleted (state bit 1 set). */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
}
/* Nonzero when the pool is in surplus (state bit 2 set). */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
}
/* 64-bit context of a BPSCN, with each 32-bit half passed through
 * make_le32() before being recombined.
 */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)make_le32(ctx_hi) << 32 |
		(uint64_t)make_le32(ctx_lo));
}
/* Congestion-group ID carried in a CGCU (low 16 bits of the RID). */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
}
/* Instantaneous count from a CGCU: 40-bit value assembled from the
 * byte-swapped context halves (only the low byte of the high half is
 * significant).
 */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
	       (uint64_t)make_le32(ctx_lo);
}
1150 /******************/
1151 /* Buffer release */
1152 /******************/
1154 /* These should be const, eventually */
1155 /* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
1156 static struct qb_attr_code code_release_set_me
= QB_CODE(0, 5, 1);
1157 static struct qb_attr_code code_release_rcdi
= QB_CODE(0, 6, 1);
1158 static struct qb_attr_code code_release_bpid
= QB_CODE(0, 16, 16);
1160 void qbman_release_desc_clear(struct qbman_release_desc
*d
)
1164 memset(d
, 0, sizeof(*d
));
1166 qb_attr_code_encode(&code_release_set_me
, cl
, 1);
1169 void qbman_release_desc_set_bpid(struct qbman_release_desc
*d
, uint32_t bpid
)
1171 uint32_t *cl
= qb_cl(d
);
1173 qb_attr_code_encode(&code_release_bpid
, cl
, bpid
);
1176 void qbman_release_desc_set_rcdi(struct qbman_release_desc
*d
, int enable
)
1178 uint32_t *cl
= qb_cl(d
);
1180 qb_attr_code_encode(&code_release_rcdi
, cl
, !!enable
);
/* Decompose the Release Array Allocation (RAR) register value. */
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
1187 int qbman_swp_release(struct qbman_swp
*s
, const struct qbman_release_desc
*d
,
1188 const uint64_t *buffers
, unsigned int num_buffers
)
1191 const uint32_t *cl
= qb_cl(d
);
1192 uint32_t rar
= qbman_cinh_read(&s
->sys
, QBMAN_CINH_SWP_RAR
);
1194 pr_debug("RAR=%08x\n", rar
);
1195 if (!RAR_SUCCESS(rar
))
1197 QBMAN_BUG_ON(!num_buffers
|| (num_buffers
> 7));
1198 /* Start the release command */
1199 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
1200 QBMAN_CENA_SWP_RCR(RAR_IDX(rar
)));
1201 /* Copy the caller's buffer pointers to the command */
1202 u64_to_le32_copy(&p
[2], buffers
, num_buffers
);
1203 /* Set the verb byte, have to substitute in the valid-bit and the number
1207 p
[0] = cl
[0] | RAR_VB(rar
) | num_buffers
;
1208 qbman_cena_write_complete_wo_shadow(&s
->sys
,
1209 QBMAN_CENA_SWP_RCR(RAR_IDX(rar
)));
1213 /*******************/
1214 /* Buffer acquires */
1215 /*******************/
1217 /* These should be const, eventually */
1218 static struct qb_attr_code code_acquire_bpid
= QB_CODE(0, 16, 16);
1219 static struct qb_attr_code code_acquire_num
= QB_CODE(1, 0, 3);
1220 static struct qb_attr_code code_acquire_r_num
= QB_CODE(1, 0, 3);
1222 int qbman_swp_acquire(struct qbman_swp
*s
, uint32_t bpid
, uint64_t *buffers
,
1223 unsigned int num_buffers
)
1228 QBMAN_BUG_ON(!num_buffers
|| (num_buffers
> 7));
1230 /* Start the management command */
1231 p
= qbman_swp_mc_start(s
);
1236 /* Encode the caller-provided attributes */
1237 qb_attr_code_encode(&code_acquire_bpid
, p
, bpid
);
1238 qb_attr_code_encode(&code_acquire_num
, p
, num_buffers
);
1240 /* Complete the management command */
1241 p
= qbman_swp_mc_complete(s
, p
, p
[0] | QBMAN_MC_ACQUIRE
);
1243 /* Decode the outcome */
1244 rslt
= qb_attr_code_decode(&code_generic_rslt
, p
);
1245 num
= qb_attr_code_decode(&code_acquire_r_num
, p
);
1246 QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb
, p
) !=
1249 /* Determine success or failure */
1250 if (unlikely(rslt
!= QBMAN_MC_RSLT_OK
)) {
1251 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
1255 QBMAN_BUG_ON(num
> num_buffers
);
1256 /* Copy the acquired buffers to the caller's array */
1257 u64_from_le32_copy(buffers
, &p
[2], num
);
1265 static struct qb_attr_code code_fqalt_fqid
= QB_CODE(1, 0, 32);
1267 static int qbman_swp_alt_fq_state(struct qbman_swp
*s
, uint32_t fqid
,
1268 uint8_t alt_fq_verb
)
1273 /* Start the management command */
1274 p
= qbman_swp_mc_start(s
);
1278 qb_attr_code_encode(&code_fqalt_fqid
, p
, fqid
);
1279 /* Complete the management command */
1280 p
= qbman_swp_mc_complete(s
, p
, p
[0] | alt_fq_verb
);
1282 /* Decode the outcome */
1283 rslt
= qb_attr_code_decode(&code_generic_rslt
, p
);
1284 QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb
, p
) != alt_fq_verb
);
1286 /* Determine success or failure */
1287 if (unlikely(rslt
!= QBMAN_MC_RSLT_OK
)) {
1288 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
1289 fqid
, alt_fq_verb
, rslt
);
1296 int qbman_swp_fq_schedule(struct qbman_swp
*s
, uint32_t fqid
)
1298 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_SCHEDULE
);
1301 int qbman_swp_fq_force(struct qbman_swp
*s
, uint32_t fqid
)
1303 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_FORCE
);
1306 int qbman_swp_fq_xon(struct qbman_swp
*s
, uint32_t fqid
)
1308 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_XON
);
1311 int qbman_swp_fq_xoff(struct qbman_swp
*s
, uint32_t fqid
)
1313 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_XOFF
);
1316 /**********************/
1317 /* Channel management */
1318 /**********************/
1320 static struct qb_attr_code code_cdan_cid
= QB_CODE(0, 16, 12);
1321 static struct qb_attr_code code_cdan_we
= QB_CODE(1, 0, 8);
1322 static struct qb_attr_code code_cdan_en
= QB_CODE(1, 8, 1);
1323 static struct qb_attr_code code_cdan_ctx_lo
= QB_CODE(2, 0, 32);
1325 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
1326 * would be irresponsible to expose it.
1328 #define CODE_CDAN_WE_EN 0x1
1329 #define CODE_CDAN_WE_CTX 0x4
1331 static int qbman_swp_CDAN_set(struct qbman_swp
*s
, uint16_t channelid
,
1332 uint8_t we_mask
, uint8_t cdan_en
,
1338 /* Start the management command */
1339 p
= qbman_swp_mc_start(s
);
1343 /* Encode the caller-provided attributes */
1344 qb_attr_code_encode(&code_cdan_cid
, p
, channelid
);
1345 qb_attr_code_encode(&code_cdan_we
, p
, we_mask
);
1346 qb_attr_code_encode(&code_cdan_en
, p
, cdan_en
);
1347 qb_attr_code_encode_64(&code_cdan_ctx_lo
, (uint64_t *)p
, ctx
);
1348 /* Complete the management command */
1349 p
= qbman_swp_mc_complete(s
, p
, p
[0] | QBMAN_WQCHAN_CONFIGURE
);
1351 /* Decode the outcome */
1352 rslt
= qb_attr_code_decode(&code_generic_rslt
, p
);
1353 QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb
, p
)
1354 != QBMAN_WQCHAN_CONFIGURE
);
1356 /* Determine success or failure */
1357 if (unlikely(rslt
!= QBMAN_MC_RSLT_OK
)) {
1358 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
1366 int qbman_swp_CDAN_set_context(struct qbman_swp
*s
, uint16_t channelid
,
1369 return qbman_swp_CDAN_set(s
, channelid
,
1374 int qbman_swp_CDAN_enable(struct qbman_swp
*s
, uint16_t channelid
)
1376 return qbman_swp_CDAN_set(s
, channelid
,
1381 int qbman_swp_CDAN_disable(struct qbman_swp
*s
, uint16_t channelid
)
1383 return qbman_swp_CDAN_set(s
, channelid
,
1388 int qbman_swp_CDAN_set_context_enable(struct qbman_swp
*s
, uint16_t channelid
,
1391 return qbman_swp_CDAN_set(s
, channelid
,
1392 CODE_CDAN_WE_EN
| CODE_CDAN_WE_CTX
,
/* DQRR ring index of a given DQRR entry pointer. */
uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}
1401 struct qbman_result
*qbman_get_dqrr_from_idx(struct qbman_swp
*s
, uint8_t idx
)
1403 struct qbman_result
*dq
;
1405 dq
= qbman_cena_read(&s
->sys
, QBMAN_CENA_SWP_DQRR(idx
));
1409 int qbman_swp_send_multiple(struct qbman_swp
*s
,
1410 const struct qbman_eq_desc
*d
,
1411 const struct qbman_fd
*fd
,
1415 const uint32_t *cl
= qb_cl(d
);
1420 int initial_pi
= s
->eqcr
.pi
;
1421 uint64_t start_pointer
;
1423 if (!s
->eqcr
.available
) {
1424 eqcr_ci
= s
->eqcr
.ci
;
1425 s
->eqcr
.ci
= qbman_cena_read_reg(&s
->sys
,
1426 QBMAN_CENA_SWP_EQCR_CI
) & 0xF;
1427 diff
= qm_cyc_diff(QBMAN_EQCR_SIZE
,
1428 eqcr_ci
, s
->eqcr
.ci
);
1431 s
->eqcr
.available
+= diff
;
1434 /* we are trying to send frames_to_send,
1435 * if we have enough space in the ring
1437 while (s
->eqcr
.available
&& frames_to_send
--) {
1438 p
= qbman_cena_write_start_wo_shadow_fast(&s
->sys
,
1439 QBMAN_CENA_SWP_EQCR((initial_pi
) & 7));
1440 /* Write command (except of first byte) and FD */
1441 memcpy(&p
[1], &cl
[1], 7 * 4);
1442 memcpy(&p
[8], &fd
[sent
], sizeof(struct qbman_fd
));
1446 s
->eqcr
.available
--;
1451 initial_pi
= s
->eqcr
.pi
;
1454 /* in order for flushes to complete faster:
1455 * we use a following trick: we record all lines in 32 bit word
1458 initial_pi
= s
->eqcr
.pi
;
1459 for (i
= 0; i
< sent
; i
++) {
1460 p
= qbman_cena_write_start_wo_shadow_fast(&s
->sys
,
1461 QBMAN_CENA_SWP_EQCR((initial_pi
) & 7));
1463 p
[0] = cl
[0] | s
->eqcr
.pi_vb
;
1467 if (!(initial_pi
& 7))
1468 s
->eqcr
.pi_vb
^= QB_VALID_BIT
;
1471 initial_pi
= s
->eqcr
.pi
;
1473 /* We need to flush all the lines but without
1474 * load/store operations between them.
1475 * We assign start_pointer before we start loop so that
1476 * in loop we do not read it from memory
1478 start_pointer
= (uint64_t)s
->sys
.addr_cena
;
1479 for (i
= 0; i
< sent
; i
++) {
1480 p
= (uint32_t *)(start_pointer
1481 + QBMAN_CENA_SWP_EQCR(initial_pi
& 7));
1487 /* Update producer index for the next call */
1488 s
->eqcr
.pi
= initial_pi
;
1493 int qbman_get_version(void)
1495 return qman_version
;