/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 */
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
/* Dequeue-command type field of the SDQCR register. */
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

/* Frame-count field of the SDQCR register: dequeue one frame, or up to 3. */
enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
/* We need to keep track of which SWP triggered a pull command
 * so keep an array of portal IDs and use the token field to
 * be able to find the proper portal
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
/* Internal Function declaration */
/* Forward declarations of the per-hardware-revision implementations that the
 * function pointers below dispatch to ("direct" = CENA cache-enabled access,
 * "mem_back" = memory-backed portal access used on QMan >= 5000).
 *
 * NOTE(review): the leading return types ("static int") and some trailing
 * parameter lines (e.g. "uint32_t *flags, int num_frames") of these
 * prototypes were lost when this copy of the file was extracted — restore
 * them from the definitions further down before building.
 */
qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
qbman_swp_release_direct(struct qbman_swp *s,
		const struct qbman_release_desc *d,
		const uint64_t *buffers, unsigned int num_buffers);
qbman_swp_release_mem_back(struct qbman_swp *s,
		const struct qbman_release_desc *d,
		const uint64_t *buffers, unsigned int num_buffers);
/* Function pointers */
/* Dispatch table for the portal operations. All pointers default to the
 * "direct" implementations; qbman_swp_init() re-points them at the
 * "mem_back" variants on QMan >= 5000 with fastest CENA access.
 *
 * NOTE(review): the flags/num_frames parameter lines of the *_multiple_*
 * pointer types were dropped during extraction of this copy.
 */
static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	= qbman_swp_enqueue_array_mode_direct;

static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	= qbman_swp_enqueue_ring_mode_direct;

static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
	= qbman_swp_enqueue_multiple_direct;

static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
	= qbman_swp_enqueue_multiple_fd_direct;

static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
	= qbman_swp_enqueue_multiple_desc_direct;

static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
		struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

/* Non-static: also referenced outside this translation unit. */
const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
	= qbman_swp_dqrr_next_direct;

static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
		const struct qbman_release_desc *d,
		const uint64_t *buffers, unsigned int num_buffers)
	= qbman_swp_release_direct;
/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * [remainder of this sentence was truncated in this copy of the file]
 */

/* Allocate and initialise a software portal described by *d: seeds the
 * management-command / VDQ / DQRR valid-bits, builds the default SDQCR word,
 * sizes DQRR and EQCR per the QMan revision, switches the dispatch pointers
 * to the mem_back variants on QMan >= 5000, and registers the portal in
 * portal_idx_map so DQRR tokens can be mapped back to it.
 *
 * NOTE(review): extraction of this copy dropped several lines of this
 * function — the local declarations (ret, qman_version, eqcr_pi, mask_size),
 * the malloc()/sys-init failure paths, most braces and the final return — so
 * the text below is not compilable as-is.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
	struct qbman_swp *p = malloc(sizeof(*p));
	memset(p, 0, sizeof(struct qbman_swp));
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
	p->mc.valid_bit = QB_VALID_BIT;
	/* Default static-dequeue command: up to 3 frames, priority-ICS,
	 * opaque token so responses can be matched.
	 */
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->mr.valid_bit = QB_VALID_BIT;
	/* VDQ starts "not busy" (the atomic is used as an availability flag) */
	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.valid_bit = QB_VALID_BIT;
	qman_version = p->desc.qman_version;
	/* Pre-4.1 QMan: smaller DQRR and the valid-bit reset bug workaround */
	if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
		pr_err("qbman_swp_sys_init() failed %d\n", ret);
	/* Verify that the DQRRPI is 0 - if it is not the portal isn't
	 * in default state which is an error
	 */
	if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
		pr_err("qbman DQRR PI is not zero, portal is not clean\n");
	/* SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error. The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
	p->eqcr.pi_ring_size = 8;
	/* QMan >= 5000 with fastest CENA access: bigger EQCR and the
	 * memory-backed portal access routines.
	 */
	if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access)) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_array_mode_ptr =
			qbman_swp_enqueue_array_mode_mem_back;
		qbman_swp_enqueue_ring_mode_ptr =
			qbman_swp_enqueue_ring_mode_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_fd_ptr =
			qbman_swp_enqueue_multiple_fd_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	/* Build an all-ones mask wide enough to cover the ring size */
	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
	eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
				& p->eqcr.pi_ci_mask;
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
				& p->eqcr.pi_ci_mask;
	/* Free EQCR entries = ring size minus outstanding (pi - ci) distance */
	p->eqcr.available = p->eqcr.pi_ring_size -
			qm_cyc_diff(p->eqcr.pi_ring_size,
			p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
			p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
	portal_idx_map[p->desc.idx] = p;
/* Tear down a software portal: release the sys layer and remove the portal
 * from the token lookup map.
 *
 * NOTE(review): the braces, the #endif for QBMAN_CHECKING, and (presumably)
 * the final free(p) were dropped from this copy — confirm against upstream.
 */
void qbman_swp_finish(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
	/* No management command may be in flight at teardown */
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
	qbman_swp_sys_finish(&p->sys);
	portal_idx_map[p->desc.idx] = NULL;
/* Accessor for the descriptor the portal was initialised with.
 * NOTE(review): the body was dropped from this copy — presumably
 * "return &p->desc;"; confirm against upstream.
 */
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
316 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp
*p
)
318 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_ISDR
);
321 void qbman_swp_interrupt_set_vanish(struct qbman_swp
*p
, uint32_t mask
)
323 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_ISDR
, mask
);
326 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp
*p
)
328 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_ISR
);
331 void qbman_swp_interrupt_clear_status(struct qbman_swp
*p
, uint32_t mask
)
333 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_ISR
, mask
);
336 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp
*p
)
338 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_DQRR_ITR
);
341 void qbman_swp_dqrr_thrshld_write(struct qbman_swp
*p
, uint32_t mask
)
343 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_DQRR_ITR
, mask
);
346 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp
*p
)
348 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_ITPR
);
351 void qbman_swp_intr_timeout_write(struct qbman_swp
*p
, uint32_t mask
)
353 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_ITPR
, mask
);
356 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp
*p
)
358 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_IER
);
361 void qbman_swp_interrupt_set_trigger(struct qbman_swp
*p
, uint32_t mask
)
363 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_IER
, mask
);
366 int qbman_swp_interrupt_get_inhibit(struct qbman_swp
*p
)
368 return qbman_cinh_read(&p
->sys
, QBMAN_CINH_SWP_IIR
);
371 void qbman_swp_interrupt_set_inhibit(struct qbman_swp
*p
, int inhibit
)
373 qbman_cinh_write(&p
->sys
, QBMAN_CINH_SWP_IIR
,
374 inhibit
? 0xffffffff : 0);
/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

/* Begin a management command: returns a pointer to the CR (command register)
 * cacheline the caller fills in before qbman_swp_mc_submit().
 *
 * NOTE(review): extraction dropped the declaration of "ret", the braces,
 * the #endif lines and the final "return ret;" from this copy.
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
	/* QMan >= 5000 with fastest CENA access uses the memory-backed CR */
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (p->desc.cena_access_mode == qman_cena_fastest_access))
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
	/* Advance the per-portal command state machine */
	p->mc.check = swp_mc_can_submit;
/* Submit a management command previously started with qbman_swp_mc_start():
 * writes the verb byte (ORed with the current valid-bit) last so the hardware
 * sees a complete command.
 *
 * NOTE(review): extraction dropped the declaration of "v" (presumably
 * "uint8_t *v = cmd;"), braces and #endif lines from this copy. Also note
 * the double negation in the first QBMAN_BUG_ON — "!(check != can_submit)"
 * fires when the state IS swp_mc_can_submit, which looks inverted relative
 * to the checks in mc_start/mc_result; confirm against upstream.
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
	/* TBD: "|=" is going to hurt performance. Need to move as many fields
	 * out of word zero, and for those that remain, the "OR" needs to occur
	 * at the caller side. This debug check helps to catch cases where the
	 * caller wants to OR but has forgotten to do so.
	 */
	QBMAN_BUG_ON((*v & cmd_verb) != *v);
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		/* Memory-backed portal: write verb+valid-bit, complete the
		 * cacheline, then ring the RT doorbell.
		 */
		*v = cmd_verb | p->mr.valid_bit;
		qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	*v = cmd_verb | p->mc.valid_bit;
	qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_poll;
/* Poll for the response to a submitted management command. Returns the
 * response cacheline when the command has completed, otherwise (per the
 * dropped early-return paths) NULL.
 *
 * NOTE(review): extraction dropped the declarations of "ret"/"verb", the
 * early "return NULL" statements, braces and #endif lines from this copy.
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		p->mr.valid_bit ^= QB_VALID_BIT;
		/* Non-mem-back portals: the response register alternates with
		 * the management-command valid-bit.
		 */
		qbman_cena_invalidate_prefetch(&p->sys,
				QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		ret = qbman_cena_read(&p->sys,
				QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		p->mc.valid_bit ^= QB_VALID_BIT;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
/* Enqueue command-descriptor field layout. */
#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
/* Enqueue command types (low bits of the verb byte).
 * NOTE(review): the first enumerator (= 0) and the closing "};" were
 * dropped from this copy during extraction.
 */
enum qb_enqueue_commands {
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
#define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
#define QB_ENQUEUE_CMD_NLIS_SHIFT 14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
490 void qbman_eq_desc_clear(struct qbman_eq_desc
*d
)
492 memset(d
, 0, sizeof(*d
));
/* Configure an enqueue descriptor for no order-restoration (ORP disabled);
 * respond_success selects "response always" vs "rejects to FQ".
 *
 * NOTE(review): the "if (respond_success) ... else ..." lines and braces
 * were dropped from this copy during extraction.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	d->eq.verb |= enqueue_response_always;
	d->eq.verb |= enqueue_rejects_to_fq;
/* Configure an enqueue descriptor for order restoration: enables ORP, sets
 * the ORP id and sequence number, and sets/clears NLIS for incomplete
 * sequences.
 *
 * NOTE(review): the "if (respond_success)"/"if (incomplete)" scaffolding
 * and braces were dropped from this copy during extraction.
 */
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
		uint16_t opr_id, uint16_t seqnum, int incomplete)
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb |= enqueue_response_always;
	d->eq.verb |= enqueue_rejects_to_fq;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
/* Configure an ORP "hole" advance: enables ORP with no enqueue-command
 * option bits and clears both NLIS and IS_NESN in the sequence number.
 *
 * NOTE(review): the trailing "uint16_t seqnum)" parameter line and braces
 * were dropped from this copy during extraction.
 */
void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
/* Configure an ORP NESN (advance-next-expected-sequence-number) operation:
 * like set_orp_hole but sets IS_NESN instead of clearing it.
 *
 * NOTE(review): the trailing "uint16_t seqnum)" parameter line and braces
 * were dropped from this copy during extraction.
 */
void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
/* Set the physical address where the enqueue response should be written.
 * NOTE(review): the remaining parameter line(s) (e.g. a stash flag), braces
 * and any stash-handling statements were dropped from this copy.
 */
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
		dma_addr_t storage_phys,
	d->eq.rsp_addr = storage_phys;
/* Set the caller token echoed back in the enqueue response.
 * NOTE(review): the body was dropped from this copy during extraction —
 * confirm against upstream (presumably assigns token into d->eq).
 */
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
/* Target a frame queue: clears the "target type" verb bit (FQ, not QD).
 * NOTE(review): braces and the assignment of fqid into the descriptor were
 * dropped from this copy during extraction.
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
/* Target a queuing destination (QD): sets the "target type" verb bit and
 * records the QD bin and priority.
 * NOTE(review): braces and the assignment of qdid into the descriptor were
 * dropped from this copy during extraction.
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
		uint16_t qd_bin, uint8_t qd_prio)
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->eq.qdbin = qd_bin;
	d->eq.qpri = qd_prio;
/* Enable/disable "interrupt on dispatch" for this enqueue.
 * NOTE(review): the "if (enable) ... else ..." lines and braces were
 * dropped from this copy during extraction.
 */
void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
/* Configure DQRR consumption-acknowledgement (DCA) for this enqueue: record
 * the DQRR index, optionally set the park bit, and set/clear the DCA enable
 * bit.
 * NOTE(review): the "if (enable)"/"if (park)" scaffolding and braces were
 * dropped from this copy during extraction.
 */
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
		uint8_t dqrr_idx, int park)
	d->eq.dca = dqrr_idx;
	d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
	d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
	d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
	d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
/* Fields of the EQAR (enqueue array-mode request) register value. */
#define EQAR_IDX(eqar)     ((eqar) & 0x1f)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

/* Ring the per-entry EQCR array-mode RT doorbell for entry @idx (first bank
 * via EQCR_AM_RT, presumably entries >= 16 via EQCR_AM_RT2).
 * NOTE(review): the "uint8_t idx)" parameter line, the index-range branch,
 * the QMAN_RT_MODE argument lines and braces were dropped from this copy.
 */
static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
/* Array-mode single enqueue, direct (CENA) portal access: claim an EQCR
 * entry via EQAR, copy descriptor + frame descriptor in, then publish by
 * writing the verb byte with the EQAR valid-bit last.
 *
 * NOTE(review): the declaration of "p", the failure return after
 * EQAR_SUCCESS, the final "return 0;" and braces were dropped from this
 * copy during extraction.
 */
static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	/* Words 1..7 = descriptor, words 8+ = frame descriptor */
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Array-mode single enqueue, memory-backed portal access: as the direct
 * variant, but publishes by ringing the per-entry EQCR AM RT doorbell
 * instead of completing the CENA cacheline.
 *
 * NOTE(review): the declaration of "p", the failure/success returns and
 * braces were dropped from this copy during extraction.
 */
static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
/* Dispatch an array-mode enqueue through the revision-selected function
 * pointer (direct vs mem_back, chosen in qbman_swp_init()).
 * (Braces restored; they were dropped during extraction.)
 */
static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}
/* Ring-mode single enqueue, direct (CENA) portal access: refresh the cached
 * consumer index when no EQCR entries appear free, write the entry at the
 * producer index, publish via verb+valid-bit, and advance PI (toggling the
 * valid-bit on wrap of the half-ring).
 *
 * NOTE(review): the declaration of "p", the busy/success returns, the PI
 * increment and several braces were dropped from this copy during
 * extraction.
 */
static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Re-read CI from hardware to discover newly-freed entries */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	s->eqcr.pi &= full_mask;
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Ring-mode single enqueue, memory-backed portal access: as the direct
 * variant, but reads CI from the mem-back register and publishes by writing
 * RT-mode PI+valid-bit to the EQCR_PI register instead of completing the
 * CENA cacheline.
 *
 * NOTE(review): the declaration of "p", the busy/success returns, the PI
 * increment and several braces were dropped from this copy during
 * extraction.
 */
static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	s->eqcr.pi &= full_mask;
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
			(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Dispatch a ring-mode enqueue through the revision-selected function
 * pointer (direct vs mem_back, chosen in qbman_swp_init()).
 * (Braces restored; they were dropped during extraction.)
 */
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
}
748 int qbman_swp_enqueue(struct qbman_swp
*s
, const struct qbman_eq_desc
*d
,
749 const struct qbman_fd
*fd
)
751 if (s
->sys
.eqcr_mode
== qman_eqcr_vb_array
)
752 return qbman_swp_enqueue_array_mode(s
, d
, fd
);
753 else /* Use ring mode by default */
754 return qbman_swp_enqueue_ring_mode(s
, d
, fd
);
/* Multi-frame enqueue (shared descriptor), direct portal access. Three
 * passes over the claimed EQCR entries: (1) copy descriptor + FDs, (2) write
 * verb bytes with valid-bits and optional per-frame DCA flags, (3) flush the
 * cachelines. Returns (per the dropped return) the number actually enqueued.
 *
 * NOTE(review): the trailing "uint32_t *flags, int num_frames)" parameter
 * lines, the declarations of "p"/"addr_cena", the eqcr_pi++ increments,
 * closing braces and the final return were dropped from this copy during
 * extraction.
 */
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
	eqcr_pi = s->eqcr.pi;
	/* Clamp the batch to the free EQCR entries */
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
					((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
	s->eqcr.pi = eqcr_pi & full_mask;
/* Multi-frame enqueue (shared descriptor), memory-backed portal access: one
 * fill pass (descriptor, FD, DCA flags, verb+valid-bit), then publish by
 * writing RT-mode PI+valid-bit to EQCR_PI.
 *
 * NOTE(review): the trailing "uint32_t *flags, int num_frames)" parameter
 * lines, the declaration of "p", the eqcr_pi++ increment, closing braces
 * and the final "return num_enqueued;" were dropped from this copy during
 * extraction.
 */
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
					((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	s->eqcr.pi = eqcr_pi & full_mask;
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
			(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Public multi-frame enqueue entry point; dispatches through the
 * revision-selected function pointer.
 * NOTE(review): the "uint32_t *flags, int num_frames)" parameter lines and
 * braces were dropped from this copy during extraction.
 */
inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
/* Multi-frame enqueue taking an array of FD pointers (rather than a
 * contiguous FD array), direct portal access. Same three-pass structure as
 * qbman_swp_enqueue_multiple_direct.
 *
 * NOTE(review): the "uint32_t *flags, int num_frames)" parameter lines, the
 * declarations of "p"/"addr_cena", the dcbf(...) call line in the flush
 * loop, the eqcr_pi++ increments, closing braces and the final return were
 * dropped from this copy during extraction.
 */
static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		/* fd[i] is a pointer here, unlike the flat-array variant */
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
					((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
	s->eqcr.pi = eqcr_pi & full_mask;
/* Multi-frame enqueue taking an array of FD pointers, memory-backed portal
 * access: fill pass then verb/DCA pass, publish via RT-mode EQCR_PI write,
 * return the number of frames actually enqueued.
 *
 * NOTE(review): the "uint32_t *flags, int num_frames)" parameter lines, the
 * declaration of "p", the eqcr_pi++ increments and several closing braces
 * were dropped from this copy during extraction.
 */
static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
					((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	s->eqcr.pi = eqcr_pi & full_mask;
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
			(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return num_enqueued;
*s
,
1018 const struct qbman_eq_desc
*d
,
1019 struct qbman_fd
**fd
,
1023 return qbman_swp_enqueue_multiple_fd_ptr(s
, d
, fd
, flags
, num_frames
);
/* Multi-frame enqueue with a per-frame descriptor array, direct portal
 * access. Same three-pass structure as the shared-descriptor variant.
 *
 * NOTE(review): the "int num_frames)" parameter line, the declarations of
 * "p"/"addr_cena", the per-frame "cl = qb_cl(&d[i]);" lines, the eqcr_pi++
 * increments and several closing braces were dropped from this copy during
 * extraction — "cl" appears undeclared below only because of that.
 */
static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
	s->eqcr.pi = eqcr_pi & full_mask;
	return num_enqueued;
*s
,
1091 const struct qbman_eq_desc
*d
,
1092 const struct qbman_fd
*fd
,
1097 uint32_t eqcr_ci
, eqcr_pi
, half_mask
, full_mask
;
1098 int i
, num_enqueued
= 0;
1100 half_mask
= (s
->eqcr
.pi_ci_mask
>>1);
1101 full_mask
= s
->eqcr
.pi_ci_mask
;
1102 if (!s
->eqcr
.available
) {
1103 eqcr_ci
= s
->eqcr
.ci
;
1104 s
->eqcr
.ci
= qbman_cena_read_reg(&s
->sys
,
1105 QBMAN_CENA_SWP_EQCR_CI_MEMBACK
) & full_mask
;
1106 s
->eqcr
.available
= qm_cyc_diff(s
->eqcr
.pi_ring_size
,
1107 eqcr_ci
, s
->eqcr
.ci
);
1108 if (!s
->eqcr
.available
)
1112 eqcr_pi
= s
->eqcr
.pi
;
1113 num_enqueued
= (s
->eqcr
.available
< num_frames
) ?
1114 s
->eqcr
.available
: num_frames
;
1115 s
->eqcr
.available
-= num_enqueued
;
1116 /* Fill in the EQCR ring */
1117 for (i
= 0; i
< num_enqueued
; i
++) {
1118 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
1119 QBMAN_CENA_SWP_EQCR(eqcr_pi
& half_mask
));
1121 memcpy(&p
[1], &cl
[1], 28);
1122 memcpy(&p
[8], &fd
[i
], sizeof(*fd
));
1126 /* Set the verb byte, have to substitute in the valid-bit */
1127 eqcr_pi
= s
->eqcr
.pi
;
1128 for (i
= 0; i
< num_enqueued
; i
++) {
1129 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
1130 QBMAN_CENA_SWP_EQCR(eqcr_pi
& half_mask
));
1132 p
[0] = cl
[0] | s
->eqcr
.pi_vb
;
1134 if (!(eqcr_pi
& half_mask
))
1135 s
->eqcr
.pi_vb
^= QB_VALID_BIT
;
1138 s
->eqcr
.pi
= eqcr_pi
& full_mask
;
1141 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_EQCR_PI
,
1142 (QB_RT_BIT
)|(s
->eqcr
.pi
)|s
->eqcr
.pi_vb
);
1144 return num_enqueued
;
/* Public entry point: dispatch a multi-descriptor enqueue through the
 * function pointer selected for this hardware revision (direct vs
 * memory-backed portal).
 *
 * NOTE(review): the final parameter line was lost in extraction; restored
 * as "int num_frames" to match the sibling enqueue-multiple APIs - verify.
 */
inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct qbman_fd *fd,
					   int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}
1154 /*************************/
1155 /* Static (push) dequeue */
1156 /*************************/
1158 void qbman_swp_push_get(struct qbman_swp
*s
, uint8_t channel_idx
, int *enabled
)
1160 uint16_t src
= (s
->sdq
>> QB_SDQCR_SRC_SHIFT
) & QB_SDQCR_SRC_MASK
;
1162 QBMAN_BUG_ON(channel_idx
> 15);
1163 *enabled
= src
| (1 << channel_idx
);
1166 void qbman_swp_push_set(struct qbman_swp
*s
, uint8_t channel_idx
, int enable
)
1170 QBMAN_BUG_ON(channel_idx
> 15);
1172 s
->sdq
|= 1 << channel_idx
;
1174 s
->sdq
&= ~(1 << channel_idx
);
1176 /* Read make the complete src map. If no channels are enabled
1177 * the SDQCR must be 0 or else QMan will assert errors
1179 dqsrc
= (s
->sdq
>> QB_SDQCR_SRC_SHIFT
) & QB_SDQCR_SRC_MASK
;
1181 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_SDQCR
, s
->sdq
);
1183 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_SDQCR
, 0);
1186 /***************************/
1187 /* Volatile (pull) dequeue */
1188 /***************************/
1190 /* These should be const, eventually */
1191 #define QB_VDQCR_VERB_DCT_SHIFT 0
1192 #define QB_VDQCR_VERB_DT_SHIFT 2
1193 #define QB_VDQCR_VERB_RLS_SHIFT 4
1194 #define QB_VDQCR_VERB_WAE_SHIFT 5
1195 #define QB_VDQCR_VERB_RAD_SHIFT 6
/* Dequeue-type (DT) field values for the VDQCR verb.
 * NOTE(review): the enum head and first enumerator were lost in
 * extraction; 'qb_pull_dt_channel' (referenced by
 * qbman_pull_desc_set_channel below) restored as value 0 - verify.
 */
enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};
1203 void qbman_pull_desc_clear(struct qbman_pull_desc
*d
)
1205 memset(d
, 0, sizeof(*d
));
1208 void qbman_pull_desc_set_storage(struct qbman_pull_desc
*d
,
1209 struct qbman_result
*storage
,
1210 dma_addr_t storage_phys
,
1213 d
->pull
.rsp_addr_virt
= (size_t)storage
;
1216 d
->pull
.verb
&= ~(1 << QB_VDQCR_VERB_RLS_SHIFT
);
1219 d
->pull
.verb
|= 1 << QB_VDQCR_VERB_RLS_SHIFT
;
1221 d
->pull
.verb
|= 1 << QB_VDQCR_VERB_WAE_SHIFT
;
1223 d
->pull
.verb
&= ~(1 << QB_VDQCR_VERB_WAE_SHIFT
);
1225 d
->pull
.rsp_addr
= storage_phys
;
1228 void qbman_pull_desc_set_numframes(struct qbman_pull_desc
*d
,
1231 d
->pull
.numf
= numframes
- 1;
1234 void qbman_pull_desc_set_token(struct qbman_pull_desc
*d
, uint8_t token
)
1236 d
->pull
.tok
= token
;
1239 void qbman_pull_desc_set_fq(struct qbman_pull_desc
*d
, uint32_t fqid
)
1241 d
->pull
.verb
|= 1 << QB_VDQCR_VERB_DCT_SHIFT
;
1242 d
->pull
.verb
|= qb_pull_dt_framequeue
<< QB_VDQCR_VERB_DT_SHIFT
;
1243 d
->pull
.dq_src
= fqid
;
1246 void qbman_pull_desc_set_wq(struct qbman_pull_desc
*d
, uint32_t wqid
,
1247 enum qbman_pull_type_e dct
)
1249 d
->pull
.verb
|= dct
<< QB_VDQCR_VERB_DCT_SHIFT
;
1250 d
->pull
.verb
|= qb_pull_dt_workqueue
<< QB_VDQCR_VERB_DT_SHIFT
;
1251 d
->pull
.dq_src
= wqid
;
1254 void qbman_pull_desc_set_channel(struct qbman_pull_desc
*d
, uint32_t chid
,
1255 enum qbman_pull_type_e dct
)
1257 d
->pull
.verb
|= dct
<< QB_VDQCR_VERB_DCT_SHIFT
;
1258 d
->pull
.verb
|= qb_pull_dt_channel
<< QB_VDQCR_VERB_DT_SHIFT
;
1259 d
->pull
.dq_src
= chid
;
1262 void qbman_pull_desc_set_rad(struct qbman_pull_desc
*d
, int rad
)
1264 if (d
->pull
.verb
& (1 << QB_VDQCR_VERB_RLS_SHIFT
)) {
1266 d
->pull
.verb
|= 1 << QB_VDQCR_VERB_RAD_SHIFT
;
1268 d
->pull
.verb
&= ~(1 << QB_VDQCR_VERB_RAD_SHIFT
);
1270 printf("The RAD feature is not valid when RLS = 0\n");
1274 static int qbman_swp_pull_direct(struct qbman_swp
*s
,
1275 struct qbman_pull_desc
*d
)
1278 uint32_t *cl
= qb_cl(d
);
1280 if (!atomic_dec_and_test(&s
->vdq
.busy
)) {
1281 atomic_inc(&s
->vdq
.busy
);
1285 d
->pull
.tok
= s
->sys
.idx
+ 1;
1286 s
->vdq
.storage
= (void *)(size_t)d
->pull
.rsp_addr_virt
;
1287 p
= qbman_cena_write_start_wo_shadow(&s
->sys
, QBMAN_CENA_SWP_VDQCR
);
1288 memcpy(&p
[1], &cl
[1], 12);
1290 /* Set the verb byte, have to substitute in the valid-bit */
1292 p
[0] = cl
[0] | s
->vdq
.valid_bit
;
1293 s
->vdq
.valid_bit
^= QB_VALID_BIT
;
1294 qbman_cena_write_complete_wo_shadow(&s
->sys
, QBMAN_CENA_SWP_VDQCR
);
1299 static int qbman_swp_pull_mem_back(struct qbman_swp
*s
,
1300 struct qbman_pull_desc
*d
)
1303 uint32_t *cl
= qb_cl(d
);
1305 if (!atomic_dec_and_test(&s
->vdq
.busy
)) {
1306 atomic_inc(&s
->vdq
.busy
);
1310 d
->pull
.tok
= s
->sys
.idx
+ 1;
1311 s
->vdq
.storage
= (void *)(size_t)d
->pull
.rsp_addr_virt
;
1312 p
= qbman_cena_write_start_wo_shadow(&s
->sys
, QBMAN_CENA_SWP_VDQCR_MEM
);
1313 memcpy(&p
[1], &cl
[1], 12);
1315 /* Set the verb byte, have to substitute in the valid-bit */
1316 p
[0] = cl
[0] | s
->vdq
.valid_bit
;
1317 s
->vdq
.valid_bit
^= QB_VALID_BIT
;
1319 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_VDQCR_RT
, QMAN_RT_MODE
);
/* Public entry point: dispatch the pull command through the per-revision
 * function pointer (direct vs memory-backed).
 */
inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}
1333 #define QMAN_DQRR_PI_MASK 0xf
1335 #define QBMAN_RESULT_DQ 0x60
1336 #define QBMAN_RESULT_FQRN 0x21
1337 #define QBMAN_RESULT_FQRNI 0x22
1338 #define QBMAN_RESULT_FQPN 0x24
1339 #define QBMAN_RESULT_FQDAN 0x25
1340 #define QBMAN_RESULT_CDAN 0x26
1341 #define QBMAN_RESULT_CSCN_MEM 0x27
1342 #define QBMAN_RESULT_CGCU 0x28
1343 #define QBMAN_RESULT_BPSCN 0x29
1344 #define QBMAN_RESULT_CSCN_WQ 0x2a
1346 #include <rte_prefetch.h>
1348 void qbman_swp_prefetch_dqrr_next(struct qbman_swp
*s
)
1350 const struct qbman_result
*p
;
1352 p
= qbman_cena_read_wo_shadow(&s
->sys
,
1353 QBMAN_CENA_SWP_DQRR(s
->dqrr
.next_idx
));
1357 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1358 * only once, so repeated calls can return a sequence of DQRR entries, without
1359 * requiring they be consumed immediately or in any particular order.
/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 * Dispatches through the per-revision function pointer.
 */
inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
1366 const struct qbman_result
*qbman_swp_dqrr_next_direct(struct qbman_swp
*s
)
1369 uint32_t response_verb
;
1371 const struct qbman_result
*p
;
1373 /* Before using valid-bit to detect if something is there, we have to
1374 * handle the case of the DQRR reset bug...
1376 if (s
->dqrr
.reset_bug
) {
1377 /* We pick up new entries by cache-inhibited producer index,
1378 * which means that a non-coherent mapping would require us to
1379 * invalidate and read *only* once that PI has indicated that
1380 * there's an entry here. The first trip around the DQRR ring
1381 * will be much less efficient than all subsequent trips around
1384 uint8_t pi
= qbman_cinh_read(&s
->sys
, QBMAN_CINH_SWP_DQPI
) &
1387 /* there are new entries if pi != next_idx */
1388 if (pi
== s
->dqrr
.next_idx
)
1391 /* if next_idx is/was the last ring index, and 'pi' is
1392 * different, we can disable the workaround as all the ring
1393 * entries have now been DMA'd to so valid-bit checking is
1394 * repaired. Note: this logic needs to be based on next_idx
1395 * (which increments one at a time), rather than on pi (which
1396 * can burst and wrap-around between our snapshots of it).
1398 QBMAN_BUG_ON((s
->dqrr
.dqrr_size
- 1) < 0);
1399 if (s
->dqrr
.next_idx
== (s
->dqrr
.dqrr_size
- 1u)) {
1400 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1401 s
->dqrr
.next_idx
, pi
);
1402 s
->dqrr
.reset_bug
= 0;
1404 qbman_cena_invalidate_prefetch(&s
->sys
,
1405 QBMAN_CENA_SWP_DQRR(s
->dqrr
.next_idx
));
1407 p
= qbman_cena_read_wo_shadow(&s
->sys
,
1408 QBMAN_CENA_SWP_DQRR(s
->dqrr
.next_idx
));
1412 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1413 * in the DQRR reset bug workaround, we shouldn't need to skip these
1414 * check, because we've already determined that a new entry is available
1415 * and we've invalidated the cacheline before reading it, so the
1416 * valid-bit behaviour is repaired and should tell us what we already
1417 * knew from reading PI.
1419 if ((verb
& QB_VALID_BIT
) != s
->dqrr
.valid_bit
)
1422 /* There's something there. Move "next_idx" attention to the next ring
1423 * entry (and prefetch it) before returning what we found.
1426 if (s
->dqrr
.next_idx
== s
->dqrr
.dqrr_size
) {
1427 s
->dqrr
.next_idx
= 0;
1428 s
->dqrr
.valid_bit
^= QB_VALID_BIT
;
1430 /* If this is the final response to a volatile dequeue command
1431 * indicate that the vdq is no longer busy
1434 response_verb
= verb
& QBMAN_RESPONSE_VERB_MASK
;
1435 if ((response_verb
== QBMAN_RESULT_DQ
) &&
1436 (flags
& QBMAN_DQ_STAT_VOLATILE
) &&
1437 (flags
& QBMAN_DQ_STAT_EXPIRED
))
1438 atomic_inc(&s
->vdq
.busy
);
1443 const struct qbman_result
*qbman_swp_dqrr_next_mem_back(struct qbman_swp
*s
)
1446 uint32_t response_verb
;
1448 const struct qbman_result
*p
;
1450 p
= qbman_cena_read_wo_shadow(&s
->sys
,
1451 QBMAN_CENA_SWP_DQRR_MEM(s
->dqrr
.next_idx
));
1455 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1456 * in the DQRR reset bug workaround, we shouldn't need to skip these
1457 * check, because we've already determined that a new entry is available
1458 * and we've invalidated the cacheline before reading it, so the
1459 * valid-bit behaviour is repaired and should tell us what we already
1460 * knew from reading PI.
1462 if ((verb
& QB_VALID_BIT
) != s
->dqrr
.valid_bit
)
1465 /* There's something there. Move "next_idx" attention to the next ring
1466 * entry (and prefetch it) before returning what we found.
1469 if (s
->dqrr
.next_idx
== s
->dqrr
.dqrr_size
) {
1470 s
->dqrr
.next_idx
= 0;
1471 s
->dqrr
.valid_bit
^= QB_VALID_BIT
;
1473 /* If this is the final response to a volatile dequeue command
1474 * indicate that the vdq is no longer busy
1477 response_verb
= verb
& QBMAN_RESPONSE_VERB_MASK
;
1478 if ((response_verb
== QBMAN_RESULT_DQ
)
1479 && (flags
& QBMAN_DQ_STAT_VOLATILE
)
1480 && (flags
& QBMAN_DQ_STAT_EXPIRED
))
1481 atomic_inc(&s
->vdq
.busy
);
1485 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1486 void qbman_swp_dqrr_consume(struct qbman_swp
*s
,
1487 const struct qbman_result
*dq
)
1489 qbman_cinh_write(&s
->sys
,
1490 QBMAN_CINH_SWP_DCAP
, QBMAN_IDX_FROM_DQRR(dq
));
1493 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1494 void qbman_swp_dqrr_idx_consume(struct qbman_swp
*s
,
1497 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_DCAP
, dqrr_index
);
1500 /*********************************/
1501 /* Polling user-provided storage */
1502 /*********************************/
1504 int qbman_result_has_new_result(struct qbman_swp
*s
,
1505 struct qbman_result
*dq
)
1507 if (dq
->dq
.tok
== 0)
1511 * Set token to be 0 so we will detect change back to 1
1512 * next time the looping is traversed. Const is cast away here
1513 * as we want users to treat the dequeue responses as read only.
1515 ((struct qbman_result
*)dq
)->dq
.tok
= 0;
1518 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1519 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1520 * that makes it available. Eg. we may be looking at our 10th dequeue
1521 * result, having released VDQCR after the 1st result and it is now
1522 * busy due to some other command!
1524 if (s
->vdq
.storage
== dq
) {
1525 s
->vdq
.storage
= NULL
;
1526 atomic_inc(&s
->vdq
.busy
);
1532 int qbman_check_new_result(struct qbman_result
*dq
)
1534 if (dq
->dq
.tok
== 0)
1538 * Set token to be 0 so we will detect change back to 1
1539 * next time the looping is traversed. Const is cast away here
1540 * as we want users to treat the dequeue responses as read only.
1542 ((struct qbman_result
*)dq
)->dq
.tok
= 0;
1547 int qbman_check_command_complete(struct qbman_result
*dq
)
1549 struct qbman_swp
*s
;
1551 if (dq
->dq
.tok
== 0)
1554 s
= portal_idx_map
[dq
->dq
.tok
- 1];
1556 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1557 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1558 * that makes it available. Eg. we may be looking at our 10th dequeue
1559 * result, having released VDQCR after the 1st result and it is now
1560 * busy due to some other command!
1562 if (s
->vdq
.storage
== dq
) {
1563 s
->vdq
.storage
= NULL
;
1564 atomic_inc(&s
->vdq
.busy
);
1570 /********************************/
1571 /* Categorising qbman results */
1572 /********************************/
1574 static inline int __qbman_result_is_x(const struct qbman_result
*dq
,
1577 uint8_t response_verb
= dq
->dq
.verb
& QBMAN_RESPONSE_VERB_MASK
;
1579 return (response_verb
== x
);
1582 int qbman_result_is_DQ(const struct qbman_result
*dq
)
1584 return __qbman_result_is_x(dq
, QBMAN_RESULT_DQ
);
1587 int qbman_result_is_FQDAN(const struct qbman_result
*dq
)
1589 return __qbman_result_is_x(dq
, QBMAN_RESULT_FQDAN
);
1592 int qbman_result_is_CDAN(const struct qbman_result
*dq
)
1594 return __qbman_result_is_x(dq
, QBMAN_RESULT_CDAN
);
1597 int qbman_result_is_CSCN(const struct qbman_result
*dq
)
1599 return __qbman_result_is_x(dq
, QBMAN_RESULT_CSCN_MEM
) ||
1600 __qbman_result_is_x(dq
, QBMAN_RESULT_CSCN_WQ
);
1603 int qbman_result_is_BPSCN(const struct qbman_result
*dq
)
1605 return __qbman_result_is_x(dq
, QBMAN_RESULT_BPSCN
);
1608 int qbman_result_is_CGCU(const struct qbman_result
*dq
)
1610 return __qbman_result_is_x(dq
, QBMAN_RESULT_CGCU
);
1613 int qbman_result_is_FQRN(const struct qbman_result
*dq
)
1615 return __qbman_result_is_x(dq
, QBMAN_RESULT_FQRN
);
1618 int qbman_result_is_FQRNI(const struct qbman_result
*dq
)
1620 return __qbman_result_is_x(dq
, QBMAN_RESULT_FQRNI
);
1623 int qbman_result_is_FQPN(const struct qbman_result
*dq
)
1625 return __qbman_result_is_x(dq
, QBMAN_RESULT_FQPN
);
1628 /*********************************/
1629 /* Parsing frame dequeue results */
1630 /*********************************/
1632 /* These APIs assume qbman_result_is_DQ() is TRUE */
1634 uint8_t qbman_result_DQ_flags(const struct qbman_result
*dq
)
1639 uint16_t qbman_result_DQ_seqnum(const struct qbman_result
*dq
)
1641 return dq
->dq
.seqnum
;
1644 uint16_t qbman_result_DQ_odpid(const struct qbman_result
*dq
)
1646 return dq
->dq
.oprid
;
1649 uint32_t qbman_result_DQ_fqid(const struct qbman_result
*dq
)
1654 uint32_t qbman_result_DQ_byte_count(const struct qbman_result
*dq
)
1656 return dq
->dq
.fq_byte_cnt
;
1659 uint32_t qbman_result_DQ_frame_count(const struct qbman_result
*dq
)
1661 return dq
->dq
.fq_frm_cnt
;
1664 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result
*dq
)
1666 return dq
->dq
.fqd_ctx
;
1669 const struct qbman_fd
*qbman_result_DQ_fd(const struct qbman_result
*dq
)
1671 return (const struct qbman_fd
*)&dq
->dq
.fd
[0];
1674 /**************************************/
1675 /* Parsing state-change notifications */
1676 /**************************************/
1677 uint8_t qbman_result_SCN_state(const struct qbman_result
*scn
)
1679 return scn
->scn
.state
;
1682 uint32_t qbman_result_SCN_rid(const struct qbman_result
*scn
)
1684 return scn
->scn
.rid_tok
;
1687 uint64_t qbman_result_SCN_ctx(const struct qbman_result
*scn
)
1689 return scn
->scn
.ctx
;
1695 uint16_t qbman_result_bpscn_bpid(const struct qbman_result
*scn
)
1697 return (uint16_t)qbman_result_SCN_rid(scn
) & 0x3FFF;
1700 int qbman_result_bpscn_has_free_bufs(const struct qbman_result
*scn
)
1702 return !(int)(qbman_result_SCN_state(scn
) & 0x1);
1705 int qbman_result_bpscn_is_depleted(const struct qbman_result
*scn
)
1707 return (int)(qbman_result_SCN_state(scn
) & 0x2);
1710 int qbman_result_bpscn_is_surplus(const struct qbman_result
*scn
)
1712 return (int)(qbman_result_SCN_state(scn
) & 0x4);
1715 uint64_t qbman_result_bpscn_ctx(const struct qbman_result
*scn
)
1717 return qbman_result_SCN_ctx(scn
);
1723 uint16_t qbman_result_cgcu_cgid(const struct qbman_result
*scn
)
1725 return (uint16_t)qbman_result_SCN_rid(scn
) & 0xFFFF;
1728 uint64_t qbman_result_cgcu_icnt(const struct qbman_result
*scn
)
1730 return qbman_result_SCN_ctx(scn
);
1733 /********************/
1734 /* Parsing EQ RESP */
1735 /********************/
1736 struct qbman_fd
*qbman_result_eqresp_fd(struct qbman_result
*eqresp
)
1738 return (struct qbman_fd
*)&eqresp
->eq_resp
.fd
[0];
1741 void qbman_result_eqresp_set_rspid(struct qbman_result
*eqresp
, uint8_t val
)
1743 eqresp
->eq_resp
.rspid
= val
;
1746 uint8_t qbman_result_eqresp_rspid(struct qbman_result
*eqresp
)
1748 return eqresp
->eq_resp
.rspid
;
1751 uint8_t qbman_result_eqresp_rc(struct qbman_result
*eqresp
)
1753 if (eqresp
->eq_resp
.rc
== 0xE)
1759 /******************/
1760 /* Buffer release */
1761 /******************/
1762 #define QB_BR_RC_VALID_SHIFT 5
1763 #define QB_BR_RCDI_SHIFT 6
1765 void qbman_release_desc_clear(struct qbman_release_desc
*d
)
1767 memset(d
, 0, sizeof(*d
));
1768 d
->br
.verb
= 1 << QB_BR_RC_VALID_SHIFT
;
1771 void qbman_release_desc_set_bpid(struct qbman_release_desc
*d
, uint16_t bpid
)
1776 void qbman_release_desc_set_rcdi(struct qbman_release_desc
*d
, int enable
)
1779 d
->br
.verb
|= 1 << QB_BR_RCDI_SHIFT
;
1781 d
->br
.verb
&= ~(1 << QB_BR_RCDI_SHIFT
);
1784 #define RAR_IDX(rar) ((rar) & 0x7)
1785 #define RAR_VB(rar) ((rar) & 0x80)
1786 #define RAR_SUCCESS(rar) ((rar) & 0x100)
1788 static int qbman_swp_release_direct(struct qbman_swp
*s
,
1789 const struct qbman_release_desc
*d
,
1790 const uint64_t *buffers
,
1791 unsigned int num_buffers
)
1794 const uint32_t *cl
= qb_cl(d
);
1795 uint32_t rar
= qbman_cinh_read(&s
->sys
, QBMAN_CINH_SWP_RAR
);
1797 pr_debug("RAR=%08x\n", rar
);
1798 if (!RAR_SUCCESS(rar
))
1801 QBMAN_BUG_ON(!num_buffers
|| (num_buffers
> 7));
1803 /* Start the release command */
1804 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
1805 QBMAN_CENA_SWP_RCR(RAR_IDX(rar
)));
1807 /* Copy the caller's buffer pointers to the command */
1808 u64_to_le32_copy(&p
[2], buffers
, num_buffers
);
1810 /* Set the verb byte, have to substitute in the valid-bit and the
1811 * number of buffers.
1814 p
[0] = cl
[0] | RAR_VB(rar
) | num_buffers
;
1815 qbman_cena_write_complete_wo_shadow(&s
->sys
,
1816 QBMAN_CENA_SWP_RCR(RAR_IDX(rar
)));
1821 static int qbman_swp_release_mem_back(struct qbman_swp
*s
,
1822 const struct qbman_release_desc
*d
,
1823 const uint64_t *buffers
,
1824 unsigned int num_buffers
)
1827 const uint32_t *cl
= qb_cl(d
);
1828 uint32_t rar
= qbman_cinh_read(&s
->sys
, QBMAN_CINH_SWP_RAR
);
1830 pr_debug("RAR=%08x\n", rar
);
1831 if (!RAR_SUCCESS(rar
))
1834 QBMAN_BUG_ON(!num_buffers
|| (num_buffers
> 7));
1836 /* Start the release command */
1837 p
= qbman_cena_write_start_wo_shadow(&s
->sys
,
1838 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar
)));
1840 /* Copy the caller's buffer pointers to the command */
1841 u64_to_le32_copy(&p
[2], buffers
, num_buffers
);
1843 /* Set the verb byte, have to substitute in the valid-bit and the
1844 * number of buffers.
1846 p
[0] = cl
[0] | RAR_VB(rar
) | num_buffers
;
1848 qbman_cinh_write(&s
->sys
, QBMAN_CINH_SWP_RCR_AM_RT
+
1849 RAR_IDX(rar
) * 4, QMAN_RT_MODE
);
/* Public entry point: dispatch the buffer release through the per-revision
 * function pointer (direct vs memory-backed).
 */
inline int qbman_swp_release(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const uint64_t *buffers,
			     unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
1862 /*******************/
1863 /* Buffer acquires */
1864 /*******************/
/* 64-byte management-command layouts for buffer acquire.
 * NOTE(review): all fields except the trailing reserved arrays were lost
 * in extraction; restored from the QBMan MC acquire command/response
 * layout (sizes sum to 64 bytes) - MUST be verified against the pristine
 * file, these are hardware layouts.
 */
struct qbman_acquire_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t bpid;
	uint8_t num;
	uint8_t reserved2[59];
};

struct qbman_acquire_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t reserved;
	uint8_t num;
	uint8_t reserved2[3];
	uint64_t buf[7];
};
1882 int qbman_swp_acquire(struct qbman_swp
*s
, uint16_t bpid
, uint64_t *buffers
,
1883 unsigned int num_buffers
)
1885 struct qbman_acquire_desc
*p
;
1886 struct qbman_acquire_rslt
*r
;
1888 if (!num_buffers
|| (num_buffers
> 7))
1891 /* Start the management command */
1892 p
= qbman_swp_mc_start(s
);
1897 /* Encode the caller-provided attributes */
1899 p
->num
= num_buffers
;
1901 /* Complete the management command */
1902 r
= qbman_swp_mc_complete(s
, p
, QBMAN_MC_ACQUIRE
);
1904 pr_err("qbman: acquire from BPID %d failed, no response\n",
1909 /* Decode the outcome */
1910 QBMAN_BUG_ON((r
->verb
& QBMAN_RESPONSE_VERB_MASK
) != QBMAN_MC_ACQUIRE
);
1912 /* Determine success or failure */
1913 if (r
->rslt
!= QBMAN_MC_RSLT_OK
) {
1914 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
1919 QBMAN_BUG_ON(r
->num
> num_buffers
);
1921 /* Copy the acquired buffers to the caller's array */
1922 u64_from_le32_copy(buffers
, &r
->buf
[0], r
->num
);
/* 64-byte management-command layouts for FQ state-alteration commands
 * (schedule/force/XON/XOFF).
 * NOTE(review): the verb/fqid and rslt fields were lost in extraction;
 * restored from the QBMan MC layout (sizes sum to 64 bytes) - MUST be
 * verified against the pristine file.
 */
struct qbman_alt_fq_state_desc {
	uint8_t verb;
	uint8_t reserved[3];
	uint32_t fqid;
	uint8_t reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint8_t reserved[62];
};
1943 #define ALT_FQ_FQID_MASK 0x00FFFFFF
1945 static int qbman_swp_alt_fq_state(struct qbman_swp
*s
, uint32_t fqid
,
1946 uint8_t alt_fq_verb
)
1948 struct qbman_alt_fq_state_desc
*p
;
1949 struct qbman_alt_fq_state_rslt
*r
;
1951 /* Start the management command */
1952 p
= qbman_swp_mc_start(s
);
1956 p
->fqid
= fqid
& ALT_FQ_FQID_MASK
;
1958 /* Complete the management command */
1959 r
= qbman_swp_mc_complete(s
, p
, alt_fq_verb
);
1961 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1966 /* Decode the outcome */
1967 QBMAN_BUG_ON((r
->verb
& QBMAN_RESPONSE_VERB_MASK
) != alt_fq_verb
);
1969 /* Determine success or failure */
1970 if (r
->rslt
!= QBMAN_MC_RSLT_OK
) {
1971 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
1972 fqid
, alt_fq_verb
, r
->rslt
);
1979 int qbman_swp_fq_schedule(struct qbman_swp
*s
, uint32_t fqid
)
1981 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_SCHEDULE
);
1984 int qbman_swp_fq_force(struct qbman_swp
*s
, uint32_t fqid
)
1986 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_FORCE
);
1989 int qbman_swp_fq_xon(struct qbman_swp
*s
, uint32_t fqid
)
1991 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_XON
);
1994 int qbman_swp_fq_xoff(struct qbman_swp
*s
, uint32_t fqid
)
1996 return qbman_swp_alt_fq_state(s
, fqid
, QBMAN_FQ_XOFF
);
1999 /**********************/
2000 /* Channel management */
2001 /**********************/
/* 64-byte management-command layouts for CDAN (channel dequeue
 * availability notification) configuration.
 * NOTE(review): all fields except the trailing reserved arrays were lost
 * in extraction; restored from the QBMan WQCHAN-configure layout (sizes
 * sum to 64 bytes) - MUST be verified against the pristine file.
 */
struct qbman_cdan_ctrl_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t ch;
	uint8_t we;
	uint8_t ctrl;
	uint16_t reserved2;
	uint64_t cdan_ctx;
	uint8_t reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t ch;
	uint8_t reserved[60];
};
2022 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2023 * would be irresponsible to expose it.
2025 #define CODE_CDAN_WE_EN 0x1
2026 #define CODE_CDAN_WE_CTX 0x4
2028 static int qbman_swp_CDAN_set(struct qbman_swp
*s
, uint16_t channelid
,
2029 uint8_t we_mask
, uint8_t cdan_en
,
2032 struct qbman_cdan_ctrl_desc
*p
;
2033 struct qbman_cdan_ctrl_rslt
*r
;
2035 /* Start the management command */
2036 p
= qbman_swp_mc_start(s
);
2040 /* Encode the caller-provided attributes */
2049 /* Complete the management command */
2050 r
= qbman_swp_mc_complete(s
, p
, QBMAN_WQCHAN_CONFIGURE
);
2052 pr_err("qbman: wqchan config failed, no response\n");
2056 /* Decode the outcome */
2057 QBMAN_BUG_ON((r
->verb
& QBMAN_RESPONSE_VERB_MASK
)
2058 != QBMAN_WQCHAN_CONFIGURE
);
2060 /* Determine success or failure */
2061 if (r
->rslt
!= QBMAN_MC_RSLT_OK
) {
2062 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2063 channelid
, r
->rslt
);
2070 int qbman_swp_CDAN_set_context(struct qbman_swp
*s
, uint16_t channelid
,
2073 return qbman_swp_CDAN_set(s
, channelid
,
2078 int qbman_swp_CDAN_enable(struct qbman_swp
*s
, uint16_t channelid
)
2080 return qbman_swp_CDAN_set(s
, channelid
,
2085 int qbman_swp_CDAN_disable(struct qbman_swp
*s
, uint16_t channelid
)
2087 return qbman_swp_CDAN_set(s
, channelid
,
2092 int qbman_swp_CDAN_set_context_enable(struct qbman_swp
*s
, uint16_t channelid
,
2095 return qbman_swp_CDAN_set(s
, channelid
,
2096 CODE_CDAN_WE_EN
| CODE_CDAN_WE_CTX
,
/* Recover a DQRR entry's ring index from its (cacheline-aligned) address. */
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}
2105 struct qbman_result
*qbman_get_dqrr_from_idx(struct qbman_swp
*s
, uint8_t idx
)
2107 struct qbman_result
*dq
;
2109 dq
= qbman_cena_read(&s
->sys
, QBMAN_CENA_SWP_DQRR(idx
));