/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017,2019 NXP
 */

#include "qman.h"
#include <rte_branch_prediction.h>
#include <rte_dpaa_bus.h>
#include <rte_eventdev.h>
#include <rte_byteorder.h>

/* Compilation constants */
#define DQRR_MAXFILL 15
#define EQCR_ITHRESH 4	/* if EQCR congests, interrupt threshold */
#define IRQNAME "QMan portal %d"
#define MAX_IRQNAME 16	/* big enough for "QMan portal %d" */
/* maximum number of DQRR entries to process in qman_poll() */
#define FSL_QMAN_POLL_LIMIT 8
/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
 * inter-processor locking only. Note, FQLOCK() is always called either under a
 * local_irq_save() or from interrupt context - hence there's no need for irq
 * protection (and indeed, attempting to nest irq-protection doesn't work, as
 * the "irq en/disable" machinery isn't recursive...).
 */
#define FQLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_lock(&__fq478->fqlock); \
	} while (0)
#define FQUNLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_unlock(&__fq478->fqlock); \
	} while (0)
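/*
 * Illustrative usage (not a new API): per-FQ state manipulation in this
 * file is bracketed as
 *
 *	FQLOCK(fq);
 *	... update fq->flags / fq->state ...
 *	FQUNLOCK(fq);
 *
 * and the spinlock is only actually taken when the FQ object was created
 * with QMAN_FQ_FLAG_LOCKED.
 */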
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	dpaa_set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	dpaa_clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	u32 slowpoll;	/* only used when interrupts are off */
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	int dqrr_disable_ref;
	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
	 * handler is called instead.
	 */
	qman_cb_dc_ern cb_dc_ern;
	/* When the cpu-affine portal is activated, this is non-NULL */
	const struct qm_portal_config *config;
	struct dpa_rbtree retire_table;
	char irqname[MAX_IRQNAME];
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	/* track if memory was allocated by the driver */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Keep a shadow copy of the DQRR on LE systems as the SW needs to
	 * do byte swaps of DQRR read only memory. First entry must be aligned
	 * to 2 ** 10 to ensure DQRR index calculations based shadow copy
	 * address (6 bits for address shift + 4 bits for the DQRR size).
	 */
	struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
		__attribute__((aligned(1024)));
#endif
};
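/*
 * Worked illustration of the alignment requirement above (assuming the
 * usual QM_DQRR_SIZE of 16 and 64-byte DQRR entries): a ring index is
 * derived from an entry pointer roughly as
 *
 *	idx = ((uintptr_t)dq >> 6) & (QM_DQRR_SIZE - 1);
 *
 * so the shadow array must be aligned to 16 * 64 = 1024 bytes for the same
 * index arithmetic to hold on shadow entries as on the real ring.
 */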
/* Global handler for DCP ERNs. Used when the portal receiving the message does
 * not have a portal-specific handler.
 */
static qman_cb_dc_ern cb_dc_ern;

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);

static inline struct qman_portal *get_affine_portal(void)
{
	return &RTE_PER_LCORE(qman_affine_portal);
}
/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
 * retirement notifications (the fact they are sometimes h/w-consumed means that
 * contextB isn't always a s/w demux - and as we can't know which case it is
 * when looking at the notification, we have to use the slow lookup for all of
 * them). NB, it's possible to have multiple FQ objects refer to the same FQID
 * (though at most one of them should be the consumer), so this table isn't for
 * all FQs - FQs are added when retirement commands are issued, and removed when
 * they complete, which also massively reduces the size of this table.
 */
IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
{
	int ret = fqtree_push(&p->retire_table, fq);

	if (ret)
		pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
	return ret;
}

static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
{
	fqtree_del(&p->retire_table, fq);
}

static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
{
	return fqtree_find(&p->retire_table, fqid);
}
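/*
 * Descriptive note: the retirement table is only populated transiently.
 * qman_retire_fq() pushes the FQ before issuing ALTER_RETIRE, the FQRN/FQRL
 * handling in __poll_portal_slow() uses table_find_fq() to map the
 * notification's FQID back to the FQ object, and the entry is deleted again
 * once the state change completes.
 */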
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
static void **qman_fq_lookup_table;
static size_t qman_fq_lookup_table_size;

int qman_setup_fq_lookup_table(size_t num_entries)
{
	num_entries++;
	/* Allocate 1 more entry since the first entry is not used */
	qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
	if (!qman_fq_lookup_table) {
		pr_err("QMan: Could not allocate fq lookup table\n");
		return -ENOMEM;
	}
	memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
	qman_fq_lookup_table_size = num_entries;
	pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
		 qman_fq_lookup_table,
		 (unsigned long)qman_fq_lookup_table_size);
	return 0;
}

void qman_set_fq_lookup_table(void **fq_table)
{
	qman_fq_lookup_table = fq_table;
}

/* global structure that maintains fq object mapping */
static DEFINE_SPINLOCK(fq_hash_table_lock);
static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
{
	u32 i;

	spin_lock(&fq_hash_table_lock);
	/* Can't use index zero because this has special meaning
	 * in context_b field.
	 */
	for (i = 1; i < qman_fq_lookup_table_size; i++) {
		if (qman_fq_lookup_table[i] == NULL) {
			*entry = i;
			qman_fq_lookup_table[i] = fq;
			spin_unlock(&fq_hash_table_lock);
			return 0;
		}
	}
	spin_unlock(&fq_hash_table_lock);
	return -ENOMEM;
}

static void clear_fq_table_entry(u32 entry)
{
	spin_lock(&fq_hash_table_lock);
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	qman_fq_lookup_table[entry] = NULL;
	spin_unlock(&fq_hash_table_lock);
}

static inline struct qman_fq *get_fq_table_entry(u32 entry)
{
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	return qman_fq_lookup_table[entry];
}
#endif
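/*
 * Illustrative flow (hedged sketch, mirroring how this file itself uses the
 * table): the table is sized once during bring-up, e.g.
 * qman_setup_fq_lookup_table(fq_count), and qman_create_fq() then claims a
 * slot via find_empty_fq_table_entry(&fq->key, fq) so that DQRR and MR
 * notifications can carry the small "key" in context_b instead of a raw
 * pointer.
 */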
static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to HW format */
	fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
	fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
	fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
	fqd->context_b = cpu_to_be32(fqd->context_b);
	fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
	fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
}

static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to CPU format */
	fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
	fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
	fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
	fqd->context_b = be32_to_cpu(fqd->context_b);
	fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
}

static inline void cpu_to_hw_fd(struct qm_fd *fd)
{
	fd->addr = cpu_to_be40(fd->addr);
	fd->status = cpu_to_be32(fd->status);
	fd->opaque = cpu_to_be32(fd->opaque);
}

static inline void hw_fd_to_cpu(struct qm_fd *fd)
{
	fd->addr = be40_to_cpu(fd->addr);
	fd->status = be32_to_cpu(fd->status);
	fd->opaque = be32_to_cpu(fd->opaque);
}
/* In the case that slow- and fast-path handling are both done by qman_poll()
 * (ie. because there is no interrupt handling), we ought to balance how often
 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
 * sources, so we call the fast poll 'n' times before calling the slow poll
 * once. The idle decrementer constant is used when the last slow-poll detected
 * no work to do, and the busy decrementer constant when the last slow-poll had
 * work to do.
 */
#define SLOW_POLL_IDLE   1000
#define SLOW_POLL_BUSY   10
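/*
 * Sketch of the intended cadence (see qman_poll() below): p->slowpoll is a
 * per-portal countdown, so the slow path only runs roughly once every
 * SLOW_POLL_IDLE (or SLOW_POLL_BUSY) qman_poll() invocations, depending on
 * whether the previous slow poll found outstanding work.
 */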
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
/* Portal interrupt handler */
static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	/*
	 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
	 * it could race against a Query Congestion State command also given
	 * as part of the handling of this interrupt source. We mustn't
	 * clear it a second time in this top-level function.
	 */
	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
	u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_isr_status_clear(&p->p, clear);
	return IRQ_HANDLED;
}
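/*
 * Note on modes: portal_isr() only ever acts on the sources enabled in
 * p->irq_sources (see qman_irqsource_add()/qman_irqsource_remove() below);
 * anything left out of that mask is expected to be serviced by the polled
 * entry points such as qman_poll() and qman_poll_dqrr().
 */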
/* This inner version is used privately by qman_create_affine_portal(), as well
 * as by the exported qman_stop_dequeues().
 */
static inline void qman_stop_dequeues_ex(struct qman_portal *p)
{
	if (!(p->dqrr_disable_ref++))
		qm_dqrr_set_maxfill(&p->p, 0);
}
static int drain_mr_fqrni(struct qm_portal *p)
{
	const struct qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		u64 now, then = mfatb();

		do {
			now = mfatb();
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	/* This use of 'register', as well as all other occurrences, is because
	 * it has been observed to generate much faster code with gcc than is
	 * otherwise the case.
	 */
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(EQCR_CI);
	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
			QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(EQCR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(CFG) & 0x00ffffff) |
		(eq_stash_thresh << 28) | /* QCSP_CFG: EST */
		(eq_stash_prio << 26) | /* QCSP_CFG: EP */
		((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
	qm_out(CFG, cfg);
	return 0;
}
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi, ci;
	u32 cfg;

	/*
	 * Disable EQCI stashing because the QMan only
	 * presents the value it previously stashed to
	 * maintain coherency. Setting the stash threshold
	 * to 1 then 0 ensures that QMan has resyncronized
	 * its internal copy so that the portal is clean
	 * when it is reinitialized in the future
	 */
	cfg = (qm_in(CFG) & 0x0fffffff) |
		(1 << 28); /* QCSP_CFG: EST */
	qm_out(CFG, cfg);
	cfg &= 0x0fffffff; /* stash threshold = 0 */
	qm_out(CFG, cfg);

	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	/* Refresh EQCR CI cache value */
	qm_cl_invalidate(EQCR_CI);
	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(!eqcr->busy);
#endif
	if (pi != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}
static inline int qm_dqrr_init(struct qm_portal *portal,
			__maybe_unused const struct qm_portal_config *config,
			enum qm_dqrr_dmode dmode,
			__maybe_unused enum qm_dqrr_pmode pmode,
			enum qm_dqrr_cmode cmode, u8 max_fill)
{
	register struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(DQRR_SDQCR, 0);
	qm_out(DQRR_VDQCR, 0);
	qm_out(DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(DQRR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dccivac(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(CFG) & 0xff000f00) |
		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
		((dmode & 1) << 18) |			/* DP */
		((cmode & 3) << 16) |			/* DCM */
		(0 ? 0x40 : 0) |			/* Ignore RP */
		(0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if ((dqrr->cmode != qm_dqrr_cdc) &&
	    (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
static inline int qm_mr_init(struct qm_portal *portal,
			     __maybe_unused enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	register struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(MR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(CFG) & 0xfffff0ff) |
		((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(CFG, cfg);
	return 0;
}
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	register struct qm_mr *mr = &portal->mr;
	const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
#endif
	/* when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
	}
}
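/*
 * Descriptive note: in PVB ("producer valid bit") mode there is no producer
 * index register to read; new MR entries are detected by watching the valid
 * bit in the next entry's verb field toggle, which is exactly what the
 * comparison against mr->vbit above implements (the expected bit flips each
 * time the producer index wraps).
 */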
struct qman_portal *qman_create_portal(
			struct qman_portal *portal,
			const struct qm_portal_config *c,
			const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	char buf[16];
	int ret;
	u32 isdr;

	p = &portal->p;

	if (dpaa_svr_family == SVR_LS1043A_FAMILY)
		portal->use_eqcr_ci_stashing = 3;
	else
		portal->use_eqcr_ci_stashing =
					((qman_ip_rev >= QMAN_REV30) ? 1 : 0);

	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with high-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing, 1)) {
		pr_err("Qman EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		pr_err("Qman DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		pr_err("Qman MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		pr_err("Qman MC initialisation failed\n");
		goto fail_mc;
	}

	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, 0);
	qm_mr_set_ithresh(p, 0);
	qm_isr_set_iperiod(p, 0);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	portal->slowpoll = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	portal->dqrr_disable_ref = 0;
	portal->cb_dc_ern = NULL;
	sprintf(buf, "qportal-%d", c->channel);
	dpa_rbtree_init(&portal->retire_table);
	isdr = 0xffffffff;
	qm_isr_disable_write(p, isdr);
	portal->irq_sources = 0;
	qm_isr_enable_write(p, portal->irq_sources);
	qm_isr_status_clear(p, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname,
			portal)) {
		pr_err("request_irq() failed\n");
		goto fail_irq;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_isr_disable_write(p, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		pr_err("Qman EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_isr_disable_write(p, isdr);
	if (qm_dqrr_current(p)) {
		pr_err("Qman DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		if (drain_mr_fqrni(p))
			goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_isr_disable_write(p, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return portal;
fail_dqrr_mr_empty:
fail_eqcr_empty:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
	spin_lock_destroy(&portal->cgr_lock);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return NULL;
}
#define MAX_GLOBAL_PORTALS 8
static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];

static struct qman_portal *
qman_alloc_global_portal(void)
{
	unsigned int i;

	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
		if (rte_atomic16_test_and_set(&global_portals_used[i]))
			return &global_portals[i];
	}
	pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);

	return NULL;
}

static int
qman_free_global_portal(struct qman_portal *portal)
{
	unsigned int i;

	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
		if (&global_portals[i] == portal) {
			rte_atomic16_clear(&global_portals_used[i]);
			return 0;
		}
	}
	return -1;
}
*qman_create_affine_portal(const struct qm_portal_config
*c
,
664 const struct qman_cgrs
*cgrs
,
667 struct qman_portal
*res
;
668 struct qman_portal
*portal
;
671 portal
= qman_alloc_global_portal();
673 portal
= get_affine_portal();
675 /* A criteria for calling this function (from qman_driver.c) is that
676 * we're already affine to the cpu and won't schedule onto another cpu.
679 res
= qman_create_portal(portal
, c
, cgrs
);
681 spin_lock(&affine_mask_lock
);
682 CPU_SET(c
->cpu
, &affine_mask
);
683 affine_channels
[c
->cpu
] =
685 spin_unlock(&affine_mask_lock
);
691 void qman_destroy_portal(struct qman_portal
*qm
)
693 const struct qm_portal_config
*pcfg
;
695 /* Stop dequeues on the portal */
696 qm_dqrr_sdqcr_set(&qm
->p
, 0);
699 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
700 * something related to QM_PIRQ_EQCI, this may need fixing.
701 * Also, due to the prefetching model used for CI updates in the enqueue
702 * path, this update will only invalidate the CI cacheline *after*
703 * working on it, so we need to call this twice to ensure a full update
704 * irrespective of where the enqueue processing was at when the teardown
707 qm_eqcr_cce_update(&qm
->p
);
708 qm_eqcr_cce_update(&qm
->p
);
711 free_irq(pcfg
->irq
, qm
);
714 qm_mc_finish(&qm
->p
);
715 qm_mr_finish(&qm
->p
);
716 qm_dqrr_finish(&qm
->p
);
717 qm_eqcr_finish(&qm
->p
);
721 spin_lock_destroy(&qm
->cgr_lock
);
724 const struct qm_portal_config
*
725 qman_destroy_affine_portal(struct qman_portal
*qp
)
727 /* We don't want to redirect if we're a slave, use "raw" */
728 struct qman_portal
*qm
;
729 const struct qm_portal_config
*pcfg
;
733 qm
= get_affine_portal();
739 qman_destroy_portal(qm
);
741 spin_lock(&affine_mask_lock
);
742 CPU_CLR(cpu
, &affine_mask
);
743 spin_unlock(&affine_mask_lock
);
745 qman_free_global_portal(qm
);
750 int qman_get_portal_index(void)
752 struct qman_portal
*p
= get_affine_portal();
753 return p
->config
->index
;
/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const struct qm_mr_entry *msg, u8 verb)
{
	FQLOCK(fq);
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		table_del_fq(p, fq);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
			    (fq->state == qman_fq_state_sched));
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		else
			table_del_fq(p, fq);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
	FQUNLOCK(fq);
}
788 static u32
__poll_portal_slow(struct qman_portal
*p
, u32 is
)
790 const struct qm_mr_entry
*msg
;
791 struct qm_mr_entry swapped_msg
;
793 if (is
& QM_PIRQ_CSCI
) {
794 struct qman_cgrs rr
, c
;
795 struct qm_mc_result
*mcr
;
796 struct qman_cgr
*cgr
;
798 spin_lock(&p
->cgr_lock
);
800 * The CSCI bit must be cleared _before_ issuing the
801 * Query Congestion State command, to ensure that a long
802 * CGR State Change callback cannot miss an intervening
805 qm_isr_status_clear(&p
->p
, QM_PIRQ_CSCI
);
807 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYCONGESTION
);
808 while (!(mcr
= qm_mc_result(&p
->p
)))
810 /* mask out the ones I'm not interested in */
811 qman_cgrs_and(&rr
, (const struct qman_cgrs
*)
812 &mcr
->querycongestion
.state
, &p
->cgrs
[0]);
813 /* check previous snapshot for delta, enter/exit congestion */
814 qman_cgrs_xor(&c
, &rr
, &p
->cgrs
[1]);
815 /* update snapshot */
816 qman_cgrs_cp(&p
->cgrs
[1], &rr
);
817 /* Invoke callback */
818 list_for_each_entry(cgr
, &p
->cgr_cbs
, node
)
819 if (cgr
->cb
&& qman_cgrs_get(&c
, cgr
->cgrid
))
820 cgr
->cb(p
, cgr
, qman_cgrs_get(&rr
, cgr
->cgrid
));
821 spin_unlock(&p
->cgr_lock
);
824 if (is
& QM_PIRQ_EQRI
) {
825 qm_eqcr_cce_update(&p
->p
);
826 qm_eqcr_set_ithresh(&p
->p
, 0);
827 wake_up(&affine_queue
);
830 if (is
& QM_PIRQ_MRI
) {
834 qm_mr_pvb_update(&p
->p
);
835 msg
= qm_mr_current(&p
->p
);
839 hw_fd_to_cpu(&swapped_msg
.ern
.fd
);
840 verb
= msg
->ern
.verb
& QM_MR_VERB_TYPE_MASK
;
841 /* The message is a software ERN iff the 0x20 bit is set */
844 case QM_MR_VERB_FQRNI
:
845 /* nada, we drop FQRNIs on the floor */
847 case QM_MR_VERB_FQRN
:
848 case QM_MR_VERB_FQRL
:
849 /* Lookup in the retirement table */
850 fq
= table_find_fq(p
,
851 be32_to_cpu(msg
->fq
.fqid
));
853 fq_state_change(p
, fq
, &swapped_msg
, verb
);
855 fq
->cb
.fqs(p
, fq
, &swapped_msg
);
857 case QM_MR_VERB_FQPN
:
859 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
860 fq
= get_fq_table_entry(msg
->fq
.contextB
);
862 fq
= (void *)(uintptr_t)msg
->fq
.contextB
;
864 fq_state_change(p
, fq
, msg
, verb
);
866 fq
->cb
.fqs(p
, fq
, &swapped_msg
);
868 case QM_MR_VERB_DC_ERN
:
871 p
->cb_dc_ern(p
, msg
);
875 static int warn_once
;
878 pr_crit("Leaking DCP ERNs!\n");
884 pr_crit("Invalid MR verb 0x%02x\n", verb
);
887 /* Its a software ERN */
888 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
889 fq
= get_fq_table_entry(be32_to_cpu(msg
->ern
.tag
));
891 fq
= (void *)(uintptr_t)be32_to_cpu(msg
->ern
.tag
);
893 fq
->cb
.ern(p
, fq
, &swapped_msg
);
899 qm_mr_cci_consume(&p
->p
, num
);
902 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
903 * processing. If that interrupt source has meanwhile been re-asserted,
904 * we mustn't clear it here (or in the top-level interrupt handler).
906 return is
& (QM_PIRQ_EQCI
| QM_PIRQ_EQRI
| QM_PIRQ_MRI
);
/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	FQLOCK(fq);
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	FQUNLOCK(fq);
	wake_up(&affine_queue);
}
923 * The only states that would conflict with other things if they ran at the
924 * same time on the same cpu are:
926 * (i) setting/clearing vdqcr_owned, and
927 * (ii) clearing the NE (Not Empty) flag.
929 * Both are safe. Because;
931 * (i) this clearing can only occur after qman_set_vdq() has set the
932 * vdqcr_owned field (which it does before setting VDQCR), and
933 * qman_volatile_dequeue() blocks interrupts and preemption while this is
934 * done so that we can't interfere.
935 * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
936 * with (i) that API prevents us from interfering until it's safe.
938 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
939 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
940 * advantage comes from this function not having to "lock" anything at all.
942 * Note also that the callbacks are invoked at points which are safe against the
943 * above potential conflicts, but that this function itself is not re-entrant
944 * (this is because the function tracks one end of each FIFO in the portal and
945 * we do *not* want to lock that). So the consequence is that it is safe for
946 * user callbacks to call into any QMan API.
948 static inline unsigned int __poll_portal_fast(struct qman_portal
*p
,
949 unsigned int poll_limit
)
951 const struct qm_dqrr_entry
*dq
;
953 enum qman_cb_dqrr_result res
;
954 unsigned int limit
= 0;
955 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
956 struct qm_dqrr_entry
*shadow
;
959 qm_dqrr_pvb_update(&p
->p
);
960 dq
= qm_dqrr_current(&p
->p
);
963 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
964 /* If running on an LE system the fields of the
965 * dequeue entry must be swapper. Because the
966 * QMan HW will ignore writes the DQRR entry is
967 * copied and the index stored within the copy
969 shadow
= &p
->shadow_dqrr
[DQRR_PTR2IDX(dq
)];
972 shadow
->fqid
= be32_to_cpu(shadow
->fqid
);
973 shadow
->seqnum
= be16_to_cpu(shadow
->seqnum
);
974 hw_fd_to_cpu(&shadow
->fd
);
977 if (dq
->stat
& QM_DQRR_STAT_UNSCHEDULED
) {
979 * VDQCR: don't trust context_b as the FQ may have
980 * been configured for h/w consumption and we're
981 * draining it post-retirement.
985 * We only set QMAN_FQ_STATE_NE when retiring, so we
986 * only need to check for clearing it when doing
987 * volatile dequeues. It's one less thing to check
988 * in the critical path (SDQCR).
990 if (dq
->stat
& QM_DQRR_STAT_FQ_EMPTY
)
991 fq_clear(fq
, QMAN_FQ_STATE_NE
);
993 * This is duplicated from the SDQCR code, but we
994 * have stuff to do before *and* after this callback,
995 * and we don't want multiple if()s in the critical
998 res
= fq
->cb
.dqrr(p
, fq
, dq
);
999 if (res
== qman_cb_dqrr_stop
)
1001 /* Check for VDQCR completion */
1002 if (dq
->stat
& QM_DQRR_STAT_DQCR_EXPIRED
)
1005 /* SDQCR: context_b points to the FQ */
1006 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1007 fq
= get_fq_table_entry(dq
->contextB
);
1009 fq
= (void *)(uintptr_t)dq
->contextB
;
1011 /* Now let the callback do its stuff */
1012 res
= fq
->cb
.dqrr(p
, fq
, dq
);
1014 * The callback can request that we exit without
1015 * consuming this entry nor advancing;
1017 if (res
== qman_cb_dqrr_stop
)
1020 /* Interpret 'dq' from a driver perspective. */
1022 * Parking isn't possible unless HELDACTIVE was set. NB,
1023 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1024 * check for HELDACTIVE to cover both.
1026 DPAA_ASSERT((dq
->stat
& QM_DQRR_STAT_FQ_HELDACTIVE
) ||
1027 (res
!= qman_cb_dqrr_park
));
1028 /* just means "skip it, I'll consume it myself later on" */
1029 if (res
!= qman_cb_dqrr_defer
)
1030 qm_dqrr_cdc_consume_1ptr(&p
->p
, dq
,
1031 res
== qman_cb_dqrr_park
);
1033 qm_dqrr_next(&p
->p
);
1035 * Entry processed and consumed, increment our counter. The
1036 * callback can request that we exit after consuming the
1037 * entry, and we also exit if we reach our processing limit,
1038 * so loop back only if neither of these conditions is met.
1040 } while (++limit
< poll_limit
&& res
!= qman_cb_dqrr_consume_stop
);
1045 int qman_irqsource_add(u32 bits
)
1047 struct qman_portal
*p
= get_affine_portal();
1049 bits
= bits
& QM_PIRQ_VISIBLE
;
1051 /* Clear any previously remaining interrupt conditions in
1052 * QCSP_ISR. This prevents raising a false interrupt when
1053 * interrupt conditions are enabled in QCSP_IER.
1055 qm_isr_status_clear(&p
->p
, bits
);
1056 dpaa_set_bits(bits
, &p
->irq_sources
);
1057 qm_isr_enable_write(&p
->p
, p
->irq_sources
);
1063 int qman_irqsource_remove(u32 bits
)
1065 struct qman_portal
*p
= get_affine_portal();
1068 /* Our interrupt handler only processes+clears status register bits that
1069 * are in p->irq_sources. As we're trimming that mask, if one of them
1070 * were to assert in the status register just before we remove it from
1071 * the enable register, there would be an interrupt-storm when we
1072 * release the IRQ lock. So we wait for the enable register update to
1073 * take effect in h/w (by reading it back) and then clear all other bits
1074 * in the status register. Ie. we clear them from ISR once it's certain
1075 * IER won't allow them to reassert.
1078 bits
&= QM_PIRQ_VISIBLE
;
1079 dpaa_clear_bits(bits
, &p
->irq_sources
);
1080 qm_isr_enable_write(&p
->p
, p
->irq_sources
);
1081 ier
= qm_isr_enable_read(&p
->p
);
1082 /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1083 * data-dependency, ie. to protect against re-ordering.
1085 qm_isr_status_clear(&p
->p
, ~ier
);
1089 u16
qman_affine_channel(int cpu
)
1092 struct qman_portal
*portal
= get_affine_portal();
1094 cpu
= portal
->config
->cpu
;
1096 DPAA_BUG_ON(!CPU_ISSET(cpu
, &affine_mask
));
1097 return affine_channels
[cpu
];
1100 unsigned int qman_portal_poll_rx(unsigned int poll_limit
,
1102 struct qman_portal
*p
)
1104 struct qm_portal
*portal
= &p
->p
;
1105 register struct qm_dqrr
*dqrr
= &portal
->dqrr
;
1106 struct qm_dqrr_entry
*dq
[QM_DQRR_SIZE
], *shadow
[QM_DQRR_SIZE
];
1108 unsigned int limit
= 0, rx_number
= 0;
1109 uint32_t consume
= 0;
1112 qm_dqrr_pvb_update(&p
->p
);
1116 dq
[rx_number
] = dqrr
->cursor
;
1117 dqrr
->cursor
= DQRR_CARRYCLEAR(dqrr
->cursor
+ 1);
1118 /* Prefetch the next DQRR entry */
1119 rte_prefetch0(dqrr
->cursor
);
1121 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1122 /* If running on an LE system the fields of the
1123 * dequeue entry must be swapper. Because the
1124 * QMan HW will ignore writes the DQRR entry is
1125 * copied and the index stored within the copy
1128 &p
->shadow_dqrr
[DQRR_PTR2IDX(dq
[rx_number
])];
1129 shadow
[rx_number
]->fd
.opaque_addr
=
1130 dq
[rx_number
]->fd
.opaque_addr
;
1131 shadow
[rx_number
]->fd
.addr
=
1132 be40_to_cpu(dq
[rx_number
]->fd
.addr
);
1133 shadow
[rx_number
]->fd
.opaque
=
1134 be32_to_cpu(dq
[rx_number
]->fd
.opaque
);
1136 shadow
[rx_number
] = dq
[rx_number
];
1139 /* SDQCR: context_b points to the FQ */
1140 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1141 fq
= qman_fq_lookup_table
[dq
[rx_number
]->contextB
];
1143 fq
= (void *)dq
[rx_number
]->contextB
;
1145 if (fq
->cb
.dqrr_prepare
)
1146 fq
->cb
.dqrr_prepare(shadow
[rx_number
],
1149 consume
|= (1 << (31 - DQRR_PTR2IDX(shadow
[rx_number
])));
1152 } while (++limit
< poll_limit
);
1155 fq
->cb
.dqrr_dpdk_pull_cb(&fq
, shadow
, bufs
, rx_number
);
1157 /* Consume all the DQRR enries together */
1158 qm_out(DQRR_DCAP
, (1 << 8) | consume
);
1163 void qman_clear_irq(void)
1165 struct qman_portal
*p
= get_affine_portal();
1166 u32 clear
= QM_DQAVAIL_MASK
| (p
->irq_sources
&
1167 ~(QM_PIRQ_CSCI
| QM_PIRQ_CCSCI
));
1168 qm_isr_status_clear(&p
->p
, clear
);
1171 u32
qman_portal_dequeue(struct rte_event ev
[], unsigned int poll_limit
,
1174 const struct qm_dqrr_entry
*dq
;
1176 enum qman_cb_dqrr_result res
;
1177 unsigned int limit
= 0;
1178 struct qman_portal
*p
= get_affine_portal();
1179 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1180 struct qm_dqrr_entry
*shadow
;
1182 unsigned int rx_number
= 0;
1185 qm_dqrr_pvb_update(&p
->p
);
1186 dq
= qm_dqrr_current(&p
->p
);
1189 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1191 * If running on an LE system the fields of the
1192 * dequeue entry must be swapper. Because the
1193 * QMan HW will ignore writes the DQRR entry is
1194 * copied and the index stored within the copy
1196 shadow
= &p
->shadow_dqrr
[DQRR_PTR2IDX(dq
)];
1199 shadow
->fqid
= be32_to_cpu(shadow
->fqid
);
1200 shadow
->seqnum
= be16_to_cpu(shadow
->seqnum
);
1201 hw_fd_to_cpu(&shadow
->fd
);
1204 /* SDQCR: context_b points to the FQ */
1205 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1206 fq
= get_fq_table_entry(dq
->contextB
);
1208 fq
= (void *)(uintptr_t)dq
->contextB
;
1210 /* Now let the callback do its stuff */
1211 res
= fq
->cb
.dqrr_dpdk_cb(&ev
[rx_number
], p
, fq
,
1212 dq
, &bufs
[rx_number
]);
1214 /* Interpret 'dq' from a driver perspective. */
1216 * Parking isn't possible unless HELDACTIVE was set. NB,
1217 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1218 * check for HELDACTIVE to cover both.
1220 DPAA_ASSERT((dq
->stat
& QM_DQRR_STAT_FQ_HELDACTIVE
) ||
1221 (res
!= qman_cb_dqrr_park
));
1222 if (res
!= qman_cb_dqrr_defer
)
1223 qm_dqrr_cdc_consume_1ptr(&p
->p
, dq
,
1224 res
== qman_cb_dqrr_park
);
1226 qm_dqrr_next(&p
->p
);
1228 * Entry processed and consumed, increment our counter. The
1229 * callback can request that we exit after consuming the
1230 * entry, and we also exit if we reach our processing limit,
1231 * so loop back only if neither of these conditions is met.
1233 } while (++limit
< poll_limit
);
1238 struct qm_dqrr_entry
*qman_dequeue(struct qman_fq
*fq
)
1240 struct qman_portal
*p
= get_affine_portal();
1241 const struct qm_dqrr_entry
*dq
;
1242 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1243 struct qm_dqrr_entry
*shadow
;
1246 qm_dqrr_pvb_update(&p
->p
);
1247 dq
= qm_dqrr_current(&p
->p
);
1251 if (!(dq
->stat
& QM_DQRR_STAT_FD_VALID
)) {
1252 /* Invalid DQRR - put the portal and consume the DQRR.
1253 * Return NULL to user as no packet is seen.
1255 qman_dqrr_consume(fq
, (struct qm_dqrr_entry
*)dq
);
1259 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1260 shadow
= &p
->shadow_dqrr
[DQRR_PTR2IDX(dq
)];
1263 shadow
->fqid
= be32_to_cpu(shadow
->fqid
);
1264 shadow
->seqnum
= be16_to_cpu(shadow
->seqnum
);
1265 hw_fd_to_cpu(&shadow
->fd
);
1268 if (dq
->stat
& QM_DQRR_STAT_FQ_EMPTY
)
1269 fq_clear(fq
, QMAN_FQ_STATE_NE
);
1271 return (struct qm_dqrr_entry
*)dq
;
1274 void qman_dqrr_consume(struct qman_fq
*fq
,
1275 struct qm_dqrr_entry
*dq
)
1277 struct qman_portal
*p
= get_affine_portal();
1279 if (dq
->stat
& QM_DQRR_STAT_DQCR_EXPIRED
)
1282 qm_dqrr_cdc_consume_1ptr(&p
->p
, dq
, 0);
1283 qm_dqrr_next(&p
->p
);
int qman_poll_dqrr(unsigned int limit)
{
	struct qman_portal *p = get_affine_portal();
	int ret;

	ret = __poll_portal_fast(p, limit);
	return ret;
}

void qman_poll(void)
{
	struct qman_portal *p = get_affine_portal();

	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
		if (!(p->slowpoll--)) {
			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
			u32 active = __poll_portal_slow(p, is);

			if (active) {
				qm_isr_status_clear(&p->p, active);
				p->slowpoll = SLOW_POLL_BUSY;
			} else
				p->slowpoll = SLOW_POLL_IDLE;
		}
	}
	if ((~p->irq_sources) & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
}
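/*
 * Usage sketch (illustrative only, not part of the driver): a
 * run-to-completion thread that owns the affine portal and runs with
 * interrupt sources disabled (empty p->irq_sources) would typically just
 * loop:
 *
 *	while (!stop) {
 *		qman_poll();
 *		... application work ...
 *	}
 *
 * with qman_poll() internally alternating the fast DQRR path and the
 * occasional slow-path housekeeping as described above.
 */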
void qman_stop_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	qman_stop_dequeues_ex(p);
}

void qman_start_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	DPAA_ASSERT(p->dqrr_disable_ref > 0);
	if (!(--p->dqrr_disable_ref))
		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
}
1331 void qman_static_dequeue_add(u32 pools
, struct qman_portal
*qp
)
1333 struct qman_portal
*p
= qp
? qp
: get_affine_portal();
1335 pools
&= p
->config
->pools
;
1337 qm_dqrr_sdqcr_set(&p
->p
, p
->sdqcr
);
1340 void qman_static_dequeue_del(u32 pools
, struct qman_portal
*qp
)
1342 struct qman_portal
*p
= qp
? qp
: get_affine_portal();
1344 pools
&= p
->config
->pools
;
1346 qm_dqrr_sdqcr_set(&p
->p
, p
->sdqcr
);
1349 u32
qman_static_dequeue_get(struct qman_portal
*qp
)
1351 struct qman_portal
*p
= qp
? qp
: get_affine_portal();
1355 void qman_dca(const struct qm_dqrr_entry
*dq
, int park_request
)
1357 struct qman_portal
*p
= get_affine_portal();
1359 qm_dqrr_cdc_consume_1ptr(&p
->p
, dq
, park_request
);
1362 void qman_dca_index(u8 index
, int park_request
)
1364 struct qman_portal
*p
= get_affine_portal();
1366 qm_dqrr_cdc_consume_1(&p
->p
, index
, park_request
);
1369 /* Frame queue API */
1370 static const char *mcr_result_str(u8 result
)
1373 case QM_MCR_RESULT_NULL
:
1374 return "QM_MCR_RESULT_NULL";
1375 case QM_MCR_RESULT_OK
:
1376 return "QM_MCR_RESULT_OK";
1377 case QM_MCR_RESULT_ERR_FQID
:
1378 return "QM_MCR_RESULT_ERR_FQID";
1379 case QM_MCR_RESULT_ERR_FQSTATE
:
1380 return "QM_MCR_RESULT_ERR_FQSTATE";
1381 case QM_MCR_RESULT_ERR_NOTEMPTY
:
1382 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1383 case QM_MCR_RESULT_PENDING
:
1384 return "QM_MCR_RESULT_PENDING";
1385 case QM_MCR_RESULT_ERR_BADCOMMAND
:
1386 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1388 return "<unknown MCR result>";
1391 int qman_create_fq(u32 fqid
, u32 flags
, struct qman_fq
*fq
)
1394 struct qm_mcr_queryfq_np np
;
1395 struct qm_mc_command
*mcc
;
1396 struct qm_mc_result
*mcr
;
1397 struct qman_portal
*p
;
1399 if (flags
& QMAN_FQ_FLAG_DYNAMIC_FQID
) {
1400 int ret
= qman_alloc_fqid(&fqid
);
1405 spin_lock_init(&fq
->fqlock
);
1407 fq
->fqid_le
= cpu_to_be32(fqid
);
1409 fq
->state
= qman_fq_state_oos
;
1410 fq
->cgr_groupid
= 0;
1411 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1412 if (unlikely(find_empty_fq_table_entry(&fq
->key
, fq
))) {
1413 pr_info("Find empty table entry failed\n");
1416 fq
->qman_fq_lookup_table
= qman_fq_lookup_table
;
1418 if (!(flags
& QMAN_FQ_FLAG_AS_IS
) || (flags
& QMAN_FQ_FLAG_NO_MODIFY
))
1420 /* Everything else is AS_IS support */
1421 p
= get_affine_portal();
1422 mcc
= qm_mc_start(&p
->p
);
1423 mcc
->queryfq
.fqid
= cpu_to_be32(fqid
);
1424 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYFQ
);
1425 while (!(mcr
= qm_mc_result(&p
->p
)))
1427 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCC_VERB_QUERYFQ
);
1428 if (mcr
->result
!= QM_MCR_RESULT_OK
) {
1429 pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr
->result
));
1432 fqd
= mcr
->queryfq
.fqd
;
1433 hw_fqd_to_cpu(&fqd
);
1434 mcc
= qm_mc_start(&p
->p
);
1435 mcc
->queryfq_np
.fqid
= cpu_to_be32(fqid
);
1436 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYFQ_NP
);
1437 while (!(mcr
= qm_mc_result(&p
->p
)))
1439 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCC_VERB_QUERYFQ_NP
);
1440 if (mcr
->result
!= QM_MCR_RESULT_OK
) {
1441 pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr
->result
));
1444 np
= mcr
->queryfq_np
;
1445 /* Phew, have queryfq and queryfq_np results, stitch together
1446 * the FQ object from those.
1448 fq
->cgr_groupid
= fqd
.cgid
;
1449 switch (np
.state
& QM_MCR_NP_STATE_MASK
) {
1450 case QM_MCR_NP_STATE_OOS
:
1452 case QM_MCR_NP_STATE_RETIRED
:
1453 fq
->state
= qman_fq_state_retired
;
1455 fq_set(fq
, QMAN_FQ_STATE_NE
);
1457 case QM_MCR_NP_STATE_TEN_SCHED
:
1458 case QM_MCR_NP_STATE_TRU_SCHED
:
1459 case QM_MCR_NP_STATE_ACTIVE
:
1460 fq
->state
= qman_fq_state_sched
;
1461 if (np
.state
& QM_MCR_NP_STATE_R
)
1462 fq_set(fq
, QMAN_FQ_STATE_CHANGING
);
1464 case QM_MCR_NP_STATE_PARKED
:
1465 fq
->state
= qman_fq_state_parked
;
1468 DPAA_ASSERT(NULL
== "invalid FQ state");
1470 if (fqd
.fq_ctrl
& QM_FQCTRL_CGE
)
1471 fq
->state
|= QMAN_FQ_STATE_CGR_EN
;
1474 if (flags
& QMAN_FQ_FLAG_DYNAMIC_FQID
)
1475 qman_release_fqid(fqid
);
1479 void qman_destroy_fq(struct qman_fq
*fq
, u32 flags __maybe_unused
)
1482 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1483 * quiesced. Instead, run some checks.
1485 switch (fq
->state
) {
1486 case qman_fq_state_parked
:
1487 DPAA_ASSERT(flags
& QMAN_FQ_DESTROY_PARKED
);
1489 case qman_fq_state_oos
:
1490 if (fq_isset(fq
, QMAN_FQ_FLAG_DYNAMIC_FQID
))
1491 qman_release_fqid(fq
->fqid
);
1492 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1493 clear_fq_table_entry(fq
->key
);
1499 DPAA_ASSERT(NULL
== "qman_free_fq() on unquiesced FQ!");
1502 u32
qman_fq_fqid(struct qman_fq
*fq
)
1507 void qman_fq_state(struct qman_fq
*fq
, enum qman_fq_state
*state
, u32
*flags
)
1515 int qman_init_fq(struct qman_fq
*fq
, u32 flags
, struct qm_mcc_initfq
*opts
)
1517 struct qm_mc_command
*mcc
;
1518 struct qm_mc_result
*mcr
;
1519 struct qman_portal
*p
;
1521 u8 res
, myverb
= (flags
& QMAN_INITFQ_FLAG_SCHED
) ?
1522 QM_MCC_VERB_INITFQ_SCHED
: QM_MCC_VERB_INITFQ_PARKED
;
1524 if ((fq
->state
!= qman_fq_state_oos
) &&
1525 (fq
->state
!= qman_fq_state_parked
))
1527 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1528 if (unlikely(fq_isset(fq
, QMAN_FQ_FLAG_NO_MODIFY
)))
1531 if (opts
&& (opts
->we_mask
& QM_INITFQ_WE_OAC
)) {
1532 /* And can't be set at the same time as TDTHRESH */
1533 if (opts
->we_mask
& QM_INITFQ_WE_TDTHRESH
)
1536 /* Issue an INITFQ_[PARKED|SCHED] management command */
1537 p
= get_affine_portal();
1539 if (unlikely((fq_isset(fq
, QMAN_FQ_STATE_CHANGING
)) ||
1540 ((fq
->state
!= qman_fq_state_oos
) &&
1541 (fq
->state
!= qman_fq_state_parked
)))) {
1545 mcc
= qm_mc_start(&p
->p
);
1547 mcc
->initfq
= *opts
;
1548 mcc
->initfq
.fqid
= cpu_to_be32(fq
->fqid
);
1549 mcc
->initfq
.count
= 0;
1551 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1552 * demux pointer. Otherwise, the caller-provided value is allowed to
1553 * stand, don't overwrite it.
1555 if (fq_isclear(fq
, QMAN_FQ_FLAG_TO_DCPORTAL
)) {
1558 mcc
->initfq
.we_mask
|= QM_INITFQ_WE_CONTEXTB
;
1559 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1560 mcc
->initfq
.fqd
.context_b
= cpu_to_be32(fq
->key
);
1562 mcc
->initfq
.fqd
.context_b
= (u32
)(uintptr_t)fq
;
1565 * and the physical address - NB, if the user wasn't trying to
1566 * set CONTEXTA, clear the stashing settings.
1568 if (!(mcc
->initfq
.we_mask
& QM_INITFQ_WE_CONTEXTA
)) {
1569 mcc
->initfq
.we_mask
|= QM_INITFQ_WE_CONTEXTA
;
1570 memset(&mcc
->initfq
.fqd
.context_a
, 0,
1571 sizeof(mcc
->initfq
.fqd
.context_a
));
1573 phys_fq
= rte_mem_virt2iova(fq
);
1574 qm_fqd_stashing_set64(&mcc
->initfq
.fqd
, phys_fq
);
1577 if (flags
& QMAN_INITFQ_FLAG_LOCAL
) {
1578 mcc
->initfq
.fqd
.dest
.channel
= p
->config
->channel
;
1579 if (!(mcc
->initfq
.we_mask
& QM_INITFQ_WE_DESTWQ
)) {
1580 mcc
->initfq
.we_mask
|= QM_INITFQ_WE_DESTWQ
;
1581 mcc
->initfq
.fqd
.dest
.wq
= 4;
1584 mcc
->initfq
.we_mask
= cpu_to_be16(mcc
->initfq
.we_mask
);
1585 cpu_to_hw_fqd(&mcc
->initfq
.fqd
);
1586 qm_mc_commit(&p
->p
, myverb
);
1587 while (!(mcr
= qm_mc_result(&p
->p
)))
1589 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == myverb
);
1591 if (res
!= QM_MCR_RESULT_OK
) {
1596 if (opts
->we_mask
& QM_INITFQ_WE_FQCTRL
) {
1597 if (opts
->fqd
.fq_ctrl
& QM_FQCTRL_CGE
)
1598 fq_set(fq
, QMAN_FQ_STATE_CGR_EN
);
1600 fq_clear(fq
, QMAN_FQ_STATE_CGR_EN
);
1602 if (opts
->we_mask
& QM_INITFQ_WE_CGID
)
1603 fq
->cgr_groupid
= opts
->fqd
.cgid
;
1605 fq
->state
= (flags
& QMAN_INITFQ_FLAG_SCHED
) ?
1606 qman_fq_state_sched
: qman_fq_state_parked
;
1611 int qman_schedule_fq(struct qman_fq
*fq
)
1613 struct qm_mc_command
*mcc
;
1614 struct qm_mc_result
*mcr
;
1615 struct qman_portal
*p
;
1620 if (fq
->state
!= qman_fq_state_parked
)
1622 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1623 if (unlikely(fq_isset(fq
, QMAN_FQ_FLAG_NO_MODIFY
)))
1626 /* Issue a ALTERFQ_SCHED management command */
1627 p
= get_affine_portal();
1630 if (unlikely((fq_isset(fq
, QMAN_FQ_STATE_CHANGING
)) ||
1631 (fq
->state
!= qman_fq_state_parked
))) {
1635 mcc
= qm_mc_start(&p
->p
);
1636 mcc
->alterfq
.fqid
= cpu_to_be32(fq
->fqid
);
1637 qm_mc_commit(&p
->p
, QM_MCC_VERB_ALTER_SCHED
);
1638 while (!(mcr
= qm_mc_result(&p
->p
)))
1640 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCR_VERB_ALTER_SCHED
);
1642 if (res
!= QM_MCR_RESULT_OK
) {
1646 fq
->state
= qman_fq_state_sched
;
1653 int qman_retire_fq(struct qman_fq
*fq
, u32
*flags
)
1655 struct qm_mc_command
*mcc
;
1656 struct qm_mc_result
*mcr
;
1657 struct qman_portal
*p
;
1662 if ((fq
->state
!= qman_fq_state_parked
) &&
1663 (fq
->state
!= qman_fq_state_sched
))
1665 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1666 if (unlikely(fq_isset(fq
, QMAN_FQ_FLAG_NO_MODIFY
)))
1669 p
= get_affine_portal();
1672 if (unlikely((fq_isset(fq
, QMAN_FQ_STATE_CHANGING
)) ||
1673 (fq
->state
== qman_fq_state_retired
) ||
1674 (fq
->state
== qman_fq_state_oos
))) {
1678 rval
= table_push_fq(p
, fq
);
1681 mcc
= qm_mc_start(&p
->p
);
1682 mcc
->alterfq
.fqid
= cpu_to_be32(fq
->fqid
);
1683 qm_mc_commit(&p
->p
, QM_MCC_VERB_ALTER_RETIRE
);
1684 while (!(mcr
= qm_mc_result(&p
->p
)))
1686 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCR_VERB_ALTER_RETIRE
);
1689 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1690 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1691 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1692 * friendly, otherwise the caller doesn't necessarily have a fully
1693 * "retired" FQ on return even if the retirement was immediate. However
1694 * this does mean some code duplication between here and
1695 * fq_state_change().
1697 if (likely(res
== QM_MCR_RESULT_OK
)) {
1699 /* Process 'fq' right away, we'll ignore FQRNI */
1700 if (mcr
->alterfq
.fqs
& QM_MCR_FQS_NOTEMPTY
)
1701 fq_set(fq
, QMAN_FQ_STATE_NE
);
1702 if (mcr
->alterfq
.fqs
& QM_MCR_FQS_ORLPRESENT
)
1703 fq_set(fq
, QMAN_FQ_STATE_ORL
);
1705 table_del_fq(p
, fq
);
1708 fq
->state
= qman_fq_state_retired
;
1711 * Another issue with supporting "immediate" retirement
1712 * is that we're forced to drop FQRNIs, because by the
1713 * time they're seen it may already be "too late" (the
1714 * fq may have been OOS'd and free()'d already). But if
1715 * the upper layer wants a callback whether it's
1716 * immediate or not, we have to fake a "MR" entry to
1717 * look like an FQRNI...
1719 struct qm_mr_entry msg
;
1721 msg
.ern
.verb
= QM_MR_VERB_FQRNI
;
1722 msg
.fq
.fqs
= mcr
->alterfq
.fqs
;
1723 msg
.fq
.fqid
= fq
->fqid
;
1724 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1725 msg
.fq
.contextB
= fq
->key
;
1727 msg
.fq
.contextB
= (u32
)(uintptr_t)fq
;
1729 fq
->cb
.fqs(p
, fq
, &msg
);
1731 } else if (res
== QM_MCR_RESULT_PENDING
) {
1733 fq_set(fq
, QMAN_FQ_STATE_CHANGING
);
1736 table_del_fq(p
, fq
);
1743 int qman_oos_fq(struct qman_fq
*fq
)
1745 struct qm_mc_command
*mcc
;
1746 struct qm_mc_result
*mcr
;
1747 struct qman_portal
*p
;
1752 if (fq
->state
!= qman_fq_state_retired
)
1754 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1755 if (unlikely(fq_isset(fq
, QMAN_FQ_FLAG_NO_MODIFY
)))
1758 p
= get_affine_portal();
1760 if (unlikely((fq_isset(fq
, QMAN_FQ_STATE_BLOCKOOS
)) ||
1761 (fq
->state
!= qman_fq_state_retired
))) {
1765 mcc
= qm_mc_start(&p
->p
);
1766 mcc
->alterfq
.fqid
= cpu_to_be32(fq
->fqid
);
1767 qm_mc_commit(&p
->p
, QM_MCC_VERB_ALTER_OOS
);
1768 while (!(mcr
= qm_mc_result(&p
->p
)))
1770 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCR_VERB_ALTER_OOS
);
1772 if (res
!= QM_MCR_RESULT_OK
) {
1776 fq
->state
= qman_fq_state_oos
;
1782 int qman_fq_flow_control(struct qman_fq
*fq
, int xon
)
1784 struct qm_mc_command
*mcc
;
1785 struct qm_mc_result
*mcr
;
1786 struct qman_portal
*p
;
1792 if ((fq
->state
== qman_fq_state_oos
) ||
1793 (fq
->state
== qman_fq_state_retired
) ||
1794 (fq
->state
== qman_fq_state_parked
))
1797 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1798 if (unlikely(fq_isset(fq
, QMAN_FQ_FLAG_NO_MODIFY
)))
1801 /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
1802 p
= get_affine_portal();
1804 if (unlikely((fq_isset(fq
, QMAN_FQ_STATE_CHANGING
)) ||
1805 (fq
->state
== qman_fq_state_parked
) ||
1806 (fq
->state
== qman_fq_state_oos
) ||
1807 (fq
->state
== qman_fq_state_retired
))) {
1811 mcc
= qm_mc_start(&p
->p
);
1812 mcc
->alterfq
.fqid
= fq
->fqid
;
1813 mcc
->alterfq
.count
= 0;
1814 myverb
= xon
? QM_MCC_VERB_ALTER_FQXON
: QM_MCC_VERB_ALTER_FQXOFF
;
1816 qm_mc_commit(&p
->p
, myverb
);
1817 while (!(mcr
= qm_mc_result(&p
->p
)))
1819 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == myverb
);
1822 if (res
!= QM_MCR_RESULT_OK
) {
1831 int qman_query_fq(struct qman_fq
*fq
, struct qm_fqd
*fqd
)
1833 struct qm_mc_command
*mcc
;
1834 struct qm_mc_result
*mcr
;
1835 struct qman_portal
*p
= get_affine_portal();
1839 mcc
= qm_mc_start(&p
->p
);
1840 mcc
->queryfq
.fqid
= cpu_to_be32(fq
->fqid
);
1841 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYFQ
);
1842 while (!(mcr
= qm_mc_result(&p
->p
)))
1844 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCR_VERB_QUERYFQ
);
1846 if (res
== QM_MCR_RESULT_OK
)
1847 *fqd
= mcr
->queryfq
.fqd
;
1849 if (res
!= QM_MCR_RESULT_OK
)
1854 int qman_query_fq_has_pkts(struct qman_fq
*fq
)
1856 struct qm_mc_command
*mcc
;
1857 struct qm_mc_result
*mcr
;
1858 struct qman_portal
*p
= get_affine_portal();
1863 mcc
= qm_mc_start(&p
->p
);
1864 mcc
->queryfq
.fqid
= cpu_to_be32(fq
->fqid
);
1865 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYFQ_NP
);
1866 while (!(mcr
= qm_mc_result(&p
->p
)))
1869 if (res
== QM_MCR_RESULT_OK
)
1870 ret
= !!mcr
->queryfq_np
.frm_cnt
;
1874 int qman_query_fq_np(struct qman_fq
*fq
, struct qm_mcr_queryfq_np
*np
)
1876 struct qm_mc_command
*mcc
;
1877 struct qm_mc_result
*mcr
;
1878 struct qman_portal
*p
= get_affine_portal();
1882 mcc
= qm_mc_start(&p
->p
);
1883 mcc
->queryfq
.fqid
= cpu_to_be32(fq
->fqid
);
1884 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYFQ_NP
);
1885 while (!(mcr
= qm_mc_result(&p
->p
)))
1887 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCR_VERB_QUERYFQ_NP
);
1889 if (res
== QM_MCR_RESULT_OK
) {
1890 *np
= mcr
->queryfq_np
;
1891 np
->fqd_link
= be24_to_cpu(np
->fqd_link
);
1892 np
->odp_seq
= be16_to_cpu(np
->odp_seq
);
1893 np
->orp_nesn
= be16_to_cpu(np
->orp_nesn
);
1894 np
->orp_ea_hseq
= be16_to_cpu(np
->orp_ea_hseq
);
1895 np
->orp_ea_tseq
= be16_to_cpu(np
->orp_ea_tseq
);
1896 np
->orp_ea_hptr
= be24_to_cpu(np
->orp_ea_hptr
);
1897 np
->orp_ea_tptr
= be24_to_cpu(np
->orp_ea_tptr
);
1898 np
->pfdr_hptr
= be24_to_cpu(np
->pfdr_hptr
);
1899 np
->pfdr_tptr
= be24_to_cpu(np
->pfdr_tptr
);
1900 np
->ics_surp
= be16_to_cpu(np
->ics_surp
);
1901 np
->byte_cnt
= be32_to_cpu(np
->byte_cnt
);
1902 np
->frm_cnt
= be24_to_cpu(np
->frm_cnt
);
1903 np
->ra1_sfdr
= be16_to_cpu(np
->ra1_sfdr
);
1904 np
->ra2_sfdr
= be16_to_cpu(np
->ra2_sfdr
);
1905 np
->od1_sfdr
= be16_to_cpu(np
->od1_sfdr
);
1906 np
->od2_sfdr
= be16_to_cpu(np
->od2_sfdr
);
1907 np
->od3_sfdr
= be16_to_cpu(np
->od3_sfdr
);
1909 if (res
== QM_MCR_RESULT_ERR_FQID
)
1911 else if (res
!= QM_MCR_RESULT_OK
)
1916 int qman_query_fq_frm_cnt(struct qman_fq
*fq
, u32
*frm_cnt
)
1918 struct qm_mc_command
*mcc
;
1919 struct qm_mc_result
*mcr
;
1920 struct qman_portal
*p
= get_affine_portal();
1922 mcc
= qm_mc_start(&p
->p
);
1923 mcc
->queryfq
.fqid
= cpu_to_be32(fq
->fqid
);
1924 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYFQ_NP
);
1925 while (!(mcr
= qm_mc_result(&p
->p
)))
1927 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCR_VERB_QUERYFQ_NP
);
1929 if (mcr
->result
== QM_MCR_RESULT_OK
)
1930 *frm_cnt
= be24_to_cpu(mcr
->queryfq_np
.frm_cnt
);
1931 else if (mcr
->result
== QM_MCR_RESULT_ERR_FQID
)
1933 else if (mcr
->result
!= QM_MCR_RESULT_OK
)
1938 int qman_query_wq(u8 query_dedicated
, struct qm_mcr_querywq
*wq
)
1940 struct qm_mc_command
*mcc
;
1941 struct qm_mc_result
*mcr
;
1942 struct qman_portal
*p
= get_affine_portal();
1946 myverb
= (query_dedicated
) ? QM_MCR_VERB_QUERYWQ_DEDICATED
:
1947 QM_MCR_VERB_QUERYWQ
;
1948 mcc
= qm_mc_start(&p
->p
);
1949 mcc
->querywq
.channel
.id
= cpu_to_be16(wq
->channel
.id
);
1950 qm_mc_commit(&p
->p
, myverb
);
1951 while (!(mcr
= qm_mc_result(&p
->p
)))
1953 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == myverb
);
1955 if (res
== QM_MCR_RESULT_OK
) {
1958 wq
->channel
.id
= be16_to_cpu(mcr
->querywq
.channel
.id
);
1959 array_len
= ARRAY_SIZE(mcr
->querywq
.wq_len
);
1960 for (i
= 0; i
< array_len
; i
++)
1961 wq
->wq_len
[i
] = be32_to_cpu(mcr
->querywq
.wq_len
[i
]);
1963 if (res
!= QM_MCR_RESULT_OK
) {
1964 pr_err("QUERYWQ failed: %s\n", mcr_result_str(res
));
1970 int qman_testwrite_cgr(struct qman_cgr
*cgr
, u64 i_bcnt
,
1971 struct qm_mcr_cgrtestwrite
*result
)
1973 struct qm_mc_command
*mcc
;
1974 struct qm_mc_result
*mcr
;
1975 struct qman_portal
*p
= get_affine_portal();
1979 mcc
= qm_mc_start(&p
->p
);
1980 mcc
->cgrtestwrite
.cgid
= cgr
->cgrid
;
1981 mcc
->cgrtestwrite
.i_bcnt_hi
= (u8
)(i_bcnt
>> 32);
1982 mcc
->cgrtestwrite
.i_bcnt_lo
= (u32
)i_bcnt
;
1983 qm_mc_commit(&p
->p
, QM_MCC_VERB_CGRTESTWRITE
);
1984 while (!(mcr
= qm_mc_result(&p
->p
)))
1986 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCC_VERB_CGRTESTWRITE
);
1988 if (res
== QM_MCR_RESULT_OK
)
1989 *result
= mcr
->cgrtestwrite
;
1990 if (res
!= QM_MCR_RESULT_OK
) {
1991 pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res
));
1997 int qman_query_cgr(struct qman_cgr
*cgr
, struct qm_mcr_querycgr
*cgrd
)
1999 struct qm_mc_command
*mcc
;
2000 struct qm_mc_result
*mcr
;
2001 struct qman_portal
*p
= get_affine_portal();
2005 mcc
= qm_mc_start(&p
->p
);
2006 mcc
->querycgr
.cgid
= cgr
->cgrid
;
2007 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYCGR
);
2008 while (!(mcr
= qm_mc_result(&p
->p
)))
2010 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) == QM_MCC_VERB_QUERYCGR
);
2012 if (res
== QM_MCR_RESULT_OK
)
2013 *cgrd
= mcr
->querycgr
;
2014 if (res
!= QM_MCR_RESULT_OK
) {
2015 pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res
));
2018 cgrd
->cgr
.wr_parm_g
.word
=
2019 be32_to_cpu(cgrd
->cgr
.wr_parm_g
.word
);
2020 cgrd
->cgr
.wr_parm_y
.word
=
2021 be32_to_cpu(cgrd
->cgr
.wr_parm_y
.word
);
2022 cgrd
->cgr
.wr_parm_r
.word
=
2023 be32_to_cpu(cgrd
->cgr
.wr_parm_r
.word
);
2024 cgrd
->cgr
.cscn_targ
= be32_to_cpu(cgrd
->cgr
.cscn_targ
);
2025 cgrd
->cgr
.__cs_thres
= be16_to_cpu(cgrd
->cgr
.__cs_thres
);
2026 for (i
= 0; i
< ARRAY_SIZE(cgrd
->cscn_targ_swp
); i
++)
2027 cgrd
->cscn_targ_swp
[i
] =
2028 be32_to_cpu(cgrd
->cscn_targ_swp
[i
]);
2032 int qman_query_congestion(struct qm_mcr_querycongestion
*congestion
)
2034 struct qm_mc_result
*mcr
;
2035 struct qman_portal
*p
= get_affine_portal();
2040 qm_mc_commit(&p
->p
, QM_MCC_VERB_QUERYCONGESTION
);
2041 while (!(mcr
= qm_mc_result(&p
->p
)))
2043 DPAA_ASSERT((mcr
->verb
& QM_MCR_VERB_MASK
) ==
2044 QM_MCC_VERB_QUERYCONGESTION
);
2046 if (res
== QM_MCR_RESULT_OK
)
2047 *congestion
= mcr
->querycongestion
;
2048 if (res
!= QM_MCR_RESULT_OK
) {
2049 pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res
));
2052 for (i
= 0; i
< ARRAY_SIZE(congestion
->state
.state
); i
++)
2053 congestion
->state
.state
[i
] =
2054 be32_to_cpu(congestion
->state
.state
[i
]);
2058 int qman_set_vdq(struct qman_fq
*fq
, u16 num
, uint32_t vdqcr_flags
)
2060 struct qman_portal
*p
= get_affine_portal();
2064 vdqcr
= vdqcr_flags
;
2065 vdqcr
|= QM_VDQCR_NUMFRAMES_SET(num
);
2067 if ((fq
->state
!= qman_fq_state_parked
) &&
2068 (fq
->state
!= qman_fq_state_retired
)) {
2072 if (fq_isset(fq
, QMAN_FQ_STATE_VDQCR
)) {
2076 vdqcr
= (vdqcr
& ~QM_VDQCR_FQID_MASK
) | fq
->fqid
;
2078 if (!p
->vdqcr_owned
) {
2080 if (fq_isset(fq
, QMAN_FQ_STATE_VDQCR
))
2082 fq_set(fq
, QMAN_FQ_STATE_VDQCR
);
2084 p
->vdqcr_owned
= fq
;
2089 qm_dqrr_vdqcr_set(&p
->p
, vdqcr
);
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
			  u32 vdqcr)
{
	struct qman_portal *p;
	int ret = -EBUSY;

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_retired))
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;

	p = get_affine_portal();

	if (!p->vdqcr_owned) {
		FQLOCK(fq);
		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
			goto escape;
		fq_set(fq, QMAN_FQ_STATE_VDQCR);
		FQUNLOCK(fq);
		p->vdqcr_owned = fq;
		ret = 0;
	}
escape:
	if (ret)
		return ret;

	/* Issue the volatile dequeue command on the affine portal */
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	return 0;
}
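
/*
 * Usage sketch (illustrative only): a volatile dequeue is typically issued
 * against a parked or retired FQ and the resulting frames are then pulled in
 * through the normal DQRR processing path. The NUMFRAMES encoding comes from
 * the QM_VDQCR_NUMFRAMES_SET() helper used above; "my_fq" is hypothetical.
 *
 *	int err = qman_volatile_dequeue(&my_fq, 0,
 *					QM_VDQCR_NUMFRAMES_SET(4));
 *	if (err)
 *		pr_err("volatile dequeue refused: %d\n", err);
 */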
static noinline
void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_eqcr_is_empty(void)
{
	struct qman_portal *p = get_affine_portal();
	u8 avail;

	update_eqcr_ci(p, 0);
	avail = qm_eqcr_get_fill(&p->p);
	return (avail == 0);
}
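
/*
 * Usage sketch (illustrative only): before tearing a portal down, a caller
 * can spin until every previously committed enqueue has been consumed by
 * hardware.
 *
 *	while (!qman_eqcr_is_empty())
 *		cpu_relax();
 */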
void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
{
	if (affine) {
		struct qman_portal *p = get_affine_portal();

		p->cb_dc_ern = handler;
	} else
		cb_dc_ern = handler;
}
static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
					struct qman_fq *fq,
					const struct qm_fd *fd,
					u32 flags)
{
	struct qm_eqcr_entry *eq;
	u8 avail;

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq))
		return NULL;

	if (flags & QMAN_ENQUEUE_FLAG_DCA)
		eq->dca = QM_EQCR_DCA_ENABLE |
			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
					QM_EQCR_DCA_PARK : 0) |
			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
	eq->fqid = cpu_to_be32(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	eq->tag = cpu_to_be32(fq->key);
#else
	eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
#endif
	eq->fd = *fd;
	cpu_to_hw_fd(&eq->fd);
	return eq;
}
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
			   (flags & (QM_EQCR_VERB_COLOUR_MASK |
				     QM_EQCR_VERB_INTERRUPT)));
	/* Factor the below out, it's used from qman_enqueue_orp() too */
	return 0;
}
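
/*
 * Usage sketch (illustrative only): try_p_eq_start() returns NULL when the
 * EQCR is full, so qman_enqueue() reports -EBUSY and the caller is expected
 * to retry. A simple bounded retry loop might look like the following;
 * "tx_fq" and "fd" are hypothetical caller-owned objects.
 *
 *	int retries = 1000;
 *
 *	while (qman_enqueue(&tx_fq, &fd, 0) == -EBUSY && --retries)
 *		cpu_relax();
 */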
int qman_enqueue_multi(struct qman_fq *fq,
		       const struct qm_fd *fd, u32 *flags,
		       int frames_to_send)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_portal *portal = &p->p;

	register struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;

	u8 i = 0, diff, old_ci, sent = 0;

	/* Update the available entries if no entry is free */
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return 0;
	}

	/* try to send as many frames as possible */
	while (eqcr->available && frames_to_send--) {
		eq->fqid = fq->fqid_le;
		eq->fd.opaque_addr = fd->opaque_addr;
		eq->fd.addr = cpu_to_be40(fd->addr);
		eq->fd.status = cpu_to_be32(fd->status);
		eq->fd.opaque = cpu_to_be32(fd->opaque);
		if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
			eq->dca = QM_EQCR_DCA_ENABLE |
				((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
		}
		i++;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		eqcr->available--;
		sent++;
		fd++;
	}
	lwsync();

	/* In order for flushes to complete faster, all lines are recorded in
	 * 32 bit word.
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		eq->__dont_write_directly__verb =
			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
		prev_eq = eq;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		if (unlikely((prev_eq + 1) != eq))
			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
	}

	/* We need to flush all the lines but without load/store operations
	 * between them
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		dcbf(eq);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
	}
	/* Update cursor for the next call */
	eqcr->cursor = eq;
	return sent;
}
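
/*
 * Usage sketch (illustrative only): qman_enqueue_multi() returns the number
 * of frames actually placed in the EQCR, which can be less than requested
 * when the ring is short on space, so callers usually loop over the
 * remainder. "tx_fq", "fds" and "nb_frames" are hypothetical.
 *
 *	int sent = 0, n;
 *
 *	while (sent < nb_frames) {
 *		n = qman_enqueue_multi(&tx_fq, &fds[sent], NULL,
 *				       nb_frames - sent);
 *		if (!n)
 *			break;	// ring still full, give up or retry later
 *		sent += n;
 *	}
 */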
int
qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
		      int frames_to_send)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_portal *portal = &p->p;

	register struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;

	u8 i, diff, old_ci, sent = 0;

	/* Update the available entries if no entry is free */
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return 0;
	}

	/* try to send as many frames as possible */
	while (eqcr->available && frames_to_send--) {
		eq->fqid = fq[sent]->fqid_le;
		eq->fd.opaque_addr = fd->opaque_addr;
		eq->fd.addr = cpu_to_be40(fd->addr);
		eq->fd.status = cpu_to_be32(fd->status);
		eq->fd.opaque = cpu_to_be32(fd->opaque);

		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		eqcr->available--;
		sent++;
		fd++;
	}
	lwsync();

	/* In order for flushes to complete faster, all lines are recorded in
	 * 32 bit word.
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		eq->__dont_write_directly__verb =
			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
		prev_eq = eq;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		if (unlikely((prev_eq + 1) != eq))
			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
	}

	/* We need to flush all the lines but without load/store operations
	 * between them
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		dcbf(eq);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
	}
	/* Update cursor for the next call */
	eqcr->cursor = eq;
	return sent;
}
int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
		     struct qman_fq *orp, u16 orp_seqnum)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Process ORP-specifics here */
	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
	else {
		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
		if (flags & QMAN_ENQUEUE_FLAG_NESN)
			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
		else
			/* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
	}
	eq->seqnum = cpu_to_be16(orp_seqnum);
	eq->orp = cpu_to_be32(orp->fqid);
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
	return 0;
}
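
/*
 * Usage sketch (illustrative only): with order restoration, a frame is
 * re-enqueued against an ORP frame queue together with the sequence number
 * it carried when it was dequeued, so the hardware can release frames in
 * their original order. "tx_fq", "orp_fq", "fd" and "seqnum" are
 * hypothetical caller-owned values.
 *
 *	if (qman_enqueue_orp(&tx_fq, &fd, 0, &orp_fq, seqnum))
 *		pr_err("ORP enqueue failed\n");
 */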
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 res;
	u8 verb = QM_MCC_VERB_MODIFYCGR;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
	mcc->initcgr.cgr.wr_parm_g.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
	mcc->initcgr.cgr.wr_parm_y.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
	mcc->initcgr.cgr.wr_parm_r.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
	mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
	mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);

	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	res = mcr->result;
	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
}
#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
					QM_CHANNEL_SWPORTAL0))
#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
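
/*
 * Worked example (illustrative only): for an affine portal whose channel is
 * QM_CHANNEL_SWPORTAL0 + 3, PORTAL_IDX(p) evaluates to 3 and TARG_MASK(p)
 * to 0x80000000 >> 3 == 0x10000000, i.e. bit 3 counted from the MSB of the
 * CSCN_TARG field. TARG_DCP_MASK(n) places DCP portal n at bit 10 + n,
 * after the bits reserved for software portals.
 */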
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret;
	struct qman_portal *p;

	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	p = get_affine_portal();

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	/* if no opts specified, just add it to the list */
	if (!opts)
		goto add_list;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		goto release_lock;
	if (opts)
		local_opts = *opts;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
	else
		/* Overwrite TARG */
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
							TARG_MASK(p);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		goto release_lock;
add_list:
	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success, but screen
		 * and wail to the log file.
		 */
		pr_crit("CGR HW state partially modified\n");
		ret = 0;
		goto release_lock;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
							      cgr->cgrid))
		cgr->cb(p, cgr, 1);
release_lock:
	spin_unlock(&p->cgr_lock);
	return ret;
}
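
/*
 * Usage sketch (illustrative only): a typical caller fills in the cgrid and
 * a CSCN callback, then asks for the CGR to be initialised against the
 * current affine portal. The option-block contents (write-enable and enable
 * constants) are an assumption for illustration, not the only valid
 * configuration; "my_cgr" and "my_cscn_cb" are hypothetical.
 *
 *	struct qman_cgr my_cgr = {
 *		.cgrid = 5,
 *		.cb = my_cscn_cb,	// hypothetical CSCN handler
 *	};
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_CGR_WE_CSCN_EN;
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	if (qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &opts))
 *		pr_err("CGR create failed\n");
 */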
int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts)
{
	struct qm_mcc_initcgr local_opts;
	struct qm_mcr_querycgr cgr_state;
	int ret;

	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
		return -EINVAL;
	}
	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		return ret;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	if (opts)
		local_opts = *opts;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
					TARG_DCP_MASK(dcp_portal);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
				      &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);

	return ret;
}
int qman_delete_cgr(struct qman_cgr *cgr)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		pr_crit("Attempting to delete cgr from different portal than"
			" it was created: create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock(&p->cgr_lock);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if ((i->cgrid == cgr->cgrid) && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
							~(TARG_MASK(p));
	ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock(&p->cgr_lock);
put_portal:
	return ret;
}
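
/*
 * Usage sketch (illustrative only): deletion must run on the same affine
 * portal (i.e. the same lcore) that created the CGR, as the channel check
 * above enforces. "my_cgr" is the hypothetical object passed to
 * qman_create_cgr() earlier.
 *
 *	if (qman_delete_cgr(&my_cgr))
 *		pr_err("CGR delete failed\n");
 */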
int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct qm_portal *low_p;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	u8 state;
	int orl_empty, fq_empty, drain = 0;
	u32 result;
	u32 channel, wq;
	u16 dest_wq;

	p = get_affine_portal();
	low_p = &p->p;

	/* Determine the state of the FQID */
	mcc = qm_mc_start(low_p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		return 0; /* Already OOS, no need to do anymore checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(low_p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);

	/* Need to store these since the MCR gets reused */
	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
	channel = dest_wq & 0x7;
	wq = dest_wq >> 3;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		result = mcr->result; /* Make a copy as we reuse MCR below */

		if (result == QM_MCR_RESULT_PENDING) {
			/* Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			const struct qm_mr_entry *msg;
			const struct qm_dqrr_entry *dqrr = NULL;
			int found_fqrn = 0;
			__maybe_unused u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < (u16)(qm_channel_pool1 + 15)) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				pr_info("Cannot recover FQ 0x%x,"
					" it is scheduled on channel 0x%x",
					fqid, channel);
				return -EBUSY;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			while (!found_fqrn) {
				/* Keep draining DQRR while checking the MR*/
				qm_dqrr_pvb_update(low_p);
				dqrr = qm_dqrr_current(low_p);
				while (dqrr) {
					qm_dqrr_cdc_consume_1ptr(
						low_p, dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
				/* Process message ring too */
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
				while (msg) {
					if ((msg->ern.verb &
					     QM_MR_VERB_TYPE_MASK)
					    == QM_MR_VERB_FQRN)
						found_fqrn = 1;
					qm_mr_cci_consume_to_current(low_p);
					qm_mr_pvb_update(low_p);
					msg = qm_mr_current(low_p);
				}
				cpu_relax();
			}
		}
		if (result != QM_MCR_RESULT_OK &&
		    result != QM_MCR_RESULT_PENDING) {
			/* error */
			pr_err("qman_retire_fq failed on FQ 0x%x,"
			       " result=0x%x\n", fqid, result);
			return -1;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/* ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/* Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			fq_empty = 0;
			do {
				const struct qm_dqrr_entry *dqrr = NULL;
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(low_p, vdqcr);

				/* Wait for a dequeue to occur */
				while (dqrr == NULL) {
					qm_dqrr_pvb_update(low_p);
					dqrr = qm_dqrr_current(low_p);
					if (!dqrr)
						cpu_relax();
				}
				/* Process the dequeues, making sure to
				 * empty the ring completely.
				 */
				while (dqrr) {
					if (dqrr->fqid == fqid &&
					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
						fq_empty = 1;
					qm_dqrr_cdc_consume_1ptr(low_p,
								 dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
			} while (fq_empty == 0);
		}
		qm_dqrr_sdqcr_set(low_p, 0);

		/* Wait for the ORL to have been completely drained */
		while (orl_empty == 0) {
			const struct qm_mr_entry *msg;

			qm_mr_pvb_update(low_p);
			msg = qm_mr_current(low_p);
			while (msg) {
				if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
				    QM_MR_VERB_FQRL)
					orl_empty = 1;
				qm_mr_cci_consume_to_current(low_p);
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
			}
			cpu_relax();
		}
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			pr_err(
			"OOS after drain failed on FQID 0x%x, result 0x%x\n",
			       fqid, mcr->result);
			return -1;
		}
		return 0;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			pr_err("OOS Failed on FQID 0x%x\n", fqid);