/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 * Copyright 2015-2019 NXP
 *
 */
#ifndef _FSL_QBMAN_PORTAL_H
#define _FSL_QBMAN_PORTAL_H

#include <rte_compat.h>
#include <fsl_qbman_base.h>

#define SVR_LS1080A 0x87030000
#define SVR_LS2080A 0x87010000
#define SVR_LS2088A 0x87090000
#define SVR_LX2160A 0x87360000

/* Variable to store DPAA2 platform type */
extern uint32_t dpaa2_svr_family;

/**
 * DOC - QBMan portal APIs to implement the following functions:
 * - Initialize and destroy the software portal object.
 * - Read and write software portal interrupt registers.
 * - Enqueue, including setting the enqueue descriptor and issuing the enqueue
 *   command.
 * - Dequeue, including setting the dequeue descriptor, issuing the dequeue
 *   command, parsing the dequeue response in DQRR and memory, and parsing
 *   state change notifications.
 * - Release, including setting the release descriptor and issuing the buffer
 *   release command.
 * - Acquire, acquiring buffers from a given buffer pool.
 * - FQ management.
 * - Channel management, enabling/disabling CDAN with or without context.
 */

/**
 * qbman_swp_init() - Create a functional object representing the given
 * QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal object for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);

/**
 * qbman_swp_finish() - Destroy a functional object representing the given
 * QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed.
 */
void qbman_swp_finish(struct qbman_swp *p);

/**
 * qbman_swp_invalidate() - Invalidate the cache-enabled area of the QBMan
 * portal. This is required to be called if a portal moved to another core
 * because the QBMan portal area is non-coherent.
 * @p: the qbman_swp object to be invalidated
 */
void qbman_swp_invalidate(struct qbman_swp *p);

/**
 * qbman_swp_get_desc() - Get the descriptor of the given portal object.
 * @p: the given portal object.
 *
 * Return the descriptor for this portal.
 */
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
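
/*
 * Illustrative sketch (not part of the upstream API): a minimal portal
 * bring-up and tear-down flow using the functions above. The qbman_swp_desc
 * is assumed to have been populated by the platform layer (see
 * fsl_qbman_base.h); 'example_' names are hypothetical.
 *
 *	static struct qbman_swp *example_portal_open(const struct qbman_swp_desc *d)
 *	{
 *		struct qbman_swp *swp = qbman_swp_init(d);
 *
 *		if (!swp)
 *			return NULL;	// portal object could not be created
 *		return swp;
 *	}
 *
 *	static void example_portal_close(struct qbman_swp *swp)
 *	{
 *		// if the portal was migrated across cores, its non-coherent
 *		// area should have been refreshed with qbman_swp_invalidate()
 *		qbman_swp_finish(swp);
 *	}
 */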

 /**************/
 /* Interrupts */
 /**************/

/* EQCR ring interrupt */
#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
/* Enqueue command dispatched interrupt */
#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
/* DQRR non-empty interrupt */
#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
/* RCR ring interrupt */
#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
/* Release command dispatched interrupt */
#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
/* Volatile dequeue command interrupt */
#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)

/**
 * qbman_swp_interrupt_get_vanish() - Get the data in the software portal
 * interrupt status disable register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_ISDR register.
 */
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_vanish() - Set the data in the software portal
 * interrupt status disable register.
 * @p: the given software portal object.
 * @mask: the value to set in the SWP_ISDR register.
 */
void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_read_status() - Get the data in the software portal
 * interrupt status register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_ISR register.
 */
uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_clear_status() - Clear bits in the software portal
 * interrupt status register.
 * @p: the given software portal object.
 * @mask: the bits to clear in the SWP_ISR register.
 */
__rte_internal
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_dqrr_thrshld_read_status() - Get the data in the software portal
 * DQRR interrupt threshold register.
 * @p: the given software portal object.
 */
uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);

/**
 * qbman_swp_dqrr_thrshld_write() - Set the data in the software portal
 * DQRR interrupt threshold register.
 * @p: the given software portal object.
 * @mask: the value to set in the SWP_DQRR_ITR register.
 */
void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_intr_timeout_read_status() - Get the data in the software portal
 * interrupt time-out period register.
 * @p: the given software portal object.
 */
uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);

/**
 * qbman_swp_intr_timeout_write() - Set the data in the software portal
 * interrupt time-out period register.
 * @p: the given software portal object.
 * @mask: the value to set in the SWP_ITPR register.
 */
void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_get_trigger() - Get the data in the software portal
 * interrupt enable register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_IER register.
 */
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_trigger() - Set the data in the software portal
 * interrupt enable register.
 * @p: the given software portal object.
 * @mask: the value to set in the SWP_IER register.
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_get_inhibit() - Get the data in the software portal
 * interrupt inhibit register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_inhibit() - Set the data in the software portal
 * interrupt inhibit register.
 * @p: the given software portal object.
 * @inhibit: whether to inhibit the portal interrupts.
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
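
/*
 * Illustrative sketch (not part of the upstream API): a minimal interrupt
 * service path. The handler reads the status register, reacts to the DQRI
 * source (DQRR draining itself is assumed to happen elsewhere) and then
 * acknowledges exactly what it observed.
 *
 *	static void example_portal_irq(struct qbman_swp *swp)
 *	{
 *		uint32_t status = qbman_swp_interrupt_read_status(swp);
 *
 *		if (status & QBMAN_SWP_INTERRUPT_DQRI) {
 *			// drain DQRR here, e.g. with qbman_swp_dqrr_next()
 *		}
 *		qbman_swp_interrupt_clear_status(swp, status);
 *	}
 */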

 /************/
 /* Dequeues */
 /************/

/**
 * struct qbman_result - structure for qbman dequeue response and/or
 * notification.
 * @dont_manipulate_directly: the 16 32-bit words representing the whole
 * possible qbman dequeue result.
 */
struct qbman_result {
	union {
		struct common {
			uint8_t verb;
			uint8_t reserved[63];
		} common;
		struct dq {
			uint8_t verb;
			uint8_t stat;
			__le16 seqnum;
			__le16 oprid;
			uint8_t reserved;
			uint8_t tok;
			__le32 fqid;
			uint32_t reserved2;
			__le32 fq_byte_cnt;
			__le32 fq_frm_cnt;
			__le64 fqd_ctx;
			uint8_t fd[32];
		} dq;
		struct scn {
			uint8_t verb;
			uint8_t stat;
			uint8_t state;
			uint8_t reserved;
			__le32 rid_tok;
			__le64 ctx;
		} scn;
		struct eq_resp {
			uint8_t verb;
			uint8_t dca;
			__le16 seqnum;
			__le16 oprid;
			uint8_t reserved;
			uint8_t rc;
			__le32 tgtid;
			__le32 tag;
			uint16_t qdbin;
			uint8_t qpri;
			uint8_t reserved1;
			__le32 fqid:24;
			__le32 rspid:8;
			__le64 rsp_addr;
			uint8_t fd[32];
		} eq_resp;
	};
};

/* TODO:
 * A DQRI interrupt can be generated when there are dequeue results on the
 * portal's DQRR (this mechanism does not deal with "pull" dequeues to
 * user-supplied 'storage' addresses). There are two parameters to this
 * interrupt source, one is a threshold and the other is a timeout. The
 * interrupt will fire if either the fill-level of the ring exceeds 'thresh',
 * or if the ring has been non-empty for longer than 'timeout' nanoseconds.
 * For timeout, an approximation to the desired nanosecond-granularity value
 * is made, so there are get and set APIs to allow the user to see what actual
 * timeout is set (compared to the timeout that was requested).
 */
int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);

/* ------------------- */
/* Push-mode dequeuing */
/* ------------------- */

/* The user of a portal can enable and disable push-mode dequeuing of up to 16
 * channels independently. The toggling is not specified by channel ID, but
 * rather by the index (from 0 to 15) that has been mapped to the desired
 * channel.
 */

/**
 * qbman_swp_push_get() - Get the push dequeue setup.
 * @s: the software portal object.
 * @channel_idx: the channel index to query.
 * @enabled: returned boolean to show whether the push dequeue is enabled for
 * the given channel.
 */
void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);

/**
 * qbman_swp_push_set() - Enable or disable push dequeue.
 * @s: the software portal object.
 * @channel_idx: the channel index.
 * @enable: enable or disable push dequeue.
 *
 * The user of a portal can enable and disable push-mode dequeuing of up to 16
 * channels independently. The toggling is not specified by channel ID, but
 * rather by the index (from 0 to 15) that has been mapped to the desired
 * channel.
 */
__rte_internal
void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
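
/*
 * Illustrative sketch (not part of the upstream API): enabling push-mode
 * dequeuing on the first mapped channel index. Frames then arrive through
 * the portal's DQRR and are collected with the DQRR polling APIs below.
 *
 *	static void example_enable_push(struct qbman_swp *swp)
 *	{
 *		int enabled;
 *
 *		qbman_swp_push_get(swp, 0, &enabled);
 *		if (!enabled)
 *			qbman_swp_push_set(swp, 0, 1);
 *	}
 */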

/* ------------------- */
/* Pull-mode dequeuing */
/* ------------------- */

/**
 * struct qbman_pull_desc - the structure for pull dequeue descriptor
 */
struct qbman_pull_desc {
	union {
		uint32_t dont_manipulate_directly[16];
		struct pull {
			uint8_t verb;
			uint8_t numf;
			uint8_t tok;
			uint8_t reserved;
			uint32_t dq_src;
			uint64_t rsp_addr;
			uint64_t rsp_addr_virt;
			uint8_t padding[40];
		} pull;
	};
};

enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the pull dequeue descriptor to be cleared.
 */
__rte_internal
void qbman_pull_desc_clear(struct qbman_pull_desc *d);

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d: the pull dequeue descriptor to be set.
 * @storage: the pointer of the memory to store the dequeue result.
 * @storage_phys: the physical address of the storage memory.
 * @stash: to indicate whether write allocate is enabled.
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the physical/DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
__rte_internal
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct qbman_result *storage,
				 uint64_t storage_phys,
				 int stash);

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
 * @d: the pull dequeue descriptor to be set.
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
 */
__rte_internal
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
				   uint8_t numframes);

/**
 * qbman_pull_desc_set_token() - Set dequeue token for pull command
 * @d: the dequeue descriptor
 * @token: the token to be set
 *
 * token is the value that shows up in the dequeue response that can be used
 * to detect when the results have been published. The easiest technique is to
 * zero result "storage" before issuing a dequeue, and use any non-zero
 * 'token' value.
 */
void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);

/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
 * @fqid: the frame queue index of the given FQ.
 */
__rte_internal
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
 * @wqid: composed of channel id and wqid within the channel.
 * @dct: the dequeue command type.
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
			    enum qbman_pull_type_e dct);

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue
 * command dequeues.
 * @chid: the channel id to be dequeued.
 * @dct: the dequeue command type.
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
				 enum qbman_pull_type_e dct);

/**
 * qbman_pull_desc_set_rad() - Decide whether to reschedule the FQ after
 * dequeue.
 * @rad: 1 = reschedule the FQ after dequeue.
 *       0 = allow the FQ to remain active after dequeue.
 */
void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object.
 * @d: the software portal descriptor which has been configured with
 * the set of qbman_pull_desc_set_*() calls.
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
__rte_internal
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
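
/*
 * Illustrative sketch (not part of the upstream API): a single-frame pull
 * dequeue from a frame queue into caller-provided storage. 'storage' and
 * 'storage_phys' are assumed to reference the same DMA-able memory; the
 * completion test uses qbman_result_has_new_result(), declared further below.
 *
 *	static int example_pull_one(struct qbman_swp *swp, uint32_t fqid,
 *				    struct qbman_result *storage,
 *				    uint64_t storage_phys)
 *	{
 *		struct qbman_pull_desc pd;
 *
 *		qbman_pull_desc_clear(&pd);
 *		qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *		qbman_pull_desc_set_numframes(&pd, 1);
 *		qbman_pull_desc_set_fq(&pd, fqid);
 *		if (qbman_swp_pull(swp, &pd))
 *			return -1;	// portal busy, caller may retry
 *		while (!qbman_result_has_new_result(swp, storage))
 *			;		// spin until the response is DMA'd back
 *		return 0;
 *	}
 */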

/* -------------------------------- */
/* Polling DQRR for dequeue results */
/* -------------------------------- */

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry.
 * @s: the software portal object.
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
__rte_internal
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);

/**
 * qbman_swp_prefetch_dqrr_next() - Prefetch the next DQRR entry.
 * @s: the software portal object.
 */
__rte_internal
void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 * qbman_swp_dqrr_next().
 * @s: the software portal object.
 * @dq: the DQRR entry to be consumed.
 */
__rte_internal
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);

/**
 * qbman_swp_dqrr_idx_consume() - Given the DQRR index consume the DQRR entry
 * @s: the software portal object.
 * @dqrr_index: the DQRR index entry to be consumed.
 */
__rte_internal
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);

/**
 * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
 * @dqrr: the given dqrr object.
 *
 * Return dqrr index.
 */
__rte_internal
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);

/**
 * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
 * given portal
 * @s: the given portal.
 * @idx: the dqrr index.
 *
 * Return dqrr entry object.
 */
__rte_internal
struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
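
/*
 * Illustrative sketch (not part of the upstream API): a DQRR polling loop.
 * Each entry is classified (qbman_result_is_DQ() and the frame accessors are
 * declared further below), handed to a caller-supplied handler, and consumed.
 *
 *	static void example_poll_dqrr(struct qbman_swp *swp,
 *				      void (*handle_fd)(const struct qbman_fd *))
 *	{
 *		const struct qbman_result *dq;
 *
 *		while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *			if (qbman_result_is_DQ(dq) &&
 *			    (qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VALIDFRAME))
 *				handle_fd(qbman_result_DQ_fd(dq));
 *			// non-DQ entries are state change notifications
 *			qbman_swp_dqrr_consume(swp, dq);
 *		}
 *	}
 */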

/* ------------------------------------------------- */
/* Polling user-provided storage for dequeue results */
/* ------------------------------------------------- */

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 * dq storage memory set in pull dequeue command
 * @s: the software portal object.
 * @dq: the dequeue result read from the memory.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format (whether or not that is the same as the little-endian format that
 * hardware DMA'd to the user's storage). As such, once the user has called
 * qbman_result_has_new_result() and been returned a valid dequeue result,
 * they should not call it again on the same memory location (except of course
 * if another dequeue command has been executed to produce a new result to that
 * location).
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 */
__rte_internal
int qbman_result_has_new_result(struct qbman_swp *s,
				struct qbman_result *dq);

/**
 * qbman_check_command_complete() - Check if the previously issued dequeue
 * command is completed and results are available in memory.
 * @dq: the dequeue result read from the memory.
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 */
__rte_internal
int qbman_check_command_complete(struct qbman_result *dq);

__rte_internal
int qbman_check_new_result(struct qbman_result *dq);

/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
/* -------------------------------------------------------- */

/**
 * qbman_result_is_DQ() - Check whether the dequeue result is a dequeue
 * response.
 * @dq: the dequeue result to be checked.
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
int qbman_result_is_DQ(const struct qbman_result *dq);

/**
 * qbman_result_is_SCN() - Check whether the dequeue result is a notification.
 * @dq: the dequeue result to be checked.
 *
 * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
 * notifications" of one type or another. Some APIs apply to all of them, of
 * the form qbman_result_SCN_***().
 */
static inline int qbman_result_is_SCN(const struct qbman_result *dq)
{
	return !qbman_result_is_DQ(dq);
}

/* Recognise different notification types, only required if the user allows
 * for these to occur, and cares about them when they do.
 */

/**
 * qbman_result_is_FQDAN() - Check for FQ Data Availability
 * @dq: the qbman_result object.
 *
 * Return 1 if this is FQDAN.
 */
int qbman_result_is_FQDAN(const struct qbman_result *dq);

/**
 * qbman_result_is_CDAN() - Check for Channel Data Availability
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CDAN.
 */
int qbman_result_is_CDAN(const struct qbman_result *dq);

/**
 * qbman_result_is_CSCN() - Check for Congestion State Change
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CSCN.
 */
int qbman_result_is_CSCN(const struct qbman_result *dq);

/**
 * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is BPSCN.
 */
int qbman_result_is_BPSCN(const struct qbman_result *dq);

/**
 * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CGCU.
 */
int qbman_result_is_CGCU(const struct qbman_result *dq);

/* Frame queue state change notifications; (FQDAN in theory counts too as it
 * leaves a FQ parked, but it is primarily a data availability notification)
 */

/**
 * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQRN.
 */
int qbman_result_is_FQRN(const struct qbman_result *dq);

/**
 * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQRNI.
 */
int qbman_result_is_FQRNI(const struct qbman_result *dq);

/**
 * qbman_result_is_FQPN() - Check for FQ Park Notification
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQPN.
 */
int qbman_result_is_FQPN(const struct qbman_result *dq);

/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
 */
/* FQ empty */
#define QBMAN_DQ_STAT_FQEMPTY 0x80
/* FQ held active */
#define QBMAN_DQ_STAT_HELDACTIVE 0x40
/* FQ force eligible */
#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
/* Valid frame */
#define QBMAN_DQ_STAT_VALIDFRAME 0x10
/* FQ ODP enable */
#define QBMAN_DQ_STAT_ODPVALID 0x04
/* Volatile dequeue */
#define QBMAN_DQ_STAT_VOLATILE 0x02
/* volatile dequeue command is expired */
#define QBMAN_DQ_STAT_EXPIRED 0x01

#define QBMAN_EQCR_DCA_IDXMASK 0x0f
#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)

/**
 * qbman_result_DQ_flags() - Get the STAT field of dequeue response
 * @dq: the dequeue result.
 *
 * Return the state field.
 */
__rte_internal
uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);

/**
 * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
 * command.
 * @dq: the dequeue result.
 *
 * Return 1 for volatile (pull) dequeue, 0 for static dequeue.
 */
static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
{
	return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
}

/**
 * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
 * completed.
 * @dq: the dequeue result.
 *
 * Return boolean.
 */
static inline int qbman_result_DQ_is_pull_complete(
					const struct qbman_result *dq)
{
	return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
}

/**
 * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response.
 * seqnum is valid only if the VALIDFRAME flag is TRUE.
 * @dq: the dequeue result.
 *
 * Return seqnum.
 */
__rte_internal
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);

/**
 * qbman_result_DQ_odpid() - Get the odpid field in dequeue response.
 * odpid is valid only if the ODPVALID flag is TRUE.
 * @dq: the dequeue result.
 *
 * Return odpid.
 */
__rte_internal
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fqid() - Get the fqid in dequeue response
 * @dq: the dequeue result.
 *
 * Return fqid.
 */
uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);

/**
 * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
 * @dq: the dequeue result.
 *
 * Return the byte count remaining in the FQ.
 */
uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);

/**
 * qbman_result_DQ_frame_count() - Get the frame count in dequeue response
 * @dq: the dequeue result.
 *
 * Return the frame count remaining in the FQ.
 */
uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
 * @dq: the dequeue result.
 *
 * Return the frame queue context.
 */
__rte_internal
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
 * @dq: the dequeue result.
 *
 * Return the frame descriptor.
 */
__rte_internal
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);

/* State-change notifications (FQDAN/CDAN/CSCN/...). */

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 * @scn: the state change notification.
 *
 * Return the state in the notification.
 */
__rte_internal
uint8_t qbman_result_SCN_state(const struct qbman_result *scn);

/**
 * qbman_result_SCN_rid() - Get the resource id from the notification
 * @scn: the state change notification.
 *
 * Return the resource id.
 */
uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);

/**
 * qbman_result_SCN_ctx() - Get the context from the notification
 * @scn: the state change notification.
 *
 * Return the context.
 */
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);

/* Type-specific "resource IDs". Mainly for illustration purposes, though it
 * also gives the appropriate type widths.
 */
/* Get the FQID from the FQDAN */
#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQRN */
#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQRNI */
#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQPN */
#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the channel ID from the CDAN */
#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
/* Get the CGID from the CSCN */
#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
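
/*
 * Illustrative sketch (not part of the upstream API): classifying a state
 * change notification pulled off the DQRR and extracting its type-specific
 * resource id via the macros above.
 *
 *	static void example_handle_scn(const struct qbman_result *scn)
 *	{
 *		if (qbman_result_is_CDAN(scn)) {
 *			uint16_t cid = qbman_result_CDAN_cid(scn);
 *			(void)cid;	// rearm with qbman_swp_CDAN_enable()
 *		} else if (qbman_result_is_CSCN(scn)) {
 *			uint16_t cgid = qbman_result_CSCN_cgid(scn);
 *			(void)cgid;	// react to the congestion state change
 *		} else if (qbman_result_is_FQDAN(scn)) {
 *			uint32_t fqid = qbman_result_FQDAN_fqid(scn);
 *			(void)fqid;	// schedule a pull dequeue of this FQ
 *		}
 *	}
 */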

/**
 * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
 * @scn: the state change notification.
 *
 * Return the buffer pool id.
 */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_has_free_bufs() - Check whether there are free
 * buffers in the pool from BPSCN.
 * @scn: the state change notification.
 *
 * Return the number of free buffers.
 */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
 * buffer pool is depleted.
 * @scn: the state change notification.
 *
 * Return the status of buffer pool depletion.
 */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
 * pool is surplus or not.
 * @scn: the state change notification.
 *
 * Return the status of buffer pool surplus.
 */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
 * @scn: the state change notification.
 *
 * Return the BPSCN context.
 */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);

/* Parsing CGCU */
/**
 * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
 * @scn: the state change notification.
 *
 * Return the CGCU resource id.
 */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);

/**
 * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
 * @scn: the state change notification.
 *
 * Return instantaneous count in the CGCU notification.
 */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);

 /************/
 /* Enqueues */
 /************/

/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
	union {
		uint32_t dont_manipulate_directly[8];
		struct eq {
			uint8_t verb;
			uint8_t dca;
			uint16_t seqnum;
			uint16_t orpid;
			uint16_t reserved1;
			uint32_t tgtid;
			uint32_t tag;
			uint16_t qdbin;
			uint8_t qpri;
			uint8_t reserved[3];
			uint8_t wae;
			uint8_t rspid;
			uint64_t rsp_addr;
		} eq;
	};
};

/**
 * struct qbman_eq_response - structure of enqueue response
 * @dont_manipulate_directly: the 16 32-bit words representing the whole
 * enqueue response.
 */
struct qbman_eq_response {
	uint32_t dont_manipulate_directly[16];
};

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the given enqueue descriptor.
 */
__rte_internal
void qbman_eq_desc_clear(struct qbman_eq_desc *d);

/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue without order-restoration
 * - enqueue with order-restoration
 * - fill a hole in the order-restoration sequence, without any enqueue
 * - advance NESN (Next Expected Sequence Number), without any enqueue
 * 'respond_success' indicates whether an enqueue response should be DMA'd
 * after success (otherwise a response is DMA'd only after failure).
 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
 * be enqueued.
 */

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 */
__rte_internal
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);

/**
 * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 * @incomplete: indicates that other fragments of the same sequence number are
 * yet to be enqueued.
 */
__rte_internal
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint16_t opr_id, uint16_t seqnum, int incomplete);

/**
 * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
 * without any enqueue
 * @d: the enqueue descriptor.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 */
void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum);

/**
 * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
 * without any enqueue
 * @d: the enqueue descriptor.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 */
void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum);

/**
 * qbman_eq_desc_set_response() - Set the enqueue response info.
 * @d: the enqueue descriptor
 * @storage_phys: the physical address of the enqueue response in memory.
 * @stash: indicates whether write allocate is enabled.
 *
 * In the case where an enqueue response is DMA'd, this determines where that
 * response should go. (The physical/DMA address is given for hardware's
 * benefit, but software should interpret it as a "struct qbman_eq_response"
 * data structure.) 'stash' controls whether or not the write to main-memory
 * expresses a cache-warming attribute.
 */
__rte_internal
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				uint64_t storage_phys,
				int stash);

/**
 * qbman_eq_desc_set_token() - Set token for the enqueue command
 * @d: the enqueue descriptor
 * @token: the token to be set.
 *
 * token is the value that shows up in an enqueue response that can be used
 * to detect when the results have been published. The easiest technique is to
 * zero result "storage" before issuing an enqueue, and use any non-zero
 * 'token' value.
 */
__rte_internal
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);

/**
 * Exactly one of the following descriptor "targets" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 * Note that none of these will have any effect if the "action" type has been
 * set to "orp_hole" or "orp_nesn".
 */

/**
 * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
 * @d: the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued.
 */
__rte_internal
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
 * @d: the enqueue descriptor
 * @qdid: the id of the queuing destination to be enqueued.
 * @qd_bin: the queuing destination bin
 * @qd_prio: the queuing destination priority.
 */
__rte_internal
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint16_t qd_bin, uint8_t qd_prio);

/**
 * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
 * @d: the enqueue descriptor
 * @enable: boolean to enable/disable EQDI
 *
 * Determines whether or not the portal's EQDI interrupt source should be
 * asserted after the enqueue command is completed.
 */
void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);

/**
 * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
 * @d: the enqueue descriptor.
 * @enable: enable/disable DCA mode.
 * @dqrr_idx: DCAP_CI, the DCAP consumer index.
 * @park: whether to park the FQ or not.
 *
 * Determines whether or not a portal DQRR entry should be consumed once the
 * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
 * held-active (order-preserving) FQ, whether the FQ should be parked instead of
 * being rescheduled.)
 */
__rte_internal
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint8_t dqrr_idx, int park);

/**
 * qbman_result_eqresp_fd() - Get fd from enqueue response.
 * @eqresp: enqueue response.
 *
 * Return the fd pointer.
 */
__rte_internal
struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);

/**
 * qbman_result_eqresp_set_rspid() - Set the response id in enqueue response.
 * @eqresp: enqueue response.
 * @val: value to set into the response id.
 *
 * This value is set into the response id before issuing the enqueue command;
 * it gets overwritten by QBMan once the enqueue command is complete.
 */
__rte_internal
void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);

/**
 * qbman_result_eqresp_rspid() - Get the response id.
 * @eqresp: enqueue response.
 *
 * Return the response id.
 *
 * At the time of enqueue the user provides the response id. The response id
 * gets copied into the enqueue response to determine if the command has been
 * completed and the response has been updated.
 */
__rte_internal
uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);

/**
 * qbman_result_eqresp_rc() - Determines if the enqueue command is successful.
 * @eqresp: enqueue response.
 *
 * Return 0 when the command is successful.
 */
__rte_internal
uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
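
/*
 * Illustrative sketch (not part of the upstream API): checking an enqueue
 * response. The caller is assumed to have tagged the response memory with a
 * known id via qbman_result_eqresp_set_rspid() and pointed the descriptor at
 * it via qbman_eq_desc_set_response() before issuing the enqueue; completion
 * is assumed to be signalled by QBMan overwriting that stored id.
 *
 *	static int example_eqresp_done(struct qbman_result *eqresp,
 *				       uint8_t written_rspid)
 *	{
 *		if (qbman_result_eqresp_rspid(eqresp) == written_rspid)
 *			return 0;	// still our value: not complete yet
 *		return qbman_result_eqresp_rc(eqresp) == 0 ? 1 : -1;
 *	}
 */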

/**
 * qbman_swp_enqueue() - Issue an enqueue command.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptor.
 * @fd: the frame descriptor to be enqueued.
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd);
1071 * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
1072 eq descriptor
1073 * @s: the software portal used for enqueue.
1074 * @d: the enqueue descriptor.
1075 * @fd: the frame descriptor to be enqueued.
9f95a23c 1076 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
11fdf7f2
TL
1077 * @num_frames: the number of the frames to be enqueued.
1078 *
1079 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1080 */
f67539c2 1081__rte_internal
11fdf7f2
TL
1082int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1083 const struct qbman_eq_desc *d,
1084 const struct qbman_fd *fd,
1085 uint32_t *flags,
1086 int num_frames);
9f95a23c
TL
1087
1088/**
1089 * qbman_swp_enqueue_multiple_fd() - Enqueue multiple frames with same
1090 eq descriptor
1091 * @s: the software portal used for enqueue.
1092 * @d: the enqueue descriptor.
1093 * @fd: the frame descriptor to be enqueued.
1094 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
1095 * @num_frames: the number of the frames to be enqueued.
1096 *
1097 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1098 */
f67539c2 1099__rte_internal
9f95a23c
TL
1100int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1101 const struct qbman_eq_desc *d,
1102 struct qbman_fd **fd,
1103 uint32_t *flags,
1104 int num_frames);
1105
11fdf7f2
TL
1106/**
1107 * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
1108 * individual eq descriptor.
1109 * @s: the software portal used for enqueue.
1110 * @d: the enqueue descriptor.
1111 * @fd: the frame descriptor to be enqueued.
11fdf7f2
TL
1112 * @num_frames: the number of the frames to be enqueued.
1113 *
1114 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1115 */
f67539c2 1116__rte_internal
11fdf7f2
TL
1117int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1118 const struct qbman_eq_desc *d,
1119 const struct qbman_fd *fd,
1120 int num_frames);
1121
1122/* TODO:
1123 * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
1124 * @s: the software portal.
1125 * @thresh: the threshold to trigger the EQRI interrupt.
1126 *
1127 * An EQRI interrupt can be generated when the fill-level of EQCR falls below
1128 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
1129 */
1130int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
1131
1132 /*******************/
1133 /* Buffer releases */
1134 /*******************/
1135/**
1136 * struct qbman_release_desc - The structure for buffer release descriptor
9f95a23c 1137 * @dont_manipulate_directly: the 32bit data to represent the whole
11fdf7f2
TL
1138 * possible settings of qbman release descriptor.
1139 */
1140struct qbman_release_desc {
1141 union {
9f95a23c 1142 uint32_t dont_manipulate_directly[16];
11fdf7f2
TL
1143 struct br {
1144 uint8_t verb;
1145 uint8_t reserved;
1146 uint16_t bpid;
1147 uint32_t reserved2;
1148 uint64_t buf[7];
1149 } br;
1150 };
1151};
1152
1153/**
1154 * qbman_release_desc_clear() - Clear the contents of a descriptor to
1155 * default/starting state.
1156 * @d: the qbman release descriptor.
1157 */
f67539c2 1158__rte_internal
11fdf7f2
TL
1159void qbman_release_desc_clear(struct qbman_release_desc *d);
1160
1161/**
1162 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
1163 * @d: the qbman release descriptor.
1164 */
f67539c2 1165__rte_internal
11fdf7f2
TL
1166void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
1167
1168/**
1169 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
1170 * interrupt source should be asserted after the release command is completed.
1171 * @d: the qbman release descriptor.
1172 */
1173void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
1174
1175/**
1176 * qbman_swp_release() - Issue a buffer release command.
1177 * @s: the software portal object.
1178 * @d: the release descriptor.
1179 * @buffers: a pointer pointing to the buffer address to be released.
1180 * @num_buffers: number of buffers to be released, must be less than 8.
1181 *
1182 * Return 0 for success, -EBUSY if the release command ring is not ready.
1183 */
f67539c2 1184__rte_internal
11fdf7f2
TL
1185int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
1186 const uint64_t *buffers, unsigned int num_buffers);
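
/*
 * Illustrative sketch (not part of the upstream API): releasing a small
 * batch of buffers (given by their physical addresses) back to a pool,
 * retrying while the release command ring is busy.
 *
 *	static void example_release(struct qbman_swp *swp, uint16_t bpid,
 *				    const uint64_t *bufs, unsigned int n)
 *	{
 *		struct qbman_release_desc rd;
 *
 *		qbman_release_desc_clear(&rd);
 *		qbman_release_desc_set_bpid(&rd, bpid);
 *		while (qbman_swp_release(swp, &rd, bufs, n) == -EBUSY)
 *			;	// RCR busy, retry
 *	}
 */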

/* TODO:
 * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
 * @s: the software portal.
 * @thresh: the threshold.
 *
 * An RCRI interrupt can be generated when the fill-level of RCR falls below
 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
 */
int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);

 /*******************/
 /* Buffer acquires */
 /*******************/

/**
 * qbman_swp_acquire() - Issue a buffer acquire command.
 * @s: the software portal object.
 * @bpid: the buffer pool index.
 * @buffers: a pointer pointing to the acquired buffer addresses.
 * @num_buffers: number of buffers to be acquired, must be less than 8.
 *
 * Return 0 for success, or negative error code if the acquire command
 * fails.
 */
__rte_internal
int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
		      unsigned int num_buffers);
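
/*
 * Illustrative sketch (not part of the upstream API): pulling up to 7
 * buffers out of a pool into a local array of physical addresses.
 *
 *	static int example_acquire(struct qbman_swp *swp, uint16_t bpid,
 *				   uint64_t bufs[7], unsigned int n)
 *	{
 *		if (n >= 8)
 *			return -1;	// per-command limit: fewer than 8
 *		return qbman_swp_acquire(swp, bpid, bufs, n);
 *	}
 */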

 /*****************/
 /* FQ management */
 /*****************/

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
 * @s: the software portal object.
 * @fqid: the index of frame queue to be scheduled.
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
 * @s: the software portal object.
 * @fqid: the index of frame queue to be forced.
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);

/**
 * These functions change the FQ flow-control stuff between XON/XOFF. (The
 * default is XON.) This setting doesn't affect enqueues to the FQ, just
 * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
 * changed to XOFF after it had already become truly-scheduled to a channel, and
 * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
 * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
 * return NULL.)
 */
/**
 * qbman_swp_fq_xon() - XON the frame queue.
 * @s: the software portal object.
 * @fqid: the index of frame queue.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);

/**
 * qbman_swp_fq_xoff() - XOFF the frame queue.
 * @s: the software portal object.
 * @fqid: the index of frame queue.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);

 /**********************/
 /* Channel management */
 /**********************/

/**
 * If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then these functions will be necessary.
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. (The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step.) Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s: the software portal object.
 * @channelid: the channel index.
 * @ctx: the context to be set in CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
			       uint64_t ctx);

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
 * @s: the software portal object.
 * @channelid: the index of the channel to generate CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);

/**
 * qbman_swp_CDAN_disable() - Disable CDAN for the channel.
 * @s: the software portal object.
 * @channelid: the index of the channel to generate CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s: the software portal object.
 * @channelid: the index of the channel to generate CDAN.
 * @ctx: the context set in CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx);
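
/*
 * Illustrative sketch (not part of the upstream API): the CDAN
 * service-and-rearm cycle described above. On receiving a CDAN, the user is
 * expected to pull-dequeue the channel and then reenable notifications.
 *
 *	static int example_on_cdan(struct qbman_swp *swp,
 *				   const struct qbman_result *scn)
 *	{
 *		uint16_t cid = qbman_result_CDAN_cid(scn);
 *
 *		// drain the channel here, e.g. with
 *		// qbman_pull_desc_set_channel() followed by qbman_swp_pull()
 *		return qbman_swp_CDAN_enable(swp, cid);	// rearm for next CDAN
 *	}
 */
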
#endif /* !_FSL_QBMAN_PORTAL_H */