1/* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright (C) 2014 Freescale Semiconductor, Inc.
4 *
5 */
6#ifndef _FSL_QBMAN_PORTAL_H
7#define _FSL_QBMAN_PORTAL_H
8
9#include <fsl_qbman_base.h>
10
11/**
12 * DOC - QBMan portal APIs to implement the following functions:
13 * - Initialize and destroy Software portal object.
14 * - Read and write Software portal interrupt registers.
15 * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
16 * command etc.
17 * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
18 * parsing the dequeue response in DQRR and memory, parsing the state change
19 * notifications etc.
20 * - Release, including setting the release descriptor, and issuing the buffer
21 * release command.
22 * - Acquire, acquire the buffer from the given buffer pool.
23 * - FQ management.
24 * - Channel management, enable/disable CDAN with or without context.
25 */
26
27/**
28 * qbman_swp_init() - Create a functional object representing the given
29 * QBMan portal descriptor.
30 * @d: the given qbman swp descriptor
31 *
32 * Return qbman_swp portal object for success, NULL if the object cannot
33 * be created.
34 */
35struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
36
37/**
38 * qbman_swp_finish() - Destroy the functional object representing
39 * the given QBMan portal descriptor.
40 * @p: the qbman_swp object to be destroyed.
41 *
42 */
43void qbman_swp_finish(struct qbman_swp *p);
44
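/*
 * Usage sketch (not part of this API, and only a minimal outline): the typical
 * portal lifecycle. How the qbman_swp_desc 'desc' is populated depends on the
 * DPIO/MC discovery code in the calling environment and is assumed here.
 *
 *	static int example_portal_lifecycle(const struct qbman_swp_desc *desc)
 *	{
 *		struct qbman_swp *swp = qbman_swp_init(desc);
 *
 *		if (!swp)
 *			return -1;
 *		qbman_swp_finish(swp);
 *		return 0;
 *	}
 */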
45/**
46 * qbman_swp_invalidate() - Invalidate the cache enabled area of the QBMan
47 * portal. This is required to be called if a portal is moved to another core
48 * because the QBMan portal area is non-coherent.
49 * @p: the qbman_swp object to be invalidated
50 *
51 */
52void qbman_swp_invalidate(struct qbman_swp *p);
53
54/**
55 * qbman_swp_get_desc() - Get the descriptor of the given portal object.
56 * @p: the given portal object.
57 *
58 * Return the descriptor for this portal.
59 */
60const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
61
62 /**************/
63 /* Interrupts */
64 /**************/
65
66/* EQCR ring interrupt */
67#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
68/* Enqueue command dispatched interrupt */
69#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
70/* DQRR non-empty interrupt */
71#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
72/* RCR ring interrupt */
73#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
74/* Release command dispatched interrupt */
75#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
76/* Volatile dequeue command interrupt */
77#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
78
79/**
80 * qbman_swp_interrupt_get_vanish() - Get the data in software portal
81 * interrupt status disable register.
82 * @p: the given software portal object.
83 *
84 * Return the settings in SWP_ISDR register.
85 */
86uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
87
88/**
89 * qbman_swp_interrupt_set_vanish() - Set the data in software portal
90 * interrupt status disable register.
91 * @p: the given software portal object.
92 * @mask: The value to set in SWP_ISDR register.
93 */
94void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
95
96/**
97 * qbman_swp_interrupt_read_status() - Get the data in software portal
98 * interrupt status register.
99 * @p: the given software portal object.
100 *
101 * Return the settings in SWP_ISR register.
102 */
103uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
104
105/**
106 * qbman_swp_interrupt_clear_status() - Set the data in software portal
107 * interrupt status register.
108 * @p: the given software portal object.
109 * @mask: The value to set in SWP_ISR register.
110 */
111void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
112
113/**
114 * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
115 * DQRR interrupt threshold register.
116 * @p: the given software portal object.
117 */
118uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
119
120/**
121 * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
122 * DQRR interrupt threshold register.
123 * @p: the given software portal object.
124 * @mask: The value to set in SWP_DQRR_ITR register.
125 */
126void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
127
128/**
129 * qbman_swp_intr_timeout_read_status() - Get the data in software portal
130 * Interrupt Time-Out period register.
131 * @p: the given software portal object.
132 */
133uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
134
135/**
136 * qbman_swp_intr_timeout_write() - Set the data in software portal
137 * Interrupt Time-Out period register.
138 * @p: the given software portal object.
139 * @mask: The value to set in SWP_ITPR register.
140 */
141void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
142
143/**
144 * qbman_swp_interrupt_get_trigger() - Get the data in software portal
145 * interrupt enable register.
146 * @p: the given software portal object.
147 *
148 * Return the settings in SWP_IER register.
149 */
150uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
151
152/**
153 * qbman_swp_interrupt_set_trigger() - Set the data in software portal
154 * interrupt enable register.
155 * @p: the given software portal object.
156 * @mask: The value to set in SWP_IER register.
157 */
158void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
159
160/**
161 * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
162 * interrupt inhibit register.
163 * @p: the given software portal object.
164 *
165 * Return the settings in SWP_IIR register.
166 */
167int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
168
169/**
170 * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
171 * interrupt inhibit register.
172 * @p: the given software portal object.
173 * @inhibit: whether to inhibit the IRQs.
174 */
175void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
176
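/*
 * Usage sketch (not part of this API): arm the DQRR non-empty interrupt and
 * service it. The portal pointer 's' is assumed to come from qbman_swp_init();
 * wiring the actual IRQ line is outside the scope of this header, and
 * example_poll_dqrr() refers to the DQRR polling sketch further down this file.
 *
 *	static void example_arm_dqri(struct qbman_swp *s)
 *	{
 *		qbman_swp_interrupt_clear_status(s, 0xffffffff);
 *		qbman_swp_interrupt_set_trigger(s, QBMAN_SWP_INTERRUPT_DQRI);
 *		qbman_swp_interrupt_set_inhibit(s, 0);
 *	}
 *
 *	static void example_irq_handler(struct qbman_swp *s)
 *	{
 *		uint32_t status = qbman_swp_interrupt_read_status(s);
 *
 *		if (status & QBMAN_SWP_INTERRUPT_DQRI)
 *			example_poll_dqrr(s);
 *		qbman_swp_interrupt_clear_status(s, status);
 *	}
 */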
177 /************/
178 /* Dequeues */
179 /************/
180
181/**
182 * struct qbman_result - structure for qbman dequeue response and/or
183 * notification.
184 * @dont_manipulate_directly: the 16 32-bit words representing the whole
185 * possible qbman dequeue result.
186 */
187struct qbman_result {
188 union {
189 struct common {
190 uint8_t verb;
191 uint8_t reserved[63];
192 } common;
193 struct dq {
194 uint8_t verb;
195 uint8_t stat;
196 __le16 seqnum;
197 __le16 oprid;
198 uint8_t reserved;
199 uint8_t tok;
200 __le32 fqid;
201 uint32_t reserved2;
202 __le32 fq_byte_cnt;
203 __le32 fq_frm_cnt;
204 __le64 fqd_ctx;
205 uint8_t fd[32];
206 } dq;
207 struct scn {
208 uint8_t verb;
209 uint8_t stat;
210 uint8_t state;
211 uint8_t reserved;
212 __le32 rid_tok;
213 __le64 ctx;
214 } scn;
215 struct eq_resp {
216 uint8_t verb;
217 uint8_t dca;
218 __le16 seqnum;
219 __le16 oprid;
220 uint8_t reserved;
221 uint8_t rc;
222 __le32 tgtid;
223 __le32 tag;
224 uint16_t qdbin;
225 uint8_t qpri;
226 uint8_t reserved1;
227 __le32 fqid:24;
228 __le32 rspid:8;
229 __le64 rsp_addr;
230 uint8_t fd[32];
231 } eq_resp;
232 };
233};
234
235/* TODO:
236 * A DQRI interrupt can be generated when there are dequeue results on the
237 * portal's DQRR (this mechanism does not deal with "pull" dequeues to
238 * user-supplied 'storage' addresses). There are two parameters to this
239 * interrupt source, one is a threshold and the other is a timeout. The
240 * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or
241 * if the ring has been non-empty for longer than 'timeout' nanoseconds.
242 * For timeout, an approximation to the desired nanosecond-granularity value is
243 * made, so there are get and set APIs to allow the user to see what actual
244 * timeout is set (compared to the timeout that was requested).
245 */
246int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
247int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
248int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
249
250/* ------------------- */
251/* Push-mode dequeuing */
252/* ------------------- */
253
254/* The user of a portal can enable and disable push-mode dequeuing of up to 16
255 * channels independently. This toggling is not specified by channel IDs, but
256 * rather by the index (from 0 to 15) that has been mapped to the
257 * desired channel.
258 */
259
260/**
261 * qbman_swp_push_get() - Get the push dequeue setup.
262 * @s: the software portal object.
263 * @channel_idx: the channel index to query.
264 * @enabled: returned boolean to show whether the push dequeue is enabled for
265 * the given channel.
266 */
267void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
268
269/**
270 * qbman_swp_push_set() - Enable or disable push dequeue.
271 * @s: the software portal object.
272 * @channel_idx: the channel index.
273 * @enable: enable or disable push dequeue.
274 *
275 * The user of a portal can enable and disable push-mode dequeuing of up to 16
276 * channels independently. This toggling is not specified by channel IDs, but
277 * rather by the index (from 0 to 15) that has been mapped to the
278 * desired channel.
279 */
280void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
281
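/*
 * Usage sketch (not part of this API): enable push-mode dequeuing on channel
 * index 0 and read the setting back; after this, frames from the mapped
 * channel show up in the DQRR. The index-to-channel mapping itself is set up
 * elsewhere and is assumed here.
 *
 *	static int example_enable_push(struct qbman_swp *s)
 *	{
 *		int enabled = 0;
 *
 *		qbman_swp_push_set(s, 0, 1);
 *		qbman_swp_push_get(s, 0, &enabled);
 *		return enabled;
 *	}
 */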
282/* ------------------- */
283/* Pull-mode dequeuing */
284/* ------------------- */
285
286/**
287 * struct qbman_pull_desc - the structure for pull dequeue descriptor
288 */
289struct qbman_pull_desc {
290 union {
291 uint32_t dont_manipulate_directly[16];
292 struct pull {
293 uint8_t verb;
294 uint8_t numf;
295 uint8_t tok;
296 uint8_t reserved;
297 uint32_t dq_src;
298 uint64_t rsp_addr;
299 uint64_t rsp_addr_virt;
300 uint8_t padding[40];
301 } pull;
302 };
303};
304
305enum qbman_pull_type_e {
306 /* dequeue with priority precedence, respect intra-class scheduling */
307 qbman_pull_type_prio = 1,
308 /* dequeue with active FQ precedence, respect ICS */
309 qbman_pull_type_active,
310 /* dequeue with active FQ precedence, no ICS */
311 qbman_pull_type_active_noics
312};
313
314/**
315 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
316 * default/starting state.
317 * @d: the pull dequeue descriptor to be cleared.
318 */
319void qbman_pull_desc_clear(struct qbman_pull_desc *d);
320
321/**
322 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
323 * @d: the pull dequeue descriptor to be set.
324 * @storage: the pointer of the memory to store the dequeue result.
325 * @storage_phys: the physical address of the storage memory.
326 * @stash: to indicate whether write allocate is enabled.
327 *
328 * If not called, or if called with 'storage' as NULL, the resulting pull dequeues
329 * will produce results to DQRR. If 'storage' is non-NULL, then results are
330 * produced to the given memory location (using the physical/DMA address which
331 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
332 * those writes to main-memory express a cache-warming attribute.
333 */
334void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
335 struct qbman_result *storage,
336 uint64_t storage_phys,
337 int stash);
338/**
339 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
340 * @d: the pull dequeue descriptor to be set.
341 * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
342 */
343void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
344 uint8_t numframes);
345/**
346 * qbman_pull_desc_set_token() - Set dequeue token for pull command
347 * @d: the dequeue descriptor
348 * @token: the token to be set
349 *
350 * token is the value that shows up in the dequeue response that can be used to
351 * detect when the results have been published. The easiest technique is to zero
352 * result "storage" before issuing a dequeue, and use any non-zero 'token' value.
353 */
354void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
355
356/* Exactly one of the following descriptor "actions" should be set. (Calling any
357 * one of these will replace the effect of any prior call to one of these.)
358 * - pull dequeue from the given frame queue (FQ)
359 * - pull dequeue from any FQ in the given work queue (WQ)
360 * - pull dequeue from any FQ in any WQ in the given channel
361 */
362/**
363 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
364 * @fqid: the frame queue index of the given FQ.
365 */
366void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
367
368/**
369 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
370 * @wqid: composed of channel id and wqid within the channel.
371 * @dct: the dequeue command type.
372 */
373void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
374 enum qbman_pull_type_e dct);
375
376/** qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
377 * dequeues.
378 * @chid: the channel id to be dequeued.
379 * @dct: the dequeue command type.
380 */
381void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
382 enum qbman_pull_type_e dct);
383
384/**
385 * qbman_pull_desc_set_rad() - Decide whether to reschedule the FQ after dequeue
386 *
387 * @rad: 1 = Reschedule the FQ after dequeue.
388 * 0 = Allow the FQ to remain active after dequeue.
389 */
390void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
391
392/**
393 * qbman_swp_pull() - Issue the pull dequeue command
394 * @s: the software portal object.
395 * @d: the software portal descriptor which has been configured with
396 * the set of qbman_pull_desc_set_*() calls.
397 *
398 * Return 0 for success, and -EBUSY if the software portal is not ready
399 * to do pull dequeue.
400 */
401int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
402
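/*
 * Usage sketch (not part of this API): pull-dequeue up to 16 frames from a FQ
 * into caller-owned storage, then poll that storage for the response. The two
 * empty while-loops spin until the portal accepts the command (-EBUSY) and
 * until the result lands in memory. 'storage'/'storage_phys' are assumed to be
 * a DMA-able allocation owned by the caller, 'fqid' is a hypothetical value,
 * and memset() needs <string.h>.
 *
 *	static int example_pull(struct qbman_swp *s, struct qbman_result *storage,
 *				uint64_t storage_phys, uint32_t fqid)
 *	{
 *		struct qbman_pull_desc pd;
 *
 *		memset(storage, 0, sizeof(*storage));
 *		qbman_pull_desc_clear(&pd);
 *		qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *		qbman_pull_desc_set_numframes(&pd, 16);
 *		qbman_pull_desc_set_token(&pd, 1);
 *		qbman_pull_desc_set_fq(&pd, fqid);
 *		while (qbman_swp_pull(s, &pd))
 *			;
 *		while (!qbman_result_has_new_result(s, storage))
 *			;
 *		return 0;
 *	}
 */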
403/* -------------------------------- */
404/* Polling DQRR for dequeue results */
405/* -------------------------------- */
406
407/**
408 * qbman_swp_dqrr_next() - Get a valid DQRR entry.
409 * @s: the software portal object.
410 *
411 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
412 * only once, so repeated calls can return a sequence of DQRR entries, without
413 * requiring that they be consumed immediately or in any particular order.
414 */
415const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
416
417/**
418 * qbman_swp_prefetch_dqrr_next() - prefetch the next DQRR entry.
419 * @s: the software portal object.
420 */
421void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);
422
423/**
424 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
425 * qbman_swp_dqrr_next().
426 * @s: the software portal object.
427 * @dq: the DQRR entry to be consumed.
428 */
429void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
430
431/**
432 * qbman_swp_dqrr_idx_consume() - Given the DQRR index consume the DQRR entry
433 * @s: the software portal object.
434 * @dqrr_index: the DQRR index entry to be consumed.
435 */
436void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
437
438/**
439 * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
440 * @dqrr: the given dqrr object.
441 *
442 * Return dqrr index.
443 */
444uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
445
446/**
447 * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
448 * given portal
449 * @s: the given portal.
450 * @idx: the dqrr index.
451 *
452 * Return dqrr entry object.
453 */
454struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
455
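/*
 * Usage sketch (not part of this API): a minimal DQRR polling loop for
 * push-mode dequeuing. qbman_result_is_DQ() and qbman_result_DQ_fd() are
 * declared further down in this header; process_fd() is a hypothetical
 * consumer supplied by the caller.
 *
 *	static void example_poll_dqrr(struct qbman_swp *s)
 *	{
 *		const struct qbman_result *dq;
 *
 *		while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
 *			if (qbman_result_is_DQ(dq))
 *				process_fd(qbman_result_DQ_fd(dq));
 *			qbman_swp_dqrr_consume(s, dq);
 *		}
 *	}
 */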
456/* ------------------------------------------------- */
457/* Polling user-provided storage for dequeue results */
458/* ------------------------------------------------- */
459
460/**
461 * qbman_result_has_new_result() - Check and get the dequeue response from the
462 * dq storage memory set in pull dequeue command
463 * @s: the software portal object.
464 * @dq: the dequeue result read from the memory.
465 *
466 * Only used for user-provided storage of dequeue results, not DQRR. For
467 * efficiency purposes, the driver will perform any required endianness
468 * conversion to ensure that the user's dequeue result storage is in host-endian
469 * format (whether or not that is the same as the little-endian format that
470 * hardware DMA'd to the user's storage). As such, once the user has called
471 * qbman_result_has_new_result() and been returned a valid dequeue result,
472 * they should not call it again on the same memory location (except of course
473 * if another dequeue command has been executed to produce a new result to that
474 * location).
475 *
476 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
477 * dequeue result.
478 */
479int qbman_result_has_new_result(struct qbman_swp *s,
480 struct qbman_result *dq);
481
482/**
483 * qbman_check_command_complete() - Check if the previously issued dq command
484 * is completed and results are available in memory.
485 * @s: the software portal object.
486 * @dq: the dequeue result read from the memory.
487 *
488 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
489 * dequeue result.
490 */
491int qbman_check_command_complete(struct qbman_result *dq);
492
493int qbman_check_new_result(struct qbman_result *dq);
494
495/* -------------------------------------------------------- */
496/* Parsing dequeue entries (DQRR and user-provided storage) */
497/* -------------------------------------------------------- */
498
499/**
500 * qbman_result_is_DQ() - Check whether the dequeue result is a dequeue response
501 * @dq: the dequeue result to be checked.
502 *
503 * DQRR entries may contain non-dequeue results, i.e. notifications
504 */
505int qbman_result_is_DQ(const struct qbman_result *dq);
506
507/**
508 * qbman_result_is_SCN() - Check whether the dequeue result is a notification
509 * @dq: the dequeue result to be checked.
510 *
511 * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
512 * notifications" of one type or another. Some APIs apply to all of them, of the
513 * form qbman_result_SCN_***().
514 */
515static inline int qbman_result_is_SCN(const struct qbman_result *dq)
516{
517 return !qbman_result_is_DQ(dq);
518}
519
520/* Recognise different notification types, only required if the user allows for
521 * these to occur, and cares about them when they do.
522 */
523
524/**
525 * qbman_result_is_FQDAN() - Check for FQ Data Availability
526 * @dq: the qbman_result object.
527 *
528 * Return 1 if this is FQDAN.
529 */
530int qbman_result_is_FQDAN(const struct qbman_result *dq);
531
532/**
533 * qbman_result_is_CDAN() - Check for Channel Data Availability
534 * @dq: the qbman_result object to check.
535 *
536 * Return 1 if this is CDAN.
537 */
538int qbman_result_is_CDAN(const struct qbman_result *dq);
539
540/**
541 * qbman_result_is_CSCN() - Check for Congestion State Change
542 * @dq: the qbman_result object to check.
543 *
544 * Return 1 if this is CSCN.
545 */
546int qbman_result_is_CSCN(const struct qbman_result *dq);
547
548/**
549 * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
550 * @dq: the qbman_result object to check.
551 *
552 * Return 1 if this is BPSCN.
553 */
554int qbman_result_is_BPSCN(const struct qbman_result *dq);
555
556/**
557 * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
558 * @dq: the qbman_result object to check.
559 *
560 * Return 1 if this is CGCU.
561 */
562int qbman_result_is_CGCU(const struct qbman_result *dq);
563
564/* Frame queue state change notifications; (FQDAN in theory counts too as it
565 * leaves a FQ parked, but it is primarily a data availability notification)
566 */
567
568/**
569 * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
570 * @dq: the qbman_result object to check.
571 *
572 * Return 1 if this is FQRN.
573 */
574int qbman_result_is_FQRN(const struct qbman_result *dq);
575
576/**
577 * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
578 * @dq: the qbman_result object to check.
579 *
580 * Return 1 if this is FQRNI.
581 */
582int qbman_result_is_FQRNI(const struct qbman_result *dq);
583
584/**
585 * qbman_result_is_FQPN() - Check for FQ Park Notification
586 * @dq: the qbman_result object to check.
587 *
588 * Return 1 if this is FQPN.
589 */
590int qbman_result_is_FQPN(const struct qbman_result *dq);
591
592/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
593 */
594/* FQ empty */
595#define QBMAN_DQ_STAT_FQEMPTY 0x80
596/* FQ held active */
597#define QBMAN_DQ_STAT_HELDACTIVE 0x40
598/* FQ force eligible */
599#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
600/* Valid frame */
601#define QBMAN_DQ_STAT_VALIDFRAME 0x10
602/* FQ ODP enable */
603#define QBMAN_DQ_STAT_ODPVALID 0x04
604/* Volatile dequeue */
605#define QBMAN_DQ_STAT_VOLATILE 0x02
606/* volatile dequeue command is expired */
607#define QBMAN_DQ_STAT_EXPIRED 0x01
608
609#define QBMAN_EQCR_DCA_IDXMASK 0x0f
610#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
611
612/**
613 * qbman_result_DQ_flags() - Get the STAT field of dequeue response
614 * @dq: the dequeue result.
615 *
616 * Return the stat field.
617 */
618uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
619
620/**
621 * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
622 * command.
623 * @dq: the dequeue result.
624 *
625 * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
626 */
627static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
628{
629 return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
630}
631
632/**
633 * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
634 * completed.
635 * @dq: the dequeue result.
636 *
637 * Return boolean.
638 */
639static inline int qbman_result_DQ_is_pull_complete(
640 const struct qbman_result *dq)
641{
642 return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
643}
644
645/**
646 * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
647 * seqnum is valid only if VALIDFRAME flag is TRUE
648 * @dq: the dequeue result.
649 *
650 * Return seqnum.
651 */
652uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
653
654/**
655 * qbman_result_DQ_odpid() - Get the odpid field in dequeue response
656 * odpid is valid only if ODPVALID flag is TRUE.
657 * @dq: the dequeue result.
658 *
659 * Return odpid.
660 */
661uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
662
663/**
664 * qbman_result_DQ_fqid() - Get the fqid in dequeue response
665 * @dq: the dequeue result.
666 *
667 * Return fqid.
668 */
669uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
670
671/**
672 * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
673 * @dq: the dequeue result.
674 *
675 * Return the byte count remaining in the FQ.
676 */
677uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
678
679/**
680 * qbman_result_DQ_frame_count - Get the frame count in dequeue response
681 * @dq: the dequeue result.
682 *
683 * Return the frame count remaining in the FQ.
684 */
685uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
686
687/**
688 * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
689 * @dq: the dequeue result.
690 *
691 * Return the frame queue context.
692 */
693uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
694
695/**
696 * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
697 * @dq: the dequeue result.
698 *
699 * Return the frame descriptor.
700 */
701const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
702
703/* State-change notifications (FQDAN/CDAN/CSCN/...). */
704
705/**
706 * qbman_result_SCN_state() - Get the state field in State-change notification
707 * @scn: the state change notification.
708 *
709 * Return the state in the notification.
710 */
711uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
712
713/**
714 * qbman_result_SCN_rid() - Get the resource id from the notification
715 * @scn: the state change notification.
716 *
717 * Return the resource id.
718 */
719uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
720
721/**
722 * qbman_result_SCN_ctx() - get the context from the notification
723 * @scn: the state change notification.
724 *
725 * Return the context.
726 */
727uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
728
729/* Type-specific "resource IDs". Mainly for illustration purposes, though it
730 * also gives the appropriate type widths.
731 */
732/* Get the FQID from the FQDAN */
733#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
734/* Get the FQID from the FQRN */
735#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
736/* Get the FQID from the FQRNI */
737#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
738/* Get the FQID from the FQPN */
739#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
740/* Get the channel ID from the CDAN */
741#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
742/* Get the CGID from the CSCN */
743#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
744
745/**
746 * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
747 * @scn: the state change notification.
748 *
749 * Return the buffer pool id.
750 */
751uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
752
753/**
754 * qbman_result_bpscn_has_free_bufs() - Check whether there are free
755 * buffers in the pool from BPSCN.
756 * @scn: the state change notification.
757 *
758 * Return the number of free buffers.
759 */
760int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
761
762/**
763 * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
764 * buffer pool is depleted.
765 * @scn: the state change notification.
766 *
767 * Return the status of buffer pool depletion.
768 */
769int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
770
771/**
772 * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
773 * pool is surplus or not.
774 * @scn: the state change notification.
775 *
776 * Return the status of buffer pool surplus.
777 */
778int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
779
780/**
781 * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
782 * @scn: the state change notification.
783 *
784 * Return the BPSCN context.
785 */
786uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
787
788/* Parsing CGCU */
789/**
790 * qbman_result_cgcu_cgid() - Get the CGCU resource id, i.e. cgid
791 * @scn: the state change notification.
792 *
793 * Return the CGCU resource id.
794 */
795uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
796
797/**
798 * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
799 * @scn: the state change notification.
800 *
801 * Return instantaneous count in the CGCU notification.
802 */
803uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
804
805 /************/
806 /* Enqueues */
807 /************/
808/* struct qbman_eq_desc - structure of enqueue descriptor */
809struct qbman_eq_desc {
810 union {
811 uint32_t dont_manipulate_directly[8];
812 struct eq {
813 uint8_t verb;
814 uint8_t dca;
815 uint16_t seqnum;
816 uint16_t orpid;
817 uint16_t reserved1;
818 uint32_t tgtid;
819 uint32_t tag;
820 uint16_t qdbin;
821 uint8_t qpri;
822 uint8_t reserved[3];
823 uint8_t wae;
824 uint8_t rspid;
825 uint64_t rsp_addr;
826 } eq;
827 };
828};
829
830/**
831 * struct qbman_eq_response - structure of enqueue response
832 * @dont_manipulate_directly: the 16 32-bit words representing the whole
833 * enqueue response.
834 */
835struct qbman_eq_response {
836 uint32_t dont_manipulate_directly[16];
837};
838
839/**
840 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
841 * default/starting state.
842 * @d: the given enqueue descriptor.
843 */
844void qbman_eq_desc_clear(struct qbman_eq_desc *d);
845
846/* Exactly one of the following descriptor "actions" should be set. (Calling
847 * any one of these will replace the effect of any prior call to one of these.)
848 * - enqueue without order-restoration
849 * - enqueue with order-restoration
850 * - fill a hole in the order-restoration sequence, without any enqueue
851 * - advance NESN (Next Expected Sequence Number), without any enqueue
852 * 'respond_success' indicates whether an enqueue response should be DMA'd
853 * after success (otherwise a response is DMA'd only after failure).
854 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
855 * be enqueued.
856 */
857
858/**
859 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
860 * @d: the enqueue descriptor.
861 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
862 * rejections returned on a FQ.
863 */
864void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
865/**
866 * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
867 * @d: the enqueue descriptor.
868 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
869 * rejections returned on a FQ.
870 * @opr_id: the order point record id.
871 * @seqnum: the order restoration sequence number.
872 * @incomplete: indicates whether other fragments using the same sequence
873 * number are yet to be enqueued.
874 */
875void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
876 uint16_t opr_id, uint16_t seqnum, int incomplete);
877
878/**
879 * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
880 * without any enqueue
881 * @d: the enqueue descriptor.
882 * @opr_id: the order point record id.
883 * @seqnum: the order restoration sequence number.
884 */
885void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
886 uint16_t seqnum);
887
888/**
889 * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
890 * without any enqueue
891 * @d: the enqueue descriptor.
892 * @opr_id: the order point record id.
893 * @seqnum: the order restoration sequence number.
894 */
895void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
896 uint16_t seqnum);
897/**
898 * qbman_eq_desc_set_response() - Set the enqueue response info.
899 * @d: the enqueue descriptor
900 * @storage_phys: the physical address of the enqueue response in memory.
901 * @stash: indicates whether write allocation is enabled or not.
902 *
903 * In the case where an enqueue response is DMA'd, this determines where that
904 * response should go. (The physical/DMA address is given for hardware's
905 * benefit, but software should interpret it as a "struct qbman_eq_response"
906 * data structure.) 'stash' controls whether or not the write to main-memory
907 * expresses a cache-warming attribute.
908 */
909void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
910 uint64_t storage_phys,
911 int stash);
912
913/**
914 * qbman_eq_desc_set_token() - Set token for the enqueue command
915 * @d: the enqueue descriptor
916 * @token: the token to be set.
917 *
918 * token is the value that shows up in an enqueue response that can be used to
919 * detect when the results have been published. The easiest technique is to zero
920 * result "storage" before issuing an enqueue, and use any non-zero 'token'
921 * value.
922 */
923void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
924
925/**
926 * Exactly one of the following descriptor "targets" should be set. (Calling any
927 * one of these will replace the effect of any prior call to one of these.)
928 * - enqueue to a frame queue
929 * - enqueue to a queuing destination
930 * Note that none of these will have any effect if the "action" type has been
931 * set to "orp_hole" or "orp_nesn".
932 */
933/**
934 * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
935 * @d: the enqueue descriptor
936 * @fqid: the id of the frame queue to be enqueued.
937 */
938void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
939
940/**
941 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
942 * @d: the enqueue descriptor
943 * @qdid: the id of the queuing destination to be enqueued.
944 * @qd_bin: the queuing destination bin
945 * @qd_prio: the queuing destination priority.
946 */
947void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
948 uint16_t qd_bin, uint8_t qd_prio);
949
950/**
951 * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
952 * @d: the enqueue descriptor
953 * @enable: boolean to enable/disable EQDI
954 *
955 * Determines whether or not the portal's EQDI interrupt source should be
956 * asserted after the enqueue command is completed.
957 */
958void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
959
960/**
961 * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
962 * @d: the enqueue descriptor.
963 * @enable: enable/disable DCA mode.
964 * @dqrr_idx: DCAP_CI, the DCAP consumer index.
965 * @park: determines whether to park the FQ or not
966 *
967 * Determines whether or not a portal DQRR entry should be consumed once the
968 * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
969 * held-active (order-preserving) FQ, whether the FQ should be parked instead of
970 * being rescheduled.)
971 */
972void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
973 uint8_t dqrr_idx, int park);
974
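/*
 * Usage sketch (not part of this API): when forwarding a frame that arrived
 * via the DQRR, DCA lets the hardware consume that DQRR entry once the enqueue
 * completes, instead of calling qbman_swp_dqrr_consume(). 'dq' is the DQRR
 * entry the frame came from; the rest of the enqueue descriptor setup is
 * omitted here and assumed to follow the simple enqueue sketch below.
 *
 *	static void example_set_dca(struct qbman_eq_desc *d,
 *				    const struct qbman_result *dq)
 *	{
 *		qbman_eq_desc_set_dca(d, 1, qbman_get_dqrr_idx(dq), 0);
 *	}
 */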
975/**
976 * qbman_result_eqresp_fd() - Get fd from enqueue response.
977 * @eqresp: enqueue response.
978 *
979 * Return the fd pointer.
980 */
981struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);
982
983/**
984 * qbman_result_eqresp_set_rspid() - Set the response id in enqueue response.
985 * @eqresp: enqueue response.
986 * @val: values to set into the response id.
987 *
988 * This value is set into the response id before the enqueue command, and
989 * gets overwritten by QBMan once the enqueue command is complete.
990 */
991void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);
992
993/**
994 * qbman_result_eqresp_rspid() - Get the response id.
995 * @eqresp: enqueue response.
996 *
997 * Return the response id.
998 *
999 * At the time of enqueue the user provides the response id. The response id
1000 * gets copied into the enqueue response to determine whether the command has
1001 * been completed and the response has been updated.
1002 */
1003uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
1004
1005/**
1006 * qbman_result_eqresp_rc() - Determine whether the enqueue command succeeded.
1007 * @eqresp: enqueue response.
1008 *
1009 * Return 0 when the command is successful.
1010 */
1011uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
1012
1013/**
1014 * qbman_swp_enqueue() - Issue an enqueue command.
1015 * @s: the software portal used for enqueue.
1016 * @d: the enqueue descriptor.
1017 * @fd: the frame descriptor to be enqueued.
1018 *
1019 * Please note that 'fd' should only be NULL if the "action" of the
1020 * descriptor is "orp_hole" or "orp_nesn".
1021 *
1022 * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
1023 */
1024int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
1025 const struct qbman_fd *fd);
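/*
 * Usage sketch (not part of this API): a simple enqueue to a frame queue with
 * no order restoration and no enqueue response on success, retrying while the
 * EQCR is busy. 'fqid' is a hypothetical value and EBUSY needs <errno.h>.
 *
 *	static int example_enqueue(struct qbman_swp *s, uint32_t fqid,
 *				   const struct qbman_fd *fd)
 *	{
 *		struct qbman_eq_desc ed;
 *		int ret;
 *
 *		qbman_eq_desc_clear(&ed);
 *		qbman_eq_desc_set_no_orp(&ed, 0);
 *		qbman_eq_desc_set_fq(&ed, fqid);
 *		do {
 *			ret = qbman_swp_enqueue(s, &ed, fd);
 *		} while (ret == -EBUSY);
 *		return ret;
 *	}
 */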
1026/**
1027 * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
1028 * eq descriptor
1029 * @s: the software portal used for enqueue.
1030 * @d: the enqueue descriptor.
1031 * @fd: the frame descriptor to be enqueued.
1032 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
1033 * @num_frames: the number of the frames to be enqueued.
1034 *
1035 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1036 */
1037int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1038 const struct qbman_eq_desc *d,
1039 const struct qbman_fd *fd,
1040 uint32_t *flags,
1041 int num_frames);
1042
1043/**
1044 * qbman_swp_enqueue_multiple_fd() - Enqueue multiple frames with same
1045 * eq descriptor
1046 * @s: the software portal used for enqueue.
1047 * @d: the enqueue descriptor.
1048 * @fd: the frame descriptor to be enqueued.
1049 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
1050 * @num_frames: the number of the frames to be enqueued.
1051 *
1052 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1053 */
1054int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1055 const struct qbman_eq_desc *d,
1056 struct qbman_fd **fd,
1057 uint32_t *flags,
1058 int num_frames);
1059
1060/**
1061 * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
1062 * individual eq descriptor.
1063 * @s: the software portal used for enqueue.
1064 * @d: the enqueue descriptor.
1065 * @fd: the frame descriptor to be enqueued.
1066 * @num_frames: the number of the frames to be enqueued.
1067 *
1068 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1069 */
1070int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1071 const struct qbman_eq_desc *d,
1072 const struct qbman_fd *fd,
1073 int num_frames);
1074
1075/* TODO:
1076 * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
1077 * @s: the software portal.
1078 * @thresh: the threshold to trigger the EQRI interrupt.
1079 *
1080 * An EQRI interrupt can be generated when the fill-level of EQCR falls below
1081 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
1082 */
1083int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
1084
1085 /*******************/
1086 /* Buffer releases */
1087 /*******************/
1088/**
1089 * struct qbman_release_desc - The structure for buffer release descriptor
1090 * @dont_manipulate_directly: the 32-bit words representing the whole
1091 * possible settings of qbman release descriptor.
1092 */
1093struct qbman_release_desc {
1094 union {
1095 uint32_t dont_manipulate_directly[16];
1096 struct br {
1097 uint8_t verb;
1098 uint8_t reserved;
1099 uint16_t bpid;
1100 uint32_t reserved2;
1101 uint64_t buf[7];
1102 } br;
1103 };
1104};
1105
1106/**
1107 * qbman_release_desc_clear() - Clear the contents of a descriptor to
1108 * default/starting state.
1109 * @d: the qbman release descriptor.
1110 */
1111void qbman_release_desc_clear(struct qbman_release_desc *d);
1112
1113/**
1114 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
1115 * @d: the qbman release descriptor.
1116 */
1117void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
1118
1119/**
1120 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
1121 * interrupt source should be asserted after the release command is completed.
1122 * @d: the qbman release descriptor.
1123 */
1124void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
1125
1126/**
1127 * qbman_swp_release() - Issue a buffer release command.
1128 * @s: the software portal object.
1129 * @d: the release descriptor.
1130 * @buffers: a pointer to the buffer address(es) to be released.
1131 * @num_buffers: number of buffers to be released, must be less than 8.
1132 *
1133 * Return 0 for success, -EBUSY if the release command ring is not ready.
1134 */
1135int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
1136 const uint64_t *buffers, unsigned int num_buffers);
1137
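/*
 * Usage sketch (not part of this API): release a small batch of buffers back
 * to a buffer pool, retrying while the release command ring is busy. 'bpid'
 * and the buffer addresses are hypothetical; at most 7 buffers may be released
 * per command, and EBUSY needs <errno.h>.
 *
 *	static void example_release(struct qbman_swp *s, uint16_t bpid,
 *				    const uint64_t *bufs, unsigned int n)
 *	{
 *		struct qbman_release_desc rd;
 *
 *		qbman_release_desc_clear(&rd);
 *		qbman_release_desc_set_bpid(&rd, bpid);
 *		while (qbman_swp_release(s, &rd, bufs, n) == -EBUSY)
 *			;
 *	}
 */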
1138/* TODO:
1139 * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
1140 * @s: the software portal.
1141 * @thresh: the threshold.
1142 * An RCRI interrupt can be generated when the fill-level of RCR falls below
1143 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
1144 */
1145int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
1146
1147 /*******************/
1148 /* Buffer acquires */
1149 /*******************/
1150/**
1151 * qbman_swp_acquire() - Issue a buffer acquire command.
1152 * @s: the software portal object.
1153 * @bpid: the buffer pool index.
1154 * @buffers: a pointer to the acquired buffer address(es).
1155 * @num_buffers: number of buffers to be acquired, must be less than 8.
1156 *
1157 * Return 0 for success, or negative error code if the acquire command
1158 * fails.
1159 */
1160int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1161 unsigned int num_buffers);
1162
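/*
 * Usage sketch (not part of this API): acquire up to 7 buffers from a pool
 * into a caller-provided array. Per the documentation above, a negative
 * return indicates failure; on success the acquired addresses are written
 * into 'bufs'.
 *
 *	static int example_acquire(struct qbman_swp *s, uint16_t bpid,
 *				   uint64_t bufs[7])
 *	{
 *		return qbman_swp_acquire(s, bpid, bufs, 7);
 *	}
 */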
1163 /*****************/
1164 /* FQ management */
1165 /*****************/
1166/**
1167 * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
1168 * @s: the software portal object.
1169 * @fqid: the index of frame queue to be scheduled.
1170 *
1171 * There are a couple of different ways that a FQ can end up in the parked
1172 * state. This schedules it.
1173 *
1174 * Return 0 for success, or negative error code for failure.
1175 */
1176int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
1177
1178/**
1179 * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
1180 * @s: the software portal object.
1181 * @fqid: the index of frame queue to be forced.
1182 *
1183 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
1184 * and thus be available for selection by any channel-dequeuing behaviour (push
1185 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
1186 * empty at the time this happens, the resulting dq_entry will have no FD.
1187 * (qbman_result_DQ_fd() will return NULL.)
1188 *
1189 * Return 0 for success, or negative error code for failure.
1190 */
1191int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
1192
1193/**
1194 * These functions change the FQ flow-control stuff between XON/XOFF. (The
1195 * default is XON.) This setting doesn't affect enqueues to the FQ, just
1196 * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
1197 * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
1198 * changed to XOFF after it had already become truly-scheduled to a channel, and
1199 * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
1200 * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
1201 * return NULL.)
1202 */
1203/**
1204 * qbman_swp_fq_xon() - XON the frame queue.
1205 * @s: the software portal object.
1206 * @fqid: the index of frame queue.
1207 *
1208 * Return 0 for success, or negative error code for failure.
1209 */
1210int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
1211/**
1212 * qbman_swp_fq_xoff() - XOFF the frame queue.
1213 * @s: the software portal object.
1214 * @fqid: the index of frame queue.
1215 *
1216 * Return 0 for success, or negative error code for failure.
1217 */
1218int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
1219
1220 /**********************/
1221 /* Channel management */
1222 /**********************/
1223
1224/**
1225 * If the user has been allocated a channel object that is going to generate
1226 * CDANs to another channel, then these functions will be necessary.
1227 * CDAN-enabled channels only generate a single CDAN notification, after which
1228 * they need to be re-enabled before they'll generate another. (The idea is
1229 * that pull dequeuing will occur in reaction to the CDAN, followed by a
1230 * reenable step.) Each function generates a distinct command to hardware, so a
1231 * combination function is provided if the user wishes to modify the "context"
1232 * (which shows up in each CDAN message) each time they reenable, as a single
1233 * command to hardware.
1234 */
1235
1236/**
1237 * qbman_swp_CDAN_set_context() - Set CDAN context
1238 * @s: the software portal object.
1239 * @channelid: the channel index.
1240 * @ctx: the context to be set in CDAN.
1241 *
1242 * Return 0 for success, or negative error code for failure.
1243 */
1244int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1245 uint64_t ctx);
1246
1247/**
1248 * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
1249 * @s: the software portal object.
1250 * @channelid: the index of the channel to generate CDAN.
1251 *
1252 * Return 0 for success, or negative error code for failure.
1253 */
1254int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
1255
1256/**
1257 * qbman_swp_CDAN_disable() - disable CDAN for the channel.
1258 * @s: the software portal object.
1259 * @channelid: the index of the channel to generate CDAN.
1260 *
1261 * Return 0 for success, or negative error code for failure.
1262 */
1263int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
1264
1265/**
1266 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
1267 * @s: the software portal object.
1268 * @channelid: the index of the channel to generate CDAN.
1269 * @ctx: the context set in CDAN.
1270 *
1271 * Return 0 for success, or negative error code for failure.
1272 */
1273int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1274 uint64_t ctx);
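/*
 * Usage sketch (not part of this API): drive a CDAN-enabled channel. The
 * channel id and context value are hypothetical. The CDAN itself arrives as a
 * notification in the DQRR (see qbman_result_is_CDAN() above); after servicing
 * it, typically with a pull dequeue of the channel via
 * qbman_pull_desc_set_channel(), the channel must be re-armed before another
 * CDAN will be generated.
 *
 *	static int example_arm_channel(struct qbman_swp *s, uint16_t ch)
 *	{
 *		return qbman_swp_CDAN_set_context_enable(s, ch, 0x1234);
 *	}
 *
 *	static int example_rearm_after_cdan(struct qbman_swp *s, uint16_t ch)
 *	{
 *		return qbman_swp_CDAN_enable(s, ch);
 *	}
 */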
1275#endif /* !_FSL_QBMAN_PORTAL_H */