/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _LIO_RXTX_H_
#define _LIO_RXTX_H_

#include <stdio.h>
#include <stdint.h>

#include <rte_spinlock.h>
#include <rte_memory.h>

#include "lio_struct.h"

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \
        (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))
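
/* Illustrative sketch (not part of the driver): LIO_STQUEUE_FIRST_ENTRY
 * recovers the structure that embeds a lio_stailq_node from the head of a
 * singly-linked tail queue, container_of() style. The list head below is
 * hypothetical and assumes the STAILQ-based lio_stailq_head from
 * lio_struct.h; struct lio_soft_command and its 'node' member are declared
 * later in this file.
 *
 *        struct lio_stailq_head *head = &some_sc_list;   (hypothetical head)
 *        struct lio_soft_command *sc;
 *
 *        if (head->stqh_first != NULL)
 *                sc = LIO_STQUEUE_FIRST_ENTRY(head, struct lio_soft_command,
 *                                             node);
 */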

#define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))

#define lio_uptime \
        (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())
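
/* Illustrative sketch (not part of the driver) of how the two helpers above
 * combine: lio_uptime samples the timer-based uptime in seconds, and
 * lio_check_timeout() compares the current time against a precomputed
 * deadline. The 5 second budget and operation_done() are hypothetical.
 *
 *        size_t deadline = lio_uptime + 5;
 *
 *        while (!operation_done()) {
 *                if (lio_check_timeout(lio_uptime, deadline))
 *                        return -1;   (give up once the deadline passes)
 *        }
 */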

/** Descriptor format.
 * The descriptor ring is made of descriptors which have 2 64-bit values:
 * -# Physical (bus) address of the data buffer.
 * -# Physical (bus) address of a lio_droq_info structure.
 * The device DMAs incoming packets and their information to the addresses
 * given by these descriptor fields.
 */
struct lio_droq_desc {
        /** The buffer pointer */
        uint64_t buffer_ptr;

        /** The Info pointer */
        uint64_t info_ptr;
};

#define LIO_DROQ_DESC_SIZE (sizeof(struct lio_droq_desc))
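
/* Illustrative sketch (not part of the driver) of how one ring entry is
 * populated: each descriptor carries the bus address of a packet buffer and
 * the bus address of the matching lio_droq_info slot. lio_map_ring() and
 * lio_map_ring_info() are the inline helpers defined later in this file;
 * 'droq', 'i' and 'buf' stand for a DROQ whose descriptor array is assumed
 * to be reachable as droq->desc_ring (per lio_struct.h), a descriptor index
 * and a freshly allocated rte_mbuf.
 *
 *        struct lio_droq_desc *desc_ring = droq->desc_ring;
 *
 *        desc_ring[i].buffer_ptr = lio_map_ring(buf);
 *        desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
 */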

/** Information about packet DMA'ed by Octeon.
 * The format of the information available at Info Pointer after Octeon
 * has posted a packet. Not all descriptors have valid information. Only
 * the Info field of the first descriptor for a packet has information
 * about the packet.
 */
struct lio_droq_info {
        /** The Output Receive Header. */
        union octeon_rh rh;

        /** The Length of the packet. */
        uint64_t length;
};

#define LIO_DROQ_INFO_SIZE (sizeof(struct lio_droq_info))

/** Pointer to data buffer.
 * Driver keeps a pointer to the data buffer that it made available to
 * the Octeon device. Since the descriptor ring keeps physical (bus)
 * addresses, this field is required for the driver to keep track of
 * the virtual address pointers.
 */
struct lio_recv_buffer {
        /** Packet buffer, including meta data. */
        void *buffer;

        /** Data in the packet buffer. */
        uint8_t *data;
};

#define LIO_DROQ_RECVBUF_SIZE (sizeof(struct lio_recv_buffer))

#define LIO_DROQ_SIZE (sizeof(struct lio_droq))

#define LIO_IQ_SEND_OK 0
#define LIO_IQ_SEND_STOP 1
#define LIO_IQ_SEND_FAILED -1

/* conditions */
#define LIO_REQTYPE_NONE 0
#define LIO_REQTYPE_NORESP_NET 1
#define LIO_REQTYPE_NORESP_NET_SG 2
#define LIO_REQTYPE_SOFT_COMMAND 3

struct lio_request_list {
        uint32_t reqtype;
        void *buf;
};

/*---------------------- INSTRUCTION FORMAT ----------------------------*/

struct lio_instr3_64B {
        /** Pointer where the input data is available. */
        uint64_t dptr;

        /** Instruction Header. */
        uint64_t ih3;

        /** Instruction Header. */
        uint64_t pki_ih3;

        /** Input Request Header. */
        uint64_t irh;

        /** opcode/subcode specific parameters */
        uint64_t ossp[2];

        /** Return Data Parameters */
        uint64_t rdp;

        /** Pointer where the response for a RAW mode packet will be written
         * by Octeon.
         */
        uint64_t rptr;
};

union lio_instr_64B {
        struct lio_instr3_64B cmd3;
};

/** The size of each buffer in soft command buffer pool */
#define LIO_SOFT_COMMAND_BUFFER_SIZE 1536

/** Maximum number of buffers to allocate into soft command buffer pool */
#define LIO_MAX_SOFT_COMMAND_BUFFERS 255

struct lio_soft_command {
        /** Soft command buffer info. */
        struct lio_stailq_node node;
        uint64_t dma_addr;
        uint32_t size;

        /** Command and return status */
        union lio_instr_64B cmd;

#define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL
        uint64_t *status_word;

        /** Data buffer info */
        void *virtdptr;
        uint64_t dmadptr;
        uint32_t datasize;

        /** Return buffer info */
        void *virtrptr;
        uint64_t dmarptr;
        uint32_t rdatasize;

        /** Context buffer info */
        void *ctxptr;
        uint32_t ctxsize;

        /** Time out and callback */
        size_t wait_time;
        size_t timeout;
        uint32_t iq_no;
        void (*callback)(uint32_t, void *);
        void *callback_arg;
        struct rte_mbuf *mbuf;
};

struct lio_iq_post_status {
        int status;
        int index;
};

/* wqe
 *  ---------------   0
 * |  wqe word0-3  |
 *  ---------------   32
 * |    PCI IH     |
 *  ---------------   40
 * |     RPTR      |
 *  ---------------   48
 * |    PCI IRH    |
 *  ---------------   56
 * |  OCTEON_CMD   |
 *  ---------------   64
 * | Addtl 8-B Data|
 * |               |
 *  ---------------
 */

union octeon_cmd {
        uint64_t cmd64;

        struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                uint64_t cmd : 5;

                uint64_t more : 6; /* How many udd words follow the command */

                uint64_t reserved : 29;

                uint64_t param1 : 16;

                uint64_t param2 : 8;

#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

                uint64_t param2 : 8;

                uint64_t param1 : 16;

                uint64_t reserved : 29;

                uint64_t more : 6;

                uint64_t cmd : 5;

#endif
        } s;
};

#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
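
/* Illustrative sketch (not part of the driver) of how the bit-fields above
 * pack into the single 64-bit word handed to firmware: set the fields through
 * 's', then read 'cmd64'. LIO_CMD_RX_CTL stands for a command opcode defined
 * elsewhere (lio_hw_defs.h); only the layout comes from this file.
 *
 *        union octeon_cmd ncmd;
 *
 *        ncmd.cmd64 = 0;
 *        ncmd.s.cmd = LIO_CMD_RX_CTL;   (opcode, defined elsewhere)
 *        ncmd.s.more = 0;               (no additional udd words follow)
 *        ncmd.s.param1 = 1;             (e.g. enable RX)
 *
 *        send_to_firmware(ncmd.cmd64);  (hypothetical consumer of the word)
 */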

/* Maximum number of 8-byte words that can be
 * sent in a NIC control message.
 */
#define LIO_MAX_NCTRL_UDD 32

/* Structure of control information passed by driver to the BASE
 * layer when sending control commands to Octeon device software.
 */
struct lio_ctrl_pkt {
        /** Command to be passed to the Octeon device software. */
        union octeon_cmd ncmd;

        /** Send buffer */
        void *data;
        uint64_t dmadata;

        /** Response buffer */
        void *rdata;
        uint64_t dmardata;

        /** Additional data that may be needed by some commands. */
        uint64_t udd[LIO_MAX_NCTRL_UDD];

        /** Input queue to use to send this command. */
        uint64_t iq_no;

        /** Time to wait for Octeon software to respond to this control command.
         * If wait_time is 0, BASE assumes no response is expected.
         */
        size_t wait_time;

        struct lio_dev_ctrl_cmd *ctrl_cmd;
};
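
/* Illustrative sketch (not part of the driver) of filling the structure above
 * and handing it to lio_send_ctrl_pkt(), whose prototype appears later in
 * this file. The opcode and timeout values are placeholders; ctrl_cmd points
 * at a caller-owned struct lio_dev_ctrl_cmd from lio_struct.h.
 *
 *        struct lio_ctrl_pkt ctrl_pkt;
 *
 *        memset(&ctrl_pkt, 0, sizeof(ctrl_pkt));
 *        ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;  (opcode from lio_hw_defs.h)
 *        ctrl_pkt.ncmd.s.param1 = 1;
 *        ctrl_pkt.iq_no = 0;
 *        ctrl_pkt.wait_time = 100;              (arbitrary example timeout)
 *        ctrl_pkt.ctrl_cmd = &ctrl_cmd;
 *
 *        if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt))
 *                (handle LIO_IQ_SEND_FAILED / LIO_IQ_SEND_STOP here)
 */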

/** Structure of data information passed by driver to the BASE
 * layer when forwarding data to Octeon device software.
 */
struct lio_data_pkt {
        /** Pointer to information maintained by NIC module for this packet. The
         * BASE layer passes this as-is to the driver.
         */
        void *buf;

        /** Type of buffer passed in "buf" above. */
        uint32_t reqtype;

        /** Total data bytes to be transferred in this command. */
        uint32_t datasize;

        /** Command to be passed to the Octeon device software. */
        union lio_instr_64B cmd;

        /** Input queue to use to send this command. */
        uint32_t q_no;
};

/** Structure passed by driver to BASE layer to prepare a command to send
 * network data to Octeon.
 */
union lio_cmd_setup {
        struct {
                uint32_t iq_no : 8;
                uint32_t gather : 1;
                uint32_t timestamp : 1;
                uint32_t ip_csum : 1;
                uint32_t transport_csum : 1;
                uint32_t tnl_csum : 1;
                uint32_t rsvd : 19;

                union {
                        uint32_t datasize;
                        uint32_t gatherptrs;
                } u;
        } s;

        uint64_t cmd_setup64;
};
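
/* Illustrative sketch (not part of the driver) of preparing a data-path
 * command with the setup union above and the lio_prepare_pci_cmd() helper
 * defined later in this file. Queue number, checksum flag and data size are
 * example values; 'mbuf' is an already-filled rte_mbuf.
 *
 *        union lio_cmd_setup cmdsetup;
 *        union lio_instr_64B cmd;
 *
 *        memset(&cmdsetup, 0, sizeof(cmdsetup));
 *        cmdsetup.s.iq_no = 0;
 *        cmdsetup.s.ip_csum = 1;      (request IP header checksum offload)
 *        cmdsetup.s.u.datasize = rte_pktmbuf_data_len(mbuf);
 *
 *        lio_prepare_pci_cmd(lio_dev, &cmd, &cmdsetup, 0);
 */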

/* Instruction Header */
struct octeon_instr_ih3 {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

        /** Reserved3 */
        uint64_t reserved3 : 1;

        /** Gather indicator 1=gather */
        uint64_t gather : 1;

        /** Data length OR no. of entries in gather list */
        uint64_t dlengsz : 14;

        /** Front Data size */
        uint64_t fsz : 6;

        /** Reserved2 */
        uint64_t reserved2 : 4;

        /** PKI port kind - PKIND */
        uint64_t pkind : 6;

        /** Reserved1 */
        uint64_t reserved1 : 32;

#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        /** Reserved1 */
        uint64_t reserved1 : 32;

        /** PKI port kind - PKIND */
        uint64_t pkind : 6;

        /** Reserved2 */
        uint64_t reserved2 : 4;

        /** Front Data size */
        uint64_t fsz : 6;

        /** Data length OR no. of entries in gather list */
        uint64_t dlengsz : 14;

        /** Gather indicator 1=gather */
        uint64_t gather : 1;

        /** Reserved3 */
        uint64_t reserved3 : 1;

#endif
};

/* PKI Instruction Header (PKI IH) */
struct octeon_instr_pki_ih3 {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

        /** Wider bit */
        uint64_t w : 1;

        /** Raw mode indicator 1 = RAW */
        uint64_t raw : 1;

        /** Use Tag */
        uint64_t utag : 1;

        /** Use QPG */
        uint64_t uqpg : 1;

        /** Reserved2 */
        uint64_t reserved2 : 1;

        /** Parse Mode */
        uint64_t pm : 3;

        /** Skip Length */
        uint64_t sl : 8;

        /** Use Tag Type */
        uint64_t utt : 1;

        /** Tag type */
        uint64_t tagtype : 2;

        /** Reserved1 */
        uint64_t reserved1 : 2;

        /** QPG Value */
        uint64_t qpg : 11;

        /** Tag Value */
        uint64_t tag : 32;

#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

        /** Tag Value */
        uint64_t tag : 32;

        /** QPG Value */
        uint64_t qpg : 11;

        /** Reserved1 */
        uint64_t reserved1 : 2;

        /** Tag type */
        uint64_t tagtype : 2;

        /** Use Tag Type */
        uint64_t utt : 1;

        /** Skip Length */
        uint64_t sl : 8;

        /** Parse Mode */
        uint64_t pm : 3;

        /** Reserved2 */
        uint64_t reserved2 : 1;

        /** Use QPG */
        uint64_t uqpg : 1;

        /** Use Tag */
        uint64_t utag : 1;

        /** Raw mode indicator 1 = RAW */
        uint64_t raw : 1;

        /** Wider bit */
        uint64_t w : 1;
#endif
};

/** Input Request Header */
struct octeon_instr_irh {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        uint64_t opcode : 4;
        uint64_t rflag : 1;
        uint64_t subcode : 7;
        uint64_t vlan : 12;
        uint64_t priority : 3;
        uint64_t reserved : 5;
        uint64_t ossp : 32; /* opcode/subcode specific parameters */
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        uint64_t ossp : 32; /* opcode/subcode specific parameters */
        uint64_t reserved : 5;
        uint64_t priority : 3;
        uint64_t vlan : 12;
        uint64_t subcode : 7;
        uint64_t rflag : 1;
        uint64_t opcode : 4;
#endif
};

/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp = 40 bytes, + rptr = 48 bytes */
#define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8)
/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
#define OCTEON_PCI_CMD_O3 (24 + 8)

/** Return Data Parameters */
struct octeon_instr_rdp {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        uint64_t reserved : 49;
        uint64_t pcie_port : 3;
        uint64_t rlen : 12;
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        uint64_t rlen : 12;
        uint64_t pcie_port : 3;
        uint64_t reserved : 49;
#endif
};

union octeon_packet_params {
        uint32_t pkt_params32;
        struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                uint32_t reserved : 24;
                uint32_t ip_csum : 1; /* Perform IP header checksum(s) */
                /* Perform Outer transport header checksum */
                uint32_t transport_csum : 1;
                /* Find tunnel, and perform transport csum. */
                uint32_t tnl_csum : 1;
                uint32_t tsflag : 1; /* Timestamp this packet */
                uint32_t ipsec_ops : 4; /* IPsec operation */
#else
                uint32_t ipsec_ops : 4;
                uint32_t tsflag : 1;
                uint32_t tnl_csum : 1;
                uint32_t transport_csum : 1;
                uint32_t ip_csum : 1;
                uint32_t reserved : 7;
#endif
        } s;
};

/** Utility function to prepare a 64B NIC instruction based on a setup command
 * @param lio_dev - lio device pointer
 * @param cmd - pointer to instruction to be filled in.
 * @param setup - pointer to the setup structure
 * @param tag - tag value for the packet; if zero, a default tag derived from
 *              the port is used
 *
 * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
 */
static inline void
lio_prepare_pci_cmd(struct lio_device *lio_dev,
                    union lio_instr_64B *cmd,
                    union lio_cmd_setup *setup,
                    uint32_t tag)
{
        union octeon_packet_params packet_params;
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_instr_irh *irh;
        struct octeon_instr_ih3 *ih3;
        int port;

        memset(cmd, 0, sizeof(union lio_instr_64B));

        ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
        pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;

        /* assume that rflag is cleared so therefore front data will only have
         * irh and ossp[0] and ossp[1] for a total of 24 bytes
         */
        ih3->pkind = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
        /* PKI IH */
        ih3->fsz = OCTEON_PCI_CMD_O3;

        if (!setup->s.gather) {
                ih3->dlengsz = setup->s.u.datasize;
        } else {
                ih3->gather = 1;
                ih3->dlengsz = setup->s.u.gatherptrs;
        }

        pki_ih3->w = 1;
        pki_ih3->raw = 0;
        pki_ih3->utag = 0;
        pki_ih3->utt = 1;
        pki_ih3->uqpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;

        port = (int)lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.port;

        if (tag)
                pki_ih3->tag = tag;
        else
                pki_ih3->tag = LIO_DATA(port);

        pki_ih3->tagtype = OCTEON_ORDERED_TAG;
        pki_ih3->qpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
        pki_ih3->pm = 0x0; /* parse from L2 */
        pki_ih3->sl = 32; /* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1 */

        irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;

        irh->opcode = LIO_OPCODE;
        irh->subcode = LIO_OPCODE_NW_DATA;

        packet_params.pkt_params32 = 0;
        packet_params.s.ip_csum = setup->s.ip_csum;
        packet_params.s.transport_csum = setup->s.transport_csum;
        packet_params.s.tnl_csum = setup->s.tnl_csum;
        packet_params.s.tsflag = setup->s.timestamp;

        irh->ossp = packet_params.pkt_params32;
}

int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);
void lio_free_sc_buffer_pool(struct lio_device *lio_dev);

struct lio_soft_command *
lio_alloc_soft_command(struct lio_device *lio_dev,
                       uint32_t datasize, uint32_t rdatasize,
                       uint32_t ctxsize);
void lio_prepare_soft_command(struct lio_device *lio_dev,
                              struct lio_soft_command *sc,
                              uint8_t opcode, uint8_t subcode,
                              uint32_t irh_ossp, uint64_t ossp0,
                              uint64_t ossp1);
int lio_send_soft_command(struct lio_device *lio_dev,
                          struct lio_soft_command *sc);
void lio_free_soft_command(struct lio_soft_command *sc);
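
/* Illustrative sketch (not part of the driver) of the soft-command life cycle
 * built from the four prototypes above: allocate, prepare, send, then wait
 * for the completion word to change before freeing. The subcode and buffer
 * sizes are placeholders for values defined elsewhere.
 *
 *        struct lio_soft_command *sc;
 *
 *        sc = lio_alloc_soft_command(lio_dev, data_size, resp_size, 0);
 *        if (sc == NULL)
 *                return -1;
 *
 *        lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, subcode, 0, 0, 0);
 *
 *        if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED) {
 *                lio_free_soft_command(sc);
 *                return -1;
 *        }
 *
 *        (poll until *sc->status_word != LIO_COMPLETION_WORD_INIT or the
 *         timeout expires, consume sc->virtrptr, then lio_free_soft_command(sc))
 */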

/** Send control packet to the device
 * @param lio_dev - lio device pointer
 * @param ctrl_pkt - control structure with command, timeout, and callback info
 *
 * @return LIO_IQ_SEND_FAILED if it failed to add to the input queue,
 * LIO_IQ_SEND_STOP if the queue should be stopped, and LIO_IQ_SEND_OK if it
 * sent okay.
 */
int lio_send_ctrl_pkt(struct lio_device *lio_dev,
                      struct lio_ctrl_pkt *ctrl_pkt);

/** Maximum ordered requests to process in every invocation of
 * lio_process_ordered_list(). The function will continue to process requests
 * as long as it can find one that has finished processing. If it keeps
 * finding requests that have completed, the function could run forever. The
 * value defined here sets an upper limit on the number of requests it can
 * process before it returns control to the poll thread.
 */
#define LIO_MAX_ORD_REQS_TO_PROCESS 4096

/** Error codes used in Octeon Host-Core communication.
 *
 *   31            16 15             0
 *   ---------------------------------
 *   |               |               |
 *   ---------------------------------
 * Error codes are 32 bits wide. The upper 16 bits, called the Major Error
 * Number, are reserved to identify the group to which the error code belongs.
 * The lower 16 bits, called the Minor Error Number, carry the actual code.
 *
 * So error codes are (MAJOR NUMBER << 16) | MINOR NUMBER.
 */
/** Status for a request.
 * If the request is successfully queued, the driver will return
 * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is returned by
 * the driver if the response for the request failed to arrive before the
 * time-out period or if the request processing got interrupted due to
 * a signal.
 */
enum {
        /** A value of 0x00000000 indicates no error i.e. success */
        LIO_REQUEST_DONE = 0x00000000,
        /** (Major number: 0x0000; Minor Number: 0x0001) */
        LIO_REQUEST_PENDING = 0x00000001,
        LIO_REQUEST_TIMEOUT = 0x00000003,
};

/*------ Error codes used by firmware (bits 15..0 set by firmware) ------*/
#define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001
#define LIO_FIRMWARE_STATUS_CODE(status) \
        ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
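
/* Worked example of the error-code layout described above: a firmware minor
 * status of 0x0004 expands to
 *
 *        LIO_FIRMWARE_STATUS_CODE(0x0004)
 *                = (0x0001 << 16) | 0x0004
 *                = 0x00010004
 *
 * i.e. the major number 0x0001 in the upper 16 bits and the minor number in
 * the lower 16 bits.
 */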

/** Initialize the response lists.
 * @param lio_dev - the lio device structure.
 */
void lio_setup_response_list(struct lio_device *lio_dev);

/** Check the status of the first entry in the ordered list. If the instruction
 * at that entry has finished processing or has timed out, the entry is
 * cleaned up.
 * @param lio_dev - the lio device structure.
 * @return 1 if the ordered list is empty, 0 otherwise.
 */
int lio_process_ordered_list(struct lio_device *lio_dev);

#define LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, field, count) \
        (((lio_dev)->instr_queue[iq_no]->stats.field) += count)

static inline void
lio_swap_8B_data(uint64_t *data, uint32_t blocks)
{
        while (blocks) {
                *data = rte_cpu_to_be_64(*data);
                blocks--;
                data++;
        }
}

static inline uint64_t
lio_map_ring(void *buf)
{
        rte_iova_t dma_addr;

        dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));

        return (uint64_t)dma_addr;
}

static inline uint64_t
lio_map_ring_info(struct lio_droq *droq, uint32_t i)
{
        rte_iova_t dma_addr;

        dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);

        return (uint64_t)dma_addr;
}

static inline int
lio_opcode_slow_path(union octeon_rh *rh)
{
        uint16_t subcode1, subcode2;

        subcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);
        subcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA);

        return subcode2 != subcode1;
}

static inline void
lio_add_sg_size(struct lio_sg_entry *sg_entry,
                uint16_t size, uint32_t pos)
{
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        sg_entry->u.size[pos] = size;
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        sg_entry->u.size[3 - pos] = size;
#endif
}

/* Increment a ring index by count; if the sum reaches or exceeds
 * max, the index is wrapped around to the start.
 */
static inline uint32_t
lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
        if ((index + count) >= max)
                index = index + count - max;
        else
                index += count;

        return index;
}
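
/* Worked example of the wrap-around above, using a 4-entry ring:
 *
 *        lio_incr_index(1, 2, 4) == 3   (no wrap)
 *        lio_incr_index(3, 2, 4) == 1   (3 + 2 - 4, wrapped past the end)
 *        lio_incr_index(2, 2, 4) == 0   (sum equals max, wraps to index 0)
 */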

int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs,
                   int desc_size, struct rte_mempool *mpool,
                   unsigned int socket_id);
uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t budget);
void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no);

void lio_delete_sglist(struct lio_instr_queue *txq);
int lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
                      int fw_mapped_iq, int num_descs, unsigned int socket_id);
uint16_t lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts,
                           uint16_t nb_pkts);
int lio_wait_for_instr_fetch(struct lio_device *lio_dev);
int lio_setup_iq(struct lio_device *lio_dev, int q_index,
                 union octeon_txpciq iq_no, uint32_t num_descs, void *app_ctx,
                 unsigned int socket_id);
int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq);
void lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no);
/** Setup instruction queue zero for the device
 * @param lio_dev which lio device to setup
 *
 * @return 0 on success, -1 on failure.
 */
int lio_setup_instr_queue0(struct lio_device *lio_dev);
void lio_free_instr_queue0(struct lio_device *lio_dev);
void lio_dev_clear_queues(struct rte_eth_dev *eth_dev);
#endif /* _LIO_RXTX_H_ */