ceph/src/dpdk/drivers/net/qede/qede_rxtx.h
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#ifndef _QEDE_RXTX_H_
#define _QEDE_RXTX_H_

#include "qede_ethdev.h"

/* Ring Descriptors */
#define RX_RING_SIZE_POW	16 /* 64K */
#define RX_RING_SIZE		(1ULL << RX_RING_SIZE_POW)
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN		128
#define NUM_RX_BDS_DEF		NUM_RX_BDS_MAX
#define NUM_RX_BDS(q)		(q->nb_rx_desc - 1)

#define TX_RING_SIZE_POW	16 /* 64K */
#define TX_RING_SIZE		(1ULL << TX_RING_SIZE_POW)
#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN		128
#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
#define NUM_TX_BDS(q)		(q->nb_tx_desc - 1)

#define TX_CONS(txq)		(txq->sw_tx_cons & NUM_TX_BDS(txq))
#define TX_PROD(txq)		(txq->sw_tx_prod & NUM_TX_BDS(txq))
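
/*
 * Note: the software producer/consumer indices are free-running counters;
 * masking them with NUM_RX_BDS()/NUM_TX_BDS() (descriptor count minus one)
 * wraps them onto the ring, which relies on the configured descriptor count
 * being a power of two.
 */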

#define QEDE_DEFAULT_TX_FREE_THRESH	32

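/*
 * Per-packet checksum status bits, used internally when translating CQE
 * parsing flags into mbuf offload flags.
 */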
#define QEDE_CSUM_ERROR			(1 << 0)
#define QEDE_CSUM_UNNECESSARY		(1 << 1)
#define QEDE_TUNN_CSUM_UNNECESSARY	(1 << 2)

#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
	do { \
		(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
		(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
		(bd)->nbytes = rte_cpu_to_le_16(len); \
		/* FW 8.10.x specific change */  \
		(bd)->data.bitfields = ((len) & \
					ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) \
					<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; \
	} while (0)
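
/*
 * Usage note: QEDE_BD_SET_ADDR_LEN() splits the 64-bit DMA address of a
 * buffer into the little-endian hi/lo halves of a buffer descriptor and
 * records the segment length, also encoding the length into the first-BD
 * bitfields as required by FW 8.10.x. An illustrative (hypothetical) call
 * from a transmit path could look like:
 *
 *	bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
 *	QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
 *			     mbuf->data_len);
 */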

#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
		    << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))

#define CQE_HAS_OUTER_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
		    << PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
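
/*
 * CQE_HAS_VLAN()/CQE_HAS_OUTER_VLAN() test the parsing-and-error flags word
 * of an RX completion for the presence of an inner or tunnel (outer) 802.1Q
 * tag, so the RX path can report VLAN information in the mbuf.
 */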

/* Max supported alignment is 256 (8 shift)
 * minimal alignment shift 6 is optimal for 57xxx HW performance
 */
#define QEDE_L1_CACHE_SHIFT	6
#define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)

#define QEDE_ETH_OVERHEAD	(ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
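
/*
 * QEDE_ETH_OVERHEAD is the per-packet buffer overhead (Ethernet header plus
 * additional headroom and the FW RX alignment padding); it is presumably
 * added on top of the maximum frame length when the driver computes
 * rx_buf_size for an RX queue.
 */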

/* TBD: Excluding IPV6 */
#define QEDE_RSS_OFFLOAD_ALL	(ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
				 ETH_RSS_NONFRAG_IPV4_UDP)

#define QEDE_TXQ_FLAGS		((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)

#define MAX_NUM_TC		8

#define for_each_queue(i)	for (i = 0; i < qdev->num_queues; i++)
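
/*
 * Note: for_each_queue() relies on an index variable i and a qede_dev
 * pointer named qdev being in scope at the call site.
 */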

/*
 * RX BD descriptor ring
 */
struct qede_rx_entry {
	struct rte_mbuf *mbuf;
	uint32_t page_offset;
	/* allows expansion .. */
};

/*
 * Structure associated with each RX queue.
 */
struct qede_rx_queue {
	struct rte_mempool *mb_pool;
	struct ecore_chain rx_bd_ring;
	struct ecore_chain rx_comp_ring;
	uint16_t *hw_cons_ptr;
	void OSAL_IOMEM *hw_rxq_prod_addr;
	struct qede_rx_entry *sw_rx_ring;
	uint16_t sw_rx_cons;
	uint16_t sw_rx_prod;
	uint16_t nb_rx_desc;
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t rx_buf_size;
	uint64_t rcv_pkts;
	uint64_t rx_segs;
	uint64_t rx_hw_errors;
	uint64_t rx_alloc_errors;
	struct qede_dev *qdev;
};

/*
 * TX BD descriptor ring
 */
struct qede_tx_entry {
	struct rte_mbuf *mbuf;
	uint8_t flags;
};

union db_prod {
	struct eth_db_data data;
	uint32_t raw;
};
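
/*
 * db_prod overlays the doorbell data layout with a raw 32-bit view, so the
 * TX path can compose the eth_db_data fields and then post them to the
 * queue's doorbell address as a single 32-bit write.
 */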

struct qede_tx_queue {
	struct ecore_chain tx_pbl;
	struct qede_tx_entry *sw_tx_ring;
	uint16_t nb_tx_desc;
	uint16_t nb_tx_avail;
	uint16_t tx_free_thresh;
	uint16_t queue_id;
	uint16_t *hw_cons_ptr;
	uint16_t sw_tx_cons;
	uint16_t sw_tx_prod;
	void OSAL_IOMEM *doorbell_addr;
	volatile union db_prod tx_db;
	uint16_t port_id;
	uint64_t xmit_pkts;
	struct qede_dev *qdev;
};

struct qede_fastpath {
	struct qede_dev *qdev;
	u8 type;
	uint8_t id;
	struct ecore_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txqs[MAX_NUM_TC];
	char name[80];
};
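
/*
 * A fastpath instance groups the per-queue resources that share one status
 * block: the RX queue and up to MAX_NUM_TC TX queues (one per traffic class).
 */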

/*
 * RX/TX function prototypes
 */
int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			uint16_t nb_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp);

int qede_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf);

void qede_rx_queue_release(void *rx_queue);

void qede_tx_queue_release(void *tx_queue);

int qede_dev_start(struct rte_eth_dev *eth_dev);

void qede_dev_stop(struct rte_eth_dev *eth_dev);

int qede_reset_fp_rings(struct qede_dev *qdev);

void qede_free_fp_arrays(struct qede_dev *qdev);

void qede_free_mem_load(struct rte_eth_dev *eth_dev);

uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);

uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
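
/*
 * Illustrative sketch (not part of this header): the PMD is expected to
 * install the burst functions above as the generic ethdev RX/TX hooks,
 * roughly as follows from the driver init path in qede_ethdev.c:
 *
 *	eth_dev->rx_pkt_burst = qede_recv_pkts;
 *	eth_dev->tx_pkt_burst = qede_xmit_pkts;
 *
 * after which rte_eth_rx_burst()/rte_eth_tx_burst() on this port dispatch
 * to these implementations.
 */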

/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);

#endif /* _QEDE_RXTX_H_ */