/*
 * qede_rxtx.h — DPDK qede PMD RX/TX definitions.
 * Vendored copy bundled with seastar inside the ceph tree
 * (imported with the 15.2.0 Octopus sources).
 */
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
7c673cae 3 * All rights reserved.
9f95a23c 4 * www.cavium.com
7c673cae
FG
5 */
6
7
8#ifndef _QEDE_RXTX_H_
9#define _QEDE_RXTX_H_
10
11#include "qede_ethdev.h"
12
/* Ring Descriptors.
 * Ring sizes are powers of two, so "size - 1" doubles as a wrap-around
 * mask for the software indices below.
 */
#define RX_RING_SIZE_POW        16	/* 64K */
#define RX_RING_SIZE            (1ULL << RX_RING_SIZE_POW)
#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN          128
#define NUM_RX_BDS_DEF          NUM_RX_BDS_MAX
/* Usable BD count / index mask for a configured RX queue.
 * The parameter is parenthesized so argument expressions such as
 * NUM_RX_BDS(&q) expand correctly (CERT PRE01-C); the previous form
 * expanded the bare token.
 */
#define NUM_RX_BDS(q)           ((q)->nb_rx_desc - 1)

#define TX_RING_SIZE_POW        16	/* 64K */
#define TX_RING_SIZE            (1ULL << TX_RING_SIZE_POW)
#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN          128
#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX
#define NUM_TX_BDS(q)           ((q)->nb_tx_desc - 1)

/* Software TX consumer/producer indices masked onto the ring */
#define TX_CONS(txq)            ((txq)->sw_tx_cons & NUM_TX_BDS(txq))
#define TX_PROD(txq)            ((txq)->sw_tx_prod & NUM_TX_BDS(txq))

/* Default threshold of pending completions before TX BDs are reclaimed */
#define QEDE_DEFAULT_TX_FREE_THRESH	32

/* Per-packet checksum status flags reported by the RX path */
#define QEDE_CSUM_ERROR			(1 << 0)
#define QEDE_CSUM_UNNECESSARY		(1 << 1)
#define QEDE_TUNN_CSUM_UNNECESSARY	(1 << 2)
/* Fill one buffer descriptor: split the 64-bit DMA address into
 * little-endian hi/lo 32-bit halves and record the buffer length.
 */
#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
	do { \
		(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
		(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
		(bd)->nbytes = rte_cpu_to_le_16(len); \
	} while (0)

/* Non-zero if the completion-queue-entry flags report an inner 802.1Q tag */
#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))

/* Non-zero if the CQE flags report an outer (tunnel) 802.1Q tag */
#define CQE_HAS_OUTER_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
51
/* Minimum RX buffer the PMD accepts, plus common L2 overhead constants */
#define QEDE_MIN_RX_BUFF_SIZE	(1024)
#define QEDE_VLAN_TAG_SIZE	(4)
#define QEDE_LLC_SNAP_HDR_LEN	(8)

/* Max supported alignment is 256 (8 shift)
 * minimal alignment shift 6 is optimal for 57xxx HW performance
 */
#define QEDE_L1_CACHE_SHIFT	6
#define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)
/* Round n up / down to the firmware RX alignment (a power of two) */
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
					 ~(QEDE_FW_RX_ALIGN_END - 1))
#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
						 QEDE_FW_RX_ALIGN_END)

/* Note: QEDE_LLC_SNAP_HDR_LEN is optional,
 * +2 is for padding in front of L2 header
 * (two VLAN tags + LLC/SNAP + pad = 18 bytes)
 */
#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) \
				 + (QEDE_LLC_SNAP_HDR_LEN) + 2)

#define QEDE_MAX_ETHER_HDR_LEN	(ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)

/* RSS hash types this PMD can offload to hardware */
#define QEDE_RSS_OFFLOAD_ALL	(ETH_RSS_IPV4 |\
				 ETH_RSS_NONFRAG_IPV4_TCP |\
				 ETH_RSS_NONFRAG_IPV4_UDP |\
				 ETH_RSS_IPV6 |\
				 ETH_RSS_NONFRAG_IPV6_TCP |\
				 ETH_RSS_NONFRAG_IPV6_UDP |\
				 ETH_RSS_VXLAN |\
				 ETH_RSS_GENEVE)

/* Iterate over every configured RX (RSS) / TX (TSS) queue index.
 * Both expect a local `qdev` (struct qede_dev *) in scope at the call site.
 */
#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
#define QEDE_RXTX_MAX(qdev) \
	(RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
11fdf7f2
TL
88
/* Macros for non-tunnel packet types lkup table.
 * The values are bit-encoded table indexes; judging from the combinations
 * below: bit0=IPv4, bit1=IPv6, bit2=TCP, bit3=UDP, bit4=fragment,
 * bit5=VLAN -- NOTE(review): encoding inferred from the constants,
 * confirm against the lookup-table initialization in qede_rxtx.c.
 */
#define QEDE_PKT_TYPE_UNKNOWN	0x0
#define QEDE_PKT_TYPE_MAX	0x3f	/* 6 significant index bits */

#define QEDE_PKT_TYPE_IPV4	0x1
#define QEDE_PKT_TYPE_IPV6	0x2
#define QEDE_PKT_TYPE_IPV4_TCP	0x5
#define QEDE_PKT_TYPE_IPV6_TCP	0x6
#define QEDE_PKT_TYPE_IPV4_UDP	0x9
#define QEDE_PKT_TYPE_IPV6_UDP	0xa

/* For frag pkts, corresponding IP bits is set */
#define QEDE_PKT_TYPE_IPV4_FRAG	0x11
#define QEDE_PKT_TYPE_IPV6_FRAG	0x12

#define QEDE_PKT_TYPE_IPV4_VLAN		0x21
#define QEDE_PKT_TYPE_IPV6_VLAN		0x22
#define QEDE_PKT_TYPE_IPV4_TCP_VLAN	0x25
#define QEDE_PKT_TYPE_IPV6_TCP_VLAN	0x26
#define QEDE_PKT_TYPE_IPV4_UDP_VLAN	0x29
#define QEDE_PKT_TYPE_IPV6_UDP_VLAN	0x2a

#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG	0x31
#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG	0x32

/* Macros for tunneled packets with next protocol lkup table */
#define QEDE_PKT_TYPE_TUNN_GENEVE	0x1
#define QEDE_PKT_TYPE_TUNN_GRE		0x2
#define QEDE_PKT_TYPE_TUNN_VXLAN	0x3

/* Bit 2 is don't care bit */
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE	0x9
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE		0xa
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN	0xb

#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE	0xd
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE		0xe
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN		0xf

/* Inner IPv4 variants, with and without a tunnel endpoint ID (TENID) */
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE	0x11
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE	0x12
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN	0x13

#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE	0x15
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE		0x16
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN	0x17

/* Inner IPv6 variants */
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE	0x19
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE	0x1a
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN	0x1b

#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE	0x1d
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE		0x1e
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN	0x1f

#define QEDE_PKT_TYPE_TUNN_MAX_TYPE	0x20 /* 2^5 */
147
/* Mbuf TX offload flags handled by the checksum/TSO transmit path */
#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \
				   PKT_TX_TCP_CKSUM | \
				   PKT_TX_UDP_CKSUM | \
				   PKT_TX_OUTER_IP_CKSUM | \
				   PKT_TX_TCP_SEG | \
				   PKT_TX_IPV4 | \
				   PKT_TX_IPV6)

/* Every TX offload flag this PMD supports */
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
			      PKT_TX_VLAN_PKT | \
			      PKT_TX_TUNNEL_MASK)

/* Complement of the supported mask: offload flags the PMD must reject
 * (presumably consulted by qede_xmit_prep_pkts() -- confirm).
 */
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
7c673cae
FG
/*
 * RX BD descriptor ring
 */
/* Software shadow entry for one RX buffer descriptor */
struct qede_rx_entry {
	struct rte_mbuf *mbuf;	/* buffer currently posted to this BD */
	uint32_t page_offset;	/* data offset within the buffer -- TODO confirm units */
	/* allows expansion .. */
};
171
11fdf7f2
TL
/* TPA related structures */
/* Tracks one in-progress TPA aggregation as a chain of mbuf segments */
struct qede_agg_info {
	struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
	struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
};
177
7c673cae
FG
/*
 * Structure associated with each RX queue.
 */
struct qede_rx_queue {
	struct rte_mempool *mb_pool;		/* mempool RX buffers are allocated from */
	struct ecore_chain rx_bd_ring;		/* RX buffer-descriptor ring */
	struct ecore_chain rx_comp_ring;	/* RX completion ring */
	uint16_t *hw_cons_ptr;			/* location of the HW consumer index */
	void OSAL_IOMEM *hw_rxq_prod_addr;	/* device address for RX producer updates */
	struct qede_rx_entry *sw_rx_ring;	/* shadow ring of posted mbufs */
	struct ecore_sb_info *sb_info;		/* status block for this queue */
	uint16_t sw_rx_cons;			/* software consumer index */
	uint16_t sw_rx_prod;			/* software producer index */
	uint16_t nb_rx_desc;			/* ring size; NUM_RX_BDS() derives the mask */
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t rx_buf_size;			/* size of each posted RX buffer */
	uint16_t rx_alloc_count;		/* NOTE(review): allocation bookkeeping -- confirm exact meaning */
	uint16_t unused;			/* explicit padding */
	/* statistics counters */
	uint64_t rcv_pkts;
	uint64_t rx_segs;
	uint64_t rx_hw_errors;
	uint64_t rx_alloc_errors;
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];	/* per-aggregation TPA state */
	struct qede_dev *qdev;			/* back-pointer to the owning device */
	void *handle;				/* opaque queue handle -- TODO confirm (ecore cid?) */
};
205
/*
 * TX BD descriptor ring
 */
/* Software shadow entry for one TX buffer descriptor */
struct qede_tx_entry {
	struct rte_mbuf *mbuf;	/* mbuf associated with this BD */
	uint8_t flags;
};
213
/* Doorbell record: structured view plus a raw 32-bit image for a single write */
union db_prod {
	struct eth_db_data data;
	uint32_t raw;
};
218
/*
 * Structure associated with each TX queue.
 */
struct qede_tx_queue {
	struct ecore_chain tx_pbl;		/* TX descriptor chain */
	struct qede_tx_entry *sw_tx_ring;	/* shadow ring of queued mbufs */
	uint16_t nb_tx_desc;			/* ring size; NUM_TX_BDS() derives the mask */
	uint16_t nb_tx_avail;			/* descriptors currently free */
	uint16_t tx_free_thresh;		/* completion-reclaim threshold */
	uint16_t queue_id;
	uint16_t *hw_cons_ptr;			/* location of the HW consumer index */
	uint16_t sw_tx_cons;			/* software consumer index (see TX_CONS) */
	uint16_t sw_tx_prod;			/* software producer index (see TX_PROD) */
	void OSAL_IOMEM *doorbell_addr;		/* device doorbell address for this queue */
	volatile union db_prod tx_db;		/* doorbell image written to doorbell_addr */
	uint16_t port_id;
	uint64_t xmit_pkts;			/* transmitted-packet counter */
	bool is_legacy;				/* NOTE(review): legacy-mode flag -- confirm semantics */
	struct qede_dev *qdev;			/* back-pointer to the owning device */
	void *handle;				/* opaque queue handle -- TODO confirm */
};
237
/* Fastpath context grouping a status block with its RX and TX queues */
struct qede_fastpath {
	struct ecore_sb_info *sb_info;	/* status block for this fastpath */
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txq;
};
243
/*
 * RX/TX function prototypes
 */

/* Allocate and configure RX queue 'queue_idx' with 'nb_desc' descriptors,
 * drawing buffers from 'mp'; returns 0 on success, negative on failure.
 */
int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			uint16_t nb_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp);

/* Allocate and configure TX queue 'queue_idx' with 'nb_desc' descriptors */
int qede_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf);

/* Release all resources held by the given queue */
void qede_rx_queue_release(void *rx_queue);

void qede_tx_queue_release(void *tx_queue);

/* Burst transmit; returns the number of packets actually queued to HW */
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);

/* TX prepare callback (rte_eth_tx_prepare); presumably validates offload
 * flags against QEDE_TX_OFFLOAD_NOTSUP_MASK -- confirm in qede_rxtx.c.
 */
uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);

/* Burst receive; returns the number of packets written into rx_pkts */
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);

/* Placeholder burst handler (name suggests a no-op used while queues are
 * inactive -- confirm against where it is installed).
 */
uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
			      struct rte_mbuf **pkts,
			      uint16_t nb_pkts);

int qede_start_queues(struct rte_eth_dev *eth_dev);

void qede_stop_queues(struct rte_eth_dev *eth_dev);

/* Derive the RX buffer size from the mbuf data size and max frame length */
int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
			  uint16_t max_frame_size);

/* rte_eth RX descriptor-status callback */
int
qede_rx_descriptor_status(void *rxq, uint16_t offset);

/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);
287
288#endif /* _QEDE_RXTX_H_ */