/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _ENIC_RXTX_COMMON_H_
#define _ENIC_RXTX_COMMON_H_

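/*
 * Return the completion-index flag bits from the CQ descriptor,
 * with the completed-index value itself masked off.
 */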
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

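/*
 * Return the flag bits packed into bytes_written_flags, with the
 * bytes-written count masked off.
 */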
static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

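/* Nonzero if the completion reports a truncated (errored) packet. */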
static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

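/* Nonzero if this completion is the end of a packet (EOP). */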
static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

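/* Nonzero if the NIC did not compute checksums for this packet. */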
static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

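/* Nonzero if the hardware validated the IPv4 header checksum. */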
static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

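/* Nonzero if the hardware validated the TCP/UDP checksum. */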
static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

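/* Extract the RSS hash type field; zero means no RSS hash was computed. */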
static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

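/* Return the 32-bit RSS hash computed by the NIC. */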
static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

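/* Return the VLAN TCI reported in the completion descriptor. */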
static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

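/* Return the number of bytes the NIC wrote to the receive buffer. */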
static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

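/* Return 1 if the completion reports a receive error, else 0. */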
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate RX CQ flags to mbuf packet types. */
static uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	/*
	 * Odd-numbered entries are for tunnel packets. All packet type info
	 * applies to the inner packet, and there is no info on the outer
	 * packet. The outer flags in these entries exist only to avoid
	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from the mbuf
	 * afterwards.
	 *
	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE),
	 * set RTE_PTYPE_TUNNEL_GRENAT.
	 */
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x01] = RTE_PTYPE_UNKNOWN |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
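	/*
	 * Only the five flag bits above index the table; per the table
	 * entries above, e.g. an IPv4 UDP completion yields cqrd_flags ==
	 * 0x22, and tnl == 1 selects the tunneled variant at odd entry 0x23.
	 */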
	return cq_type_table[cqrd_flags + tnl];
}

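/*
 * Convert receive completion flags into mbuf offload flags (ol_flags)
 * and fill in the VLAN TCI, flow director, and RSS hash fields.
 */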
static void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags, pkt_flags = 0, vlan_tci;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* VLAN STRIPPED flag. The L2 packet type is also updated here. */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0) {
			pkt_flags |= PKT_RX_VLAN;
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		} else {
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
		}
	}
	mbuf->vlan_tci = vlan_tci;

	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;

		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				/* filter_id = mark id + 1, so subtract 1 */
				mbuf->hash.fdir.hi = filter_id - 1;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
			uint32_t l4_flags;
			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			/*
			 * When overlay offload is enabled, the NIC may
			 * set ipv4_csum_ok=1 if the inner packet is IPv6.
			 * So, explicitly check for IPv4 before checking
			 * ipv4_csum_ok.
			 */
			if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
				if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
					pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_IP_CKSUM_BAD;
			}

			if (l4_flags == RTE_PTYPE_L4_UDP ||
			    l4_flags == RTE_PTYPE_L4_TCP) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

	mbuf->ol_flags = pkt_flags;
}

#endif /* _ENIC_RXTX_COMMON_H_ */