/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _ENIC_RXTX_COMMON_H_
#define _ENIC_RXTX_COMMON_H_

#include <rte_byteorder.h>
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return rte_le_to_cpu_16(crd->completed_index_flags) &
		~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return rte_le_to_cpu_16(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return rte_le_to_cpu_32(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return rte_le_to_cpu_16(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return rte_le_to_cpu_16(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}
/* Lookup table to translate RX CQ flags to mbuf packet types. */
static uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	/*
	 * Odd-numbered entries are for tunnel packets. All packet type info
	 * applies to the inner packet, and there is no info on the outer
	 * packet. The outer flags in these entries exist only to avoid
	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from the
	 * mbuf afterwards.
	 *
	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE),
	 * set RTE_PTYPE_TUNNEL_GRENAT.
	 */
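	/*
	 * Worked example (illustrative, assuming the usual enic flag
	 * encoding): a non-tunneled IPv4/TCP completion masks to 0x24
	 * below, selecting cq_type_table[0x24]; with tnl = 1 the same
	 * flags select the adjacent tunnel entry at 0x25.
	 */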
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x01] = RTE_PTYPE_UNKNOWN |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags + tnl];
}

static void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags, pkt_flags = 0, vlan_tci;
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* VLAN STRIPPED flag. The L2 packet type is also updated here. */
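	/*
	 * Note: PKT_RX_VLAN_STRIPPED means the tag was removed from the
	 * packet data; the TCI remains available only in mbuf->vlan_tci.
	 */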
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0) {
			pkt_flags |= PKT_RX_VLAN;
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		} else {
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
		}
	}
	mbuf->vlan_tci = vlan_tci;

	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;
		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				/* filter_id = mark id + 1, so subtract 1 */
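				/* e.g. a flow rule created with MARK id 5
				 * completes with filter_id 6, so the mbuf
				 * reports fdir.hi = 5.
				 */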
				mbuf->hash.fdir.hi = filter_id - 1;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
			uint32_t l4_flags;
			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			/*
			 * When overlay offload is enabled, the NIC may
			 * set ipv4_csum_ok=1 if the inner packet is IPv6.
			 * So, explicitly check for IPv4 before checking
			 * ipv4_csum_ok.
			 */
			if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
				if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
					pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_IP_CKSUM_BAD;
			}

			if (l4_flags == RTE_PTYPE_L4_UDP ||
			    l4_flags == RTE_PTYPE_L4_TCP) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

	mbuf->ol_flags = pkt_flags;
}
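
/*
 * Illustrative sketch, not part of the original header: one plausible way a
 * receive path could combine the helpers above for a single completed
 * descriptor. The function name and the "tnl" parameter handling here are
 * hypothetical; the real burst-receive loop lives in enic_rxtx.c and also
 * handles buffer refill, scatter, and EOP tracking.
 */
static inline void
enic_cq_rx_example_fill_mbuf(struct cq_desc *cqd, struct rte_mbuf *mbuf,
			     uint8_t tnl)
{
	uint16_t len;

	/* A truncated packet carries no usable data. */
	if (unlikely(enic_cq_rx_check_err(cqd)))
		return;

	/* The bytes-written field gives the length of this buffer. */
	len = enic_cq_rx_desc_n_bytes(cqd);
	mbuf->data_len = len;
	mbuf->pkt_len = len;

	/*
	 * Classify first; enic_cq_rx_to_pkt_flags() then ORs in the L2
	 * type and derives ol_flags from the same completion entry.
	 */
	mbuf->packet_type = enic_cq_rx_flags_to_pkt_type(cqd, tnl);
	enic_cq_rx_to_pkt_flags(cqd, mbuf);
}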

#endif /* _ENIC_RXTX_COMMON_H_ */