lib/dp-packet.h
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef DPBUF_H
18 #define DPBUF_H 1
19
20 #include <stddef.h>
21 #include <stdint.h>
22
23 #ifdef DPDK_NETDEV
24 #include <rte_config.h>
25 #include <rte_mbuf.h>
26 #endif
27
28 #include "netdev-afxdp.h"
29 #include "netdev-dpdk.h"
30 #include "openvswitch/list.h"
31 #include "packets.h"
32 #include "util.h"
33 #include "flow.h"
34
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38
39 enum OVS_PACKED_ENUM dp_packet_source {
40 DPBUF_MALLOC, /* Obtained via malloc(). */
41 DPBUF_STACK, /* Un-movable stack space or static buffer. */
42 DPBUF_STUB, /* Starts on stack, may expand into heap. */
43 DPBUF_DPDK, /* Buffer data is from DPDK-allocated memory;
44 * see dp_packet_init_dpdk() in dp-packet.c.
45 */
46 DPBUF_AFXDP, /* Buffer data from XDP frame. */
47 };
48
49 #define DP_PACKET_CONTEXT_SIZE 64
50
51 #ifdef DPDK_NETDEV
52 #define DEF_OL_FLAG(NAME, DPDK_DEF, GENERIC_DEF) NAME = DPDK_DEF
53 #else
54 #define DEF_OL_FLAG(NAME, DPDK_DEF, GENERIC_DEF) NAME = GENERIC_DEF
55 #endif
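
/* A worked expansion of the macro above (illustrative only): with DPDK_NETDEV
 * defined,
 *     DEF_OL_FLAG(DP_PACKET_OL_RSS_HASH, PKT_RX_RSS_HASH, 0x1)
 * expands to 'DP_PACKET_OL_RSS_HASH = PKT_RX_RSS_HASH', so the OVS flag
 * aliases the corresponding DPDK mbuf flag; without DPDK_NETDEV it expands to
 * 'DP_PACKET_OL_RSS_HASH = 0x1', a generic value private to OVS. */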
56
57 /* Bit masks for the 'ol_flags' member of the 'dp_packet' structure. */
58 enum dp_packet_offload_mask {
59 /* Value 0 is not used. */
60 /* Is the 'rss_hash' valid? */
61 DEF_OL_FLAG(DP_PACKET_OL_RSS_HASH, PKT_RX_RSS_HASH, 0x1),
62 /* Is the 'flow_mark' valid? */
63 DEF_OL_FLAG(DP_PACKET_OL_FLOW_MARK, PKT_RX_FDIR_ID, 0x2),
64 /* Bad L4 checksum in the packet. */
65 DEF_OL_FLAG(DP_PACKET_OL_RX_L4_CKSUM_BAD, PKT_RX_L4_CKSUM_BAD, 0x4),
66 /* Bad IP checksum in the packet. */
67 DEF_OL_FLAG(DP_PACKET_OL_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, 0x8),
68 /* Valid L4 checksum in the packet. */
69 DEF_OL_FLAG(DP_PACKET_OL_RX_L4_CKSUM_GOOD, PKT_RX_L4_CKSUM_GOOD, 0x10),
70 /* Valid IP checksum in the packet. */
71 DEF_OL_FLAG(DP_PACKET_OL_RX_IP_CKSUM_GOOD, PKT_RX_IP_CKSUM_GOOD, 0x20),
72 /* TCP Segmentation Offload. */
73 DEF_OL_FLAG(DP_PACKET_OL_TX_TCP_SEG, PKT_TX_TCP_SEG, 0x40),
74 /* Offloaded packet is IPv4. */
75 DEF_OL_FLAG(DP_PACKET_OL_TX_IPV4, PKT_TX_IPV4, 0x80),
76 /* Offloaded packet is IPv6. */
77 DEF_OL_FLAG(DP_PACKET_OL_TX_IPV6, PKT_TX_IPV6, 0x100),
78 /* Offload TCP checksum. */
79 DEF_OL_FLAG(DP_PACKET_OL_TX_TCP_CKSUM, PKT_TX_TCP_CKSUM, 0x200),
80 /* Offload UDP checksum. */
81 DEF_OL_FLAG(DP_PACKET_OL_TX_UDP_CKSUM, PKT_TX_UDP_CKSUM, 0x400),
82 /* Offload SCTP checksum. */
83 DEF_OL_FLAG(DP_PACKET_OL_TX_SCTP_CKSUM, PKT_TX_SCTP_CKSUM, 0x800),
84 /* Adding a new flag requires adding it to DP_PACKET_OL_SUPPORTED_MASK. */
85 };
86
87 #define DP_PACKET_OL_SUPPORTED_MASK (DP_PACKET_OL_RSS_HASH | \
88 DP_PACKET_OL_FLOW_MARK | \
89 DP_PACKET_OL_RX_L4_CKSUM_BAD | \
90 DP_PACKET_OL_RX_IP_CKSUM_BAD | \
91 DP_PACKET_OL_RX_L4_CKSUM_GOOD | \
92 DP_PACKET_OL_RX_IP_CKSUM_GOOD | \
93 DP_PACKET_OL_TX_TCP_SEG | \
94 DP_PACKET_OL_TX_IPV4 | \
95 DP_PACKET_OL_TX_IPV6 | \
96 DP_PACKET_OL_TX_TCP_CKSUM | \
97 DP_PACKET_OL_TX_UDP_CKSUM | \
98 DP_PACKET_OL_TX_SCTP_CKSUM)
99
100 #define DP_PACKET_OL_TX_L4_MASK (DP_PACKET_OL_TX_TCP_CKSUM | \
101 DP_PACKET_OL_TX_UDP_CKSUM | \
102 DP_PACKET_OL_TX_SCTP_CKSUM)
103 #define DP_PACKET_OL_RX_IP_CKSUM_MASK (DP_PACKET_OL_RX_IP_CKSUM_GOOD | \
104 DP_PACKET_OL_RX_IP_CKSUM_BAD)
105 #define DP_PACKET_OL_RX_L4_CKSUM_MASK (DP_PACKET_OL_RX_L4_CKSUM_GOOD | \
106 DP_PACKET_OL_RX_L4_CKSUM_BAD)
107
108 /* Buffer for holding packet data. A dp_packet is automatically reallocated
109 * as necessary if it grows too large for the available memory.
110 * By default the packet type is set to Ethernet (PT_ETH).
111 */
112 struct dp_packet {
113 #ifdef DPDK_NETDEV
114 struct rte_mbuf mbuf; /* DPDK mbuf */
115 #else
116 void *base_; /* First byte of allocated space. */
117 uint16_t allocated_; /* Number of bytes allocated. */
118 uint16_t data_ofs; /* First byte actually in use. */
119 uint32_t size_; /* Number of bytes in use. */
120 uint32_t ol_flags; /* Offloading flags. */
121 uint32_t rss_hash; /* Packet hash. */
122 uint32_t flow_mark; /* Packet flow mark. */
123 #endif
124 enum dp_packet_source source; /* Source of memory allocated as 'base'. */
125
126 /* All of the following elements of this struct are copied by a single
127 * memcpy() call in dp_packet_clone_with_headroom(). */
128 uint8_t l2_pad_size; /* Detected L2 padding size.
129 * Padding is non-pullable. */
130 uint16_t l2_5_ofs; /* MPLS label stack offset, or UINT16_MAX. */
131 uint16_t l3_ofs; /* Network-level header offset,
132 * or UINT16_MAX. */
133 uint16_t l4_ofs; /* Transport-level header offset,
134 * or UINT16_MAX. */
135 uint32_t cutlen; /* Length in bytes to cut from the end. */
136 ovs_be32 packet_type; /* Packet type as defined in OpenFlow. */
137 union {
138 struct pkt_metadata md;
139 uint64_t data[DP_PACKET_CONTEXT_SIZE / 8];
140 };
141 };
142
143 #if HAVE_AF_XDP
144 struct dp_packet_afxdp {
145 struct umem_pool *mpool;
146 struct dp_packet packet;
147 };
148 #endif
149
150 static inline void *dp_packet_data(const struct dp_packet *);
151 static inline void dp_packet_set_data(struct dp_packet *, void *);
152 static inline void *dp_packet_base(const struct dp_packet *);
153 static inline void dp_packet_set_base(struct dp_packet *, void *);
154
155 static inline uint32_t dp_packet_size(const struct dp_packet *);
156 static inline void dp_packet_set_size(struct dp_packet *, uint32_t);
157
158 static inline uint16_t dp_packet_get_allocated(const struct dp_packet *);
159 static inline void dp_packet_set_allocated(struct dp_packet *, uint16_t);
160
161 void *dp_packet_resize_l2(struct dp_packet *, int increment);
162 void *dp_packet_resize_l2_5(struct dp_packet *, int increment);
163 static inline void *dp_packet_eth(const struct dp_packet *);
164 static inline void dp_packet_reset_offsets(struct dp_packet *);
165 static inline uint8_t dp_packet_l2_pad_size(const struct dp_packet *);
166 static inline void dp_packet_set_l2_pad_size(struct dp_packet *, uint8_t);
167 static inline void *dp_packet_l2_5(const struct dp_packet *);
168 static inline void dp_packet_set_l2_5(struct dp_packet *, void *);
169 static inline void *dp_packet_l3(const struct dp_packet *);
170 static inline void dp_packet_set_l3(struct dp_packet *, void *);
171 static inline void *dp_packet_l4(const struct dp_packet *);
172 static inline void dp_packet_set_l4(struct dp_packet *, void *);
173 static inline size_t dp_packet_l4_size(const struct dp_packet *);
174 static inline const void *dp_packet_get_tcp_payload(const struct dp_packet *);
175 static inline const void *dp_packet_get_udp_payload(const struct dp_packet *);
176 static inline const void *dp_packet_get_sctp_payload(const struct dp_packet *);
177 static inline const void *dp_packet_get_icmp_payload(const struct dp_packet *);
178 static inline const void *dp_packet_get_nd_payload(const struct dp_packet *);
179
180 void dp_packet_use(struct dp_packet *, void *, size_t);
181 void dp_packet_use_stub(struct dp_packet *, void *, size_t);
182 void dp_packet_use_const(struct dp_packet *, const void *, size_t);
183 #if HAVE_AF_XDP
184 void dp_packet_use_afxdp(struct dp_packet *, void *, size_t, size_t);
185 #endif
186 void dp_packet_init_dpdk(struct dp_packet *);
187
188 void dp_packet_init(struct dp_packet *, size_t);
189 void dp_packet_uninit(struct dp_packet *);
190
191 struct dp_packet *dp_packet_new(size_t);
192 struct dp_packet *dp_packet_new_with_headroom(size_t, size_t headroom);
193 struct dp_packet *dp_packet_clone(const struct dp_packet *);
194 struct dp_packet *dp_packet_clone_with_headroom(const struct dp_packet *,
195 size_t headroom);
196 struct dp_packet *dp_packet_clone_data(const void *, size_t);
197 struct dp_packet *dp_packet_clone_data_with_headroom(const void *, size_t,
198 size_t headroom);
199 void dp_packet_resize(struct dp_packet *b, size_t new_headroom,
200 size_t new_tailroom);
201 static inline void dp_packet_delete(struct dp_packet *);
202
203 static inline void *dp_packet_at(const struct dp_packet *, size_t offset,
204 size_t size);
205 static inline void *dp_packet_at_assert(const struct dp_packet *,
206 size_t offset, size_t size);
207 static inline void *dp_packet_tail(const struct dp_packet *);
208 static inline void *dp_packet_end(const struct dp_packet *);
209
210 void *dp_packet_put_uninit(struct dp_packet *, size_t);
211 void *dp_packet_put_zeros(struct dp_packet *, size_t);
212 void *dp_packet_put(struct dp_packet *, const void *, size_t);
213 char *dp_packet_put_hex(struct dp_packet *, const char *s, size_t *n);
214 void dp_packet_reserve(struct dp_packet *, size_t);
215 void dp_packet_reserve_with_tailroom(struct dp_packet *, size_t headroom,
216 size_t tailroom);
217 void *dp_packet_push_uninit(struct dp_packet *, size_t);
218 void *dp_packet_push_zeros(struct dp_packet *, size_t);
219 void *dp_packet_push(struct dp_packet *, const void *, size_t);
220
221 static inline size_t dp_packet_headroom(const struct dp_packet *);
222 static inline size_t dp_packet_tailroom(const struct dp_packet *);
223 void dp_packet_prealloc_headroom(struct dp_packet *, size_t);
224 void dp_packet_prealloc_tailroom(struct dp_packet *, size_t);
225 void dp_packet_shift(struct dp_packet *, int);
226
227 static inline void dp_packet_clear(struct dp_packet *);
228 static inline void *dp_packet_pull(struct dp_packet *, size_t);
229 static inline void *dp_packet_try_pull(struct dp_packet *, size_t);
230
231 void *dp_packet_steal_data(struct dp_packet *);
232
233 static inline bool dp_packet_equal(const struct dp_packet *,
234 const struct dp_packet *);
235
236 \f
237 /* Frees memory that 'b' points to, as well as 'b' itself. */
238 static inline void
239 dp_packet_delete(struct dp_packet *b)
240 {
241 if (b) {
242 if (b->source == DPBUF_DPDK) {
243 /* If this dp_packet was allocated by DPDK, it must have been
244 * created as a dp_packet. */
245 free_dpdk_buf((struct dp_packet*) b);
246 return;
247 }
248
249 if (b->source == DPBUF_AFXDP) {
250 free_afxdp_buf(b);
251 return;
252 }
253
254 dp_packet_uninit(b);
255 free(b);
256 }
257 }
258
259 /* If 'b' contains at least 'offset + size' bytes of data, returns a pointer to
260 * byte 'offset'. Otherwise, returns a null pointer. */
261 static inline void *
262 dp_packet_at(const struct dp_packet *b, size_t offset, size_t size)
263 {
264 return offset + size <= dp_packet_size(b)
265 ? (char *) dp_packet_data(b) + offset
266 : NULL;
267 }
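
/* Usage sketch (illustrative only; 'pkt' is assumed to hold an Ethernet
 * frame): bounds-checked access to the Ethernet header, using
 * struct eth_header and ETH_HEADER_LEN from packets.h.
 *
 *     struct eth_header *eth = dp_packet_at(pkt, 0, ETH_HEADER_LEN);
 *     if (eth) {
 *         ...at least ETH_HEADER_LEN bytes of packet data are present...
 *     }
 */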
268
269 /* Returns a pointer to byte 'offset' in 'b', which must contain at least
270 * 'offset + size' bytes of data. */
271 static inline void *
272 dp_packet_at_assert(const struct dp_packet *b, size_t offset, size_t size)
273 {
274 ovs_assert(offset + size <= dp_packet_size(b));
275 return ((char *) dp_packet_data(b)) + offset;
276 }
277
278 /* Returns a pointer to the byte following the last byte of data in use in
279 * 'b'. */
279 static inline void *
280 dp_packet_tail(const struct dp_packet *b)
281 {
282 return (char *) dp_packet_data(b) + dp_packet_size(b);
283 }
284
285 /* Returns a pointer to the byte following the last byte allocated for use
286 * (but not necessarily in use) in 'b'. */
287 static inline void *
288 dp_packet_end(const struct dp_packet *b)
289 {
290 return (char *) dp_packet_base(b) + dp_packet_get_allocated(b);
291 }
292
293 /* Returns the number of bytes of headroom in 'b', that is, the number of bytes
294 * of unused space in dp_packet 'b' before the data that is in use. (Most
295 * commonly, the data in a dp_packet is at its beginning, and thus the
296 * dp_packet's headroom is 0.) */
297 static inline size_t
298 dp_packet_headroom(const struct dp_packet *b)
299 {
300 return (char *) dp_packet_data(b) - (char *) dp_packet_base(b);
301 }
302
303 /* Returns the number of bytes that may be appended to the tail end of
304 * dp_packet 'b' before the dp_packet must be reallocated. */
305 static inline size_t
306 dp_packet_tailroom(const struct dp_packet *b)
307 {
308 return (char *) dp_packet_end(b) - (char *) dp_packet_tail(b);
309 }
310
311 /* Clears any data from 'b'. */
312 static inline void
313 dp_packet_clear(struct dp_packet *b)
314 {
315 dp_packet_set_data(b, dp_packet_base(b));
316 dp_packet_set_size(b, 0);
317 }
318
319 /* Removes 'size' bytes from the head end of 'b', which must contain at least
320 * 'size' bytes of data. Returns the first byte of data removed. */
321 static inline void *
322 dp_packet_pull(struct dp_packet *b, size_t size)
323 {
324 void *data = dp_packet_data(b);
325 ovs_assert(dp_packet_size(b) - dp_packet_l2_pad_size(b) >= size);
326 dp_packet_set_data(b, (char *) dp_packet_data(b) + size);
327 dp_packet_set_size(b, dp_packet_size(b) - size);
328 return data;
329 }
330
331 /* If 'b' has at least 'size' bytes of data, removes that many bytes from the
332 * head end of 'b' and returns the first byte removed. Otherwise, returns a
333 * null pointer without modifying 'b'. */
334 static inline void *
335 dp_packet_try_pull(struct dp_packet *b, size_t size)
336 {
337 return dp_packet_size(b) - dp_packet_l2_pad_size(b) >= size
338 ? dp_packet_pull(b, size) : NULL;
339 }
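
/* Usage sketch (illustrative only): strip a leading Ethernet header, if the
 * packet is large enough to contain one.
 *
 *     struct eth_header *eth = dp_packet_try_pull(pkt, ETH_HEADER_LEN);
 *     if (eth) {
 *         ...'pkt' now begins at the byte following the Ethernet header...
 *     }
 */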
340
341 static inline bool
342 dp_packet_equal(const struct dp_packet *a, const struct dp_packet *b)
343 {
344 return dp_packet_size(a) == dp_packet_size(b) &&
345 !memcmp(dp_packet_data(a), dp_packet_data(b), dp_packet_size(a));
346 }
347
348 static inline bool
349 dp_packet_is_eth(const struct dp_packet *b)
350 {
351 return b->packet_type == htonl(PT_ETH);
352 }
353
354 /* Returns the start of the Ethernet frame. 'l3_ofs' marks the end of the
355 * L2 headers, so NULL is returned if it is not set. */
356 static inline void *
357 dp_packet_eth(const struct dp_packet *b)
358 {
359 return (dp_packet_is_eth(b) && b->l3_ofs != UINT16_MAX)
360 ? dp_packet_data(b) : NULL;
361 }
362
363 /* Resets all layer offsets. The 'l3' offset must be set before the 'l2'
364 * pointer can be retrieved. */
365 static inline void
366 dp_packet_reset_offsets(struct dp_packet *b)
367 {
368 b->l2_pad_size = 0;
369 b->l2_5_ofs = UINT16_MAX;
370 b->l3_ofs = UINT16_MAX;
371 b->l4_ofs = UINT16_MAX;
372 }
373
374 static inline uint8_t
375 dp_packet_l2_pad_size(const struct dp_packet *b)
376 {
377 return b->l2_pad_size;
378 }
379
380 static inline void
381 dp_packet_set_l2_pad_size(struct dp_packet *b, uint8_t pad_size)
382 {
383 ovs_assert(pad_size <= dp_packet_size(b));
384 b->l2_pad_size = pad_size;
385 }
386
387 static inline void *
388 dp_packet_l2_5(const struct dp_packet *b)
389 {
390 return b->l2_5_ofs != UINT16_MAX
391 ? (char *) dp_packet_data(b) + b->l2_5_ofs
392 : NULL;
393 }
394
395 static inline void
396 dp_packet_set_l2_5(struct dp_packet *b, void *l2_5)
397 {
398 b->l2_5_ofs = l2_5
399 ? (char *) l2_5 - (char *) dp_packet_data(b)
400 : UINT16_MAX;
401 }
402
403 static inline void *
404 dp_packet_l3(const struct dp_packet *b)
405 {
406 return b->l3_ofs != UINT16_MAX
407 ? (char *) dp_packet_data(b) + b->l3_ofs
408 : NULL;
409 }
410
411 static inline void
412 dp_packet_set_l3(struct dp_packet *b, void *l3)
413 {
414 b->l3_ofs = l3 ? (char *) l3 - (char *) dp_packet_data(b) : UINT16_MAX;
415 }
416
417 static inline void *
418 dp_packet_l4(const struct dp_packet *b)
419 {
420 return b->l4_ofs != UINT16_MAX
421 ? (char *) dp_packet_data(b) + b->l4_ofs
422 : NULL;
423 }
424
425 static inline void
426 dp_packet_set_l4(struct dp_packet *b, void *l4)
427 {
428 b->l4_ofs = l4 ? (char *) l4 - (char *) dp_packet_data(b) : UINT16_MAX;
429 }
430
431 /* Returns the size of the packet from the beginning of the L3 header to the
432 * end of the L3 payload. Hence L2 padding is not included. */
433 static inline size_t
434 dp_packet_l3_size(const struct dp_packet *b)
435 {
436 return OVS_LIKELY(b->l3_ofs != UINT16_MAX)
437 ? (const char *)dp_packet_tail(b) - (const char *)dp_packet_l3(b)
438 - dp_packet_l2_pad_size(b)
439 : 0;
440 }
441
442 /* Returns the size of the packet from the beginning of the L4 header to the
443 * end of the L4 payload. Hence L2 padding is not included. */
444 static inline size_t
445 dp_packet_l4_size(const struct dp_packet *b)
446 {
447 return OVS_LIKELY(b->l4_ofs != UINT16_MAX)
448 ? (const char *)dp_packet_tail(b) - (const char *)dp_packet_l4(b)
449 - dp_packet_l2_pad_size(b)
450 : 0;
451 }
452
453 static inline const void *
454 dp_packet_get_tcp_payload(const struct dp_packet *b)
455 {
456 size_t l4_size = dp_packet_l4_size(b);
457
458 if (OVS_LIKELY(l4_size >= TCP_HEADER_LEN)) {
459 struct tcp_header *tcp = dp_packet_l4(b);
460 int tcp_len = TCP_OFFSET(tcp->tcp_ctl) * 4;
461
462 if (OVS_LIKELY(tcp_len >= TCP_HEADER_LEN && tcp_len <= l4_size)) {
463 return (const char *)tcp + tcp_len;
464 }
465 }
466 return NULL;
467 }
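
/* Usage sketch (illustrative only): locate the start of the TCP payload and
 * compute its length, excluding any L2 padding.
 *
 *     const char *payload = dp_packet_get_tcp_payload(pkt);
 *     if (payload) {
 *         size_t payload_len = (const char *) dp_packet_tail(pkt) - payload
 *                              - dp_packet_l2_pad_size(pkt);
 *         ...
 *     }
 */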
468
469 static inline const void *
470 dp_packet_get_udp_payload(const struct dp_packet *b)
471 {
472 return OVS_LIKELY(dp_packet_l4_size(b) >= UDP_HEADER_LEN)
473 ? (const char *)dp_packet_l4(b) + UDP_HEADER_LEN : NULL;
474 }
475
476 static inline const void *
477 dp_packet_get_sctp_payload(const struct dp_packet *b)
478 {
479 return OVS_LIKELY(dp_packet_l4_size(b) >= SCTP_HEADER_LEN)
480 ? (const char *)dp_packet_l4(b) + SCTP_HEADER_LEN : NULL;
481 }
482
483 static inline const void *
484 dp_packet_get_icmp_payload(const struct dp_packet *b)
485 {
486 return OVS_LIKELY(dp_packet_l4_size(b) >= ICMP_HEADER_LEN)
487 ? (const char *)dp_packet_l4(b) + ICMP_HEADER_LEN : NULL;
488 }
489
490 static inline const void *
491 dp_packet_get_nd_payload(const struct dp_packet *b)
492 {
493 return OVS_LIKELY(dp_packet_l4_size(b) >= ND_MSG_LEN)
494 ? (const char *)dp_packet_l4(b) + ND_MSG_LEN : NULL;
495 }
496
497 #ifdef DPDK_NETDEV
498 static inline uint64_t *
499 dp_packet_ol_flags_ptr(const struct dp_packet *b)
500 {
501 return CONST_CAST(uint64_t *, &b->mbuf.ol_flags);
502 }
503
504 static inline uint32_t *
505 dp_packet_rss_ptr(const struct dp_packet *b)
506 {
507 return CONST_CAST(uint32_t *, &b->mbuf.hash.rss);
508 }
509
510 static inline uint32_t *
511 dp_packet_flow_mark_ptr(const struct dp_packet *b)
512 {
513 return CONST_CAST(uint32_t *, &b->mbuf.hash.fdir.hi);
514 }
515
516 #else
517 static inline uint32_t *
518 dp_packet_ol_flags_ptr(const struct dp_packet *b)
519 {
520 return CONST_CAST(uint32_t *, &b->ol_flags);
521 }
522
523 static inline uint32_t *
524 dp_packet_rss_ptr(const struct dp_packet *b)
525 {
526 return CONST_CAST(uint32_t *, &b->rss_hash);
527 }
528
529 static inline uint32_t *
530 dp_packet_flow_mark_ptr(const struct dp_packet *b)
531 {
532 return CONST_CAST(uint32_t *, &b->flow_mark);
533 }
534 #endif
535
536 #ifdef DPDK_NETDEV
537 BUILD_ASSERT_DECL(offsetof(struct dp_packet, mbuf) == 0);
538
539 static inline void
540 dp_packet_init_specific(struct dp_packet *p)
541 {
542 /* This initialization is needed for packets that do not come from DPDK
543 * interfaces, when vswitchd is built with --with-dpdk. */
544 p->mbuf.ol_flags = p->mbuf.tx_offload = p->mbuf.packet_type = 0;
545 p->mbuf.nb_segs = 1;
546 p->mbuf.next = NULL;
547 }
548
549 static inline void *
550 dp_packet_base(const struct dp_packet *b)
551 {
552 return b->mbuf.buf_addr;
553 }
554
555 static inline void
556 dp_packet_set_base(struct dp_packet *b, void *d)
557 {
558 b->mbuf.buf_addr = d;
559 }
560
561 static inline uint32_t
562 dp_packet_size(const struct dp_packet *b)
563 {
564 return b->mbuf.pkt_len;
565 }
566
567 static inline void
568 dp_packet_set_size(struct dp_packet *b, uint32_t v)
569 {
570 /* netdev-dpdk does not currently support segmentation; consequently, for
571 * all intents and purposes, 'data_len' (16-bit) and 'pkt_len' (32-bit) may
572 * be used interchangeably.
573 *
574 * On the datapath, it is expected that the size of packets
575 * (and thus 'v') will always be <= UINT16_MAX; this means that there is no
576 * loss of accuracy in assigning 'v' to 'data_len'.
577 */
578 b->mbuf.data_len = (uint16_t)v; /* Current seg length. */
579 b->mbuf.pkt_len = v; /* Total length of all segments linked to
580 * this segment. */
581 }
582
583 static inline uint16_t
584 __packet_data(const struct dp_packet *b)
585 {
586 return b->mbuf.data_off;
587 }
588
589 static inline void
590 __packet_set_data(struct dp_packet *b, uint16_t v)
591 {
592 b->mbuf.data_off = v;
593 }
594
595 static inline uint16_t
596 dp_packet_get_allocated(const struct dp_packet *b)
597 {
598 return b->mbuf.buf_len;
599 }
600
601 static inline void
602 dp_packet_set_allocated(struct dp_packet *b, uint16_t s)
603 {
604 b->mbuf.buf_len = s;
605 }
606
607 #else /* DPDK_NETDEV */
608
609 static inline void
610 dp_packet_init_specific(struct dp_packet *p OVS_UNUSED)
611 {
612 /* There are no implementation-specific fields for initialization. */
613 }
614
615 static inline void *
616 dp_packet_base(const struct dp_packet *b)
617 {
618 return b->base_;
619 }
620
621 static inline void
622 dp_packet_set_base(struct dp_packet *b, void *d)
623 {
624 b->base_ = d;
625 }
626
627 static inline uint32_t
628 dp_packet_size(const struct dp_packet *b)
629 {
630 return b->size_;
631 }
632
633 static inline void
634 dp_packet_set_size(struct dp_packet *b, uint32_t v)
635 {
636 b->size_ = v;
637 }
638
639 static inline uint16_t
640 __packet_data(const struct dp_packet *b)
641 {
642 return b->data_ofs;
643 }
644
645 static inline void
646 __packet_set_data(struct dp_packet *b, uint16_t v)
647 {
648 b->data_ofs = v;
649 }
650
651 static inline uint16_t
652 dp_packet_get_allocated(const struct dp_packet *b)
653 {
654 return b->allocated_;
655 }
656
657 static inline void
658 dp_packet_set_allocated(struct dp_packet *b, uint16_t s)
659 {
660 b->allocated_ = s;
661 }
662
663 #endif /* DPDK_NETDEV */
664
665 static inline void
666 dp_packet_reset_cutlen(struct dp_packet *b)
667 {
668 b->cutlen = 0;
669 }
670
671 static inline uint32_t
672 dp_packet_set_cutlen(struct dp_packet *b, uint32_t max_len)
673 {
674 if (max_len < ETH_HEADER_LEN) {
675 max_len = ETH_HEADER_LEN;
676 }
677
678 if (max_len >= dp_packet_size(b)) {
679 b->cutlen = 0;
680 } else {
681 b->cutlen = dp_packet_size(b) - max_len;
682 }
683 return b->cutlen;
684 }
685
686 static inline uint32_t
687 dp_packet_get_cutlen(const struct dp_packet *b)
688 {
689 /* Always in the valid range if the user uses dp_packet_set_cutlen(). */
690 return b->cutlen;
691 }
692
693 static inline uint32_t
694 dp_packet_get_send_len(const struct dp_packet *b)
695 {
696 return dp_packet_size(b) - dp_packet_get_cutlen(b);
697 }
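
/* Truncation sketch (illustrative only): request that at most 128 bytes of
 * 'pkt' be transmitted.
 *
 *     dp_packet_set_cutlen(pkt, 128);
 *     ...dp_packet_get_send_len(pkt) now returns MIN(dp_packet_size(pkt),
 *        128); the cut itself is applied later, e.g. by
 *        dp_packet_batch_apply_cutlen()...
 */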
698
699 static inline void *
700 dp_packet_data(const struct dp_packet *b)
701 {
702 return __packet_data(b) != UINT16_MAX
703 ? (char *) dp_packet_base(b) + __packet_data(b) : NULL;
704 }
705
706 static inline void
707 dp_packet_set_data(struct dp_packet *b, void *data)
708 {
709 if (data) {
710 __packet_set_data(b, (char *) data - (char *) dp_packet_base(b));
711 } else {
712 __packet_set_data(b, UINT16_MAX);
713 }
714 }
715
716 static inline void
717 dp_packet_reset_packet(struct dp_packet *b, int off)
718 {
719 dp_packet_set_size(b, dp_packet_size(b) - off);
720 dp_packet_set_data(b, ((unsigned char *) dp_packet_data(b) + off));
721 dp_packet_reset_offsets(b);
722 }
723
724 enum { NETDEV_MAX_BURST = 32 }; /* Maximum number of packets in a batch. */
725
726 struct dp_packet_batch {
727 size_t count;
728 bool trunc; /* True if the batch needs truncation. */
729 bool do_not_steal; /* Indicates that the packets should not be stolen. */
730 struct dp_packet *packets[NETDEV_MAX_BURST];
731 };
732
733 static inline void
734 dp_packet_batch_init(struct dp_packet_batch *batch)
735 {
736 batch->count = 0;
737 batch->trunc = false;
738 batch->do_not_steal = false;
739 }
740
741 static inline void
742 dp_packet_batch_add__(struct dp_packet_batch *batch,
743 struct dp_packet *packet, size_t limit)
744 {
745 if (batch->count < limit) {
746 batch->packets[batch->count++] = packet;
747 } else {
748 dp_packet_delete(packet);
749 }
750 }
751
752 /* When the batch is full, 'packet' will be dropped and freed. */
753 static inline void
754 dp_packet_batch_add(struct dp_packet_batch *batch, struct dp_packet *packet)
755 {
756 dp_packet_batch_add__(batch, packet, NETDEV_MAX_BURST);
757 }
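
/* Batch-building sketch (illustrative only):
 *
 *     struct dp_packet_batch batch;
 *
 *     dp_packet_batch_init(&batch);
 *     dp_packet_batch_add(&batch, pkt);
 *
 * If the batch is already full, dp_packet_batch_add() frees 'pkt' rather than
 * queueing it, so the caller cannot assume 'pkt' is still valid afterwards. */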
758
759 static inline size_t
760 dp_packet_batch_size(const struct dp_packet_batch *batch)
761 {
762 return batch->count;
763 }
764
765 /* Clear 'batch' for refill. Use dp_packet_batch_refill() to add
766 * packets back into the 'batch'. */
767 static inline void
768 dp_packet_batch_refill_init(struct dp_packet_batch *batch)
769 {
770 batch->count = 0;
771 }
772
773 static inline void
774 dp_packet_batch_refill(struct dp_packet_batch *batch,
775 struct dp_packet *packet, size_t idx)
776 {
777 dp_packet_batch_add__(batch, packet, MIN(NETDEV_MAX_BURST, idx + 1));
778 }
779
780 static inline void
781 dp_packet_batch_init_packet(struct dp_packet_batch *batch, struct dp_packet *p)
782 {
783 dp_packet_batch_init(batch);
784 batch->count = 1;
785 batch->packets[0] = p;
786 }
787
788 static inline bool
789 dp_packet_batch_is_empty(const struct dp_packet_batch *batch)
790 {
791 return !dp_packet_batch_size(batch);
792 }
793
794 static inline bool
795 dp_packet_batch_is_full(const struct dp_packet_batch *batch)
796 {
797 return dp_packet_batch_size(batch) == NETDEV_MAX_BURST;
798 }
799
800 #define DP_PACKET_BATCH_FOR_EACH(IDX, PACKET, BATCH) \
801 for (size_t IDX = 0; IDX < dp_packet_batch_size(BATCH); IDX++) \
802 if (PACKET = BATCH->packets[IDX], true)
803
804 /* Use this macro for cases where some packets in the 'BATCH' may be
805 * dropped while iterating over the packets in the 'BATCH'.
806 *
807 * For packets to stay in the 'BATCH', they need to be refilled back
808 * into the 'BATCH' by calling dp_packet_batch_refill(). The caller owns
809 * the packets that are not refilled.
810 *
811 * The caller must supply 'SIZE', which holds the current number of
812 * packets in 'BATCH'. It is best to declare this variable with
813 * the 'const' modifier since it should not be modified by
814 * the iterator. (See the usage sketch below the macro.) */
815 #define DP_PACKET_BATCH_REFILL_FOR_EACH(IDX, SIZE, PACKET, BATCH) \
816 for (dp_packet_batch_refill_init(BATCH), IDX = 0; IDX < SIZE; IDX++) \
817 if (PACKET = BATCH->packets[IDX], true)
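
/* Refill sketch (illustrative only; 'keep()' is a hypothetical predicate
 * supplied by the caller): drop some packets from 'batch' while retaining
 * the rest, as described above.
 *
 *     struct dp_packet *packet;
 *     const size_t cnt = dp_packet_batch_size(batch);
 *     size_t i;
 *
 *     DP_PACKET_BATCH_REFILL_FOR_EACH (i, cnt, packet, batch) {
 *         if (keep(packet)) {
 *             dp_packet_batch_refill(batch, packet, i);
 *         } else {
 *             dp_packet_delete(packet);
 *         }
 *     }
 */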
818
819 static inline void
820 dp_packet_batch_clone(struct dp_packet_batch *dst,
821 struct dp_packet_batch *src)
822 {
823 struct dp_packet *packet;
824
825 dp_packet_batch_init(dst);
826 DP_PACKET_BATCH_FOR_EACH (i, packet, src) {
827 if (i + 1 < dp_packet_batch_size(src)) {
828 OVS_PREFETCH(src->packets[i + 1]);
829 }
830
831 uint32_t headroom = dp_packet_headroom(packet);
832 struct dp_packet *pkt_clone;
833
834 pkt_clone = dp_packet_clone_with_headroom(packet, headroom);
835 dp_packet_batch_add(dst, pkt_clone);
836 }
837 dst->trunc = src->trunc;
838 }
839
840 static inline void
841 dp_packet_delete_batch(struct dp_packet_batch *batch, bool should_steal)
842 {
843 if (should_steal) {
844 struct dp_packet *packet;
845
846 DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
847 dp_packet_delete(packet);
848 }
849 dp_packet_batch_init(batch);
850 }
851 }
852
853 static inline void
854 dp_packet_batch_init_packet_fields(struct dp_packet_batch *batch)
855 {
856 struct dp_packet *packet;
857
858 DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
859 dp_packet_reset_cutlen(packet);
860 packet->packet_type = htonl(PT_ETH);
861 }
862 }
863
864 static inline void
865 dp_packet_batch_apply_cutlen(struct dp_packet_batch *batch)
866 {
867 if (batch->trunc) {
868 struct dp_packet *packet;
869
870 DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
871 dp_packet_set_size(packet, dp_packet_get_send_len(packet));
872 dp_packet_reset_cutlen(packet);
873 }
874 batch->trunc = false;
875 }
876 }
877
878 static inline void
879 dp_packet_batch_reset_cutlen(struct dp_packet_batch *batch)
880 {
881 if (batch->trunc) {
882 struct dp_packet *packet;
883
884 DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
885 dp_packet_reset_cutlen(packet);
886 }
887 batch->trunc = false;
888 }
889 }
890
891 /* Returns the RSS hash of the packet 'p'. Note that the returned value is
892 * correct only if 'dp_packet_rss_valid(p)' returns 'true'. */
893 static inline uint32_t
894 dp_packet_get_rss_hash(const struct dp_packet *p)
895 {
896 return *dp_packet_rss_ptr(p);
897 }
898
899 static inline void
900 dp_packet_set_rss_hash(struct dp_packet *p, uint32_t hash)
901 {
902 *dp_packet_rss_ptr(p) = hash;
903 *dp_packet_ol_flags_ptr(p) |= DP_PACKET_OL_RSS_HASH;
904 }
905
906 static inline bool
907 dp_packet_rss_valid(const struct dp_packet *p)
908 {
909 return *dp_packet_ol_flags_ptr(p) & DP_PACKET_OL_RSS_HASH;
910 }
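
/* Usage sketch (illustrative only): reuse a hash computed by the NIC when one
 * is available, otherwise compute one in software and cache it on the packet.
 *
 *     uint32_t hash;
 *
 *     if (dp_packet_rss_valid(pkt)) {
 *         hash = dp_packet_get_rss_hash(pkt);
 *     } else {
 *         hash = ...hash computed from the packet headers...;
 *         dp_packet_set_rss_hash(pkt, hash);
 *     }
 */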
911
912 static inline void
913 dp_packet_reset_offload(struct dp_packet *p)
914 {
915 *dp_packet_ol_flags_ptr(p) &= ~DP_PACKET_OL_SUPPORTED_MASK;
916 }
917
918 static inline bool
919 dp_packet_has_flow_mark(const struct dp_packet *p, uint32_t *mark)
920 {
921 if (*dp_packet_ol_flags_ptr(p) & DP_PACKET_OL_FLOW_MARK) {
922 *mark = *dp_packet_flow_mark_ptr(p);
923 return true;
924 }
925
926 return false;
927 }
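
/* Usage sketch (illustrative only):
 *
 *     uint32_t mark;
 *
 *     if (dp_packet_has_flow_mark(pkt, &mark)) {
 *         ...'mark' now holds the flow mark reported by the NIC...
 *     }
 */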
928
929 static inline void
930 dp_packet_set_flow_mark(struct dp_packet *p, uint32_t mark)
931 {
932 *dp_packet_flow_mark_ptr(p) = mark;
933 *dp_packet_ol_flags_ptr(p) |= DP_PACKET_OL_FLOW_MARK;
934 }
935
936 /* Returns the L4 cksum offload bitmask. */
937 static inline uint64_t
938 dp_packet_hwol_l4_mask(const struct dp_packet *b)
939 {
940 return *dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK;
941 }
942
943 /* Returns 'true' if packet 'b' requests L4 checksum offload. */
944 static inline bool
945 dp_packet_hwol_tx_l4_checksum(const struct dp_packet *b)
946 {
947 return !!dp_packet_hwol_l4_mask(b);
948 }
949
950 /* Returns 'true' if packet 'b' is marked for TCP segmentation offloading. */
951 static inline bool
952 dp_packet_hwol_is_tso(const struct dp_packet *b)
953 {
954 return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_TCP_SEG);
955 }
956
957 /* Returns 'true' if packet 'b' is marked for IPv4 checksum offloading. */
958 static inline bool
959 dp_packet_hwol_is_ipv4(const struct dp_packet *b)
960 {
961 return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_IPV4);
962 }
963
964 /* Returns 'true' if packet 'b' is marked for TCP checksum offloading. */
965 static inline bool
966 dp_packet_hwol_l4_is_tcp(const struct dp_packet *b)
967 {
968 return (*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK) ==
969 DP_PACKET_OL_TX_TCP_CKSUM;
970 }
971
972 /* Returns 'true' if packet 'b' is marked for UDP checksum offloading. */
973 static inline bool
974 dp_packet_hwol_l4_is_udp(struct dp_packet *b)
975 {
976 return (*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK) ==
977 DP_PACKET_OL_TX_UDP_CKSUM;
978 }
979
980 /* Returns 'true' if packet 'b' is marked for SCTP checksum offloading. */
981 static inline bool
982 dp_packet_hwol_l4_is_sctp(struct dp_packet *b)
983 {
984 return (*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK) ==
985 DP_PACKET_OL_TX_SCTP_CKSUM;
986 }
987
988 /* Mark packet 'b' for IPv4 checksum offloading. */
989 static inline void
990 dp_packet_hwol_set_tx_ipv4(struct dp_packet *b)
991 {
992 *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_IPV4;
993 }
994
995 /* Mark packet 'b' for IPv6 checksum offloading. */
996 static inline void
997 dp_packet_hwol_set_tx_ipv6(struct dp_packet *b)
998 {
999 *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_IPV6;
1000 }
1001
1002 /* Mark packet 'b' for TCP checksum offloading. This implies that 'b' is
1003 * also marked for either IPv4 or IPv6 checksum offloading. */
1004 static inline void
1005 dp_packet_hwol_set_csum_tcp(struct dp_packet *b)
1006 {
1007 *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TCP_CKSUM;
1008 }
1009
1010 /* Mark packet 'b' for UDP checksum offloading. This implies that 'b' is
1011 * also marked for either IPv4 or IPv6 checksum offloading. */
1012 static inline void
1013 dp_packet_hwol_set_csum_udp(struct dp_packet *b)
1014 {
1015 *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_UDP_CKSUM;
1016 }
1017
1018 /* Mark packet 'b' for SCTP checksum offloading. This implies that 'b' is
1019 * also marked for either IPv4 or IPv6 checksum offloading. */
1020 static inline void
1021 dp_packet_hwol_set_csum_sctp(struct dp_packet *b)
1022 {
1023 *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_SCTP_CKSUM;
1024 }
1025
1026 /* Mark packet 'b' for TCP segmentation offloading. This implies that 'b'
1027 * is also marked for either IPv4 or IPv6 checksum offloading as well as
1028 * for TCP checksum offloading. */
1029 static inline void
1030 dp_packet_hwol_set_tcp_seg(struct dp_packet *b)
1031 {
1032 *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TCP_SEG;
1033 }
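
/* Marking sketch (illustrative only): request NIC segmentation of an IPv4
 * TCP packet.  Per the comments above, TSO implies that the IPv4 and TCP
 * checksum-offload marks are set as well.
 *
 *     dp_packet_hwol_set_tx_ipv4(pkt);
 *     dp_packet_hwol_set_csum_tcp(pkt);
 *     dp_packet_hwol_set_tcp_seg(pkt);
 */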
1034
1035 static inline bool
1036 dp_packet_ip_checksum_valid(const struct dp_packet *p)
1037 {
1038 return (*dp_packet_ol_flags_ptr(p) & DP_PACKET_OL_RX_IP_CKSUM_MASK) ==
1039 DP_PACKET_OL_RX_IP_CKSUM_GOOD;
1040 }
1041
1042 static inline bool
1043 dp_packet_ip_checksum_bad(const struct dp_packet *p)
1044 {
1045 return (*dp_packet_ol_flags_ptr(p) & DP_PACKET_OL_RX_IP_CKSUM_MASK) ==
1046 DP_PACKET_OL_RX_IP_CKSUM_BAD;
1047 }
1048
1049 static inline bool
1050 dp_packet_l4_checksum_valid(const struct dp_packet *p)
1051 {
1052 return (*dp_packet_ol_flags_ptr(p) & DP_PACKET_OL_RX_L4_CKSUM_MASK) ==
1053 DP_PACKET_OL_RX_L4_CKSUM_GOOD;
1054 }
1055
1056 static inline bool
1057 dp_packet_l4_checksum_bad(const struct dp_packet *p)
1058 {
1059 return (*dp_packet_ol_flags_ptr(p) & DP_PACKET_OL_RX_L4_CKSUM_MASK) ==
1060 DP_PACKET_OL_RX_L4_CKSUM_BAD;
1061 }
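
/* Status sketch (illustrative only): a caller typically falls back to
 * software verification only when the L4 checksum is neither known-good nor
 * known-bad.
 *
 *     if (!dp_packet_l4_checksum_valid(pkt)
 *         && !dp_packet_l4_checksum_bad(pkt)) {
 *         ...verify the L4 checksum in software...
 *     }
 */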
1062
1063 #ifdef __cplusplus
1064 }
1065 #endif
1066
1067 #endif /* dp-packet.h */