lib/packets.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "packets.h"
19 #include <arpa/inet.h>
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <netinet/ip6.h>
23 #include <netinet/icmp6.h>
24 #include <stdlib.h>
25 #include "byte-order.h"
26 #include "csum.h"
27 #include "crc32c.h"
28 #include "flow.h"
29 #include "hmap.h"
30 #include "dynamic-string.h"
31 #include "ovs-thread.h"
32 #include "odp-util.h"
33 #include "dp-packet.h"
34 #include "unaligned.h"
35
36 const struct in6_addr in6addr_exact = IN6ADDR_EXACT_INIT;
37
38 /* Parses 's' as a 16-digit hexadecimal number representing a datapath ID. On
39 * success stores the dpid into '*dpidp' and returns true, on failure stores 0
40 * into '*dpidp' and returns false.
41 *
42 * Rejects an all-zeros dpid as invalid. */
43 bool
44 dpid_from_string(const char *s, uint64_t *dpidp)
45 {
46 *dpidp = (strlen(s) == 16 && strspn(s, "0123456789abcdefABCDEF") == 16
47 ? strtoull(s, NULL, 16)
48 : 0);
49 return *dpidp != 0;
50 }
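
/* Usage sketch (illustrative only; the 16-hex-digit string below is a made-up
 * value):
 *
 *     uint64_t dpid;
 *
 *     if (dpid_from_string("0000123456789abc", &dpid)) {
 *         ...here dpid == UINT64_C(0x0000123456789abc)...
 *     }
 */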
51
52 /* Returns true if 'ea' is a reserved address that a bridge must never
53 * forward, false otherwise.
54 *
55 * If you change this function's behavior, please update corresponding
56 * documentation in vswitch.xml at the same time. */
57 bool
58 eth_addr_is_reserved(const uint8_t ea[ETH_ADDR_LEN])
59 {
60 struct eth_addr_node {
61 struct hmap_node hmap_node;
62 const uint64_t ea64;
63 };
64
65 static struct eth_addr_node nodes[] = {
66 /* STP, IEEE pause frames, and other reserved protocols. */
67 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000000ULL },
68 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000001ULL },
69 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000002ULL },
70 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000003ULL },
71 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000004ULL },
72 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000005ULL },
73 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000006ULL },
74 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000007ULL },
75 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000008ULL },
76 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000009ULL },
77 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000aULL },
78 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000bULL },
79 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000cULL },
80 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000dULL },
81 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000eULL },
82 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000fULL },
83
84 /* Extreme protocols. */
85 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000000ULL }, /* EDP. */
86 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000004ULL }, /* EAPS. */
87 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000006ULL }, /* EAPS. */
88
89 /* Cisco protocols. */
90 { HMAP_NODE_NULL_INITIALIZER, 0x01000c000000ULL }, /* ISL. */
91 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccccULL }, /* PAgP, UDLD, CDP,
92 * DTP, VTP. */
93 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccccccdULL }, /* PVST+. */
94 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccdcdcdULL }, /* STP Uplink Fast,
95 * FlexLink. */
96
97 /* Cisco CFM. */
98 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc0ULL },
99 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc1ULL },
100 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc2ULL },
101 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc3ULL },
102 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc4ULL },
103 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc5ULL },
104 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc6ULL },
105 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc7ULL },
106 };
107
108 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
109 struct eth_addr_node *node;
110 static struct hmap addrs;
111 uint64_t ea64;
112
113 if (ovsthread_once_start(&once)) {
114 hmap_init(&addrs);
115 for (node = nodes; node < &nodes[ARRAY_SIZE(nodes)]; node++) {
116 hmap_insert(&addrs, &node->hmap_node, hash_uint64(node->ea64));
117 }
118 ovsthread_once_done(&once);
119 }
120
121 ea64 = eth_addr_to_uint64(ea);
122 HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_uint64(ea64), &addrs) {
123 if (node->ea64 == ea64) {
124 return true;
125 }
126 }
127 return false;
128 }
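
/* Usage sketch (illustrative only; the address is the IEEE 802.1D bridge
 * group address listed in the table above):
 *
 *     static const uint8_t stp_dst[ETH_ADDR_LEN] = {
 *         0x01, 0x80, 0xc2, 0x00, 0x00, 0x00
 *     };
 *
 *     bool reserved = eth_addr_is_reserved(stp_dst);
 *         ...'reserved' is true for this address...
 */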
129
130 bool
131 eth_addr_from_string(const char *s, uint8_t ea[ETH_ADDR_LEN])
132 {
133 if (ovs_scan(s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(ea))) {
134 return true;
135 } else {
136 memset(ea, 0, ETH_ADDR_LEN);
137 return false;
138 }
139 }
140
141 /* Fills 'b' with a Reverse ARP packet with Ethernet source address 'eth_src'.
142 * This function is used by Open vSwitch to compose packets in cases where
143 * context is important but content doesn't (or shouldn't) matter.
144 *
145 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
146 * desired. */
147 void
148 compose_rarp(struct dp_packet *b, const uint8_t eth_src[ETH_ADDR_LEN])
149 {
150 struct eth_header *eth;
151 struct arp_eth_header *arp;
152
153 dp_packet_clear(b);
154 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN
155 + ARP_ETH_HEADER_LEN);
156 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
157 eth = dp_packet_put_uninit(b, sizeof *eth);
158 memcpy(eth->eth_dst, eth_addr_broadcast, ETH_ADDR_LEN);
159 memcpy(eth->eth_src, eth_src, ETH_ADDR_LEN);
160 eth->eth_type = htons(ETH_TYPE_RARP);
161
162 arp = dp_packet_put_uninit(b, sizeof *arp);
163 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
164 arp->ar_pro = htons(ARP_PRO_IP);
165 arp->ar_hln = sizeof arp->ar_sha;
166 arp->ar_pln = sizeof arp->ar_spa;
167 arp->ar_op = htons(ARP_OP_RARP);
168 memcpy(arp->ar_sha, eth_src, ETH_ADDR_LEN);
169 put_16aligned_be32(&arp->ar_spa, htonl(0));
170 memcpy(arp->ar_tha, eth_src, ETH_ADDR_LEN);
171 put_16aligned_be32(&arp->ar_tpa, htonl(0));
172
173 dp_packet_reset_offsets(b);
174 dp_packet_set_l3(b, arp);
175 }
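
/* Usage sketch (illustrative only; 'b' and 'mac' are hypothetical, and
 * dp_packet_init()/dp_packet_uninit() are assumed to be the usual dp-packet
 * setup and teardown helpers):
 *
 *     struct dp_packet b;
 *     const uint8_t mac[ETH_ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *     dp_packet_init(&b, 0);
 *     compose_rarp(&b, mac);
 *     ...send or inspect the RARP frame...
 *     dp_packet_uninit(&b);
 */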
176
177 /* Inserts a VLAN header according to the given TCI. The packet passed in
178 * must be an Ethernet packet. Ignores the CFI bit of 'tci', using 0 instead.
179 *
180 * Also adjusts the layer offsets accordingly. */
181 void
182 eth_push_vlan(struct dp_packet *packet, ovs_be16 tpid, ovs_be16 tci)
183 {
184 struct vlan_eth_header *veh;
185
186 /* Insert new 802.1Q header. */
187 veh = dp_packet_resize_l2(packet, VLAN_HEADER_LEN);
188 memmove(veh, (char *)veh + VLAN_HEADER_LEN, 2 * ETH_ADDR_LEN);
189 veh->veth_type = tpid;
190 veh->veth_tci = tci & htons(~VLAN_CFI);
191 }
192
193 /* Removes outermost VLAN header (if any is present) from 'packet'.
194 *
195 * 'packet''s l2 header should initially point to its Ethernet header, which
196 * is checked in place for an outermost 802.1Q tag. */
197 void
198 eth_pop_vlan(struct dp_packet *packet)
199 {
200 struct vlan_eth_header *veh = dp_packet_l2(packet);
201
202 if (veh && dp_packet_size(packet) >= sizeof *veh
203 && eth_type_vlan(veh->veth_type)) {
204
205 memmove((char *)veh + VLAN_HEADER_LEN, veh, 2 * ETH_ADDR_LEN);
206 dp_packet_resize_l2(packet, -VLAN_HEADER_LEN);
207 }
208 }
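
/* Usage sketch (illustrative only; 'packet' is assumed to hold a complete
 * Ethernet frame with its layer offsets set, and ETH_TYPE_VLAN_8021Q is
 * assumed to be the 802.1Q TPID constant from packets.h):
 *
 *     eth_push_vlan(packet, htons(ETH_TYPE_VLAN_8021Q), htons(10));
 *         ...tags the frame with VLAN 10, priority 0...
 *     eth_pop_vlan(packet);
 *         ...removes the tag again, since it is now the outermost header...
 */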
209
210 /* Set ethertype of the packet. */
211 static void
212 set_ethertype(struct dp_packet *packet, ovs_be16 eth_type)
213 {
214 struct eth_header *eh = dp_packet_l2(packet);
215
216 if (!eh) {
217 return;
218 }
219
220 if (eth_type_vlan(eh->eth_type)) {
221 ovs_be16 *p;
222 char *l2_5 = dp_packet_l2_5(packet);
223
224 p = ALIGNED_CAST(ovs_be16 *,
225 (l2_5 ? l2_5 : (char *)dp_packet_l3(packet)) - 2);
226 *p = eth_type;
227 } else {
228 eh->eth_type = eth_type;
229 }
230 }
231
232 static bool is_mpls(struct dp_packet *packet)
233 {
234 return packet->l2_5_ofs != UINT16_MAX;
235 }
236
237 /* Set time to live (TTL) of an MPLS label stack entry (LSE). */
238 void
239 set_mpls_lse_ttl(ovs_be32 *lse, uint8_t ttl)
240 {
241 *lse &= ~htonl(MPLS_TTL_MASK);
242 *lse |= htonl((ttl << MPLS_TTL_SHIFT) & MPLS_TTL_MASK);
243 }
244
245 /* Set traffic class (TC) of an MPLS label stack entry (LSE). */
246 void
247 set_mpls_lse_tc(ovs_be32 *lse, uint8_t tc)
248 {
249 *lse &= ~htonl(MPLS_TC_MASK);
250 *lse |= htonl((tc << MPLS_TC_SHIFT) & MPLS_TC_MASK);
251 }
252
253 /* Set label of an MPLS label stack entry (LSE). */
254 void
255 set_mpls_lse_label(ovs_be32 *lse, ovs_be32 label)
256 {
257 *lse &= ~htonl(MPLS_LABEL_MASK);
258 *lse |= htonl((ntohl(label) << MPLS_LABEL_SHIFT) & MPLS_LABEL_MASK);
259 }
260
261 /* Set bottom of stack (BoS) bit of an MPLS label stack entry (LSE). */
262 void
263 set_mpls_lse_bos(ovs_be32 *lse, uint8_t bos)
264 {
265 *lse &= ~htonl(MPLS_BOS_MASK);
266 *lse |= htonl((bos << MPLS_BOS_SHIFT) & MPLS_BOS_MASK);
267 }
268
269 /* Compose an MPLS label stack entry (LSE) from its components:
270 * label, traffic class (TC), time to live (TTL) and
271 * bottom of stack (BoS) bit. */
272 ovs_be32
273 set_mpls_lse_values(uint8_t ttl, uint8_t tc, uint8_t bos, ovs_be32 label)
274 {
275 ovs_be32 lse = htonl(0);
276 set_mpls_lse_ttl(&lse, ttl);
277 set_mpls_lse_tc(&lse, tc);
278 set_mpls_lse_bos(&lse, bos);
279 set_mpls_lse_label(&lse, label);
280 return lse;
281 }
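
/* Usage sketch (illustrative only; the field values are arbitrary):
 *
 *     ovs_be32 lse = set_mpls_lse_values(64, 0, 1, htonl(100));
 *         ...an LSE with TTL 64, TC 0, bottom-of-stack set, and label 100...
 */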
282
283 /* Sets the label stack entry of 'packet''s outermost MPLS header. */
284 void
285 set_mpls_lse(struct dp_packet *packet, ovs_be32 mpls_lse)
286 {
287 /* Packet type should be MPLS to set label stack entry. */
288 if (is_mpls(packet)) {
289 struct mpls_hdr *mh = dp_packet_l2_5(packet);
290
291 /* Update mpls label stack entry. */
292 put_16aligned_be32(&mh->mpls_lse, mpls_lse);
293 }
294 }
295
296 /* Push MPLS label stack entry 'lse' onto 'packet' as the outermost MPLS
297 * header. If 'packet' does not already have any MPLS labels, then its
298 * Ethertype is changed to 'ethtype' (which must be an MPLS Ethertype). */
299 void
300 push_mpls(struct dp_packet *packet, ovs_be16 ethtype, ovs_be32 lse)
301 {
302 char * header;
303 size_t len;
304
305 if (!eth_type_mpls(ethtype)) {
306 return;
307 }
308
309 if (!is_mpls(packet)) {
310 /* Set MPLS label stack offset. */
311 packet->l2_5_ofs = packet->l3_ofs;
312 }
313
314 set_ethertype(packet, ethtype);
315
316 /* Push new MPLS shim header onto packet. */
317 len = packet->l2_5_ofs;
318 header = dp_packet_resize_l2_5(packet, MPLS_HLEN);
319 memmove(header, header + MPLS_HLEN, len);
320 memcpy(header + len, &lse, sizeof lse);
321 }
322
323 /* If 'packet' is an MPLS packet, removes its outermost MPLS label stack entry.
324 * If the label that was removed was the only MPLS label, changes 'packet''s
325 * Ethertype to 'ethtype' (which ordinarily should not be an MPLS
326 * Ethertype). */
327 void
328 pop_mpls(struct dp_packet *packet, ovs_be16 ethtype)
329 {
330 if (is_mpls(packet)) {
331 struct mpls_hdr *mh = dp_packet_l2_5(packet);
332 size_t len = packet->l2_5_ofs;
333
334 set_ethertype(packet, ethtype);
335 if (get_16aligned_be32(&mh->mpls_lse) & htonl(MPLS_BOS_MASK)) {
336 dp_packet_set_l2_5(packet, NULL);
337 }
338 /* Shift the l2 header forward. */
339 memmove((char*)dp_packet_data(packet) + MPLS_HLEN, dp_packet_data(packet), len);
340 dp_packet_resize_l2_5(packet, -MPLS_HLEN);
341 }
342 }
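
/* Usage sketch (illustrative only; 'packet' is assumed to be an Ethernet/IPv4
 * frame with valid layer offsets, and ETH_TYPE_MPLS/ETH_TYPE_IP are assumed
 * from packets.h):
 *
 *     ovs_be32 lse = set_mpls_lse_values(64, 0, 1, htonl(100));
 *
 *     push_mpls(packet, htons(ETH_TYPE_MPLS), lse);
 *         ...the frame now carries one MPLS label...
 *     pop_mpls(packet, htons(ETH_TYPE_IP));
 *         ...back to a plain IPv4 frame...
 */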
343
344 /* Converts hex digits in 'hex' to an Ethernet packet in '*packetp'. The
345 * caller must free '*packetp'. On success, returns NULL. On failure, returns
346 * an error message and stores NULL in '*packetp'.
347 *
348 * Aligns the L3 header of '*packetp' on a 32-bit boundary. */
349 const char *
350 eth_from_hex(const char *hex, struct dp_packet **packetp)
351 {
352 struct dp_packet *packet;
353
354 /* Use 2 bytes of headroom to 32-bit align the L3 header. */
355 packet = *packetp = dp_packet_new_with_headroom(strlen(hex) / 2, 2);
356
357 if (dp_packet_put_hex(packet, hex, NULL)[0] != '\0') {
358 dp_packet_delete(packet);
359 *packetp = NULL;
360 return "Trailing garbage in packet data";
361 }
362
363 if (dp_packet_size(packet) < ETH_HEADER_LEN) {
364 dp_packet_delete(packet);
365 *packetp = NULL;
366 return "Packet data too short for Ethernet";
367 }
368
369 return NULL;
370 }
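
/* Usage sketch (illustrative only; the hex string encodes a made-up 14-byte
 * Ethernet header and nothing else):
 *
 *     struct dp_packet *p;
 *     const char *error = eth_from_hex("ffffffffffff0011223344550800", &p);
 *
 *     if (!error) {
 *         ...use 'p'...
 *         dp_packet_delete(p);
 *     }
 */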
371
372 void
373 eth_format_masked(const uint8_t eth[ETH_ADDR_LEN],
374 const uint8_t mask[ETH_ADDR_LEN], struct ds *s)
375 {
376 ds_put_format(s, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth));
377 if (mask && !eth_mask_is_exact(mask)) {
378 ds_put_format(s, "/"ETH_ADDR_FMT, ETH_ADDR_ARGS(mask));
379 }
380 }
381
382 void
383 eth_addr_bitand(const uint8_t src[ETH_ADDR_LEN],
384 const uint8_t mask[ETH_ADDR_LEN],
385 uint8_t dst[ETH_ADDR_LEN])
386 {
387 int i;
388
389 for (i = 0; i < ETH_ADDR_LEN; i++) {
390 dst[i] = src[i] & mask[i];
391 }
392 }
393
394 /* Given the IP netmask 'netmask', returns the number of bits of the IP address
395 * that it specifies, that is, the number of 1-bits in 'netmask'.
396 *
397 * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will
398 * still be in the valid range but isn't otherwise meaningful. */
399 int
400 ip_count_cidr_bits(ovs_be32 netmask)
401 {
402 return 32 - ctz32(ntohl(netmask));
403 }
404
405 void
406 ip_format_masked(ovs_be32 ip, ovs_be32 mask, struct ds *s)
407 {
408 ds_put_format(s, IP_FMT, IP_ARGS(ip));
409 if (mask != OVS_BE32_MAX) {
410 if (ip_is_cidr(mask)) {
411 ds_put_format(s, "/%d", ip_count_cidr_bits(mask));
412 } else {
413 ds_put_format(s, "/"IP_FMT, IP_ARGS(mask));
414 }
415 }
416 }
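
/* Usage sketch (illustrative only; DS_EMPTY_INITIALIZER, ds_cstr(), and
 * ds_destroy() are assumed from dynamic-string.h):
 *
 *     struct ds s = DS_EMPTY_INITIALIZER;
 *
 *     ip_format_masked(htonl(0xc0a80100), htonl(0xffffff00), &s);
 *         ...ds_cstr(&s) is now "192.168.1.0/24"...
 *     ds_destroy(&s);
 */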
417
418
419 /* Stores the string representation of the IPv6 address 'addr' into the
420 * character array 'addr_str', which must be at least INET6_ADDRSTRLEN
421 * bytes long. */
422 void
423 format_ipv6_addr(char *addr_str, const struct in6_addr *addr)
424 {
425 inet_ntop(AF_INET6, addr, addr_str, INET6_ADDRSTRLEN);
426 }
427
428 void
429 print_ipv6_addr(struct ds *string, const struct in6_addr *addr)
430 {
431 char *dst;
432
433 ds_reserve(string, string->length + INET6_ADDRSTRLEN);
434
435 dst = string->string + string->length;
436 format_ipv6_addr(dst, addr);
437 string->length += strlen(dst);
438 }
439
440 void
441 print_ipv6_masked(struct ds *s, const struct in6_addr *addr,
442 const struct in6_addr *mask)
443 {
444 print_ipv6_addr(s, addr);
445 if (mask && !ipv6_mask_is_exact(mask)) {
446 if (ipv6_is_cidr(mask)) {
447 int cidr_bits = ipv6_count_cidr_bits(mask);
448 ds_put_format(s, "/%d", cidr_bits);
449 } else {
450 ds_put_char(s, '/');
451 print_ipv6_addr(s, mask);
452 }
453 }
454 }
455
456 struct in6_addr ipv6_addr_bitand(const struct in6_addr *a,
457 const struct in6_addr *b)
458 {
459 int i;
460 struct in6_addr dst;
461
462 #ifdef s6_addr32
463 for (i=0; i<4; i++) {
464 dst.s6_addr32[i] = a->s6_addr32[i] & b->s6_addr32[i];
465 }
466 #else
467 for (i=0; i<16; i++) {
468 dst.s6_addr[i] = a->s6_addr[i] & b->s6_addr[i];
469 }
470 #endif
471
472 return dst;
473 }
474
475 /* Returns an in6_addr consisting of 'mask' high-order 1-bits and 128-'mask'
476 * low-order 0-bits. */
477 struct in6_addr
478 ipv6_create_mask(int mask)
479 {
480 struct in6_addr netmask;
481 uint8_t *netmaskp = &netmask.s6_addr[0];
482
483 memset(&netmask, 0, sizeof netmask);
484 while (mask > 8) {
485 *netmaskp = 0xff;
486 netmaskp++;
487 mask -= 8;
488 }
489
490 if (mask) {
491 *netmaskp = 0xff << (8 - mask);
492 }
493
494 return netmask;
495 }
496
497 /* Given the IPv6 netmask 'netmask', returns the number of bits of the IPv6
498 * address that it specifies, that is, the number of 1-bits in 'netmask'.
499 *
500 * 'netmask' should be a CIDR netmask (see ipv6_is_cidr()). If it is not, the
501 * return value will still be in the valid range but isn't otherwise
502 * meaningful. */
503 int
504 ipv6_count_cidr_bits(const struct in6_addr *netmask)
505 {
506 int i;
507 int count = 0;
508 const uint8_t *netmaskp = &netmask->s6_addr[0];
509
510 for (i=0; i<16; i++) {
511 if (netmaskp[i] == 0xff) {
512 count += 8;
513 } else {
514 uint8_t nm;
515
516 for(nm = netmaskp[i]; nm; nm <<= 1) {
517 count++;
518 }
519 break;
520 }
521
522 }
523
524 return count;
525 }
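
/* Usage sketch (illustrative only):
 *
 *     struct in6_addr mask = ipv6_create_mask(64);
 *     int bits = ipv6_count_cidr_bits(&mask);
 *         ...'bits' is 64...
 */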
526
527 /* Returns true if 'netmask' is a CIDR netmask, that is, if it consists of N
528 * high-order 1-bits and 128-N low-order 0-bits. */
529 bool
530 ipv6_is_cidr(const struct in6_addr *netmask)
531 {
532 const uint8_t *netmaskp = &netmask->s6_addr[0];
533 int i;
534
535 for (i=0; i<16; i++) {
536 if (netmaskp[i] != 0xff) {
537 uint8_t x = ~netmaskp[i];
538 if (x & (x + 1)) {
539 return false;
540 }
541 while (++i < 16) {
542 if (netmaskp[i]) {
543 return false;
544 }
545 }
546 }
547 }
548
549 return true;
550 }
551
552 /* Populates 'b' with an Ethernet II packet headed with the given 'eth_dst',
553 * 'eth_src' and 'eth_type' parameters. A payload of 'size' bytes is allocated
554 * in 'b' and returned. This payload may be populated with appropriate
555 * information by the caller. Sets 'b''s 'frame' pointer and 'l3' offset to
556 * the Ethernet header and payload respectively. Aligns b->l3 on a 32-bit
557 * boundary.
558 *
559 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
560 * desired. */
561 void *
562 eth_compose(struct dp_packet *b, const uint8_t eth_dst[ETH_ADDR_LEN],
563 const uint8_t eth_src[ETH_ADDR_LEN], uint16_t eth_type,
564 size_t size)
565 {
566 void *data;
567 struct eth_header *eth;
568
569 dp_packet_clear(b);
570
571 /* The magic 2 here ensures that the L3 header (when it is added later)
572 * will be 32-bit aligned. */
573 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + size);
574 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
575 eth = dp_packet_put_uninit(b, ETH_HEADER_LEN);
576 data = dp_packet_put_uninit(b, size);
577
578 memcpy(eth->eth_dst, eth_dst, ETH_ADDR_LEN);
579 memcpy(eth->eth_src, eth_src, ETH_ADDR_LEN);
580 eth->eth_type = htons(eth_type);
581
582 dp_packet_reset_offsets(b);
583 dp_packet_set_l3(b, data);
584
585 return data;
586 }
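
/* Usage sketch (illustrative only; 'b' is a hypothetical dp_packet already
 * initialized by the caller, 'src_mac' is a made-up address array, and
 * ETH_TYPE_IP is assumed from packets.h):
 *
 *     void *payload = eth_compose(&b, eth_addr_broadcast, src_mac,
 *                                 ETH_TYPE_IP, 64);
 *
 *     memset(payload, 0, 64);
 *         ...fill in the 64-byte payload as needed...
 */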
587
588 static void
589 packet_set_ipv4_addr(struct dp_packet *packet,
590 ovs_16aligned_be32 *addr, ovs_be32 new_addr)
591 {
592 struct ip_header *nh = dp_packet_l3(packet);
593 ovs_be32 old_addr = get_16aligned_be32(addr);
594 size_t l4_size = dp_packet_l4_size(packet);
595
596 if (nh->ip_proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
597 struct tcp_header *th = dp_packet_l4(packet);
598
599 th->tcp_csum = recalc_csum32(th->tcp_csum, old_addr, new_addr);
600 } else if (nh->ip_proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN ) {
601 struct udp_header *uh = dp_packet_l4(packet);
602
603 if (uh->udp_csum) {
604 uh->udp_csum = recalc_csum32(uh->udp_csum, old_addr, new_addr);
605 if (!uh->udp_csum) {
606 uh->udp_csum = htons(0xffff);
607 }
608 }
609 }
610 nh->ip_csum = recalc_csum32(nh->ip_csum, old_addr, new_addr);
611 put_16aligned_be32(addr, new_addr);
612 }
613
614 /* Returns true if 'packet' contains at least one routing header where
615 * segments_left > 0.
616 *
617 * This function assumes that L3 and L4 offsets are set in the packet. */
618 static bool
619 packet_rh_present(struct dp_packet *packet)
620 {
621 const struct ovs_16aligned_ip6_hdr *nh;
622 int nexthdr;
623 size_t len;
624 size_t remaining;
625 uint8_t *data = dp_packet_l3(packet);
626
627 remaining = packet->l4_ofs - packet->l3_ofs;
628
629 if (remaining < sizeof *nh) {
630 return false;
631 }
632 nh = ALIGNED_CAST(struct ovs_16aligned_ip6_hdr *, data);
633 data += sizeof *nh;
634 remaining -= sizeof *nh;
635 nexthdr = nh->ip6_nxt;
636
637 while (1) {
638 if ((nexthdr != IPPROTO_HOPOPTS)
639 && (nexthdr != IPPROTO_ROUTING)
640 && (nexthdr != IPPROTO_DSTOPTS)
641 && (nexthdr != IPPROTO_AH)
642 && (nexthdr != IPPROTO_FRAGMENT)) {
643 /* It's either a terminal header (e.g., TCP, UDP) or one we
644 * don't understand. In either case, we're done with the
645 * packet, so use it to fill in 'nw_proto'. */
646 break;
647 }
648
649 /* We only verify that at least 8 bytes of the next header are
650 * available, but many of these headers are longer. Ensure that
651 * accesses within the extension header are within those first 8
652 * bytes. All extension headers are required to be at least 8
653 * bytes. */
654 if (remaining < 8) {
655 return false;
656 }
657
658 if (nexthdr == IPPROTO_AH) {
659 /* A standard AH definition isn't available, but the fields
660 * we care about are in the same location as the generic
661 * option header--only the header length is calculated
662 * differently. */
663 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
664
665 nexthdr = ext_hdr->ip6e_nxt;
666 len = (ext_hdr->ip6e_len + 2) * 4;
667 } else if (nexthdr == IPPROTO_FRAGMENT) {
668 const struct ovs_16aligned_ip6_frag *frag_hdr
669 = ALIGNED_CAST(struct ovs_16aligned_ip6_frag *, data);
670
671 nexthdr = frag_hdr->ip6f_nxt;
672 len = sizeof *frag_hdr;
673 } else if (nexthdr == IPPROTO_ROUTING) {
674 const struct ip6_rthdr *rh = (struct ip6_rthdr *)data;
675
676 if (rh->ip6r_segleft > 0) {
677 return true;
678 }
679
680 nexthdr = rh->ip6r_nxt;
681 len = (rh->ip6r_len + 1) * 8;
682 } else {
683 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
684
685 nexthdr = ext_hdr->ip6e_nxt;
686 len = (ext_hdr->ip6e_len + 1) * 8;
687 }
688
689 if (remaining < len) {
690 return false;
691 }
692 remaining -= len;
693 data += len;
694 }
695
696 return false;
697 }
698
699 static void
700 packet_update_csum128(struct dp_packet *packet, uint8_t proto,
701 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4])
702 {
703 size_t l4_size = dp_packet_l4_size(packet);
704
705 if (proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
706 struct tcp_header *th = dp_packet_l4(packet);
707
708 th->tcp_csum = recalc_csum128(th->tcp_csum, addr, new_addr);
709 } else if (proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN) {
710 struct udp_header *uh = dp_packet_l4(packet);
711
712 if (uh->udp_csum) {
713 uh->udp_csum = recalc_csum128(uh->udp_csum, addr, new_addr);
714 if (!uh->udp_csum) {
715 uh->udp_csum = htons(0xffff);
716 }
717 }
718 } else if (proto == IPPROTO_ICMPV6 &&
719 l4_size >= sizeof(struct icmp6_header)) {
720 struct icmp6_header *icmp = dp_packet_l4(packet);
721
722 icmp->icmp6_cksum = recalc_csum128(icmp->icmp6_cksum, addr, new_addr);
723 }
724 }
725
726 static void
727 packet_set_ipv6_addr(struct dp_packet *packet, uint8_t proto,
728 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4],
729 bool recalculate_csum)
730 {
731 if (recalculate_csum) {
732 packet_update_csum128(packet, proto, addr, new_addr);
733 }
734 memcpy(addr, new_addr, sizeof(ovs_be32[4]));
735 }
736
737 static void
738 packet_set_ipv6_flow_label(ovs_16aligned_be32 *flow_label, ovs_be32 flow_key)
739 {
740 ovs_be32 old_label = get_16aligned_be32(flow_label);
741 ovs_be32 new_label = (old_label & htonl(~IPV6_LABEL_MASK)) | flow_key;
742 put_16aligned_be32(flow_label, new_label);
743 }
744
745 static void
746 packet_set_ipv6_tc(ovs_16aligned_be32 *flow_label, uint8_t tc)
747 {
748 ovs_be32 old_label = get_16aligned_be32(flow_label);
749 ovs_be32 new_label = (old_label & htonl(0xF00FFFFF)) | htonl(tc << 20);
750 put_16aligned_be32(flow_label, new_label);
751 }
752
753 /* Modifies the IPv4 header fields of 'packet' to be consistent with 'src',
754 * 'dst', 'tos', and 'ttl'. Updates 'packet''s L4 checksums as appropriate.
755 * 'packet' must contain a valid IPv4 packet with correctly populated l[347]
756 * markers. */
757 void
758 packet_set_ipv4(struct dp_packet *packet, ovs_be32 src, ovs_be32 dst,
759 uint8_t tos, uint8_t ttl)
760 {
761 struct ip_header *nh = dp_packet_l3(packet);
762
763 if (get_16aligned_be32(&nh->ip_src) != src) {
764 packet_set_ipv4_addr(packet, &nh->ip_src, src);
765 }
766
767 if (get_16aligned_be32(&nh->ip_dst) != dst) {
768 packet_set_ipv4_addr(packet, &nh->ip_dst, dst);
769 }
770
771 if (nh->ip_tos != tos) {
772 uint8_t *field = &nh->ip_tos;
773
774 nh->ip_csum = recalc_csum16(nh->ip_csum, htons((uint16_t) *field),
775 htons((uint16_t) tos));
776 *field = tos;
777 }
778
779 if (nh->ip_ttl != ttl) {
780 uint8_t *field = &nh->ip_ttl;
781
782 nh->ip_csum = recalc_csum16(nh->ip_csum, htons(*field << 8),
783 htons(ttl << 8));
784 *field = ttl;
785 }
786 }
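
/* Usage sketch (illustrative only; 'packet' is assumed to be a parsed IPv4
 * packet with its l3/l4 offsets already populated):
 *
 *     packet_set_ipv4(packet, htonl(0x0a000001), htonl(0x0a000002), 0, 64);
 *         ...rewrites the source to 10.0.0.1, the destination to 10.0.0.2,
 *         TOS to 0, and TTL to 64, fixing up the IP and L4 checksums...
 */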
787
788 /* Modifies the IPv6 header fields of 'packet' to be consistent with 'src',
789 * 'dst', traffic class, flow label, and hop limit. Updates 'packet''s L4 checksums as
790 * appropriate. 'packet' must contain a valid IPv6 packet with correctly
791 * populated l[34] offsets. */
792 void
793 packet_set_ipv6(struct dp_packet *packet, uint8_t proto, const ovs_be32 src[4],
794 const ovs_be32 dst[4], uint8_t key_tc, ovs_be32 key_fl,
795 uint8_t key_hl)
796 {
797 struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(packet);
798
799 if (memcmp(&nh->ip6_src, src, sizeof(ovs_be32[4]))) {
800 packet_set_ipv6_addr(packet, proto, nh->ip6_src.be32, src, true);
801 }
802
803 if (memcmp(&nh->ip6_dst, dst, sizeof(ovs_be32[4]))) {
804 packet_set_ipv6_addr(packet, proto, nh->ip6_dst.be32, dst,
805 !packet_rh_present(packet));
806 }
807
808 packet_set_ipv6_tc(&nh->ip6_flow, key_tc);
809
810 packet_set_ipv6_flow_label(&nh->ip6_flow, key_fl);
811
812 nh->ip6_hlim = key_hl;
813 }
814
815 static void
816 packet_set_port(ovs_be16 *port, ovs_be16 new_port, ovs_be16 *csum)
817 {
818 if (*port != new_port) {
819 *csum = recalc_csum16(*csum, *port, new_port);
820 *port = new_port;
821 }
822 }
823
824 /* Sets the TCP source and destination port ('src' and 'dst' respectively) of
825 * the TCP header contained in 'packet'. 'packet' must be a valid TCP packet
826 * with its l4 offset properly populated. */
827 void
828 packet_set_tcp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
829 {
830 struct tcp_header *th = dp_packet_l4(packet);
831
832 packet_set_port(&th->tcp_src, src, &th->tcp_csum);
833 packet_set_port(&th->tcp_dst, dst, &th->tcp_csum);
834 }
835
836 /* Sets the UDP source and destination port ('src' and 'dst' respectively) of
837 * the UDP header contained in 'packet'. 'packet' must be a valid UDP packet
838 * with its l4 offset properly populated. */
839 void
840 packet_set_udp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
841 {
842 struct udp_header *uh = dp_packet_l4(packet);
843
844 if (uh->udp_csum) {
845 packet_set_port(&uh->udp_src, src, &uh->udp_csum);
846 packet_set_port(&uh->udp_dst, dst, &uh->udp_csum);
847
848 if (!uh->udp_csum) {
849 uh->udp_csum = htons(0xffff);
850 }
851 } else {
852 uh->udp_src = src;
853 uh->udp_dst = dst;
854 }
855 }
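
/* Usage sketch (illustrative only; 'packet' is assumed to be a valid UDP
 * packet with its l4 offset populated):
 *
 *     packet_set_udp_port(packet, htons(12345), htons(53));
 *         ...source port becomes 12345 and destination port 53, with the UDP
 *         checksum updated only if one was present...
 */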
856
857 /* Sets the SCTP source and destination port ('src' and 'dst' respectively) of
858 * the SCTP header contained in 'packet'. 'packet' must be a valid SCTP packet
859 * with its l4 offset properly populated. */
860 void
861 packet_set_sctp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
862 {
863 struct sctp_header *sh = dp_packet_l4(packet);
864 ovs_be32 old_csum, old_correct_csum, new_csum;
865 uint16_t tp_len = dp_packet_l4_size(packet);
866
867 old_csum = get_16aligned_be32(&sh->sctp_csum);
868 put_16aligned_be32(&sh->sctp_csum, 0);
869 old_correct_csum = crc32c((void *)sh, tp_len);
870
871 sh->sctp_src = src;
872 sh->sctp_dst = dst;
873
874 new_csum = crc32c((void *)sh, tp_len);
875 put_16aligned_be32(&sh->sctp_csum, old_csum ^ old_correct_csum ^ new_csum);
876 }
877
878 void
879 packet_set_nd(struct dp_packet *packet, const ovs_be32 target[4],
880 const uint8_t sll[ETH_ADDR_LEN],
881 const uint8_t tll[ETH_ADDR_LEN]) {
882 struct ovs_nd_msg *ns;
883 struct ovs_nd_opt *nd_opt;
884 int bytes_remain = dp_packet_l4_size(packet);
885
886 if (OVS_UNLIKELY(bytes_remain < sizeof(*ns))) {
887 return;
888 }
889
890 ns = dp_packet_l4(packet);
891 nd_opt = &ns->options[0];
892 bytes_remain -= sizeof(*ns);
893
894 if (memcmp(&ns->target, target, sizeof(ovs_be32[4]))) {
895 packet_set_ipv6_addr(packet, IPPROTO_ICMPV6,
896 ns->target.be32,
897 target, true);
898 }
899
900 while (bytes_remain >= ND_OPT_LEN && nd_opt->nd_opt_len != 0) {
901 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
902 && nd_opt->nd_opt_len == 1) {
903 if (memcmp(nd_opt->nd_opt_data, sll, ETH_ADDR_LEN)) {
904 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
905
906 *csum = recalc_csum48(*csum, nd_opt->nd_opt_data, sll);
907 memcpy(nd_opt->nd_opt_data, sll, ETH_ADDR_LEN);
908 }
909
910 /* A packet can only contain one SLL or TLL option */
911 break;
912 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
913 && nd_opt->nd_opt_len == 1) {
914 if (memcmp(nd_opt->nd_opt_data, tll, ETH_ADDR_LEN)) {
915 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
916
917 *csum = recalc_csum48(*csum, nd_opt->nd_opt_data, tll);
918 memcpy(nd_opt->nd_opt_data, tll, ETH_ADDR_LEN);
919 }
920
921 /* A packet can only contain one SLL or TLL option */
922 break;
923 }
924
925 bytes_remain -= nd_opt->nd_opt_len * ND_OPT_LEN;
926 nd_opt += nd_opt->nd_opt_len;
927 }
928 }
929
930 const char *
931 packet_tcp_flag_to_string(uint32_t flag)
932 {
933 switch (flag) {
934 case TCP_FIN:
935 return "fin";
936 case TCP_SYN:
937 return "syn";
938 case TCP_RST:
939 return "rst";
940 case TCP_PSH:
941 return "psh";
942 case TCP_ACK:
943 return "ack";
944 case TCP_URG:
945 return "urg";
946 case TCP_ECE:
947 return "ece";
948 case TCP_CWR:
949 return "cwr";
950 case TCP_NS:
951 return "ns";
952 case 0x200:
953 return "[200]";
954 case 0x400:
955 return "[400]";
956 case 0x800:
957 return "[800]";
958 default:
959 return NULL;
960 }
961 }
962
963 /* Appends a string representation of the TCP flags value 'tcp_flags'
964 * (e.g. from struct flow.tcp_flags or obtained via TCP_FLAGS) to 's', in the
965 * format used by tcpdump. */
966 void
967 packet_format_tcp_flags(struct ds *s, uint16_t tcp_flags)
968 {
969 if (!tcp_flags) {
970 ds_put_cstr(s, "none");
971 return;
972 }
973
974 if (tcp_flags & TCP_SYN) {
975 ds_put_char(s, 'S');
976 }
977 if (tcp_flags & TCP_FIN) {
978 ds_put_char(s, 'F');
979 }
980 if (tcp_flags & TCP_PSH) {
981 ds_put_char(s, 'P');
982 }
983 if (tcp_flags & TCP_RST) {
984 ds_put_char(s, 'R');
985 }
986 if (tcp_flags & TCP_URG) {
987 ds_put_char(s, 'U');
988 }
989 if (tcp_flags & TCP_ACK) {
990 ds_put_char(s, '.');
991 }
992 if (tcp_flags & TCP_ECE) {
993 ds_put_cstr(s, "E");
994 }
995 if (tcp_flags & TCP_CWR) {
996 ds_put_cstr(s, "C");
997 }
998 if (tcp_flags & TCP_NS) {
999 ds_put_cstr(s, "N");
1000 }
1001 if (tcp_flags & 0x200) {
1002 ds_put_cstr(s, "[200]");
1003 }
1004 if (tcp_flags & 0x400) {
1005 ds_put_cstr(s, "[400]");
1006 }
1007 if (tcp_flags & 0x800) {
1008 ds_put_cstr(s, "[800]");
1009 }
1010 }
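
/* Usage sketch (illustrative only; DS_EMPTY_INITIALIZER, ds_cstr(), and
 * ds_destroy() are assumed from dynamic-string.h):
 *
 *     struct ds s = DS_EMPTY_INITIALIZER;
 *
 *     packet_format_tcp_flags(&s, TCP_SYN | TCP_ACK);
 *         ...ds_cstr(&s) is now "S."...
 *     ds_destroy(&s);
 */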
1011
1012 #define ARP_PACKET_SIZE (2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + \
1013 ARP_ETH_HEADER_LEN)
1014
1015 /* Clears 'b' and replaces its contents by an ARP frame with the specified
1016 * 'arp_op', 'arp_sha', 'arp_tha', 'arp_spa', and 'arp_tpa'. The outer
1017 * Ethernet frame is initialized with Ethernet source 'arp_sha' and destination
1018 * 'arp_tha', except that destination ff:ff:ff:ff:ff:ff is used instead if
1019 * 'broadcast' is true. */
1020 void
1021 compose_arp(struct dp_packet *b, uint16_t arp_op,
1022 const uint8_t arp_sha[ETH_ADDR_LEN],
1023 const uint8_t arp_tha[ETH_ADDR_LEN], bool broadcast,
1024 ovs_be32 arp_spa, ovs_be32 arp_tpa)
1025 {
1026 struct eth_header *eth;
1027 struct arp_eth_header *arp;
1028
1029 dp_packet_clear(b);
1030 dp_packet_prealloc_tailroom(b, ARP_PACKET_SIZE);
1031 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
1032
1033 eth = dp_packet_put_uninit(b, sizeof *eth);
1034 memcpy(eth->eth_dst, broadcast ? eth_addr_broadcast : arp_tha,
1035 ETH_ADDR_LEN);
1036 memcpy(eth->eth_src, arp_sha, ETH_ADDR_LEN);
1037 eth->eth_type = htons(ETH_TYPE_ARP);
1038
1039 arp = dp_packet_put_uninit(b, sizeof *arp);
1040 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
1041 arp->ar_pro = htons(ARP_PRO_IP);
1042 arp->ar_hln = sizeof arp->ar_sha;
1043 arp->ar_pln = sizeof arp->ar_spa;
1044 arp->ar_op = htons(arp_op);
1045 memcpy(arp->ar_sha, arp_sha, ETH_ADDR_LEN);
1046 memcpy(arp->ar_tha, arp_tha, ETH_ADDR_LEN);
1047
1048 put_16aligned_be32(&arp->ar_spa, arp_spa);
1049 put_16aligned_be32(&arp->ar_tpa, arp_tpa);
1050
1051 dp_packet_reset_offsets(b);
1052 dp_packet_set_l3(b, arp);
1053 }
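
/* Usage sketch (illustrative only; 'b' is a hypothetical dp_packet already
 * initialized by the caller and 'src_mac' a made-up address array):
 *
 *     static const uint8_t zero_tha[ETH_ADDR_LEN] = { 0 };
 *
 *     compose_arp(&b, ARP_OP_REQUEST, src_mac, zero_tha, true,
 *                 htonl(0x0a000001), htonl(0x0a000002));
 *         ...a broadcast "who has 10.0.0.2? tell 10.0.0.1" request...
 */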
1054
1055 uint32_t
1056 packet_csum_pseudoheader(const struct ip_header *ip)
1057 {
1058 uint32_t partial = 0;
1059
1060 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_src));
1061 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_dst));
1062 partial = csum_add16(partial, htons(ip->ip_proto));
1063 partial = csum_add16(partial, htons(ntohs(ip->ip_tot_len) -
1064 IP_IHL(ip->ip_ihl_ver) * 4));
1065
1066 return partial;
1067 }
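
/* Usage sketch (illustrative only; csum_continue() and csum_finish() are
 * assumed from csum.h, and 'ip', 'udp', and 'udp_len' are hypothetical):
 *
 *     uint32_t partial = packet_csum_pseudoheader(ip);
 *
 *     partial = csum_continue(partial, udp, udp_len);
 *     ovs_be16 csum = csum_finish(partial);
 *         ...'csum' is the UDP checksum over the pseudoheader and datagram...
 */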