lib/packets.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "packets.h"
19 #include <arpa/inet.h>
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <netinet/ip6.h>
23 #include <netinet/icmp6.h>
24 #include <stdlib.h>
25 #include "byte-order.h"
26 #include "csum.h"
27 #include "crc32c.h"
28 #include "flow.h"
29 #include "hmap.h"
30 #include "dynamic-string.h"
31 #include "ovs-thread.h"
32 #include "odp-util.h"
33 #include "dp-packet.h"
34 #include "unaligned.h"
35
36 const struct in6_addr in6addr_exact = IN6ADDR_EXACT_INIT;
37 const struct in6_addr in6addr_all_hosts = IN6ADDR_ALL_HOSTS_INIT;
38
39 /* Parses 's' as a 16-digit hexadecimal number representing a datapath ID. On
40 * success stores the dpid into '*dpidp' and returns true, on failure stores 0
41 * into '*dpidp' and returns false.
42 *
43 * Rejects an all-zeros dpid as invalid. */
44 bool
45 dpid_from_string(const char *s, uint64_t *dpidp)
46 {
47 *dpidp = (strlen(s) == 16 && strspn(s, "0123456789abcdefABCDEF") == 16
48 ? strtoull(s, NULL, 16)
49 : 0);
50 return *dpidp != 0;
51 }
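
/* A minimal usage sketch for dpid_from_string(); the example_* helper below
 * is hypothetical and not part of the OVS API. */
static bool
example_parse_dpid_arg(const char *arg, uint64_t *dpid)
{
    if (!dpid_from_string(arg, dpid)) {
        /* 'arg' was not exactly 16 hex digits, or it named the all-zeros
         * dpid, which is rejected as invalid. */
        return false;
    }
    return true;
}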
52
53 /* Returns true if 'ea' is a reserved address that a bridge must never
54 * forward, false otherwise.
55 *
56 * If you change this function's behavior, please update corresponding
57 * documentation in vswitch.xml at the same time. */
58 bool
59 eth_addr_is_reserved(const struct eth_addr ea)
60 {
61 struct eth_addr_node {
62 struct hmap_node hmap_node;
63 const uint64_t ea64;
64 };
65
66 static struct eth_addr_node nodes[] = {
67 /* STP, IEEE pause frames, and other reserved protocols. */
68 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000000ULL },
69 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000001ULL },
70 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000002ULL },
71 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000003ULL },
72 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000004ULL },
73 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000005ULL },
74 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000006ULL },
75 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000007ULL },
76 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000008ULL },
77 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000009ULL },
78 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000aULL },
79 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000bULL },
80 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000cULL },
81 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000dULL },
82 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000eULL },
83 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000fULL },
84
85 /* Extreme protocols. */
86 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000000ULL }, /* EDP. */
87 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000004ULL }, /* EAPS. */
88 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000006ULL }, /* EAPS. */
89
90 /* Cisco protocols. */
91 { HMAP_NODE_NULL_INITIALIZER, 0x01000c000000ULL }, /* ISL. */
92 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccccULL }, /* PAgP, UDLD, CDP,
93 * DTP, VTP. */
94 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccccccdULL }, /* PVST+. */
95 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccdcdcdULL }, /* STP Uplink Fast,
96 * FlexLink. */
97
98 /* Cisco CFM. */
99 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc0ULL },
100 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc1ULL },
101 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc2ULL },
102 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc3ULL },
103 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc4ULL },
104 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc5ULL },
105 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc6ULL },
106 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc7ULL },
107 };
108
109 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
110 struct eth_addr_node *node;
111 static struct hmap addrs;
112 uint64_t ea64;
113
114 if (ovsthread_once_start(&once)) {
115 hmap_init(&addrs);
116 for (node = nodes; node < &nodes[ARRAY_SIZE(nodes)]; node++) {
117 hmap_insert(&addrs, &node->hmap_node, hash_uint64(node->ea64));
118 }
119 ovsthread_once_done(&once);
120 }
121
122 ea64 = eth_addr_to_uint64(ea);
123 HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_uint64(ea64), &addrs) {
124 if (node->ea64 == ea64) {
125 return true;
126 }
127 }
128 return false;
129 }
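
/* A usage sketch combining eth_addr_from_string() (declared in packets.h,
 * defined below) with eth_addr_is_reserved(); the example_* helper is
 * hypothetical. */
static bool
example_may_forward_to(const char *mac_string)
{
    struct eth_addr ea;

    return eth_addr_from_string(mac_string, &ea) && !eth_addr_is_reserved(ea);
}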
130
131 bool
132 eth_addr_from_string(const char *s, struct eth_addr *ea)
133 {
134 if (ovs_scan(s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(*ea))) {
135 return true;
136 } else {
137 *ea = eth_addr_zero;
138 return false;
139 }
140 }
141
142 /* Fills 'b' with a Reverse ARP packet with Ethernet source address 'eth_src'.
143 * This function is used by Open vSwitch to compose packets in cases where
144 * context is important but content doesn't (or shouldn't) matter.
145 *
146 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
147 * desired. */
148 void
149 compose_rarp(struct dp_packet *b, const struct eth_addr eth_src)
150 {
151 struct eth_header *eth;
152 struct arp_eth_header *arp;
153
154 dp_packet_clear(b);
155 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN
156 + ARP_ETH_HEADER_LEN);
157 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
158 eth = dp_packet_put_uninit(b, sizeof *eth);
159 eth->eth_dst = eth_addr_broadcast;
160 eth->eth_src = eth_src;
161 eth->eth_type = htons(ETH_TYPE_RARP);
162
163 arp = dp_packet_put_uninit(b, sizeof *arp);
164 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
165 arp->ar_pro = htons(ARP_PRO_IP);
166 arp->ar_hln = sizeof arp->ar_sha;
167 arp->ar_pln = sizeof arp->ar_spa;
168 arp->ar_op = htons(ARP_OP_RARP);
169 arp->ar_sha = eth_src;
170 put_16aligned_be32(&arp->ar_spa, htonl(0));
171 arp->ar_tha = eth_src;
172 put_16aligned_be32(&arp->ar_tpa, htonl(0));
173
174 dp_packet_reset_offsets(b);
175 dp_packet_set_l3(b, arp);
176 }
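
/* A sketch of composing a RARP announcement into a freshly allocated packet,
 * assuming dp_packet_new() from dp-packet.h; the example_* helper is
 * hypothetical.  The caller owns the returned packet. */
static struct dp_packet *
example_make_rarp(const struct eth_addr eth_src)
{
    struct dp_packet *b = dp_packet_new(0);

    compose_rarp(b, eth_src);   /* Allocates the tailroom it needs. */
    return b;
}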
177
178 /* Inserts a VLAN header according to the given TCI. The packet passed in
179 * must be an Ethernet packet. Ignores the CFI bit of 'tci', using 0 instead.
180 *
181 * Also adjusts the layer offsets accordingly. */
182 void
183 eth_push_vlan(struct dp_packet *packet, ovs_be16 tpid, ovs_be16 tci)
184 {
185 struct vlan_eth_header *veh;
186
187 /* Insert new 802.1Q header. */
188 veh = dp_packet_resize_l2(packet, VLAN_HEADER_LEN);
189 memmove(veh, (char *)veh + VLAN_HEADER_LEN, 2 * ETH_ADDR_LEN);
190 veh->veth_type = tpid;
191 veh->veth_tci = tci & htons(~VLAN_CFI);
192 }
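
/* A sketch of tagging a frame with VLAN 10, priority 0, using the 802.1Q
 * TPID; VLAN_VID_MASK and ETH_TYPE_VLAN come from packets.h.  The example_*
 * helper is hypothetical. */
static void
example_tag_vlan_10(struct dp_packet *packet)
{
    ovs_be16 tci = htons(10 & VLAN_VID_MASK);

    eth_push_vlan(packet, htons(ETH_TYPE_VLAN), tci);
}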
193
194 /* Removes outermost VLAN header (if any is present) from 'packet'.
195 *
196 * 'packet->l2_5' should initially point to 'packet''s outer-most VLAN header
197 * or may be NULL if there are no VLAN headers. */
198 void
199 eth_pop_vlan(struct dp_packet *packet)
200 {
201 struct vlan_eth_header *veh = dp_packet_l2(packet);
202
203 if (veh && dp_packet_size(packet) >= sizeof *veh
204 && eth_type_vlan(veh->veth_type)) {
205
206 memmove((char *)veh + VLAN_HEADER_LEN, veh, 2 * ETH_ADDR_LEN);
207 dp_packet_resize_l2(packet, -VLAN_HEADER_LEN);
208 }
209 }
210
211 /* Set ethertype of the packet. */
212 static void
213 set_ethertype(struct dp_packet *packet, ovs_be16 eth_type)
214 {
215 struct eth_header *eh = dp_packet_l2(packet);
216
217 if (!eh) {
218 return;
219 }
220
221 if (eth_type_vlan(eh->eth_type)) {
222 ovs_be16 *p;
223 char *l2_5 = dp_packet_l2_5(packet);
224
225 p = ALIGNED_CAST(ovs_be16 *,
226 (l2_5 ? l2_5 : (char *)dp_packet_l3(packet)) - 2);
227 *p = eth_type;
228 } else {
229 eh->eth_type = eth_type;
230 }
231 }
232
233 static bool is_mpls(struct dp_packet *packet)
234 {
235 return packet->l2_5_ofs != UINT16_MAX;
236 }
237
238 /* Set time to live (TTL) of an MPLS label stack entry (LSE). */
239 void
240 set_mpls_lse_ttl(ovs_be32 *lse, uint8_t ttl)
241 {
242 *lse &= ~htonl(MPLS_TTL_MASK);
243 *lse |= htonl((ttl << MPLS_TTL_SHIFT) & MPLS_TTL_MASK);
244 }
245
246 /* Set traffic class (TC) of an MPLS label stack entry (LSE). */
247 void
248 set_mpls_lse_tc(ovs_be32 *lse, uint8_t tc)
249 {
250 *lse &= ~htonl(MPLS_TC_MASK);
251 *lse |= htonl((tc << MPLS_TC_SHIFT) & MPLS_TC_MASK);
252 }
253
254 /* Set label of an MPLS label stack entry (LSE). */
255 void
256 set_mpls_lse_label(ovs_be32 *lse, ovs_be32 label)
257 {
258 *lse &= ~htonl(MPLS_LABEL_MASK);
259 *lse |= htonl((ntohl(label) << MPLS_LABEL_SHIFT) & MPLS_LABEL_MASK);
260 }
261
262 /* Set bottom of stack (BoS) bit of an MPLS label stack entry (LSE). */
263 void
264 set_mpls_lse_bos(ovs_be32 *lse, uint8_t bos)
265 {
266 *lse &= ~htonl(MPLS_BOS_MASK);
267 *lse |= htonl((bos << MPLS_BOS_SHIFT) & MPLS_BOS_MASK);
268 }
269
270 /* Compose an MPLS label stack entry (LSE) from its components:
271 * label, traffic class (TC), time to live (TTL) and
272 * bottom of stack (BoS) bit. */
273 ovs_be32
274 set_mpls_lse_values(uint8_t ttl, uint8_t tc, uint8_t bos, ovs_be32 label)
275 {
276 ovs_be32 lse = htonl(0);
277 set_mpls_lse_ttl(&lse, ttl);
278 set_mpls_lse_tc(&lse, tc);
279 set_mpls_lse_bos(&lse, bos);
280 set_mpls_lse_label(&lse, label);
281 return lse;
282 }
283
284 /* Sets the label stack entry of the outermost MPLS header, if any. */
285 void
286 set_mpls_lse(struct dp_packet *packet, ovs_be32 mpls_lse)
287 {
288 /* Packet type should be MPLS to set label stack entry. */
289 if (is_mpls(packet)) {
290 struct mpls_hdr *mh = dp_packet_l2_5(packet);
291
292 /* Update mpls label stack entry. */
293 put_16aligned_be32(&mh->mpls_lse, mpls_lse);
294 }
295 }
296
297 /* Push MPLS label stack entry 'lse' onto 'packet' as the outermost MPLS
298 * header. If 'packet' does not already have any MPLS labels, then its
299 * Ethertype is changed to 'ethtype' (which must be an MPLS Ethertype). */
300 void
301 push_mpls(struct dp_packet *packet, ovs_be16 ethtype, ovs_be32 lse)
302 {
303 char *header;
304 size_t len;
305
306 if (!eth_type_mpls(ethtype)) {
307 return;
308 }
309
310 if (!is_mpls(packet)) {
311 /* Set MPLS label stack offset. */
312 packet->l2_5_ofs = packet->l3_ofs;
313 }
314
315 set_ethertype(packet, ethtype);
316
317 /* Push new MPLS shim header onto packet. */
318 len = packet->l2_5_ofs;
319 header = dp_packet_resize_l2_5(packet, MPLS_HLEN);
320 memmove(header, header + MPLS_HLEN, len);
321 memcpy(header + len, &lse, sizeof lse);
322 }
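
/* A sketch that composes a bottom-of-stack label 100 with TTL 64 via
 * set_mpls_lse_values() and pushes it with push_mpls(); the example_* helper
 * is hypothetical. */
static void
example_push_label_100(struct dp_packet *packet)
{
    ovs_be32 lse = set_mpls_lse_values(64, 0, 1, htonl(100));

    push_mpls(packet, htons(ETH_TYPE_MPLS), lse);
}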
323
324 /* If 'packet' is an MPLS packet, removes its outermost MPLS label stack entry.
325 * If the label that was removed was the only MPLS label, changes 'packet''s
326 * Ethertype to 'ethtype' (which ordinarily should not be an MPLS
327 * Ethertype). */
328 void
329 pop_mpls(struct dp_packet *packet, ovs_be16 ethtype)
330 {
331 if (is_mpls(packet)) {
332 struct mpls_hdr *mh = dp_packet_l2_5(packet);
333 size_t len = packet->l2_5_ofs;
334
335 set_ethertype(packet, ethtype);
336 if (get_16aligned_be32(&mh->mpls_lse) & htonl(MPLS_BOS_MASK)) {
337 dp_packet_set_l2_5(packet, NULL);
338 }
339 /* Shift the l2 header forward. */
340 memmove((char *) dp_packet_data(packet) + MPLS_HLEN, dp_packet_data(packet), len);
341 dp_packet_resize_l2_5(packet, -MPLS_HLEN);
342 }
343 }
344
345 /* Converts hex digits in 'hex' to an Ethernet packet in '*packetp'. The
346 * caller must free '*packetp'. On success, returns NULL. On failure, returns
347 * an error message and stores NULL in '*packetp'.
348 *
349 * Aligns the L3 header of '*packetp' on a 32-bit boundary. */
350 const char *
351 eth_from_hex(const char *hex, struct dp_packet **packetp)
352 {
353 struct dp_packet *packet;
354
355 /* Use 2 bytes of headroom to 32-bit align the L3 header. */
356 packet = *packetp = dp_packet_new_with_headroom(strlen(hex) / 2, 2);
357
358 if (dp_packet_put_hex(packet, hex, NULL)[0] != '\0') {
359 dp_packet_delete(packet);
360 *packetp = NULL;
361 return "Trailing garbage in packet data";
362 }
363
364 if (dp_packet_size(packet) < ETH_HEADER_LEN) {
365 dp_packet_delete(packet);
366 *packetp = NULL;
367 return "Packet data too short for Ethernet";
368 }
369
370 return NULL;
371 }
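
/* A sketch of round-tripping a hex dump through eth_from_hex(); the
 * example_* helper is hypothetical. */
static bool
example_hex_is_valid_packet(const char *hex)
{
    struct dp_packet *packet;
    const char *error = eth_from_hex(hex, &packet);

    if (error) {
        return false;
    }
    dp_packet_delete(packet);
    return true;
}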
372
373 void
374 eth_format_masked(const struct eth_addr eth,
375 const struct eth_addr *mask, struct ds *s)
376 {
377 ds_put_format(s, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth));
378 if (mask && !eth_mask_is_exact(*mask)) {
379 ds_put_format(s, "/"ETH_ADDR_FMT, ETH_ADDR_ARGS(*mask));
380 }
381 }
382
383 /* Given the IP netmask 'netmask', returns the number of bits of the IP address
384 * that it specifies, that is, the number of 1-bits in 'netmask'.
385 *
386 * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will
387 * still be in the valid range but isn't otherwise meaningful. */
388 int
389 ip_count_cidr_bits(ovs_be32 netmask)
390 {
391 return 32 - ctz32(ntohl(netmask));
392 }
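
/* A small sketch: a 255.255.255.0 netmask maps to prefix length 24.  The
 * example_* helper is hypothetical. */
static int
example_slash24_bits(void)
{
    ovs_be32 netmask = htonl(0xffffff00);   /* 255.255.255.0 */

    return ip_count_cidr_bits(netmask);     /* 24. */
}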
393
394 void
395 ip_format_masked(ovs_be32 ip, ovs_be32 mask, struct ds *s)
396 {
397 ds_put_format(s, IP_FMT, IP_ARGS(ip));
398 if (mask != OVS_BE32_MAX) {
399 if (ip_is_cidr(mask)) {
400 ds_put_format(s, "/%d", ip_count_cidr_bits(mask));
401 } else {
402 ds_put_format(s, "/"IP_FMT, IP_ARGS(mask));
403 }
404 }
405 }
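
/* A sketch of formatting "10.0.0.0/8" into 's', relying on the CIDR check in
 * ip_format_masked() above; the example_* helper is hypothetical. */
static void
example_format_subnet(struct ds *s)
{
    ip_format_masked(htonl(0x0a000000), htonl(0xff000000), s);
    /* 's' now ends with "10.0.0.0/8". */
}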
406
407
408 /* Stores the string representation of the IPv6 address 'addr' into the
409 * character array 'addr_str', which must be at least INET6_ADDRSTRLEN
410 * bytes long. */
411 void
412 format_ipv6_addr(char *addr_str, const struct in6_addr *addr)
413 {
414 inet_ntop(AF_INET6, addr, addr_str, INET6_ADDRSTRLEN);
415 }
416
417 void
418 print_ipv6_addr(struct ds *string, const struct in6_addr *addr)
419 {
420 char *dst;
421
422 ds_reserve(string, string->length + INET6_ADDRSTRLEN);
423
424 dst = string->string + string->length;
425 format_ipv6_addr(dst, addr);
426 string->length += strlen(dst);
427 }
428
429 void
430 print_ipv6_mapped(struct ds *s, const struct in6_addr *addr)
431 {
432 if (IN6_IS_ADDR_V4MAPPED(addr)) {
433 ds_put_format(s, IP_FMT, addr->s6_addr[12], addr->s6_addr[13],
434 addr->s6_addr[14], addr->s6_addr[15]);
435 } else {
436 print_ipv6_addr(s, addr);
437 }
438 }
439
440 void
441 print_ipv6_masked(struct ds *s, const struct in6_addr *addr,
442 const struct in6_addr *mask)
443 {
444 print_ipv6_addr(s, addr);
445 if (mask && !ipv6_mask_is_exact(mask)) {
446 if (ipv6_is_cidr(mask)) {
447 int cidr_bits = ipv6_count_cidr_bits(mask);
448 ds_put_format(s, "/%d", cidr_bits);
449 } else {
450 ds_put_char(s, '/');
451 print_ipv6_addr(s, mask);
452 }
453 }
454 }
455
456 struct in6_addr ipv6_addr_bitand(const struct in6_addr *a,
457 const struct in6_addr *b)
458 {
459 int i;
460 struct in6_addr dst;
461
462 #ifdef s6_addr32
463 for (i=0; i<4; i++) {
464 dst.s6_addr32[i] = a->s6_addr32[i] & b->s6_addr32[i];
465 }
466 #else
467 for (i=0; i<16; i++) {
468 dst.s6_addr[i] = a->s6_addr[i] & b->s6_addr[i];
469 }
470 #endif
471
472 return dst;
473 }
474
475 /* Returns an in6_addr consisting of 'mask' high-order 1-bits and
476 * 128 - 'mask' low-order 0-bits. */
477 struct in6_addr
478 ipv6_create_mask(int mask)
479 {
480 struct in6_addr netmask;
481 uint8_t *netmaskp = &netmask.s6_addr[0];
482
483 memset(&netmask, 0, sizeof netmask);
484 while (mask > 8) {
485 *netmaskp = 0xff;
486 netmaskp++;
487 mask -= 8;
488 }
489
490 if (mask) {
491 *netmaskp = 0xff << (8 - mask);
492 }
493
494 return netmask;
495 }
496
497 /* Given the IPv6 netmask 'netmask', returns the number of bits of the IPv6
498 * address that it specifies, that is, the number of 1-bits in 'netmask'.
499 * 'netmask' is expected to be a CIDR netmask (see ipv6_is_cidr()).
500 *
501 * If 'netmask' is not a CIDR netmask (see ipv6_is_cidr()), the return value
502 * will still be in the valid range but isn't otherwise meaningful. */
503 int
504 ipv6_count_cidr_bits(const struct in6_addr *netmask)
505 {
506 int i;
507 int count = 0;
508 const uint8_t *netmaskp = &netmask->s6_addr[0];
509
510 for (i=0; i<16; i++) {
511 if (netmaskp[i] == 0xff) {
512 count += 8;
513 } else {
514 uint8_t nm;
515
516 for(nm = netmaskp[i]; nm; nm <<= 1) {
517 count++;
518 }
519 break;
520 }
521
522 }
523
524 return count;
525 }
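
/* A sketch showing that ipv6_create_mask() and ipv6_count_cidr_bits() are
 * inverses for CIDR prefixes; the example_* helper is hypothetical. */
static int
example_ipv6_prefix_roundtrip(void)
{
    struct in6_addr mask = ipv6_create_mask(64);

    return ipv6_count_cidr_bits(&mask);     /* 64. */
}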
526
527 /* Returns true if 'netmask' is a CIDR netmask, that is, if it consists of N
528 * high-order 1-bits and 128-N low-order 0-bits. */
529 bool
530 ipv6_is_cidr(const struct in6_addr *netmask)
531 {
532 const uint8_t *netmaskp = &netmask->s6_addr[0];
533 int i;
534
535 for (i=0; i<16; i++) {
536 if (netmaskp[i] != 0xff) {
537 uint8_t x = ~netmaskp[i];
538 if (x & (x + 1)) {
539 return false;
540 }
541 while (++i < 16) {
542 if (netmaskp[i]) {
543 return false;
544 }
545 }
546 }
547 }
548
549 return true;
550 }
551
552 /* Populates 'b' with an Ethernet II packet headed with the given 'eth_dst',
553 * 'eth_src' and 'eth_type' parameters. A payload of 'size' bytes is allocated
554 * in 'b' and returned. This payload may be populated with appropriate
555 * information by the caller. Sets 'b''s 'frame' pointer and 'l3' offset to
556 * the Ethernet header and payload respectively. Aligns b->l3 on a 32-bit
557 * boundary.
558 *
559 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
560 * desired. */
561 void *
562 eth_compose(struct dp_packet *b, const struct eth_addr eth_dst,
563 const struct eth_addr eth_src, uint16_t eth_type,
564 size_t size)
565 {
566 void *data;
567 struct eth_header *eth;
568
569 dp_packet_clear(b);
570
571 /* The magic 2 here ensures that the L3 header (when it is added later)
572 * will be 32-bit aligned. */
573 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + size);
574 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
575 eth = dp_packet_put_uninit(b, ETH_HEADER_LEN);
576 data = dp_packet_put_uninit(b, size);
577
578 eth->eth_dst = eth_dst;
579 eth->eth_src = eth_src;
580 eth->eth_type = htons(eth_type);
581
582 dp_packet_reset_offsets(b);
583 dp_packet_set_l3(b, data);
584
585 return data;
586 }
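
/* A sketch of composing an Ethernet frame with a zeroed payload from textual
 * MAC addresses, assuming dp_packet_new() from dp-packet.h; the example_*
 * helper is hypothetical.  Returns NULL if either address fails to parse,
 * otherwise the caller owns the packet. */
static struct dp_packet *
example_compose_zeroed_frame(const char *dst_s, const char *src_s, size_t size)
{
    struct eth_addr dst, src;
    struct dp_packet *b;

    if (!eth_addr_from_string(dst_s, &dst)
        || !eth_addr_from_string(src_s, &src)) {
        return NULL;
    }

    b = dp_packet_new(0);
    memset(eth_compose(b, dst, src, ETH_TYPE_IP, size), 0, size);
    return b;
}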
587
588 static void
589 packet_set_ipv4_addr(struct dp_packet *packet,
590 ovs_16aligned_be32 *addr, ovs_be32 new_addr)
591 {
592 struct ip_header *nh = dp_packet_l3(packet);
593 ovs_be32 old_addr = get_16aligned_be32(addr);
594 size_t l4_size = dp_packet_l4_size(packet);
595
596 if (nh->ip_proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
597 struct tcp_header *th = dp_packet_l4(packet);
598
599 th->tcp_csum = recalc_csum32(th->tcp_csum, old_addr, new_addr);
600 } else if (nh->ip_proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN ) {
601 struct udp_header *uh = dp_packet_l4(packet);
602
603 if (uh->udp_csum) {
604 uh->udp_csum = recalc_csum32(uh->udp_csum, old_addr, new_addr);
605 if (!uh->udp_csum) {
606 uh->udp_csum = htons(0xffff);
607 }
608 }
609 }
610 nh->ip_csum = recalc_csum32(nh->ip_csum, old_addr, new_addr);
611 put_16aligned_be32(addr, new_addr);
612 }
613
614 /* Returns true if the packet contains at least one routing header where
615 * segments_left > 0.
616 *
617 * This function assumes that L3 and L4 offsets are set in the packet. */
618 static bool
619 packet_rh_present(struct dp_packet *packet)
620 {
621 const struct ovs_16aligned_ip6_hdr *nh;
622 int nexthdr;
623 size_t len;
624 size_t remaining;
625 uint8_t *data = dp_packet_l3(packet);
626
627 remaining = packet->l4_ofs - packet->l3_ofs;
628
629 if (remaining < sizeof *nh) {
630 return false;
631 }
632 nh = ALIGNED_CAST(struct ovs_16aligned_ip6_hdr *, data);
633 data += sizeof *nh;
634 remaining -= sizeof *nh;
635 nexthdr = nh->ip6_nxt;
636
637 while (1) {
638 if ((nexthdr != IPPROTO_HOPOPTS)
639 && (nexthdr != IPPROTO_ROUTING)
640 && (nexthdr != IPPROTO_DSTOPTS)
641 && (nexthdr != IPPROTO_AH)
642 && (nexthdr != IPPROTO_FRAGMENT)) {
643 /* It's either a terminal header (e.g., TCP, UDP) or one we
644 * don't understand. In either case, we're done with the
645 * packet, so use it to fill in 'nw_proto'. */
646 break;
647 }
648
649 /* We only verify that at least 8 bytes of the next header are
650 * available, but many of these headers are longer. Ensure that
651 * accesses within the extension header are within those first 8
652 * bytes. All extension headers are required to be at least 8
653 * bytes. */
654 if (remaining < 8) {
655 return false;
656 }
657
658 if (nexthdr == IPPROTO_AH) {
659 /* A standard AH definition isn't available, but the fields
660 * we care about are in the same location as the generic
661 * option header--only the header length is calculated
662 * differently. */
663 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
664
665 nexthdr = ext_hdr->ip6e_nxt;
666 len = (ext_hdr->ip6e_len + 2) * 4;
667 } else if (nexthdr == IPPROTO_FRAGMENT) {
668 const struct ovs_16aligned_ip6_frag *frag_hdr
669 = ALIGNED_CAST(struct ovs_16aligned_ip6_frag *, data);
670
671 nexthdr = frag_hdr->ip6f_nxt;
672 len = sizeof *frag_hdr;
673 } else if (nexthdr == IPPROTO_ROUTING) {
674 const struct ip6_rthdr *rh = (struct ip6_rthdr *)data;
675
676 if (rh->ip6r_segleft > 0) {
677 return true;
678 }
679
680 nexthdr = rh->ip6r_nxt;
681 len = (rh->ip6r_len + 1) * 8;
682 } else {
683 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
684
685 nexthdr = ext_hdr->ip6e_nxt;
686 len = (ext_hdr->ip6e_len + 1) * 8;
687 }
688
689 if (remaining < len) {
690 return false;
691 }
692 remaining -= len;
693 data += len;
694 }
695
696 return false;
697 }
698
699 static void
700 packet_update_csum128(struct dp_packet *packet, uint8_t proto,
701 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4])
702 {
703 size_t l4_size = dp_packet_l4_size(packet);
704
705 if (proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
706 struct tcp_header *th = dp_packet_l4(packet);
707
708 th->tcp_csum = recalc_csum128(th->tcp_csum, addr, new_addr);
709 } else if (proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN) {
710 struct udp_header *uh = dp_packet_l4(packet);
711
712 if (uh->udp_csum) {
713 uh->udp_csum = recalc_csum128(uh->udp_csum, addr, new_addr);
714 if (!uh->udp_csum) {
715 uh->udp_csum = htons(0xffff);
716 }
717 }
718 } else if (proto == IPPROTO_ICMPV6 &&
719 l4_size >= sizeof(struct icmp6_header)) {
720 struct icmp6_header *icmp = dp_packet_l4(packet);
721
722 icmp->icmp6_cksum = recalc_csum128(icmp->icmp6_cksum, addr, new_addr);
723 }
724 }
725
726 static void
727 packet_set_ipv6_addr(struct dp_packet *packet, uint8_t proto,
728 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4],
729 bool recalculate_csum)
730 {
731 if (recalculate_csum) {
732 packet_update_csum128(packet, proto, addr, new_addr);
733 }
734 memcpy(addr, new_addr, sizeof(ovs_be32[4]));
735 }
736
737 static void
738 packet_set_ipv6_flow_label(ovs_16aligned_be32 *flow_label, ovs_be32 flow_key)
739 {
740 ovs_be32 old_label = get_16aligned_be32(flow_label);
741 ovs_be32 new_label = (old_label & htonl(~IPV6_LABEL_MASK)) | flow_key;
742 put_16aligned_be32(flow_label, new_label);
743 }
744
745 static void
746 packet_set_ipv6_tc(ovs_16aligned_be32 *flow_label, uint8_t tc)
747 {
748 ovs_be32 old_label = get_16aligned_be32(flow_label);
749 ovs_be32 new_label = (old_label & htonl(0xF00FFFFF)) | htonl(tc << 20);
750 put_16aligned_be32(flow_label, new_label);
751 }
752
753 /* Modifies the IPv4 header fields of 'packet' to be consistent with 'src',
754 * 'dst', 'tos', and 'ttl'. Updates 'packet''s L4 checksums as appropriate.
755 * 'packet' must contain a valid IPv4 packet with correctly populated l[347]
756 * markers. */
757 void
758 packet_set_ipv4(struct dp_packet *packet, ovs_be32 src, ovs_be32 dst,
759 uint8_t tos, uint8_t ttl)
760 {
761 struct ip_header *nh = dp_packet_l3(packet);
762
763 if (get_16aligned_be32(&nh->ip_src) != src) {
764 packet_set_ipv4_addr(packet, &nh->ip_src, src);
765 }
766
767 if (get_16aligned_be32(&nh->ip_dst) != dst) {
768 packet_set_ipv4_addr(packet, &nh->ip_dst, dst);
769 }
770
771 if (nh->ip_tos != tos) {
772 uint8_t *field = &nh->ip_tos;
773
774 nh->ip_csum = recalc_csum16(nh->ip_csum, htons((uint16_t) *field),
775 htons((uint16_t) tos));
776 *field = tos;
777 }
778
779 if (nh->ip_ttl != ttl) {
780 uint8_t *field = &nh->ip_ttl;
781
782 nh->ip_csum = recalc_csum16(nh->ip_csum, htons(*field << 8),
783 htons(ttl << 8));
784 *field = ttl;
785 }
786 }
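
/* A sketch of a DNAT-style rewrite that changes only the IPv4 destination,
 * keeping the existing source, TOS, and TTL; the example_* helper is
 * hypothetical.  'packet' needs the same valid offsets that packet_set_ipv4()
 * requires. */
static void
example_rewrite_ipv4_dst(struct dp_packet *packet, ovs_be32 new_dst)
{
    const struct ip_header *nh = dp_packet_l3(packet);

    packet_set_ipv4(packet, get_16aligned_be32(&nh->ip_src), new_dst,
                    nh->ip_tos, nh->ip_ttl);
}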
787
788 /* Modifies the IPv6 header fields of 'packet' to be consistent with 'src',
789 * 'dst', traffic class 'key_tc', flow label 'key_fl', and hop limit 'key_hl'.
790 * Updates 'packet''s L4 checksums as appropriate. 'packet' must contain a
791 * valid IPv6 packet with correctly populated l[34] offsets. */
792 void
793 packet_set_ipv6(struct dp_packet *packet, uint8_t proto, const ovs_be32 src[4],
794 const ovs_be32 dst[4], uint8_t key_tc, ovs_be32 key_fl,
795 uint8_t key_hl)
796 {
797 struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(packet);
798
799 if (memcmp(&nh->ip6_src, src, sizeof(ovs_be32[4]))) {
800 packet_set_ipv6_addr(packet, proto, nh->ip6_src.be32, src, true);
801 }
802
803 if (memcmp(&nh->ip6_dst, dst, sizeof(ovs_be32[4]))) {
804 packet_set_ipv6_addr(packet, proto, nh->ip6_dst.be32, dst,
805 !packet_rh_present(packet));
806 }
807
808 packet_set_ipv6_tc(&nh->ip6_flow, key_tc);
809
810 packet_set_ipv6_flow_label(&nh->ip6_flow, key_fl);
811
812 nh->ip6_hlim = key_hl;
813 }
814
815 static void
816 packet_set_port(ovs_be16 *port, ovs_be16 new_port, ovs_be16 *csum)
817 {
818 if (*port != new_port) {
819 *csum = recalc_csum16(*csum, *port, new_port);
820 *port = new_port;
821 }
822 }
823
824 /* Sets the TCP source and destination port ('src' and 'dst' respectively) of
825 * the TCP header contained in 'packet'. 'packet' must be a valid TCP packet
826 * with its l4 offset properly populated. */
827 void
828 packet_set_tcp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
829 {
830 struct tcp_header *th = dp_packet_l4(packet);
831
832 packet_set_port(&th->tcp_src, src, &th->tcp_csum);
833 packet_set_port(&th->tcp_dst, dst, &th->tcp_csum);
834 }
835
836 /* Sets the UDP source and destination port ('src' and 'dst' respectively) of
837 * the UDP header contained in 'packet'. 'packet' must be a valid UDP packet
838 * with its l4 offset properly populated. */
839 void
840 packet_set_udp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
841 {
842 struct udp_header *uh = dp_packet_l4(packet);
843
844 if (uh->udp_csum) {
845 packet_set_port(&uh->udp_src, src, &uh->udp_csum);
846 packet_set_port(&uh->udp_dst, dst, &uh->udp_csum);
847
848 if (!uh->udp_csum) {
849 uh->udp_csum = htons(0xffff);
850 }
851 } else {
852 uh->udp_src = src;
853 uh->udp_dst = dst;
854 }
855 }
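
/* A sketch of redirecting the L4 destination port with the TCP and UDP
 * helpers above; the example_* helper is hypothetical.  'packet' must carry
 * the indicated protocol with a valid l4 offset. */
static void
example_redirect_dst_port(struct dp_packet *packet, bool is_tcp,
                          uint16_t new_port)
{
    if (is_tcp) {
        struct tcp_header *th = dp_packet_l4(packet);

        packet_set_tcp_port(packet, th->tcp_src, htons(new_port));
    } else {
        struct udp_header *uh = dp_packet_l4(packet);

        packet_set_udp_port(packet, uh->udp_src, htons(new_port));
    }
}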
856
857 /* Sets the SCTP source and destination port ('src' and 'dst' respectively) of
858 * the SCTP header contained in 'packet'. 'packet' must be a valid SCTP packet
859 * with its l4 offset properly populated. */
860 void
861 packet_set_sctp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
862 {
863 struct sctp_header *sh = dp_packet_l4(packet);
864 ovs_be32 old_csum, old_correct_csum, new_csum;
865 uint16_t tp_len = dp_packet_l4_size(packet);
866
867 old_csum = get_16aligned_be32(&sh->sctp_csum);
868 put_16aligned_be32(&sh->sctp_csum, 0);
869 old_correct_csum = crc32c((void *)sh, tp_len);
870
871 sh->sctp_src = src;
872 sh->sctp_dst = dst;
873
874 new_csum = crc32c((void *)sh, tp_len);
875 put_16aligned_be32(&sh->sctp_csum, old_csum ^ old_correct_csum ^ new_csum);
876 }
877
878 void
879 packet_set_nd(struct dp_packet *packet, const ovs_be32 target[4],
880 const struct eth_addr sll, const struct eth_addr tll) {
881 struct ovs_nd_msg *ns;
882 struct ovs_nd_opt *nd_opt;
883 int bytes_remain = dp_packet_l4_size(packet);
884
885 if (OVS_UNLIKELY(bytes_remain < sizeof(*ns))) {
886 return;
887 }
888
889 ns = dp_packet_l4(packet);
890 nd_opt = &ns->options[0];
891 bytes_remain -= sizeof(*ns);
892
893 if (memcmp(&ns->target, target, sizeof(ovs_be32[4]))) {
894 packet_set_ipv6_addr(packet, IPPROTO_ICMPV6,
895 ns->target.be32,
896 target, true);
897 }
898
899 while (bytes_remain >= ND_OPT_LEN && nd_opt->nd_opt_len != 0) {
900 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
901 && nd_opt->nd_opt_len == 1) {
902 if (!eth_addr_equals(nd_opt->nd_opt_mac, sll)) {
903 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
904
905 *csum = recalc_csum48(*csum, nd_opt->nd_opt_mac, sll);
906 nd_opt->nd_opt_mac = sll;
907 }
908
909 /* A packet can only contain one SLL or TLL option */
910 break;
911 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
912 && nd_opt->nd_opt_len == 1) {
913 if (!eth_addr_equals(nd_opt->nd_opt_mac, tll)) {
914 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
915
916 *csum = recalc_csum48(*csum, nd_opt->nd_opt_mac, tll);
917 nd_opt->nd_opt_mac = tll;
918 }
919
920 /* A packet can only contain one SLL or TLL option */
921 break;
922 }
923
924 bytes_remain -= nd_opt->nd_opt_len * ND_OPT_LEN;
925 nd_opt += nd_opt->nd_opt_len;
926 }
927 }
928
929 const char *
930 packet_tcp_flag_to_string(uint32_t flag)
931 {
932 switch (flag) {
933 case TCP_FIN:
934 return "fin";
935 case TCP_SYN:
936 return "syn";
937 case TCP_RST:
938 return "rst";
939 case TCP_PSH:
940 return "psh";
941 case TCP_ACK:
942 return "ack";
943 case TCP_URG:
944 return "urg";
945 case TCP_ECE:
946 return "ece";
947 case TCP_CWR:
948 return "cwr";
949 case TCP_NS:
950 return "ns";
951 case 0x200:
952 return "[200]";
953 case 0x400:
954 return "[400]";
955 case 0x800:
956 return "[800]";
957 default:
958 return NULL;
959 }
960 }
961
962 /* Appends a string representation of the TCP flags value 'tcp_flags'
963 * (e.g. from struct flow.tcp_flags or obtained via TCP_FLAGS) to 's', in the
964 * format used by tcpdump. */
965 void
966 packet_format_tcp_flags(struct ds *s, uint16_t tcp_flags)
967 {
968 if (!tcp_flags) {
969 ds_put_cstr(s, "none");
970 return;
971 }
972
973 if (tcp_flags & TCP_SYN) {
974 ds_put_char(s, 'S');
975 }
976 if (tcp_flags & TCP_FIN) {
977 ds_put_char(s, 'F');
978 }
979 if (tcp_flags & TCP_PSH) {
980 ds_put_char(s, 'P');
981 }
982 if (tcp_flags & TCP_RST) {
983 ds_put_char(s, 'R');
984 }
985 if (tcp_flags & TCP_URG) {
986 ds_put_char(s, 'U');
987 }
988 if (tcp_flags & TCP_ACK) {
989 ds_put_char(s, '.');
990 }
991 if (tcp_flags & TCP_ECE) {
992 ds_put_cstr(s, "E");
993 }
994 if (tcp_flags & TCP_CWR) {
995 ds_put_cstr(s, "C");
996 }
997 if (tcp_flags & TCP_NS) {
998 ds_put_cstr(s, "N");
999 }
1000 if (tcp_flags & 0x200) {
1001 ds_put_cstr(s, "[200]");
1002 }
1003 if (tcp_flags & 0x400) {
1004 ds_put_cstr(s, "[400]");
1005 }
1006 if (tcp_flags & 0x800) {
1007 ds_put_cstr(s, "[800]");
1008 }
1009 }
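
/* A small sketch: TCP_SYN | TCP_ACK formats as "S." in the tcpdump-style
 * output above; the example_* helper is hypothetical. */
static void
example_format_synack(struct ds *s)
{
    packet_format_tcp_flags(s, TCP_SYN | TCP_ACK);
}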
1010
1011 #define ARP_PACKET_SIZE (2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + \
1012 ARP_ETH_HEADER_LEN)
1013
1014 /* Clears 'b' and replaces its contents by an ARP frame with the specified
1015 * 'arp_op', 'arp_sha', 'arp_tha', 'arp_spa', and 'arp_tpa'. The outer
1016 * Ethernet frame is initialized with Ethernet source 'arp_sha' and destination
1017 * 'arp_tha', except that destination ff:ff:ff:ff:ff:ff is used instead if
1018 * 'broadcast' is true. */
1019 void
1020 compose_arp(struct dp_packet *b, uint16_t arp_op,
1021 const struct eth_addr arp_sha, const struct eth_addr arp_tha,
1022 bool broadcast, ovs_be32 arp_spa, ovs_be32 arp_tpa)
1023 {
1024 struct eth_header *eth;
1025 struct arp_eth_header *arp;
1026
1027 dp_packet_clear(b);
1028 dp_packet_prealloc_tailroom(b, ARP_PACKET_SIZE);
1029 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
1030
1031 eth = dp_packet_put_uninit(b, sizeof *eth);
1032 eth->eth_dst = broadcast ? eth_addr_broadcast : arp_tha;
1033 eth->eth_src = arp_sha;
1034 eth->eth_type = htons(ETH_TYPE_ARP);
1035
1036 arp = dp_packet_put_uninit(b, sizeof *arp);
1037 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
1038 arp->ar_pro = htons(ARP_PRO_IP);
1039 arp->ar_hln = sizeof arp->ar_sha;
1040 arp->ar_pln = sizeof arp->ar_spa;
1041 arp->ar_op = htons(arp_op);
1042 arp->ar_sha = arp_sha;
1043 arp->ar_tha = arp_tha;
1044
1045 put_16aligned_be32(&arp->ar_spa, arp_spa);
1046 put_16aligned_be32(&arp->ar_tpa, arp_tpa);
1047
1048 dp_packet_reset_offsets(b);
1049 dp_packet_set_l3(b, arp);
1050 }
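
/* A sketch of composing a broadcast ARP request ("who has 'tpa'? tell
 * 'spa'"), assuming dp_packet_new() from dp-packet.h; the example_* helper
 * is hypothetical.  The caller owns the returned packet. */
static struct dp_packet *
example_make_arp_request(const struct eth_addr sha, ovs_be32 spa,
                         ovs_be32 tpa)
{
    struct dp_packet *b = dp_packet_new(0);

    compose_arp(b, ARP_OP_REQUEST, sha, eth_addr_zero, true, spa, tpa);
    return b;
}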
1051
1052 uint32_t
1053 packet_csum_pseudoheader(const struct ip_header *ip)
1054 {
1055 uint32_t partial = 0;
1056
1057 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_src));
1058 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_dst));
1059 partial = csum_add16(partial, htons(ip->ip_proto));
1060 partial = csum_add16(partial, htons(ntohs(ip->ip_tot_len) -
1061 IP_IHL(ip->ip_ihl_ver) * 4));
1062
1063 return partial;
1064 }
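
/* A sketch of computing a fresh TCP/UDP checksum over the pseudoheader plus
 * the L4 data, assuming csum_continue() and csum_finish() from csum.h
 * (already included above); the example_* helper is hypothetical.  For UDP,
 * a computed value of zero would still need to be sent as 0xffff. */
static ovs_be16
example_l4_checksum(const struct ip_header *ip, const void *l4, size_t l4_len)
{
    uint32_t partial = packet_csum_pseudoheader(ip);

    partial = csum_continue(partial, l4, l4_len);
    return csum_finish(partial);
}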
1065