2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include <arpa/inet.h>
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <netinet/ip6.h>
23 #include <netinet/icmp6.h>
25 #include "byte-order.h"
30 #include "dynamic-string.h"
31 #include "ovs-thread.h"
33 #include "dp-packet.h"
34 #include "unaligned.h"
/* Commonly used IPv6 address constants: an all-ones mask that matches an
 * address exactly, and the all-hosts multicast group address.  (The exact
 * values come from the IN6ADDR_*_INIT macros declared in the header.) */
const struct in6_addr in6addr_exact = IN6ADDR_EXACT_INIT;
const struct in6_addr in6addr_all_hosts = IN6ADDR_ALL_HOSTS_INIT;
/* Parses 's' as a 16-digit hexadecimal number representing a datapath ID.  On
 * success stores the dpid into '*dpidp' and returns true, on failure stores 0
 * into '*dpidp' and returns false.
 *
 * Rejects an all-zeros dpid as invalid. */
bool
dpid_from_string(const char *s, uint64_t *dpidp)
{
    /* A valid dpid is exactly 16 hex digits; anything else maps to 0. */
    bool well_formed = strlen(s) == 16
                       && strspn(s, "0123456789abcdefABCDEF") == 16;

    *dpidp = well_formed ? strtoull(s, NULL, 16) : 0;
    return *dpidp != 0;
}
53 /* Returns true if 'ea' is a reserved address, that a bridge must never
54 * forward, false otherwise.
56 * If you change this function's behavior, please update corresponding
57 * documentation in vswitch.xml at the same time. */
59 eth_addr_is_reserved(const struct eth_addr ea
)
61 struct eth_addr_node
{
62 struct hmap_node hmap_node
;
66 static struct eth_addr_node nodes
[] = {
67 /* STP, IEEE pause frames, and other reserved protocols. */
68 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000000ULL
},
69 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000001ULL
},
70 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000002ULL
},
71 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000003ULL
},
72 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000004ULL
},
73 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000005ULL
},
74 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000006ULL
},
75 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000007ULL
},
76 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000008ULL
},
77 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000009ULL
},
78 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000aULL
},
79 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000bULL
},
80 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000cULL
},
81 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000dULL
},
82 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000eULL
},
83 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000fULL
},
85 /* Extreme protocols. */
86 { HMAP_NODE_NULL_INITIALIZER
, 0x00e02b000000ULL
}, /* EDP. */
87 { HMAP_NODE_NULL_INITIALIZER
, 0x00e02b000004ULL
}, /* EAPS. */
88 { HMAP_NODE_NULL_INITIALIZER
, 0x00e02b000006ULL
}, /* EAPS. */
90 /* Cisco protocols. */
91 { HMAP_NODE_NULL_INITIALIZER
, 0x01000c000000ULL
}, /* ISL. */
92 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccccULL
}, /* PAgP, UDLD, CDP,
94 { HMAP_NODE_NULL_INITIALIZER
, 0x01000ccccccdULL
}, /* PVST+. */
95 { HMAP_NODE_NULL_INITIALIZER
, 0x01000ccdcdcdULL
}, /* STP Uplink Fast,
99 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc0ULL
},
100 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc1ULL
},
101 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc2ULL
},
102 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc3ULL
},
103 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc4ULL
},
104 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc5ULL
},
105 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc6ULL
},
106 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc7ULL
},
109 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
110 struct eth_addr_node
*node
;
111 static struct hmap addrs
;
114 if (ovsthread_once_start(&once
)) {
116 for (node
= nodes
; node
< &nodes
[ARRAY_SIZE(nodes
)]; node
++) {
117 hmap_insert(&addrs
, &node
->hmap_node
, hash_uint64(node
->ea64
));
119 ovsthread_once_done(&once
);
122 ea64
= eth_addr_to_uint64(ea
);
123 HMAP_FOR_EACH_IN_BUCKET (node
, hmap_node
, hash_uint64(ea64
), &addrs
) {
124 if (node
->ea64
== ea64
) {
132 eth_addr_from_string(const char *s
, struct eth_addr
*ea
)
134 if (ovs_scan(s
, ETH_ADDR_SCAN_FMT
, ETH_ADDR_SCAN_ARGS(*ea
))) {
142 /* Fills 'b' with a Reverse ARP packet with Ethernet source address 'eth_src'.
143 * This function is used by Open vSwitch to compose packets in cases where
144 * context is important but content doesn't (or shouldn't) matter.
146 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
149 compose_rarp(struct dp_packet
*b
, const struct eth_addr eth_src
)
151 struct eth_header
*eth
;
152 struct arp_eth_header
*arp
;
155 dp_packet_prealloc_tailroom(b
, 2 + ETH_HEADER_LEN
+ VLAN_HEADER_LEN
156 + ARP_ETH_HEADER_LEN
);
157 dp_packet_reserve(b
, 2 + VLAN_HEADER_LEN
);
158 eth
= dp_packet_put_uninit(b
, sizeof *eth
);
159 eth
->eth_dst
= eth_addr_broadcast
;
160 eth
->eth_src
= eth_src
;
161 eth
->eth_type
= htons(ETH_TYPE_RARP
);
163 arp
= dp_packet_put_uninit(b
, sizeof *arp
);
164 arp
->ar_hrd
= htons(ARP_HRD_ETHERNET
);
165 arp
->ar_pro
= htons(ARP_PRO_IP
);
166 arp
->ar_hln
= sizeof arp
->ar_sha
;
167 arp
->ar_pln
= sizeof arp
->ar_spa
;
168 arp
->ar_op
= htons(ARP_OP_RARP
);
169 arp
->ar_sha
= eth_src
;
170 put_16aligned_be32(&arp
->ar_spa
, htonl(0));
171 arp
->ar_tha
= eth_src
;
172 put_16aligned_be32(&arp
->ar_tpa
, htonl(0));
174 dp_packet_reset_offsets(b
);
175 dp_packet_set_l3(b
, arp
);
178 /* Insert VLAN header according to given TCI. Packet passed must be Ethernet
179 * packet. Ignores the CFI bit of 'tci' using 0 instead.
181 * Also adjusts the layer offsets accordingly. */
183 eth_push_vlan(struct dp_packet
*packet
, ovs_be16 tpid
, ovs_be16 tci
)
185 struct vlan_eth_header
*veh
;
187 /* Insert new 802.1Q header. */
188 veh
= dp_packet_resize_l2(packet
, VLAN_HEADER_LEN
);
189 memmove(veh
, (char *)veh
+ VLAN_HEADER_LEN
, 2 * ETH_ADDR_LEN
);
190 veh
->veth_type
= tpid
;
191 veh
->veth_tci
= tci
& htons(~VLAN_CFI
);
194 /* Removes outermost VLAN header (if any is present) from 'packet'.
196 * 'packet->l2_5' should initially point to 'packet''s outer-most VLAN header
197 * or may be NULL if there are no VLAN headers. */
199 eth_pop_vlan(struct dp_packet
*packet
)
201 struct vlan_eth_header
*veh
= dp_packet_l2(packet
);
203 if (veh
&& dp_packet_size(packet
) >= sizeof *veh
204 && eth_type_vlan(veh
->veth_type
)) {
206 memmove((char *)veh
+ VLAN_HEADER_LEN
, veh
, 2 * ETH_ADDR_LEN
);
207 dp_packet_resize_l2(packet
, -VLAN_HEADER_LEN
);
211 /* Set ethertype of the packet. */
213 set_ethertype(struct dp_packet
*packet
, ovs_be16 eth_type
)
215 struct eth_header
*eh
= dp_packet_l2(packet
);
221 if (eth_type_vlan(eh
->eth_type
)) {
223 char *l2_5
= dp_packet_l2_5(packet
);
225 p
= ALIGNED_CAST(ovs_be16
*,
226 (l2_5
? l2_5
: (char *)dp_packet_l3(packet
)) - 2);
229 eh
->eth_type
= eth_type
;
233 static bool is_mpls(struct dp_packet
*packet
)
235 return packet
->l2_5_ofs
!= UINT16_MAX
;
238 /* Set time to live (TTL) of an MPLS label stack entry (LSE). */
240 set_mpls_lse_ttl(ovs_be32
*lse
, uint8_t ttl
)
242 *lse
&= ~htonl(MPLS_TTL_MASK
);
243 *lse
|= htonl((ttl
<< MPLS_TTL_SHIFT
) & MPLS_TTL_MASK
);
246 /* Set traffic class (TC) of an MPLS label stack entry (LSE). */
248 set_mpls_lse_tc(ovs_be32
*lse
, uint8_t tc
)
250 *lse
&= ~htonl(MPLS_TC_MASK
);
251 *lse
|= htonl((tc
<< MPLS_TC_SHIFT
) & MPLS_TC_MASK
);
254 /* Set label of an MPLS label stack entry (LSE). */
256 set_mpls_lse_label(ovs_be32
*lse
, ovs_be32 label
)
258 *lse
&= ~htonl(MPLS_LABEL_MASK
);
259 *lse
|= htonl((ntohl(label
) << MPLS_LABEL_SHIFT
) & MPLS_LABEL_MASK
);
262 /* Set bottom of stack (BoS) bit of an MPLS label stack entry (LSE). */
264 set_mpls_lse_bos(ovs_be32
*lse
, uint8_t bos
)
266 *lse
&= ~htonl(MPLS_BOS_MASK
);
267 *lse
|= htonl((bos
<< MPLS_BOS_SHIFT
) & MPLS_BOS_MASK
);
270 /* Compose an MPLS label stack entry (LSE) from its components:
271 * label, traffic class (TC), time to live (TTL) and
272 * bottom of stack (BoS) bit. */
274 set_mpls_lse_values(uint8_t ttl
, uint8_t tc
, uint8_t bos
, ovs_be32 label
)
276 ovs_be32 lse
= htonl(0);
277 set_mpls_lse_ttl(&lse
, ttl
);
278 set_mpls_lse_tc(&lse
, tc
);
279 set_mpls_lse_bos(&lse
, bos
);
280 set_mpls_lse_label(&lse
, label
);
284 /* Set MPLS label stack entry to outermost MPLS header.*/
286 set_mpls_lse(struct dp_packet
*packet
, ovs_be32 mpls_lse
)
288 /* Packet type should be MPLS to set label stack entry. */
289 if (is_mpls(packet
)) {
290 struct mpls_hdr
*mh
= dp_packet_l2_5(packet
);
292 /* Update mpls label stack entry. */
293 put_16aligned_be32(&mh
->mpls_lse
, mpls_lse
);
297 /* Push MPLS label stack entry 'lse' onto 'packet' as the outermost MPLS
298 * header. If 'packet' does not already have any MPLS labels, then its
299 * Ethertype is changed to 'ethtype' (which must be an MPLS Ethertype). */
301 push_mpls(struct dp_packet
*packet
, ovs_be16 ethtype
, ovs_be32 lse
)
306 if (!eth_type_mpls(ethtype
)) {
310 if (!is_mpls(packet
)) {
311 /* Set MPLS label stack offset. */
312 packet
->l2_5_ofs
= packet
->l3_ofs
;
315 set_ethertype(packet
, ethtype
);
317 /* Push new MPLS shim header onto packet. */
318 len
= packet
->l2_5_ofs
;
319 header
= dp_packet_resize_l2_5(packet
, MPLS_HLEN
);
320 memmove(header
, header
+ MPLS_HLEN
, len
);
321 memcpy(header
+ len
, &lse
, sizeof lse
);
324 /* If 'packet' is an MPLS packet, removes its outermost MPLS label stack entry.
325 * If the label that was removed was the only MPLS label, changes 'packet''s
326 * Ethertype to 'ethtype' (which ordinarily should not be an MPLS
329 pop_mpls(struct dp_packet
*packet
, ovs_be16 ethtype
)
331 if (is_mpls(packet
)) {
332 struct mpls_hdr
*mh
= dp_packet_l2_5(packet
);
333 size_t len
= packet
->l2_5_ofs
;
335 set_ethertype(packet
, ethtype
);
336 if (get_16aligned_be32(&mh
->mpls_lse
) & htonl(MPLS_BOS_MASK
)) {
337 dp_packet_set_l2_5(packet
, NULL
);
339 /* Shift the l2 header forward. */
340 memmove((char*)dp_packet_data(packet
) + MPLS_HLEN
, dp_packet_data(packet
), len
);
341 dp_packet_resize_l2_5(packet
, -MPLS_HLEN
);
345 /* Converts hex digits in 'hex' to an Ethernet packet in '*packetp'. The
346 * caller must free '*packetp'. On success, returns NULL. On failure, returns
347 * an error message and stores NULL in '*packetp'.
349 * Aligns the L3 header of '*packetp' on a 32-bit boundary. */
351 eth_from_hex(const char *hex
, struct dp_packet
**packetp
)
353 struct dp_packet
*packet
;
355 /* Use 2 bytes of headroom to 32-bit align the L3 header. */
356 packet
= *packetp
= dp_packet_new_with_headroom(strlen(hex
) / 2, 2);
358 if (dp_packet_put_hex(packet
, hex
, NULL
)[0] != '\0') {
359 dp_packet_delete(packet
);
361 return "Trailing garbage in packet data";
364 if (dp_packet_size(packet
) < ETH_HEADER_LEN
) {
365 dp_packet_delete(packet
);
367 return "Packet data too short for Ethernet";
374 eth_format_masked(const struct eth_addr eth
,
375 const struct eth_addr
*mask
, struct ds
*s
)
377 ds_put_format(s
, ETH_ADDR_FMT
, ETH_ADDR_ARGS(eth
));
378 if (mask
&& !eth_mask_is_exact(*mask
)) {
379 ds_put_format(s
, "/"ETH_ADDR_FMT
, ETH_ADDR_ARGS(*mask
));
383 /* Given the IP netmask 'netmask', returns the number of bits of the IP address
384 * that it specifies, that is, the number of 1-bits in 'netmask'.
386 * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will
387 * still be in the valid range but isn't otherwise meaningful. */
389 ip_count_cidr_bits(ovs_be32 netmask
)
391 return 32 - ctz32(ntohl(netmask
));
395 ip_format_masked(ovs_be32 ip
, ovs_be32 mask
, struct ds
*s
)
397 ds_put_format(s
, IP_FMT
, IP_ARGS(ip
));
398 if (mask
!= OVS_BE32_MAX
) {
399 if (ip_is_cidr(mask
)) {
400 ds_put_format(s
, "/%d", ip_count_cidr_bits(mask
));
402 ds_put_format(s
, "/"IP_FMT
, IP_ARGS(mask
));
/* Stores the string representation of the IPv6 address 'addr' into the
 * character array 'addr_str', which must be at least INET6_ADDRSTRLEN
 * bytes long. */
void
format_ipv6_addr(char *addr_str, const struct in6_addr *addr)
{
    inet_ntop(AF_INET6, addr, addr_str, INET6_ADDRSTRLEN);
}
418 print_ipv6_addr(struct ds
*string
, const struct in6_addr
*addr
)
422 ds_reserve(string
, string
->length
+ INET6_ADDRSTRLEN
);
424 dst
= string
->string
+ string
->length
;
425 format_ipv6_addr(dst
, addr
);
426 string
->length
+= strlen(dst
);
430 print_ipv6_mapped(struct ds
*s
, const struct in6_addr
*addr
)
432 if (IN6_IS_ADDR_V4MAPPED(addr
)) {
433 ds_put_format(s
, IP_FMT
, addr
->s6_addr
[12], addr
->s6_addr
[13],
434 addr
->s6_addr
[14], addr
->s6_addr
[15]);
436 print_ipv6_addr(s
, addr
);
/* Appends 'addr' to 's'; if 'mask' is nonnull and not an exact match,
 * also appends "/N" for a CIDR mask or "/" plus the full mask otherwise. */
void
print_ipv6_masked(struct ds *s, const struct in6_addr *addr,
                  const struct in6_addr *mask)
{
    print_ipv6_addr(s, addr);
    if (!mask || ipv6_mask_is_exact(mask)) {
        return;
    }

    if (ipv6_is_cidr(mask)) {
        int cidr_bits = ipv6_count_cidr_bits(mask);
        ds_put_format(s, "/%d", cidr_bits);
    } else {
        ds_put_char(s, '/');
        print_ipv6_addr(s, mask);
    }
}
/* Returns the bitwise AND of IPv6 addresses 'a' and 'b'. */
struct in6_addr ipv6_addr_bitand(const struct in6_addr *a,
                                 const struct in6_addr *b)
{
    int i;
    struct in6_addr dst;

#ifdef s6_addr32
    /* Word-at-a-time when the platform exposes 32-bit access. */
    for (i = 0; i < 4; i++) {
        dst.s6_addr32[i] = a->s6_addr32[i] & b->s6_addr32[i];
    }
#else
    /* Portable byte-at-a-time fallback. */
    for (i = 0; i < 16; i++) {
        dst.s6_addr[i] = a->s6_addr[i] & b->s6_addr[i];
    }
#endif

    return dst;
}
/* Returns an in6_addr consisting of 'mask' high-order 1-bits and 128-N
 * low-order 0-bits. */
struct in6_addr ipv6_create_mask(int mask)
{
    struct in6_addr netmask;
    uint8_t *netmaskp = &netmask.s6_addr[0];

    memset(&netmask, 0, sizeof netmask);

    /* Fill whole 0xff bytes first... */
    while (mask > 8) {
        *netmaskp = 0xff;
        netmaskp++;
        mask -= 8;
    }

    /* ...then the final, possibly partial, byte. */
    if (mask) {
        *netmaskp = 0xff << (8 - mask);
    }

    return netmask;
}
/* Given the IPv6 netmask 'netmask', returns the number of bits of the IPv6
 * address that it specifies, that is, the number of 1-bits in 'netmask'.
 * 'netmask' must be a CIDR netmask (see ipv6_is_cidr()).
 *
 * If 'netmask' is not a CIDR netmask (see ipv6_is_cidr()), the return value
 * will still be in the valid range but isn't otherwise meaningful. */
int
ipv6_count_cidr_bits(const struct in6_addr *netmask)
{
    const uint8_t *netmaskp = &netmask->s6_addr[0];
    int count = 0;
    int i;

    for (i = 0; i < 16; i++) {
        if (netmaskp[i] == 0xff) {
            count += 8;
        } else {
            /* Count the leading 1-bits of the first partial byte; the mask
             * is assumed to contain nothing but zeros after it. */
            uint8_t nm;

            for (nm = netmaskp[i]; nm; nm <<= 1) {
                count++;
            }
            break;
        }
    }

    return count;
}
/* Returns true if 'netmask' is a CIDR netmask, that is, if it consists of N
 * high-order 1-bits and 128-N low-order 0-bits. */
bool ipv6_is_cidr(const struct in6_addr *netmask)
{
    const uint8_t *netmaskp = &netmask->s6_addr[0];
    int i;

    for (i = 0; i < 16; i++) {
        if (netmaskp[i] != 0xff) {
            /* The first non-0xff byte must itself be a prefix (its
             * complement must be 2^k - 1)... */
            uint8_t x = ~netmaskp[i];
            if (x & (x + 1)) {
                return false;
            }
            /* ...and every byte after it must be zero. */
            while (++i < 16) {
                if (netmaskp[i] != 0) {
                    return false;
                }
            }
        }
    }

    return true;
}
552 /* Populates 'b' with an Ethernet II packet headed with the given 'eth_dst',
553 * 'eth_src' and 'eth_type' parameters. A payload of 'size' bytes is allocated
554 * in 'b' and returned. This payload may be populated with appropriate
555 * information by the caller. Sets 'b''s 'frame' pointer and 'l3' offset to
556 * the Ethernet header and payload respectively. Aligns b->l3 on a 32-bit
559 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
562 eth_compose(struct dp_packet
*b
, const struct eth_addr eth_dst
,
563 const struct eth_addr eth_src
, uint16_t eth_type
,
567 struct eth_header
*eth
;
571 /* The magic 2 here ensures that the L3 header (when it is added later)
572 * will be 32-bit aligned. */
573 dp_packet_prealloc_tailroom(b
, 2 + ETH_HEADER_LEN
+ VLAN_HEADER_LEN
+ size
);
574 dp_packet_reserve(b
, 2 + VLAN_HEADER_LEN
);
575 eth
= dp_packet_put_uninit(b
, ETH_HEADER_LEN
);
576 data
= dp_packet_put_uninit(b
, size
);
578 eth
->eth_dst
= eth_dst
;
579 eth
->eth_src
= eth_src
;
580 eth
->eth_type
= htons(eth_type
);
582 dp_packet_reset_offsets(b
);
583 dp_packet_set_l3(b
, data
);
589 packet_set_ipv4_addr(struct dp_packet
*packet
,
590 ovs_16aligned_be32
*addr
, ovs_be32 new_addr
)
592 struct ip_header
*nh
= dp_packet_l3(packet
);
593 ovs_be32 old_addr
= get_16aligned_be32(addr
);
594 size_t l4_size
= dp_packet_l4_size(packet
);
596 if (nh
->ip_proto
== IPPROTO_TCP
&& l4_size
>= TCP_HEADER_LEN
) {
597 struct tcp_header
*th
= dp_packet_l4(packet
);
599 th
->tcp_csum
= recalc_csum32(th
->tcp_csum
, old_addr
, new_addr
);
600 } else if (nh
->ip_proto
== IPPROTO_UDP
&& l4_size
>= UDP_HEADER_LEN
) {
601 struct udp_header
*uh
= dp_packet_l4(packet
);
604 uh
->udp_csum
= recalc_csum32(uh
->udp_csum
, old_addr
, new_addr
);
606 uh
->udp_csum
= htons(0xffff);
610 nh
->ip_csum
= recalc_csum32(nh
->ip_csum
, old_addr
, new_addr
);
611 put_16aligned_be32(addr
, new_addr
);
614 /* Returns true, if packet contains at least one routing header where
615 * segements_left > 0.
617 * This function assumes that L3 and L4 offsets are set in the packet. */
619 packet_rh_present(struct dp_packet
*packet
)
621 const struct ovs_16aligned_ip6_hdr
*nh
;
625 uint8_t *data
= dp_packet_l3(packet
);
627 remaining
= packet
->l4_ofs
- packet
->l3_ofs
;
629 if (remaining
< sizeof *nh
) {
632 nh
= ALIGNED_CAST(struct ovs_16aligned_ip6_hdr
*, data
);
634 remaining
-= sizeof *nh
;
635 nexthdr
= nh
->ip6_nxt
;
638 if ((nexthdr
!= IPPROTO_HOPOPTS
)
639 && (nexthdr
!= IPPROTO_ROUTING
)
640 && (nexthdr
!= IPPROTO_DSTOPTS
)
641 && (nexthdr
!= IPPROTO_AH
)
642 && (nexthdr
!= IPPROTO_FRAGMENT
)) {
643 /* It's either a terminal header (e.g., TCP, UDP) or one we
644 * don't understand. In either case, we're done with the
645 * packet, so use it to fill in 'nw_proto'. */
649 /* We only verify that at least 8 bytes of the next header are
650 * available, but many of these headers are longer. Ensure that
651 * accesses within the extension header are within those first 8
652 * bytes. All extension headers are required to be at least 8
658 if (nexthdr
== IPPROTO_AH
) {
659 /* A standard AH definition isn't available, but the fields
660 * we care about are in the same location as the generic
661 * option header--only the header length is calculated
663 const struct ip6_ext
*ext_hdr
= (struct ip6_ext
*)data
;
665 nexthdr
= ext_hdr
->ip6e_nxt
;
666 len
= (ext_hdr
->ip6e_len
+ 2) * 4;
667 } else if (nexthdr
== IPPROTO_FRAGMENT
) {
668 const struct ovs_16aligned_ip6_frag
*frag_hdr
669 = ALIGNED_CAST(struct ovs_16aligned_ip6_frag
*, data
);
671 nexthdr
= frag_hdr
->ip6f_nxt
;
672 len
= sizeof *frag_hdr
;
673 } else if (nexthdr
== IPPROTO_ROUTING
) {
674 const struct ip6_rthdr
*rh
= (struct ip6_rthdr
*)data
;
676 if (rh
->ip6r_segleft
> 0) {
680 nexthdr
= rh
->ip6r_nxt
;
681 len
= (rh
->ip6r_len
+ 1) * 8;
683 const struct ip6_ext
*ext_hdr
= (struct ip6_ext
*)data
;
685 nexthdr
= ext_hdr
->ip6e_nxt
;
686 len
= (ext_hdr
->ip6e_len
+ 1) * 8;
689 if (remaining
< len
) {
700 packet_update_csum128(struct dp_packet
*packet
, uint8_t proto
,
701 ovs_16aligned_be32 addr
[4], const ovs_be32 new_addr
[4])
703 size_t l4_size
= dp_packet_l4_size(packet
);
705 if (proto
== IPPROTO_TCP
&& l4_size
>= TCP_HEADER_LEN
) {
706 struct tcp_header
*th
= dp_packet_l4(packet
);
708 th
->tcp_csum
= recalc_csum128(th
->tcp_csum
, addr
, new_addr
);
709 } else if (proto
== IPPROTO_UDP
&& l4_size
>= UDP_HEADER_LEN
) {
710 struct udp_header
*uh
= dp_packet_l4(packet
);
713 uh
->udp_csum
= recalc_csum128(uh
->udp_csum
, addr
, new_addr
);
715 uh
->udp_csum
= htons(0xffff);
718 } else if (proto
== IPPROTO_ICMPV6
&&
719 l4_size
>= sizeof(struct icmp6_header
)) {
720 struct icmp6_header
*icmp
= dp_packet_l4(packet
);
722 icmp
->icmp6_cksum
= recalc_csum128(icmp
->icmp6_cksum
, addr
, new_addr
);
727 packet_set_ipv6_addr(struct dp_packet
*packet
, uint8_t proto
,
728 ovs_16aligned_be32 addr
[4], const ovs_be32 new_addr
[4],
729 bool recalculate_csum
)
731 if (recalculate_csum
) {
732 packet_update_csum128(packet
, proto
, addr
, new_addr
);
734 memcpy(addr
, new_addr
, sizeof(ovs_be32
[4]));
738 packet_set_ipv6_flow_label(ovs_16aligned_be32
*flow_label
, ovs_be32 flow_key
)
740 ovs_be32 old_label
= get_16aligned_be32(flow_label
);
741 ovs_be32 new_label
= (old_label
& htonl(~IPV6_LABEL_MASK
)) | flow_key
;
742 put_16aligned_be32(flow_label
, new_label
);
746 packet_set_ipv6_tc(ovs_16aligned_be32
*flow_label
, uint8_t tc
)
748 ovs_be32 old_label
= get_16aligned_be32(flow_label
);
749 ovs_be32 new_label
= (old_label
& htonl(0xF00FFFFF)) | htonl(tc
<< 20);
750 put_16aligned_be32(flow_label
, new_label
);
753 /* Modifies the IPv4 header fields of 'packet' to be consistent with 'src',
754 * 'dst', 'tos', and 'ttl'. Updates 'packet''s L4 checksums as appropriate.
755 * 'packet' must contain a valid IPv4 packet with correctly populated l[347]
758 packet_set_ipv4(struct dp_packet
*packet
, ovs_be32 src
, ovs_be32 dst
,
759 uint8_t tos
, uint8_t ttl
)
761 struct ip_header
*nh
= dp_packet_l3(packet
);
763 if (get_16aligned_be32(&nh
->ip_src
) != src
) {
764 packet_set_ipv4_addr(packet
, &nh
->ip_src
, src
);
767 if (get_16aligned_be32(&nh
->ip_dst
) != dst
) {
768 packet_set_ipv4_addr(packet
, &nh
->ip_dst
, dst
);
771 if (nh
->ip_tos
!= tos
) {
772 uint8_t *field
= &nh
->ip_tos
;
774 nh
->ip_csum
= recalc_csum16(nh
->ip_csum
, htons((uint16_t) *field
),
775 htons((uint16_t) tos
));
779 if (nh
->ip_ttl
!= ttl
) {
780 uint8_t *field
= &nh
->ip_ttl
;
782 nh
->ip_csum
= recalc_csum16(nh
->ip_csum
, htons(*field
<< 8),
788 /* Modifies the IPv6 header fields of 'packet' to be consistent with 'src',
789 * 'dst', 'traffic class', and 'next hop'. Updates 'packet''s L4 checksums as
790 * appropriate. 'packet' must contain a valid IPv6 packet with correctly
791 * populated l[34] offsets. */
793 packet_set_ipv6(struct dp_packet
*packet
, uint8_t proto
, const ovs_be32 src
[4],
794 const ovs_be32 dst
[4], uint8_t key_tc
, ovs_be32 key_fl
,
797 struct ovs_16aligned_ip6_hdr
*nh
= dp_packet_l3(packet
);
799 if (memcmp(&nh
->ip6_src
, src
, sizeof(ovs_be32
[4]))) {
800 packet_set_ipv6_addr(packet
, proto
, nh
->ip6_src
.be32
, src
, true);
803 if (memcmp(&nh
->ip6_dst
, dst
, sizeof(ovs_be32
[4]))) {
804 packet_set_ipv6_addr(packet
, proto
, nh
->ip6_dst
.be32
, dst
,
805 !packet_rh_present(packet
));
808 packet_set_ipv6_tc(&nh
->ip6_flow
, key_tc
);
810 packet_set_ipv6_flow_label(&nh
->ip6_flow
, key_fl
);
812 nh
->ip6_hlim
= key_hl
;
816 packet_set_port(ovs_be16
*port
, ovs_be16 new_port
, ovs_be16
*csum
)
818 if (*port
!= new_port
) {
819 *csum
= recalc_csum16(*csum
, *port
, new_port
);
824 /* Sets the TCP source and destination port ('src' and 'dst' respectively) of
825 * the TCP header contained in 'packet'. 'packet' must be a valid TCP packet
826 * with its l4 offset properly populated. */
828 packet_set_tcp_port(struct dp_packet
*packet
, ovs_be16 src
, ovs_be16 dst
)
830 struct tcp_header
*th
= dp_packet_l4(packet
);
832 packet_set_port(&th
->tcp_src
, src
, &th
->tcp_csum
);
833 packet_set_port(&th
->tcp_dst
, dst
, &th
->tcp_csum
);
836 /* Sets the UDP source and destination port ('src' and 'dst' respectively) of
837 * the UDP header contained in 'packet'. 'packet' must be a valid UDP packet
838 * with its l4 offset properly populated. */
840 packet_set_udp_port(struct dp_packet
*packet
, ovs_be16 src
, ovs_be16 dst
)
842 struct udp_header
*uh
= dp_packet_l4(packet
);
845 packet_set_port(&uh
->udp_src
, src
, &uh
->udp_csum
);
846 packet_set_port(&uh
->udp_dst
, dst
, &uh
->udp_csum
);
849 uh
->udp_csum
= htons(0xffff);
857 /* Sets the SCTP source and destination port ('src' and 'dst' respectively) of
858 * the SCTP header contained in 'packet'. 'packet' must be a valid SCTP packet
859 * with its l4 offset properly populated. */
861 packet_set_sctp_port(struct dp_packet
*packet
, ovs_be16 src
, ovs_be16 dst
)
863 struct sctp_header
*sh
= dp_packet_l4(packet
);
864 ovs_be32 old_csum
, old_correct_csum
, new_csum
;
865 uint16_t tp_len
= dp_packet_l4_size(packet
);
867 old_csum
= get_16aligned_be32(&sh
->sctp_csum
);
868 put_16aligned_be32(&sh
->sctp_csum
, 0);
869 old_correct_csum
= crc32c((void *)sh
, tp_len
);
874 new_csum
= crc32c((void *)sh
, tp_len
);
875 put_16aligned_be32(&sh
->sctp_csum
, old_csum
^ old_correct_csum
^ new_csum
);
879 packet_set_nd(struct dp_packet
*packet
, const ovs_be32 target
[4],
880 const struct eth_addr sll
, const struct eth_addr tll
) {
881 struct ovs_nd_msg
*ns
;
882 struct ovs_nd_opt
*nd_opt
;
883 int bytes_remain
= dp_packet_l4_size(packet
);
885 if (OVS_UNLIKELY(bytes_remain
< sizeof(*ns
))) {
889 ns
= dp_packet_l4(packet
);
890 nd_opt
= &ns
->options
[0];
891 bytes_remain
-= sizeof(*ns
);
893 if (memcmp(&ns
->target
, target
, sizeof(ovs_be32
[4]))) {
894 packet_set_ipv6_addr(packet
, IPPROTO_ICMPV6
,
899 while (bytes_remain
>= ND_OPT_LEN
&& nd_opt
->nd_opt_len
!= 0) {
900 if (nd_opt
->nd_opt_type
== ND_OPT_SOURCE_LINKADDR
901 && nd_opt
->nd_opt_len
== 1) {
902 if (!eth_addr_equals(nd_opt
->nd_opt_mac
, sll
)) {
903 ovs_be16
*csum
= &(ns
->icmph
.icmp6_cksum
);
905 *csum
= recalc_csum48(*csum
, nd_opt
->nd_opt_mac
, sll
);
906 nd_opt
->nd_opt_mac
= sll
;
909 /* A packet can only contain one SLL or TLL option */
911 } else if (nd_opt
->nd_opt_type
== ND_OPT_TARGET_LINKADDR
912 && nd_opt
->nd_opt_len
== 1) {
913 if (!eth_addr_equals(nd_opt
->nd_opt_mac
, tll
)) {
914 ovs_be16
*csum
= &(ns
->icmph
.icmp6_cksum
);
916 *csum
= recalc_csum48(*csum
, nd_opt
->nd_opt_mac
, tll
);
917 nd_opt
->nd_opt_mac
= tll
;
920 /* A packet can only contain one SLL or TLL option */
924 nd_opt
+= nd_opt
->nd_opt_len
;
925 bytes_remain
-= nd_opt
->nd_opt_len
* ND_OPT_LEN
;
930 packet_tcp_flag_to_string(uint32_t flag
)
962 /* Appends a string representation of the TCP flags value 'tcp_flags'
963 * (e.g. from struct flow.tcp_flags or obtained via TCP_FLAGS) to 's', in the
964 * format used by tcpdump. */
966 packet_format_tcp_flags(struct ds
*s
, uint16_t tcp_flags
)
969 ds_put_cstr(s
, "none");
973 if (tcp_flags
& TCP_SYN
) {
976 if (tcp_flags
& TCP_FIN
) {
979 if (tcp_flags
& TCP_PSH
) {
982 if (tcp_flags
& TCP_RST
) {
985 if (tcp_flags
& TCP_URG
) {
988 if (tcp_flags
& TCP_ACK
) {
991 if (tcp_flags
& TCP_ECE
) {
994 if (tcp_flags
& TCP_CWR
) {
997 if (tcp_flags
& TCP_NS
) {
1000 if (tcp_flags
& 0x200) {
1001 ds_put_cstr(s
, "[200]");
1003 if (tcp_flags
& 0x400) {
1004 ds_put_cstr(s
, "[400]");
1006 if (tcp_flags
& 0x800) {
1007 ds_put_cstr(s
, "[800]");
1011 #define ARP_PACKET_SIZE (2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + \
1014 /* Clears 'b' and replaces its contents by an ARP frame with the specified
1015 * 'arp_op', 'arp_sha', 'arp_tha', 'arp_spa', and 'arp_tpa'. The outer
1016 * Ethernet frame is initialized with Ethernet source 'arp_sha' and destination
1017 * 'arp_tha', except that destination ff:ff:ff:ff:ff:ff is used instead if
1018 * 'broadcast' is true. */
1020 compose_arp(struct dp_packet
*b
, uint16_t arp_op
,
1021 const struct eth_addr arp_sha
, const struct eth_addr arp_tha
,
1022 bool broadcast
, ovs_be32 arp_spa
, ovs_be32 arp_tpa
)
1024 struct eth_header
*eth
;
1025 struct arp_eth_header
*arp
;
1028 dp_packet_prealloc_tailroom(b
, ARP_PACKET_SIZE
);
1029 dp_packet_reserve(b
, 2 + VLAN_HEADER_LEN
);
1031 eth
= dp_packet_put_uninit(b
, sizeof *eth
);
1032 eth
->eth_dst
= broadcast
? eth_addr_broadcast
: arp_tha
;
1033 eth
->eth_src
= arp_sha
;
1034 eth
->eth_type
= htons(ETH_TYPE_ARP
);
1036 arp
= dp_packet_put_uninit(b
, sizeof *arp
);
1037 arp
->ar_hrd
= htons(ARP_HRD_ETHERNET
);
1038 arp
->ar_pro
= htons(ARP_PRO_IP
);
1039 arp
->ar_hln
= sizeof arp
->ar_sha
;
1040 arp
->ar_pln
= sizeof arp
->ar_spa
;
1041 arp
->ar_op
= htons(arp_op
);
1042 arp
->ar_sha
= arp_sha
;
1043 arp
->ar_tha
= arp_tha
;
1045 put_16aligned_be32(&arp
->ar_spa
, arp_spa
);
1046 put_16aligned_be32(&arp
->ar_tpa
, arp_tpa
);
1048 dp_packet_reset_offsets(b
);
1049 dp_packet_set_l3(b
, arp
);
1053 packet_csum_pseudoheader(const struct ip_header
*ip
)
1055 uint32_t partial
= 0;
1057 partial
= csum_add32(partial
, get_16aligned_be32(&ip
->ip_src
));
1058 partial
= csum_add32(partial
, get_16aligned_be32(&ip
->ip_dst
));
1059 partial
= csum_add16(partial
, htons(ip
->ip_proto
));
1060 partial
= csum_add16(partial
, htons(ntohs(ip
->ip_tot_len
) -
1061 IP_IHL(ip
->ip_ihl_ver
) * 4));