2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include <arpa/inet.h>
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <netinet/ip6.h>
23 #include <netinet/icmp6.h>
25 #include "byte-order.h"
30 #include "dynamic-string.h"
31 #include "ovs-thread.h"
33 #include "dp-packet.h"
34 #include "unaligned.h"
36 const struct in6_addr in6addr_exact
= IN6ADDR_EXACT_INIT
;
/* Parses 's' as a 16-digit hexadecimal number representing a datapath ID.  On
 * success stores the dpid into '*dpidp' and returns true, on failure stores 0
 * into '*dpidp' and returns false.
 *
 * Rejects an all-zeros dpid as invalid. */
bool
dpid_from_string(const char *s, uint64_t *dpidp)
{
    /* Accept only exactly 16 hex digits; anything else yields 0. */
    *dpidp = (strlen(s) == 16 && strspn(s, "0123456789abcdefABCDEF") == 16
              ? strtoull(s, NULL, 16)
              : 0);
    /* An all-zeros dpid parses to 0 and is therefore rejected here. */
    return *dpidp != 0;
}
52 /* Returns true if 'ea' is a reserved address, that a bridge must never
53 * forward, false otherwise.
55 * If you change this function's behavior, please update corresponding
56 * documentation in vswitch.xml at the same time. */
58 eth_addr_is_reserved(const uint8_t ea
[ETH_ADDR_LEN
])
60 struct eth_addr_node
{
61 struct hmap_node hmap_node
;
65 static struct eth_addr_node nodes
[] = {
66 /* STP, IEEE pause frames, and other reserved protocols. */
67 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000000ULL
},
68 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000001ULL
},
69 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000002ULL
},
70 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000003ULL
},
71 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000004ULL
},
72 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000005ULL
},
73 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000006ULL
},
74 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000007ULL
},
75 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000008ULL
},
76 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c2000009ULL
},
77 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000aULL
},
78 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000bULL
},
79 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000cULL
},
80 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000dULL
},
81 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000eULL
},
82 { HMAP_NODE_NULL_INITIALIZER
, 0x0180c200000fULL
},
84 /* Extreme protocols. */
85 { HMAP_NODE_NULL_INITIALIZER
, 0x00e02b000000ULL
}, /* EDP. */
86 { HMAP_NODE_NULL_INITIALIZER
, 0x00e02b000004ULL
}, /* EAPS. */
87 { HMAP_NODE_NULL_INITIALIZER
, 0x00e02b000006ULL
}, /* EAPS. */
89 /* Cisco protocols. */
90 { HMAP_NODE_NULL_INITIALIZER
, 0x01000c000000ULL
}, /* ISL. */
91 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccccULL
}, /* PAgP, UDLD, CDP,
93 { HMAP_NODE_NULL_INITIALIZER
, 0x01000ccccccdULL
}, /* PVST+. */
94 { HMAP_NODE_NULL_INITIALIZER
, 0x01000ccdcdcdULL
}, /* STP Uplink Fast,
98 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc0ULL
},
99 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc1ULL
},
100 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc2ULL
},
101 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc3ULL
},
102 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc4ULL
},
103 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc5ULL
},
104 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc6ULL
},
105 { HMAP_NODE_NULL_INITIALIZER
, 0x01000cccccc7ULL
},
108 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
109 struct eth_addr_node
*node
;
110 static struct hmap addrs
;
113 if (ovsthread_once_start(&once
)) {
115 for (node
= nodes
; node
< &nodes
[ARRAY_SIZE(nodes
)]; node
++) {
116 hmap_insert(&addrs
, &node
->hmap_node
, hash_uint64(node
->ea64
));
118 ovsthread_once_done(&once
);
121 ea64
= eth_addr_to_uint64(ea
);
122 HMAP_FOR_EACH_IN_BUCKET (node
, hmap_node
, hash_uint64(ea64
), &addrs
) {
123 if (node
->ea64
== ea64
) {
131 eth_addr_from_string(const char *s
, uint8_t ea
[ETH_ADDR_LEN
])
133 if (ovs_scan(s
, ETH_ADDR_SCAN_FMT
, ETH_ADDR_SCAN_ARGS(ea
))) {
136 memset(ea
, 0, ETH_ADDR_LEN
);
141 /* Fills 'b' with a Reverse ARP packet with Ethernet source address 'eth_src'.
142 * This function is used by Open vSwitch to compose packets in cases where
143 * context is important but content doesn't (or shouldn't) matter.
145 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
148 compose_rarp(struct dp_packet
*b
, const uint8_t eth_src
[ETH_ADDR_LEN
])
150 struct eth_header
*eth
;
151 struct arp_eth_header
*arp
;
154 dp_packet_prealloc_tailroom(b
, 2 + ETH_HEADER_LEN
+ VLAN_HEADER_LEN
155 + ARP_ETH_HEADER_LEN
);
156 dp_packet_reserve(b
, 2 + VLAN_HEADER_LEN
);
157 eth
= dp_packet_put_uninit(b
, sizeof *eth
);
158 memcpy(eth
->eth_dst
, eth_addr_broadcast
, ETH_ADDR_LEN
);
159 memcpy(eth
->eth_src
, eth_src
, ETH_ADDR_LEN
);
160 eth
->eth_type
= htons(ETH_TYPE_RARP
);
162 arp
= dp_packet_put_uninit(b
, sizeof *arp
);
163 arp
->ar_hrd
= htons(ARP_HRD_ETHERNET
);
164 arp
->ar_pro
= htons(ARP_PRO_IP
);
165 arp
->ar_hln
= sizeof arp
->ar_sha
;
166 arp
->ar_pln
= sizeof arp
->ar_spa
;
167 arp
->ar_op
= htons(ARP_OP_RARP
);
168 memcpy(arp
->ar_sha
, eth_src
, ETH_ADDR_LEN
);
169 put_16aligned_be32(&arp
->ar_spa
, htonl(0));
170 memcpy(arp
->ar_tha
, eth_src
, ETH_ADDR_LEN
);
171 put_16aligned_be32(&arp
->ar_tpa
, htonl(0));
173 dp_packet_set_frame(b
, eth
);
174 dp_packet_set_l3(b
, arp
);
177 /* Insert VLAN header according to given TCI. Packet passed must be Ethernet
178 * packet. Ignores the CFI bit of 'tci' using 0 instead.
180 * Also adjusts the layer offsets accordingly. */
182 eth_push_vlan(struct dp_packet
*packet
, ovs_be16 tpid
, ovs_be16 tci
)
184 struct vlan_eth_header
*veh
;
186 /* Insert new 802.1Q header. */
187 veh
= dp_packet_resize_l2(packet
, VLAN_HEADER_LEN
);
188 memmove(veh
, (char *)veh
+ VLAN_HEADER_LEN
, 2 * ETH_ADDR_LEN
);
189 veh
->veth_type
= tpid
;
190 veh
->veth_tci
= tci
& htons(~VLAN_CFI
);
193 /* Removes outermost VLAN header (if any is present) from 'packet'.
195 * 'packet->l2_5' should initially point to 'packet''s outer-most MPLS header
196 * or may be NULL if there are no MPLS headers. */
198 eth_pop_vlan(struct dp_packet
*packet
)
200 struct vlan_eth_header
*veh
= dp_packet_l2(packet
);
202 if (veh
&& dp_packet_size(packet
) >= sizeof *veh
203 && veh
->veth_type
== htons(ETH_TYPE_VLAN
)) {
205 memmove((char *)veh
+ VLAN_HEADER_LEN
, veh
, 2 * ETH_ADDR_LEN
);
206 dp_packet_resize_l2(packet
, -VLAN_HEADER_LEN
);
210 /* Set ethertype of the packet. */
212 set_ethertype(struct dp_packet
*packet
, ovs_be16 eth_type
)
214 struct eth_header
*eh
= dp_packet_l2(packet
);
220 if (eh
->eth_type
== htons(ETH_TYPE_VLAN
)) {
222 char *l2_5
= dp_packet_l2_5(packet
);
224 p
= ALIGNED_CAST(ovs_be16
*,
225 (l2_5
? l2_5
: (char *)dp_packet_l3(packet
)) - 2);
228 eh
->eth_type
= eth_type
;
232 static bool is_mpls(struct dp_packet
*packet
)
234 return packet
->l2_5_ofs
!= UINT16_MAX
;
237 /* Set time to live (TTL) of an MPLS label stack entry (LSE). */
239 set_mpls_lse_ttl(ovs_be32
*lse
, uint8_t ttl
)
241 *lse
&= ~htonl(MPLS_TTL_MASK
);
242 *lse
|= htonl((ttl
<< MPLS_TTL_SHIFT
) & MPLS_TTL_MASK
);
245 /* Set traffic class (TC) of an MPLS label stack entry (LSE). */
247 set_mpls_lse_tc(ovs_be32
*lse
, uint8_t tc
)
249 *lse
&= ~htonl(MPLS_TC_MASK
);
250 *lse
|= htonl((tc
<< MPLS_TC_SHIFT
) & MPLS_TC_MASK
);
253 /* Set label of an MPLS label stack entry (LSE). */
255 set_mpls_lse_label(ovs_be32
*lse
, ovs_be32 label
)
257 *lse
&= ~htonl(MPLS_LABEL_MASK
);
258 *lse
|= htonl((ntohl(label
) << MPLS_LABEL_SHIFT
) & MPLS_LABEL_MASK
);
261 /* Set bottom of stack (BoS) bit of an MPLS label stack entry (LSE). */
263 set_mpls_lse_bos(ovs_be32
*lse
, uint8_t bos
)
265 *lse
&= ~htonl(MPLS_BOS_MASK
);
266 *lse
|= htonl((bos
<< MPLS_BOS_SHIFT
) & MPLS_BOS_MASK
);
269 /* Compose an MPLS label stack entry (LSE) from its components:
270 * label, traffic class (TC), time to live (TTL) and
271 * bottom of stack (BoS) bit. */
273 set_mpls_lse_values(uint8_t ttl
, uint8_t tc
, uint8_t bos
, ovs_be32 label
)
275 ovs_be32 lse
= htonl(0);
276 set_mpls_lse_ttl(&lse
, ttl
);
277 set_mpls_lse_tc(&lse
, tc
);
278 set_mpls_lse_bos(&lse
, bos
);
279 set_mpls_lse_label(&lse
, label
);
283 /* Set MPLS label stack entry to outermost MPLS header.*/
285 set_mpls_lse(struct dp_packet
*packet
, ovs_be32 mpls_lse
)
287 /* Packet type should be MPLS to set label stack entry. */
288 if (is_mpls(packet
)) {
289 struct mpls_hdr
*mh
= dp_packet_l2_5(packet
);
291 /* Update mpls label stack entry. */
292 put_16aligned_be32(&mh
->mpls_lse
, mpls_lse
);
296 /* Push MPLS label stack entry 'lse' onto 'packet' as the the outermost MPLS
297 * header. If 'packet' does not already have any MPLS labels, then its
298 * Ethertype is changed to 'ethtype' (which must be an MPLS Ethertype). */
300 push_mpls(struct dp_packet
*packet
, ovs_be16 ethtype
, ovs_be32 lse
)
305 if (!eth_type_mpls(ethtype
)) {
309 if (!is_mpls(packet
)) {
310 /* Set MPLS label stack offset. */
311 packet
->l2_5_ofs
= packet
->l3_ofs
;
314 set_ethertype(packet
, ethtype
);
316 /* Push new MPLS shim header onto packet. */
317 len
= packet
->l2_5_ofs
;
318 header
= dp_packet_resize_l2_5(packet
, MPLS_HLEN
);
319 memmove(header
, header
+ MPLS_HLEN
, len
);
320 memcpy(header
+ len
, &lse
, sizeof lse
);
323 /* If 'packet' is an MPLS packet, removes its outermost MPLS label stack entry.
324 * If the label that was removed was the only MPLS label, changes 'packet''s
325 * Ethertype to 'ethtype' (which ordinarily should not be an MPLS
328 pop_mpls(struct dp_packet
*packet
, ovs_be16 ethtype
)
330 if (is_mpls(packet
)) {
331 struct mpls_hdr
*mh
= dp_packet_l2_5(packet
);
332 size_t len
= packet
->l2_5_ofs
;
334 set_ethertype(packet
, ethtype
);
335 if (get_16aligned_be32(&mh
->mpls_lse
) & htonl(MPLS_BOS_MASK
)) {
336 dp_packet_set_l2_5(packet
, NULL
);
338 /* Shift the l2 header forward. */
339 memmove((char*)dp_packet_data(packet
) + MPLS_HLEN
, dp_packet_data(packet
), len
);
340 dp_packet_resize_l2_5(packet
, -MPLS_HLEN
);
344 /* Converts hex digits in 'hex' to an Ethernet packet in '*packetp'. The
345 * caller must free '*packetp'. On success, returns NULL. On failure, returns
346 * an error message and stores NULL in '*packetp'.
348 * Aligns the L3 header of '*packetp' on a 32-bit boundary. */
350 eth_from_hex(const char *hex
, struct dp_packet
**packetp
)
352 struct dp_packet
*packet
;
354 /* Use 2 bytes of headroom to 32-bit align the L3 header. */
355 packet
= *packetp
= dp_packet_new_with_headroom(strlen(hex
) / 2, 2);
357 if (dp_packet_put_hex(packet
, hex
, NULL
)[0] != '\0') {
358 dp_packet_delete(packet
);
360 return "Trailing garbage in packet data";
363 if (dp_packet_size(packet
) < ETH_HEADER_LEN
) {
364 dp_packet_delete(packet
);
366 return "Packet data too short for Ethernet";
373 eth_format_masked(const uint8_t eth
[ETH_ADDR_LEN
],
374 const uint8_t mask
[ETH_ADDR_LEN
], struct ds
*s
)
376 ds_put_format(s
, ETH_ADDR_FMT
, ETH_ADDR_ARGS(eth
));
377 if (mask
&& !eth_mask_is_exact(mask
)) {
378 ds_put_format(s
, "/"ETH_ADDR_FMT
, ETH_ADDR_ARGS(mask
));
383 eth_addr_bitand(const uint8_t src
[ETH_ADDR_LEN
],
384 const uint8_t mask
[ETH_ADDR_LEN
],
385 uint8_t dst
[ETH_ADDR_LEN
])
389 for (i
= 0; i
< ETH_ADDR_LEN
; i
++) {
390 dst
[i
] = src
[i
] & mask
[i
];
394 /* Given the IP netmask 'netmask', returns the number of bits of the IP address
395 * that it specifies, that is, the number of 1-bits in 'netmask'.
397 * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will
398 * still be in the valid range but isn't otherwise meaningful. */
400 ip_count_cidr_bits(ovs_be32 netmask
)
402 return 32 - ctz32(ntohl(netmask
));
406 ip_format_masked(ovs_be32 ip
, ovs_be32 mask
, struct ds
*s
)
408 ds_put_format(s
, IP_FMT
, IP_ARGS(ip
));
409 if (mask
!= OVS_BE32_MAX
) {
410 if (ip_is_cidr(mask
)) {
411 ds_put_format(s
, "/%d", ip_count_cidr_bits(mask
));
413 ds_put_format(s
, "/"IP_FMT
, IP_ARGS(mask
));
/* Stores the string representation of the IPv6 address 'addr' into the
 * character array 'addr_str', which must be at least INET6_ADDRSTRLEN
 * bytes long. */
void
format_ipv6_addr(char *addr_str, const struct in6_addr *addr)
{
    inet_ntop(AF_INET6, addr, addr_str, INET6_ADDRSTRLEN);
}
429 print_ipv6_addr(struct ds
*string
, const struct in6_addr
*addr
)
433 ds_reserve(string
, string
->length
+ INET6_ADDRSTRLEN
);
435 dst
= string
->string
+ string
->length
;
436 format_ipv6_addr(dst
, addr
);
437 string
->length
+= strlen(dst
);
/* Appends "addr" (and, unless the mask is exact, "/N" for a CIDR mask or
 * "/mask" otherwise) to 's'. */
void
print_ipv6_masked(struct ds *s, const struct in6_addr *addr,
                  const struct in6_addr *mask)
{
    print_ipv6_addr(s, addr);
    if (mask && !ipv6_mask_is_exact(mask)) {
        if (ipv6_is_cidr(mask)) {
            int cidr_bits = ipv6_count_cidr_bits(mask);
            ds_put_format(s, "/%d", cidr_bits);
        } else {
            ds_put_char(s, '/');
            print_ipv6_addr(s, mask);
        }
    }
}
/* Returns the bitwise AND of 'a' and 'b'. */
struct in6_addr ipv6_addr_bitand(const struct in6_addr *a,
                                 const struct in6_addr *b)
{
    int i;
    struct in6_addr dst;

#ifdef s6_addr32
    /* Fast path: AND four 32-bit words at a time where the platform exposes
     * the s6_addr32 accessor. */
    for (i = 0; i < 4; i++) {
        dst.s6_addr32[i] = a->s6_addr32[i] & b->s6_addr32[i];
    }
#else
    for (i = 0; i < 16; i++) {
        dst.s6_addr[i] = a->s6_addr[i] & b->s6_addr[i];
    }
#endif

    return dst;
}
/* Returns an in6_addr consisting of 'mask' high-order 1-bits and 128-N
 * low-order 0-bits. */
struct in6_addr ipv6_create_mask(int mask)
{
    struct in6_addr netmask;
    uint8_t *netmaskp = &netmask.s6_addr[0];

    memset(&netmask, 0, sizeof netmask);
    /* Fill whole 0xff bytes first... */
    while (mask > 8) {
        *netmaskp = 0xff;
        netmaskp++;
        mask -= 8;
    }

    /* ...then the final partial byte, if any bits remain. */
    if (mask) {
        *netmaskp = 0xff << (8 - mask);
    }

    return netmask;
}
/* Given the IPv6 netmask 'netmask', returns the number of bits of the IPv6
 * address that it specifies, that is, the number of 1-bits in 'netmask'.
 * 'netmask' must be a CIDR netmask (see ipv6_is_cidr()).
 *
 * If 'netmask' is not a CIDR netmask (see ipv6_is_cidr()), the return value
 * will still be in the valid range but isn't otherwise meaningful. */
int
ipv6_count_cidr_bits(const struct in6_addr *netmask)
{
    int i;
    int count = 0;
    const uint8_t *netmaskp = &netmask->s6_addr[0];

    for (i = 0; i < 16; i++) {
        if (netmaskp[i] == 0xff) {
            count += 8;
        } else {
            uint8_t nm;

            /* Count leading one bits of the first non-0xff byte. */
            for (nm = netmaskp[i]; nm; nm <<= 1) {
                count++;
            }
            break;
        }
    }

    return count;
}
/* Returns true if 'netmask' is a CIDR netmask, that is, if it consists of N
 * high-order 1-bits and 128-N low-order 0-bits. */
bool ipv6_is_cidr(const struct in6_addr *netmask)
{
    const uint8_t *netmaskp = &netmask->s6_addr[0];
    int i;

    for (i = 0; i < 16; i++) {
        if (netmaskp[i] != 0xff) {
            /* The first non-0xff byte must itself be of the form 1...10...0:
             * its complement plus one must not share any bits with it. */
            uint8_t x = ~netmaskp[i];
            if (x & (x + 1)) {
                return false;
            }
            /* Every following byte must be all zeros. */
            while (++i < 16) {
                if (netmaskp[i] != 0) {
                    return false;
                }
            }
        }
    }

    return true;
}
552 /* Populates 'b' with an Ethernet II packet headed with the given 'eth_dst',
553 * 'eth_src' and 'eth_type' parameters. A payload of 'size' bytes is allocated
554 * in 'b' and returned. This payload may be populated with appropriate
555 * information by the caller. Sets 'b''s 'frame' pointer and 'l3' offset to
556 * the Ethernet header and payload respectively. Aligns b->l3 on a 32-bit
559 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
562 eth_compose(struct dp_packet
*b
, const uint8_t eth_dst
[ETH_ADDR_LEN
],
563 const uint8_t eth_src
[ETH_ADDR_LEN
], uint16_t eth_type
,
567 struct eth_header
*eth
;
571 /* The magic 2 here ensures that the L3 header (when it is added later)
572 * will be 32-bit aligned. */
573 dp_packet_prealloc_tailroom(b
, 2 + ETH_HEADER_LEN
+ VLAN_HEADER_LEN
+ size
);
574 dp_packet_reserve(b
, 2 + VLAN_HEADER_LEN
);
575 eth
= dp_packet_put_uninit(b
, ETH_HEADER_LEN
);
576 data
= dp_packet_put_uninit(b
, size
);
578 memcpy(eth
->eth_dst
, eth_dst
, ETH_ADDR_LEN
);
579 memcpy(eth
->eth_src
, eth_src
, ETH_ADDR_LEN
);
580 eth
->eth_type
= htons(eth_type
);
582 dp_packet_set_frame(b
, eth
);
583 dp_packet_set_l3(b
, data
);
589 packet_set_ipv4_addr(struct dp_packet
*packet
,
590 ovs_16aligned_be32
*addr
, ovs_be32 new_addr
)
592 struct ip_header
*nh
= dp_packet_l3(packet
);
593 ovs_be32 old_addr
= get_16aligned_be32(addr
);
594 size_t l4_size
= dp_packet_l4_size(packet
);
596 if (nh
->ip_proto
== IPPROTO_TCP
&& l4_size
>= TCP_HEADER_LEN
) {
597 struct tcp_header
*th
= dp_packet_l4(packet
);
599 th
->tcp_csum
= recalc_csum32(th
->tcp_csum
, old_addr
, new_addr
);
600 } else if (nh
->ip_proto
== IPPROTO_UDP
&& l4_size
>= UDP_HEADER_LEN
) {
601 struct udp_header
*uh
= dp_packet_l4(packet
);
604 uh
->udp_csum
= recalc_csum32(uh
->udp_csum
, old_addr
, new_addr
);
606 uh
->udp_csum
= htons(0xffff);
610 nh
->ip_csum
= recalc_csum32(nh
->ip_csum
, old_addr
, new_addr
);
611 put_16aligned_be32(addr
, new_addr
);
614 /* Returns true, if packet contains at least one routing header where
615 * segements_left > 0.
617 * This function assumes that L3 and L4 offsets are set in the packet. */
619 packet_rh_present(struct dp_packet
*packet
)
621 const struct ovs_16aligned_ip6_hdr
*nh
;
625 uint8_t *data
= dp_packet_l3(packet
);
627 remaining
= packet
->l4_ofs
- packet
->l3_ofs
;
629 if (remaining
< sizeof *nh
) {
632 nh
= ALIGNED_CAST(struct ovs_16aligned_ip6_hdr
*, data
);
634 remaining
-= sizeof *nh
;
635 nexthdr
= nh
->ip6_nxt
;
638 if ((nexthdr
!= IPPROTO_HOPOPTS
)
639 && (nexthdr
!= IPPROTO_ROUTING
)
640 && (nexthdr
!= IPPROTO_DSTOPTS
)
641 && (nexthdr
!= IPPROTO_AH
)
642 && (nexthdr
!= IPPROTO_FRAGMENT
)) {
643 /* It's either a terminal header (e.g., TCP, UDP) or one we
644 * don't understand. In either case, we're done with the
645 * packet, so use it to fill in 'nw_proto'. */
649 /* We only verify that at least 8 bytes of the next header are
650 * available, but many of these headers are longer. Ensure that
651 * accesses within the extension header are within those first 8
652 * bytes. All extension headers are required to be at least 8
658 if (nexthdr
== IPPROTO_AH
) {
659 /* A standard AH definition isn't available, but the fields
660 * we care about are in the same location as the generic
661 * option header--only the header length is calculated
663 const struct ip6_ext
*ext_hdr
= (struct ip6_ext
*)data
;
665 nexthdr
= ext_hdr
->ip6e_nxt
;
666 len
= (ext_hdr
->ip6e_len
+ 2) * 4;
667 } else if (nexthdr
== IPPROTO_FRAGMENT
) {
668 const struct ovs_16aligned_ip6_frag
*frag_hdr
669 = ALIGNED_CAST(struct ovs_16aligned_ip6_frag
*, data
);
671 nexthdr
= frag_hdr
->ip6f_nxt
;
672 len
= sizeof *frag_hdr
;
673 } else if (nexthdr
== IPPROTO_ROUTING
) {
674 const struct ip6_rthdr
*rh
= (struct ip6_rthdr
*)data
;
676 if (rh
->ip6r_segleft
> 0) {
680 nexthdr
= rh
->ip6r_nxt
;
681 len
= (rh
->ip6r_len
+ 1) * 8;
683 const struct ip6_ext
*ext_hdr
= (struct ip6_ext
*)data
;
685 nexthdr
= ext_hdr
->ip6e_nxt
;
686 len
= (ext_hdr
->ip6e_len
+ 1) * 8;
689 if (remaining
< len
) {
700 packet_update_csum128(struct dp_packet
*packet
, uint8_t proto
,
701 ovs_16aligned_be32 addr
[4], const ovs_be32 new_addr
[4])
703 size_t l4_size
= dp_packet_l4_size(packet
);
705 if (proto
== IPPROTO_TCP
&& l4_size
>= TCP_HEADER_LEN
) {
706 struct tcp_header
*th
= dp_packet_l4(packet
);
708 th
->tcp_csum
= recalc_csum128(th
->tcp_csum
, addr
, new_addr
);
709 } else if (proto
== IPPROTO_UDP
&& l4_size
>= UDP_HEADER_LEN
) {
710 struct udp_header
*uh
= dp_packet_l4(packet
);
713 uh
->udp_csum
= recalc_csum128(uh
->udp_csum
, addr
, new_addr
);
715 uh
->udp_csum
= htons(0xffff);
718 } else if (proto
== IPPROTO_ICMPV6
&&
719 l4_size
>= sizeof(struct icmp6_header
)) {
720 struct icmp6_header
*icmp
= dp_packet_l4(packet
);
722 icmp
->icmp6_cksum
= recalc_csum128(icmp
->icmp6_cksum
, addr
, new_addr
);
727 packet_set_ipv6_addr(struct dp_packet
*packet
, uint8_t proto
,
728 ovs_16aligned_be32 addr
[4], const ovs_be32 new_addr
[4],
729 bool recalculate_csum
)
731 if (recalculate_csum
) {
732 packet_update_csum128(packet
, proto
, addr
, new_addr
);
734 memcpy(addr
, new_addr
, sizeof(ovs_be32
[4]));
738 packet_set_ipv6_flow_label(ovs_16aligned_be32
*flow_label
, ovs_be32 flow_key
)
740 ovs_be32 old_label
= get_16aligned_be32(flow_label
);
741 ovs_be32 new_label
= (old_label
& htonl(~IPV6_LABEL_MASK
)) | flow_key
;
742 put_16aligned_be32(flow_label
, new_label
);
746 packet_set_ipv6_tc(ovs_16aligned_be32
*flow_label
, uint8_t tc
)
748 ovs_be32 old_label
= get_16aligned_be32(flow_label
);
749 ovs_be32 new_label
= (old_label
& htonl(0xF00FFFFF)) | htonl(tc
<< 20);
750 put_16aligned_be32(flow_label
, new_label
);
753 /* Modifies the IPv4 header fields of 'packet' to be consistent with 'src',
754 * 'dst', 'tos', and 'ttl'. Updates 'packet''s L4 checksums as appropriate.
755 * 'packet' must contain a valid IPv4 packet with correctly populated l[347]
758 packet_set_ipv4(struct dp_packet
*packet
, ovs_be32 src
, ovs_be32 dst
,
759 uint8_t tos
, uint8_t ttl
)
761 struct ip_header
*nh
= dp_packet_l3(packet
);
763 if (get_16aligned_be32(&nh
->ip_src
) != src
) {
764 packet_set_ipv4_addr(packet
, &nh
->ip_src
, src
);
767 if (get_16aligned_be32(&nh
->ip_dst
) != dst
) {
768 packet_set_ipv4_addr(packet
, &nh
->ip_dst
, dst
);
771 if (nh
->ip_tos
!= tos
) {
772 uint8_t *field
= &nh
->ip_tos
;
774 nh
->ip_csum
= recalc_csum16(nh
->ip_csum
, htons((uint16_t) *field
),
775 htons((uint16_t) tos
));
779 if (nh
->ip_ttl
!= ttl
) {
780 uint8_t *field
= &nh
->ip_ttl
;
782 nh
->ip_csum
= recalc_csum16(nh
->ip_csum
, htons(*field
<< 8),
788 /* Modifies the IPv6 header fields of 'packet' to be consistent with 'src',
789 * 'dst', 'traffic class', and 'next hop'. Updates 'packet''s L4 checksums as
790 * appropriate. 'packet' must contain a valid IPv6 packet with correctly
791 * populated l[34] offsets. */
793 packet_set_ipv6(struct dp_packet
*packet
, uint8_t proto
, const ovs_be32 src
[4],
794 const ovs_be32 dst
[4], uint8_t key_tc
, ovs_be32 key_fl
,
797 struct ovs_16aligned_ip6_hdr
*nh
= dp_packet_l3(packet
);
799 if (memcmp(&nh
->ip6_src
, src
, sizeof(ovs_be32
[4]))) {
800 packet_set_ipv6_addr(packet
, proto
, nh
->ip6_src
.be32
, src
, true);
803 if (memcmp(&nh
->ip6_dst
, dst
, sizeof(ovs_be32
[4]))) {
804 packet_set_ipv6_addr(packet
, proto
, nh
->ip6_dst
.be32
, dst
,
805 !packet_rh_present(packet
));
808 packet_set_ipv6_tc(&nh
->ip6_flow
, key_tc
);
810 packet_set_ipv6_flow_label(&nh
->ip6_flow
, key_fl
);
812 nh
->ip6_hlim
= key_hl
;
816 packet_set_port(ovs_be16
*port
, ovs_be16 new_port
, ovs_be16
*csum
)
818 if (*port
!= new_port
) {
819 *csum
= recalc_csum16(*csum
, *port
, new_port
);
824 /* Sets the TCP source and destination port ('src' and 'dst' respectively) of
825 * the TCP header contained in 'packet'. 'packet' must be a valid TCP packet
826 * with its l4 offset properly populated. */
828 packet_set_tcp_port(struct dp_packet
*packet
, ovs_be16 src
, ovs_be16 dst
)
830 struct tcp_header
*th
= dp_packet_l4(packet
);
832 packet_set_port(&th
->tcp_src
, src
, &th
->tcp_csum
);
833 packet_set_port(&th
->tcp_dst
, dst
, &th
->tcp_csum
);
836 /* Sets the UDP source and destination port ('src' and 'dst' respectively) of
837 * the UDP header contained in 'packet'. 'packet' must be a valid UDP packet
838 * with its l4 offset properly populated. */
840 packet_set_udp_port(struct dp_packet
*packet
, ovs_be16 src
, ovs_be16 dst
)
842 struct udp_header
*uh
= dp_packet_l4(packet
);
845 packet_set_port(&uh
->udp_src
, src
, &uh
->udp_csum
);
846 packet_set_port(&uh
->udp_dst
, dst
, &uh
->udp_csum
);
849 uh
->udp_csum
= htons(0xffff);
857 /* Sets the SCTP source and destination port ('src' and 'dst' respectively) of
858 * the SCTP header contained in 'packet'. 'packet' must be a valid SCTP packet
859 * with its l4 offset properly populated. */
861 packet_set_sctp_port(struct dp_packet
*packet
, ovs_be16 src
, ovs_be16 dst
)
863 struct sctp_header
*sh
= dp_packet_l4(packet
);
864 ovs_be32 old_csum
, old_correct_csum
, new_csum
;
865 uint16_t tp_len
= dp_packet_l4_size(packet
);
867 old_csum
= get_16aligned_be32(&sh
->sctp_csum
);
868 put_16aligned_be32(&sh
->sctp_csum
, 0);
869 old_correct_csum
= crc32c((void *)sh
, tp_len
);
874 new_csum
= crc32c((void *)sh
, tp_len
);
875 put_16aligned_be32(&sh
->sctp_csum
, old_csum
^ old_correct_csum
^ new_csum
);
879 packet_set_nd(struct dp_packet
*packet
, const ovs_be32 target
[4],
880 const uint8_t sll
[ETH_ADDR_LEN
],
881 const uint8_t tll
[ETH_ADDR_LEN
]) {
882 struct ovs_nd_msg
*ns
;
883 struct ovs_nd_opt
*nd_opt
;
884 int bytes_remain
= dp_packet_l4_size(packet
);
886 if (OVS_UNLIKELY(bytes_remain
< sizeof(*ns
))) {
890 ns
= dp_packet_l4(packet
);
891 nd_opt
= &ns
->options
[0];
892 bytes_remain
-= sizeof(*ns
);
894 if (memcmp(&ns
->target
, target
, sizeof(ovs_be32
[4]))) {
895 packet_set_ipv6_addr(packet
, IPPROTO_ICMPV6
,
900 while (bytes_remain
>= ND_OPT_LEN
&& nd_opt
->nd_opt_len
!= 0) {
901 if (nd_opt
->nd_opt_type
== ND_OPT_SOURCE_LINKADDR
902 && nd_opt
->nd_opt_len
== 1) {
903 if (memcmp(nd_opt
->nd_opt_data
, sll
, ETH_ADDR_LEN
)) {
904 ovs_be16
*csum
= &(ns
->icmph
.icmp6_cksum
);
906 *csum
= recalc_csum48(*csum
, nd_opt
->nd_opt_data
, sll
);
907 memcpy(nd_opt
->nd_opt_data
, sll
, ETH_ADDR_LEN
);
910 /* A packet can only contain one SLL or TLL option */
912 } else if (nd_opt
->nd_opt_type
== ND_OPT_TARGET_LINKADDR
913 && nd_opt
->nd_opt_len
== 1) {
914 if (memcmp(nd_opt
->nd_opt_data
, tll
, ETH_ADDR_LEN
)) {
915 ovs_be16
*csum
= &(ns
->icmph
.icmp6_cksum
);
917 *csum
= recalc_csum48(*csum
, nd_opt
->nd_opt_data
, tll
);
918 memcpy(nd_opt
->nd_opt_data
, tll
, ETH_ADDR_LEN
);
921 /* A packet can only contain one SLL or TLL option */
925 nd_opt
+= nd_opt
->nd_opt_len
;
926 bytes_remain
-= nd_opt
->nd_opt_len
* ND_OPT_LEN
;
/* NOTE(review): only this signature fragment survived extraction; the return
 * type and entire body (presumably a switch mapping one TCP flag bit to its
 * tcpdump-style name) are missing — restore from upstream before compiling. */
931 packet_tcp_flag_to_string(uint32_t flag
)
963 /* Appends a string representation of the TCP flags value 'tcp_flags'
964 * (e.g. from struct flow.tcp_flags or obtained via TCP_FLAGS) to 's', in the
965 * format used by tcpdump. */
967 packet_format_tcp_flags(struct ds
*s
, uint16_t tcp_flags
)
970 ds_put_cstr(s
, "none");
974 if (tcp_flags
& TCP_SYN
) {
977 if (tcp_flags
& TCP_FIN
) {
980 if (tcp_flags
& TCP_PSH
) {
983 if (tcp_flags
& TCP_RST
) {
986 if (tcp_flags
& TCP_URG
) {
989 if (tcp_flags
& TCP_ACK
) {
992 if (tcp_flags
& TCP_ECE
) {
995 if (tcp_flags
& TCP_CWR
) {
998 if (tcp_flags
& TCP_NS
) {
1001 if (tcp_flags
& 0x200) {
1002 ds_put_cstr(s
, "[200]");
1004 if (tcp_flags
& 0x400) {
1005 ds_put_cstr(s
, "[400]");
1007 if (tcp_flags
& 0x800) {
1008 ds_put_cstr(s
, "[800]");
1012 #define ARP_PACKET_SIZE (2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + \
1016 compose_arp(struct dp_packet
*b
, const uint8_t eth_src
[ETH_ADDR_LEN
],
1017 ovs_be32 ip_src
, ovs_be32 ip_dst
)
1019 struct eth_header
*eth
;
1020 struct arp_eth_header
*arp
;
1023 dp_packet_prealloc_tailroom(b
, ARP_PACKET_SIZE
);
1024 dp_packet_reserve(b
, 2 + VLAN_HEADER_LEN
);
1026 eth
= dp_packet_put_uninit(b
, sizeof *eth
);
1027 memcpy(eth
->eth_dst
, eth_addr_broadcast
, ETH_ADDR_LEN
);
1028 memcpy(eth
->eth_src
, eth_src
, ETH_ADDR_LEN
);
1029 eth
->eth_type
= htons(ETH_TYPE_ARP
);
1031 arp
= dp_packet_put_uninit(b
, sizeof *arp
);
1032 arp
->ar_hrd
= htons(ARP_HRD_ETHERNET
);
1033 arp
->ar_pro
= htons(ARP_PRO_IP
);
1034 arp
->ar_hln
= sizeof arp
->ar_sha
;
1035 arp
->ar_pln
= sizeof arp
->ar_spa
;
1036 arp
->ar_op
= htons(ARP_OP_REQUEST
);
1037 memcpy(arp
->ar_sha
, eth_src
, ETH_ADDR_LEN
);
1038 memset(arp
->ar_tha
, 0, ETH_ADDR_LEN
);
1040 put_16aligned_be32(&arp
->ar_spa
, ip_src
);
1041 put_16aligned_be32(&arp
->ar_tpa
, ip_dst
);
1043 dp_packet_set_frame(b
, eth
);
1044 dp_packet_set_l3(b
, arp
);
1048 packet_csum_pseudoheader(const struct ip_header
*ip
)
1050 uint32_t partial
= 0;
1052 partial
= csum_add32(partial
, get_16aligned_be32(&ip
->ip_src
));
1053 partial
= csum_add32(partial
, get_16aligned_be32(&ip
->ip_dst
));
1054 partial
= csum_add16(partial
, htons(ip
->ip_proto
));
1055 partial
= csum_add16(partial
, htons(ntohs(ip
->ip_tot_len
) -
1056 IP_IHL(ip
->ip_ihl_ver
) * 4));