2 * Copyright (c) 2011 Nicira, Inc.
3 * Copyright (c) 2013 Cisco Systems, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/version.h>
26 #include <linux/net.h>
27 #include <linux/rculist.h>
28 #include <linux/udp.h>
32 #include <net/route.h>
40 * LISP encapsulation header:
42 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
43 * |N|L|E|V|I|flags| Nonce/Map-Version |
44 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
45 * | Instance ID/Locator Status Bits |
46 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
51 * struct lisphdr - LISP header
52 * @nonce_present: Flag indicating the presence of a 24 bit nonce value.
53 * @locator_status_bits_present: Flag indicating the presence of Locator Status
55 * @solicit_echo_nonce: Flag indicating the use of the echo noncing mechanism.
56 * @map_version_present: Flag indicating the use of mapping versioning.
57 * @instance_id_present: Flag indicating the presence of a 24 bit Instance ID.
58 * @reserved_flags: 3 bits reserved for future flags.
59 * @nonce: 24 bit nonce value.
60 * @map_version: 24 bit mapping version.
61 * @locator_status_bits: Locator Status Bits: 32 bits when instance_id_present
62 * is not set, 8 bits when it is.
63 * @instance_id: 24 bit Instance ID
66 #ifdef __LITTLE_ENDIAN_BITFIELD
67 __u8 reserved_flags
:3;
68 __u8 instance_id_present
:1;
69 __u8 map_version_present
:1;
70 __u8 solicit_echo_nonce
:1;
71 __u8 locator_status_bits_present
:1;
75 __u8 locator_status_bits_present
:1;
76 __u8 solicit_echo_nonce
:1;
77 __u8 map_version_present
:1;
78 __u8 instance_id_present
:1;
79 __u8 reserved_flags
:3;
86 __be32 locator_status_bits
;
89 __u8 locator_status_bits
;
94 #define LISP_HLEN (sizeof(struct udphdr) + sizeof(struct lisphdr))
97 * struct lisp_port - Keeps track of open UDP ports
98 * @dst_port: lisp UDP port no.
99 * @list: list element in @lisp_ports.
100 * @lisp_rcv_socket: The socket created for this port number.
105 struct list_head list
;
106 struct socket
*lisp_rcv_socket
;
110 static LIST_HEAD(lisp_ports
);
/* Return the LISP private area of a generic vport. */
static inline struct lisp_port *lisp_vport(const struct vport *vport)
{
	return vport_priv(vport);
}
117 static struct lisp_port
*lisp_find_port(struct net
*net
, __be16 port
)
119 struct lisp_port
*lisp_port
;
121 list_for_each_entry_rcu(lisp_port
, &lisp_ports
, list
) {
122 if (lisp_port
->dst_port
== port
&&
123 net_eq(sock_net(lisp_port
->lisp_rcv_socket
->sk
), net
))
/* LISP header immediately follows the UDP header. */
static inline struct lisphdr *lisp_hdr(const struct sk_buff *skb)
{
	return (struct lisphdr *)(udp_hdr(skb) + 1);
}
135 /* Convert 64 bit tunnel ID to 24 bit Instance ID. */
136 static void tunnel_id_to_instance_id(__be64 tun_id
, __u8
*iid
)
140 iid
[0] = (__force __u8
)(tun_id
>> 16);
141 iid
[1] = (__force __u8
)(tun_id
>> 8);
142 iid
[2] = (__force __u8
)tun_id
;
144 iid
[0] = (__force __u8
)((__force u64
)tun_id
>> 40);
145 iid
[1] = (__force __u8
)((__force u64
)tun_id
>> 48);
146 iid
[2] = (__force __u8
)((__force u64
)tun_id
>> 56);
150 /* Convert 24 bit Instance ID to 64 bit tunnel ID. */
151 static __be64
instance_id_to_tunnel_id(__u8
*iid
)
154 return (iid
[0] << 16) | (iid
[1] << 8) | iid
[2];
156 return (__force __be64
)(((__force u64
)iid
[0] << 40) |
157 ((__force u64
)iid
[1] << 48) |
158 ((__force u64
)iid
[2] << 56));
162 /* Compute source UDP port for outgoing packet.
163 * Currently we use the flow hash.
165 static u16
ovs_tnl_get_src_port(struct sk_buff
*skb
)
170 struct sw_flow_key
*pkt_key
= OVS_CB(skb
)->pkt_key
;
171 u32 hash
= jhash2((const u32
*)pkt_key
,
172 sizeof(*pkt_key
) / sizeof(u32
), 0);
174 inet_get_local_port_range(&low
, &high
);
175 range
= (high
- low
) + 1;
176 return (((u64
) hash
* range
) >> 32) + low
;
179 static void lisp_build_header(const struct vport
*vport
,
183 struct lisp_port
*lisp_port
= lisp_vport(vport
);
184 struct udphdr
*udph
= udp_hdr(skb
);
185 struct lisphdr
*lisph
= (struct lisphdr
*)(udph
+ 1);
186 const struct ovs_key_ipv4_tunnel
*tun_key
= OVS_CB(skb
)->tun_key
;
188 udph
->dest
= lisp_port
->dst_port
;
189 udph
->source
= htons(ovs_tnl_get_src_port(skb
));
191 udph
->len
= htons(skb
->len
- skb_transport_offset(skb
));
193 lisph
->nonce_present
= 0; /* We don't support echo nonce algorithm */
194 lisph
->locator_status_bits_present
= 1; /* Set LSB */
195 lisph
->solicit_echo_nonce
= 0; /* No echo noncing */
196 lisph
->map_version_present
= 0; /* No mapping versioning, nonce instead */
197 lisph
->instance_id_present
= 1; /* Store the tun_id as Instance ID */
198 lisph
->reserved_flags
= 0; /* Reserved flags, set to 0 */
200 lisph
->u1
.nonce
[0] = 0;
201 lisph
->u1
.nonce
[1] = 0;
202 lisph
->u1
.nonce
[2] = 0;
204 tunnel_id_to_instance_id(tun_key
->tun_id
, &lisph
->u2
.word2
.instance_id
[0]);
205 lisph
->u2
.word2
.locator_status_bits
= 1;
209 * ovs_tnl_rcv - ingress point for generic tunnel code
211 * @vport: port this packet was received on
212 * @skb: received packet
213 * @tun_key: tunnel that carried packet
215 * Must be called with rcu_read_lock.
217 * Packets received by this function are in the following state:
218 * - skb->data points to the inner Ethernet header.
219 * - The inner Ethernet header is in the linear data area.
220 * - The layer pointers are undefined.
222 static void ovs_tnl_rcv(struct vport
*vport
, struct sk_buff
*skb
,
223 struct ovs_key_ipv4_tunnel
*tun_key
)
227 skb_reset_mac_header(skb
);
230 if (likely(ntohs(eh
->h_proto
) >= ETH_P_802_3_MIN
))
231 skb
->protocol
= eh
->h_proto
;
233 skb
->protocol
= htons(ETH_P_802_2
);
237 skb_clear_rxhash(skb
);
239 vlan_set_tci(skb
, 0);
241 if (unlikely(compute_ip_summed(skb
, false))) {
246 ovs_vport_receive(vport
, skb
, tun_key
);
249 /* Called with rcu_read_lock and BH disabled. */
250 static int lisp_rcv(struct sock
*sk
, struct sk_buff
*skb
)
252 struct lisp_port
*lisp_port
;
253 struct lisphdr
*lisph
;
254 struct iphdr
*iph
, *inner_iph
;
255 struct ovs_key_ipv4_tunnel tun_key
;
260 lisp_port
= lisp_find_port(dev_net(skb
->dev
), udp_hdr(skb
)->dest
);
261 if (unlikely(!lisp_port
))
264 if (unlikely(!pskb_may_pull(skb
, LISP_HLEN
)))
267 lisph
= lisp_hdr(skb
);
269 skb_pull_rcsum(skb
, LISP_HLEN
);
271 if (lisph
->instance_id_present
!= 1)
274 key
= instance_id_to_tunnel_id(&lisph
->u2
.word2
.instance_id
[0]);
276 /* Save outer tunnel values */
278 ovs_flow_tun_key_init(&tun_key
, iph
, key
, TUNNEL_KEY
);
280 /* Drop non-IP inner packets */
281 inner_iph
= (struct iphdr
*)(lisph
+ 1);
282 switch (inner_iph
->version
) {
284 protocol
= htons(ETH_P_IP
);
287 protocol
= htons(ETH_P_IPV6
);
293 /* Add Ethernet header */
294 ethh
= (struct ethhdr
*)skb_push(skb
, ETH_HLEN
);
295 memset(ethh
, 0, ETH_HLEN
);
296 ethh
->h_dest
[0] = 0x02;
297 ethh
->h_source
[0] = 0x02;
298 ethh
->h_proto
= protocol
;
300 ovs_skb_postpush_rcsum(skb
, skb
->data
, ETH_HLEN
);
302 ovs_tnl_rcv(vport_from_priv(lisp_port
), skb
, &tun_key
);
311 /* Arbitrary value. Irrelevant as long as it's not 0 since we set the handler. */
312 #define UDP_ENCAP_LISP 1
313 static int lisp_socket_init(struct lisp_port
*lisp_port
, struct net
*net
)
315 struct sockaddr_in sin
;
318 err
= sock_create_kern(AF_INET
, SOCK_DGRAM
, 0,
319 &lisp_port
->lisp_rcv_socket
);
323 /* release net ref. */
324 sk_change_net(lisp_port
->lisp_rcv_socket
->sk
, net
);
326 sin
.sin_family
= AF_INET
;
327 sin
.sin_addr
.s_addr
= htonl(INADDR_ANY
);
328 sin
.sin_port
= lisp_port
->dst_port
;
330 err
= kernel_bind(lisp_port
->lisp_rcv_socket
, (struct sockaddr
*)&sin
,
331 sizeof(struct sockaddr_in
));
335 udp_sk(lisp_port
->lisp_rcv_socket
->sk
)->encap_type
= UDP_ENCAP_LISP
;
336 udp_sk(lisp_port
->lisp_rcv_socket
->sk
)->encap_rcv
= lisp_rcv
;
343 sk_release_kernel(lisp_port
->lisp_rcv_socket
->sk
);
345 pr_warn("cannot register lisp protocol handler: %d\n", err
);
349 static int lisp_get_options(const struct vport
*vport
, struct sk_buff
*skb
)
351 struct lisp_port
*lisp_port
= lisp_vport(vport
);
353 if (nla_put_u16(skb
, OVS_TUNNEL_ATTR_DST_PORT
, ntohs(lisp_port
->dst_port
)))
358 static void lisp_tnl_destroy(struct vport
*vport
)
360 struct lisp_port
*lisp_port
= lisp_vport(vport
);
362 list_del_rcu(&lisp_port
->list
);
364 sk_release_kernel(lisp_port
->lisp_rcv_socket
->sk
);
366 ovs_vport_deferred_free(vport
);
369 static struct vport
*lisp_tnl_create(const struct vport_parms
*parms
)
371 struct net
*net
= ovs_dp_get_net(parms
->dp
);
372 struct nlattr
*options
= parms
->options
;
373 struct lisp_port
*lisp_port
;
384 a
= nla_find_nested(options
, OVS_TUNNEL_ATTR_DST_PORT
);
385 if (a
&& nla_len(a
) == sizeof(u16
)) {
386 dst_port
= nla_get_u16(a
);
388 /* Require destination port from userspace. */
393 /* Verify if we already have a socket created for this port */
394 if (lisp_find_port(net
, htons(dst_port
))) {
399 vport
= ovs_vport_alloc(sizeof(struct lisp_port
),
400 &ovs_lisp_vport_ops
, parms
);
404 lisp_port
= lisp_vport(vport
);
405 lisp_port
->dst_port
= htons(dst_port
);
406 strncpy(lisp_port
->name
, parms
->name
, IFNAMSIZ
);
408 err
= lisp_socket_init(lisp_port
, net
);
412 list_add_tail_rcu(&lisp_port
->list
, &lisp_ports
);
416 ovs_vport_free(vport
);
421 static bool need_linearize(const struct sk_buff
*skb
)
425 if (unlikely(skb_shinfo(skb
)->frag_list
))
429 * Generally speaking we should linearize if there are paged frags.
430 * However, if all of the refcounts are 1 we know nobody else can
431 * change them from underneath us and we can skip the linearization.
433 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++)
434 if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb
)->frags
[i
])) > 1))
440 static struct sk_buff
*handle_offloads(struct sk_buff
*skb
)
444 forward_ip_summed(skb
, true);
447 if (skb_is_gso(skb
)) {
448 struct sk_buff
*nskb
;
449 char cb
[sizeof(skb
->cb
)];
451 memcpy(cb
, skb
->cb
, sizeof(cb
));
453 nskb
= __skb_gso_segment(skb
, 0, false);
462 memcpy(nskb
->cb
, cb
, sizeof(cb
));
465 } else if (get_ip_summed(skb
) == OVS_CSUM_PARTIAL
) {
466 /* Pages aren't locked and could change at any time.
467 * If this happens after we compute the checksum, the
468 * checksum will be wrong. We linearize now to avoid
471 if (unlikely(need_linearize(skb
))) {
472 err
= __skb_linearize(skb
);
477 err
= skb_checksum_help(skb
);
482 set_ip_summed(skb
, OVS_CSUM_NONE
);
490 static int ovs_tnl_send(struct vport
*vport
, struct sk_buff
*skb
,
491 u8 ipproto
, int tunnel_hlen
,
492 void (*build_header
)(const struct vport
*,
501 struct sk_buff
*nskb
;
504 saddr
= OVS_CB(skb
)->tun_key
->ipv4_src
;
505 rt
= find_route(ovs_dp_get_net(vport
->dp
),
507 OVS_CB(skb
)->tun_key
->ipv4_dst
,
509 OVS_CB(skb
)->tun_key
->ipv4_tos
,
516 tunnel_hlen
+= sizeof(struct iphdr
);
518 min_headroom
= LL_RESERVED_SPACE(rt_dst(rt
).dev
) + rt_dst(rt
).header_len
520 + (vlan_tx_tag_present(skb
) ? VLAN_HLEN
: 0);
522 if (skb_headroom(skb
) < min_headroom
|| skb_header_cloned(skb
)) {
523 int head_delta
= SKB_DATA_ALIGN(min_headroom
-
527 err
= pskb_expand_head(skb
, max_t(int, head_delta
, 0),
534 nskb
= handle_offloads(skb
);
545 skb_clear_rxhash(skb
);
548 struct sk_buff
*next_skb
= skb
->next
;
554 if (unlikely(vlan_deaccel_tag(skb
)))
558 skb_push(skb
, tunnel_hlen
);
559 skb_reset_network_header(skb
);
560 skb_set_transport_header(skb
, sizeof(struct iphdr
));
563 skb_dst_set(skb
, dst_clone(&rt_dst(rt
)));
565 skb_dst_set(skb
, &rt_dst(rt
));
567 /* Push Tunnel header. */
568 build_header(vport
, skb
, tunnel_hlen
);
570 /* Push IP header. */
573 iph
->ihl
= sizeof(struct iphdr
) >> 2;
574 iph
->protocol
= ipproto
;
575 iph
->daddr
= OVS_CB(skb
)->tun_key
->ipv4_dst
;
577 iph
->tos
= OVS_CB(skb
)->tun_key
->ipv4_tos
;
578 iph
->ttl
= OVS_CB(skb
)->tun_key
->ipv4_ttl
;
579 iph
->frag_off
= OVS_CB(skb
)->tun_key
->tun_flags
&
580 TUNNEL_DONT_FRAGMENT
? htons(IP_DF
) : 0;
582 * Allow our local IP stack to fragment the outer packet even
583 * if the DF bit is set as a last resort. We also need to
584 * force selection of an IP ID here with __ip_select_ident(),
585 * as ip_select_ident() assumes a proper ID is not needed when
586 * when the DF bit is set.
589 __ip_select_ident(iph
, skb_dst(skb
), 0);
591 memset(IPCB(skb
), 0, sizeof(*IPCB(skb
)));
593 err
= ip_local_out(skb
);
594 if (unlikely(net_xmit_eval(err
)))
597 sent_len
+= frag_len
;
611 static int lisp_tnl_send(struct vport
*vport
, struct sk_buff
*skb
)
614 int network_offset
= skb_network_offset(skb
);
616 if (unlikely(!OVS_CB(skb
)->tun_key
))
619 /* We only encapsulate IPv4 and IPv6 packets */
620 switch (skb
->protocol
) {
621 case htons(ETH_P_IP
):
622 case htons(ETH_P_IPV6
):
623 /* Pop off "inner" Ethernet header */
624 skb_pull(skb
, network_offset
);
625 tnl_len
= ovs_tnl_send(vport
, skb
, IPPROTO_UDP
,
626 LISP_HLEN
, lisp_build_header
);
627 return tnl_len
> 0 ? tnl_len
+ network_offset
: tnl_len
;
634 static const char *lisp_get_name(const struct vport
*vport
)
636 struct lisp_port
*lisp_port
= lisp_vport(vport
);
637 return lisp_port
->name
;
640 const struct vport_ops ovs_lisp_vport_ops
= {
641 .type
= OVS_VPORT_TYPE_LISP
,
642 .create
= lisp_tnl_create
,
643 .destroy
= lisp_tnl_destroy
,
644 .get_name
= lisp_get_name
,
645 .get_options
= lisp_get_options
,
646 .send
= lisp_tnl_send
,