datapath/vport-lisp.c
/*
 * Copyright (c) 2011 Nicira, Inc.
 * Copyright (c) 2013 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/version.h>

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/rculist.h>
#include <linux/udp.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/xfrm.h>

#include "datapath.h"
#include "vport.h"

/*
 * LISP encapsulation header:
 *
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |N|L|E|V|I|flags|            Nonce/Map-Version                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                Instance ID/Locator Status Bits                |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 */

/**
 * struct lisphdr - LISP header
 * @nonce_present: Flag indicating the presence of a 24 bit nonce value.
 * @locator_status_bits_present: Flag indicating the presence of Locator Status
 * Bits (LSB).
 * @solicit_echo_nonce: Flag indicating the use of the echo noncing mechanism.
 * @map_version_present: Flag indicating the use of mapping versioning.
 * @instance_id_present: Flag indicating the presence of a 24 bit Instance ID.
 * @reserved_flags: 3 bits reserved for future flags.
 * @nonce: 24 bit nonce value.
 * @map_version: 24 bit mapping version.
 * @locator_status_bits: Locator Status Bits: 32 bits when instance_id_present
 * is not set, 8 bits when it is.
 * @instance_id: 24 bit Instance ID
 */
struct lisphdr {
#ifdef __LITTLE_ENDIAN_BITFIELD
	__u8 reserved_flags:3;
	__u8 instance_id_present:1;
	__u8 map_version_present:1;
	__u8 solicit_echo_nonce:1;
	__u8 locator_status_bits_present:1;
	__u8 nonce_present:1;
#else
	__u8 nonce_present:1;
	__u8 locator_status_bits_present:1;
	__u8 solicit_echo_nonce:1;
	__u8 map_version_present:1;
	__u8 instance_id_present:1;
	__u8 reserved_flags:3;
#endif
	union {
		__u8 nonce[3];
		__u8 map_version[3];
	} u1;
	union {
		__be32 locator_status_bits;
		struct {
			__u8 instance_id[3];
			__u8 locator_status_bits;
		} word2;
	} u2;
};

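/* Encapsulation overhead added in front of the inner packet beyond the
 * outer IPv4 header: the outer UDP header plus the LISP shim header.
 * The outer IP header itself is accounted for separately in
 * ovs_tnl_send(). */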
#define LISP_HLEN (sizeof(struct udphdr) + sizeof(struct lisphdr))

/**
 * struct lisp_port - Keeps track of open UDP ports
 * @dst_port: lisp UDP port no.
 * @list: list element in @lisp_ports.
 * @lisp_rcv_socket: The socket created for this port number.
 * @name: vport name.
 */
struct lisp_port {
	__be16 dst_port;
	struct list_head list;
	struct socket *lisp_rcv_socket;
	char name[IFNAMSIZ];
};

static LIST_HEAD(lisp_ports);

static inline struct lisp_port *lisp_vport(const struct vport *vport)
{
	return vport_priv(vport);
}

static struct lisp_port *lisp_find_port(struct net *net, __be16 port)
{
	struct lisp_port *lisp_port;

	list_for_each_entry_rcu(lisp_port, &lisp_ports, list) {
		if (lisp_port->dst_port == port &&
		    net_eq(sock_net(lisp_port->lisp_rcv_socket->sk), net))
			return lisp_port;
	}

	return NULL;
}

static inline struct lisphdr *lisp_hdr(const struct sk_buff *skb)
{
	return (struct lisphdr *)(udp_hdr(skb) + 1);
}

/* Convert 64 bit tunnel ID to 24 bit Instance ID. */
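/* The tunnel ID arrives in network byte order (__be64) and the Instance
 * ID is its low-order 24 bits.  On little-endian hosts those three bytes
 * land in the top bits of the host-order value, hence the 40/48/56-bit
 * shifts below (and the mirror-image shifts in instance_id_to_tunnel_id()).
 */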
static void tunnel_id_to_instance_id(__be64 tun_id, __u8 *iid)
{
#ifdef __BIG_ENDIAN
	iid[0] = (__force __u8)(tun_id >> 16);
	iid[1] = (__force __u8)(tun_id >> 8);
	iid[2] = (__force __u8)tun_id;
#else
	iid[0] = (__force __u8)((__force u64)tun_id >> 40);
	iid[1] = (__force __u8)((__force u64)tun_id >> 48);
	iid[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}

/* Convert 24 bit Instance ID to 64 bit tunnel ID. */
static __be64 instance_id_to_tunnel_id(__u8 *iid)
{
#ifdef __BIG_ENDIAN
	return (iid[0] << 16) | (iid[1] << 8) | iid[2];
#else
	return (__force __be64)(((__force u64)iid[0] << 40) |
				((__force u64)iid[1] << 48) |
				((__force u64)iid[2] << 56));
#endif
}

/* Compute source UDP port for outgoing packet.
 * Currently we use the flow hash.
 */
static u16 ovs_tnl_get_src_port(struct sk_buff *skb)
{
	int low;
	int high;
	unsigned int range;
	struct sw_flow_key *pkt_key = OVS_CB(skb)->pkt_key;
	u32 hash = jhash2((const u32 *)pkt_key,
			  sizeof(*pkt_key) / sizeof(u32), 0);

	inet_get_local_port_range(&low, &high);
	range = (high - low) + 1;
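
	/* Scale the 32-bit hash onto [low, high] with a multiply and
	 * shift rather than a modulo. */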
	return (((u64) hash * range) >> 32) + low;
}

static void lisp_build_header(const struct vport *vport,
			      struct sk_buff *skb,
			      int tunnel_hlen)
{
	struct lisp_port *lisp_port = lisp_vport(vport);
	struct udphdr *udph = udp_hdr(skb);
	struct lisphdr *lisph = (struct lisphdr *)(udph + 1);
	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;

	udph->dest = lisp_port->dst_port;
	udph->source = htons(ovs_tnl_get_src_port(skb));
	udph->check = 0;
	udph->len = htons(skb->len - skb_transport_offset(skb));

	lisph->nonce_present = 0;	/* We don't support echo nonce algorithm */
	lisph->locator_status_bits_present = 1;	/* Set LSB */
	lisph->solicit_echo_nonce = 0;	/* No echo noncing */
	lisph->map_version_present = 0;	/* No mapping versioning, nonce instead */
	lisph->instance_id_present = 1;	/* Store the tun_id as Instance ID */
	lisph->reserved_flags = 0;	/* Reserved flags, set to 0 */

	lisph->u1.nonce[0] = 0;
	lisph->u1.nonce[1] = 0;
	lisph->u1.nonce[2] = 0;

	tunnel_id_to_instance_id(tun_key->tun_id, &lisph->u2.word2.instance_id[0]);
	lisph->u2.word2.locator_status_bits = 1;
}

/**
 * ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tun_key: tunnel that carried packet
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - The layer pointers are undefined.
 */
static void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb,
			struct ovs_key_ipv4_tunnel *tun_key)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	ovs_vport_receive(vport, skb, tun_key);
}

/* Called with rcu_read_lock and BH disabled. */
static int lisp_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct lisp_port *lisp_port;
	struct lisphdr *lisph;
	struct iphdr *iph, *inner_iph;
	struct ovs_key_ipv4_tunnel tun_key;
	__be64 key;
	struct ethhdr *ethh;
	__be16 protocol;

	lisp_port = lisp_find_port(dev_net(skb->dev), udp_hdr(skb)->dest);
	if (unlikely(!lisp_port))
		goto error;

	if (unlikely(!pskb_may_pull(skb, LISP_HLEN)))
		goto error;

	lisph = lisp_hdr(skb);

	skb_pull_rcsum(skb, LISP_HLEN);

	if (lisph->instance_id_present != 1)
		key = 0;
	else
		key = instance_id_to_tunnel_id(&lisph->u2.word2.instance_id[0]);

	/* Save outer tunnel values */
	iph = ip_hdr(skb);
	ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);

	/* Drop non-IP inner packets */
	inner_iph = (struct iphdr *)(lisph + 1);
	switch (inner_iph->version) {
	case 4:
		protocol = htons(ETH_P_IP);
		break;
	case 6:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		goto error;
	}

	/* Add Ethernet header */
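	/* LISP carries bare IP packets, but the datapath expects an L2
	 * frame, so synthesize one with locally administered (0x02)
	 * addresses. */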
	ethh = (struct ethhdr *)skb_push(skb, ETH_HLEN);
	memset(ethh, 0, ETH_HLEN);
	ethh->h_dest[0] = 0x02;
	ethh->h_source[0] = 0x02;
	ethh->h_proto = protocol;

	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);

	ovs_tnl_rcv(vport_from_priv(lisp_port), skb, &tun_key);
	goto out;

error:
	kfree_skb(skb);
out:
	return 0;
}

/* Arbitrary value. Irrelevant as long as it's not 0 since we set the handler. */
#define UDP_ENCAP_LISP	1
static int lisp_socket_init(struct lisp_port *lisp_port, struct net *net)
{
	struct sockaddr_in sin;
	int err;

	err = sock_create_kern(AF_INET, SOCK_DGRAM, 0,
			       &lisp_port->lisp_rcv_socket);
	if (err)
		goto error;

	/* release net ref. */
	sk_change_net(lisp_port->lisp_rcv_socket->sk, net);

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = lisp_port->dst_port;

	err = kernel_bind(lisp_port->lisp_rcv_socket, (struct sockaddr *)&sin,
			  sizeof(struct sockaddr_in));
	if (err)
		goto error_sock;

	udp_sk(lisp_port->lisp_rcv_socket->sk)->encap_type = UDP_ENCAP_LISP;
	udp_sk(lisp_port->lisp_rcv_socket->sk)->encap_rcv = lisp_rcv;

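	/* Flip the static key so the UDP receive path starts consulting
	 * encap_rcv on sockets with a nonzero encap_type. */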
	udp_encap_enable();

	return 0;

error_sock:
	sk_release_kernel(lisp_port->lisp_rcv_socket->sk);
error:
	pr_warn("cannot register lisp protocol handler: %d\n", err);
	return err;
}

static int lisp_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct lisp_port *lisp_port = lisp_vport(vport);

	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(lisp_port->dst_port)))
		return -EMSGSIZE;
	return 0;
}

static void lisp_tnl_destroy(struct vport *vport)
{
	struct lisp_port *lisp_port = lisp_vport(vport);

	list_del_rcu(&lisp_port->list);
	/* Release socket */
	sk_release_kernel(lisp_port->lisp_rcv_socket->sk);

	ovs_vport_deferred_free(vport);
}

static struct vport *lisp_tnl_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct nlattr *options = parms->options;
	struct lisp_port *lisp_port;
	struct vport *vport;
	struct nlattr *a;
	int err;
	u16 dst_port;

	if (!options) {
		err = -EINVAL;
		goto error;
	}

	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
	if (a && nla_len(a) == sizeof(u16)) {
		dst_port = nla_get_u16(a);
	} else {
		/* Require destination port from userspace. */
		err = -EINVAL;
		goto error;
	}

	/* Check whether a socket has already been created for this port. */
	if (lisp_find_port(net, htons(dst_port))) {
		err = -EEXIST;
		goto error;
	}

	vport = ovs_vport_alloc(sizeof(struct lisp_port),
				&ovs_lisp_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	lisp_port = lisp_vport(vport);
	lisp_port->dst_port = htons(dst_port);
	strncpy(lisp_port->name, parms->name, IFNAMSIZ);

	err = lisp_socket_init(lisp_port, net);
	if (err)
		goto error_free;

	list_add_tail_rcu(&lisp_port->list, &lisp_ports);
	return vport;

error_free:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}

static bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
			return true;

	return false;
}

static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
	int err;

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;
		char cb[sizeof(skb->cb)];

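		/* Segmentation does not preserve skb->cb, so stash the OVS
		 * control block here and copy it back onto every segment
		 * below. */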
		memcpy(cb, skb->cb, sizeof(cb));

		nskb = __skb_gso_segment(skb, 0, false);
		if (IS_ERR(nskb)) {
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
		while (nskb) {
			memcpy(nskb->cb, cb, sizeof(cb));
			nskb = nskb->next;
		}
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong. We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error:
	return ERR_PTR(err);
}

static int ovs_tnl_send(struct vport *vport, struct sk_buff *skb,
			u8 ipproto, int tunnel_hlen,
			void (*build_header)(const struct vport *,
					     struct sk_buff *,
					     int tunnel_hlen))
{
	int min_headroom;
	struct rtable *rt;
	__be32 saddr;
	int sent_len = 0;
	int err;
	struct sk_buff *nskb;

	/* Route lookup */
	saddr = OVS_CB(skb)->tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr,
			OVS_CB(skb)->tun_key->ipv4_dst,
			ipproto,
			OVS_CB(skb)->tun_key->ipv4_tos,
			skb->mark);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	tunnel_hlen += sizeof(struct iphdr);

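	/* Reserve room for the link-layer header, any header the route's
	 * device requires, the tunnel headers, and a VLAN tag that may
	 * need to be pushed back in. */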
	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Offloading */
	nskb = handle_offloads(skb);
	if (IS_ERR(nskb)) {
		err = PTR_ERR(nskb);
		goto err_free_rt;
	}
	skb = nskb;

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_clear_rxhash(skb);

	while (skb) {
		struct sk_buff *next_skb = skb->next;
		struct iphdr *iph;
		int frag_len;

		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		frag_len = skb->len;
		skb_push(skb, tunnel_hlen);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));

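		/* Each segment needs its own reference to the route's dst;
		 * the final segment takes over the original reference. */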
		if (next_skb)
			skb_dst_set(skb, dst_clone(&rt_dst(rt)));
		else
			skb_dst_set(skb, &rt_dst(rt));

		/* Push Tunnel header. */
		build_header(vport, skb, tunnel_hlen);

		/* Push IP header. */
		iph = ip_hdr(skb);
		iph->version	= 4;
		iph->ihl	= sizeof(struct iphdr) >> 2;
		iph->protocol	= ipproto;
		iph->daddr	= OVS_CB(skb)->tun_key->ipv4_dst;
		iph->saddr	= saddr;
		iph->tos	= OVS_CB(skb)->tun_key->ipv4_tos;
		iph->ttl	= OVS_CB(skb)->tun_key->ipv4_ttl;
		iph->frag_off	= OVS_CB(skb)->tun_key->tun_flags &
				  TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
		/*
		 * Allow our local IP stack to fragment the outer packet even
		 * if the DF bit is set as a last resort. We also need to
		 * force selection of an IP ID here with __ip_select_ident(),
		 * as ip_select_ident() assumes a proper ID is not needed
		 * when the DF bit is set.
		 */
		skb->local_df = 1;
		__ip_select_ident(iph, skb_dst(skb), 0);

		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		if (unlikely(net_xmit_eval(err)))
			goto next;

		sent_len += frag_len;

next:
		skb = next_skb;
	}

	return sent_len;

err_free_rt:
	ip_rt_put(rt);
error:
	return err;
}

static int lisp_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	int tnl_len;
	int network_offset = skb_network_offset(skb);

	if (unlikely(!OVS_CB(skb)->tun_key))
		return -EINVAL;

	/* We only encapsulate IPv4 and IPv6 packets */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		/* Pop off "inner" Ethernet header */
		skb_pull(skb, network_offset);
		tnl_len = ovs_tnl_send(vport, skb, IPPROTO_UDP,
				       LISP_HLEN, lisp_build_header);
		return tnl_len > 0 ? tnl_len + network_offset : tnl_len;
	default:
		kfree_skb(skb);
		return 0;
	}
}

static const char *lisp_get_name(const struct vport *vport)
{
	struct lisp_port *lisp_port = lisp_vport(vport);
	return lisp_port->name;
}

const struct vport_ops ovs_lisp_vport_ops = {
	.type		= OVS_VPORT_TYPE_LISP,
	.create		= lisp_tnl_create,
	.destroy	= lisp_tnl_destroy,
	.get_name	= lisp_get_name,
	.get_options	= lisp_get_options,
	.send		= lisp_tnl_send,
};