#ifndef __NET_IP_TUNNELS_WRAPPER_H
#define __NET_IP_TUNNELS_WRAPPER_H 1

#include <linux/version.h>

#ifdef USE_UPSTREAM_TUNNEL
/* Block all ip_tunnel functions.
 * Only functions that do not depend on the ip_tunnel structure can
 * be used. Those need to be explicitly defined in this header file. */
#include_next <net/ip_tunnels.h>

#ifndef TUNNEL_ERSPAN_OPT
#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
#endif
#define ovs_ip_tunnel_encap ip_tunnel_encap
#else

#include <linux/if_tunnel.h>
#include <linux/types.h>
#include <net/dsfield.h>
#include <net/dst_cache.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/rtnetlink.h>
#include <net/gro_cells.h>

#ifndef MAX_IPTUN_ENCAP_OPS
#define MAX_IPTUN_ENCAP_OPS 8
#endif

#ifndef HAVE_TUNNEL_ENCAP_TYPES
enum tunnel_encap_types {
        TUNNEL_ENCAP_NONE,
        TUNNEL_ENCAP_FOU,
        TUNNEL_ENCAP_GUE,
};

#define HAVE_TUNNEL_ENCAP_TYPES 1
#endif
#define __iptunnel_pull_header rpl___iptunnel_pull_header
int rpl___iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
                               __be16 inner_proto, bool raw_proto, bool xnet);

#define iptunnel_pull_header rpl_iptunnel_pull_header
static inline int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
                                           __be16 inner_proto, bool xnet)
{
        return rpl___iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}
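
/*
 * Illustrative sketch (not part of this header): a receive path pulls the
 * tunnel header before handing the inner packet up; a nonzero return means
 * the skb could not be prepared and should be dropped. The 4-byte header
 * length and the ETH_P_TEB inner protocol below are hypothetical:
 *
 *        if (iptunnel_pull_header(skb, 4, htons(ETH_P_TEB), false))
 *                goto drop;
 */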

int ovs_iptunnel_handle_offloads(struct sk_buff *skb,
                                 int gso_type_mask,
                                 void (*fix_segment)(struct sk_buff *));

/* This is required to compile upstream gre.h. gre_handle_offloads()
 * is defined in gre.h and needs iptunnel_handle_offloads(). This provides
 * a default signature for this function.
 * The rpl prefix is to make the OVS build happy.
 */
#define iptunnel_handle_offloads rpl_iptunnel_handle_offloads
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
struct sk_buff *rpl_iptunnel_handle_offloads(struct sk_buff *skb,
                                             bool csum_help,
                                             int gso_type_mask);
#else
int rpl_iptunnel_handle_offloads(struct sk_buff *skb,
                                 bool csum_help,
                                 int gso_type_mask);
#endif
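
/*
 * Illustrative sketch (not part of this header): because the return type of
 * iptunnel_handle_offloads() differs across kernel versions, callers in the
 * compat code have to branch the same way. The skb/err/csum names below are
 * hypothetical:
 *
 *        #if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
 *        skb = iptunnel_handle_offloads(skb, csum, SKB_GSO_UDP_TUNNEL);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        #else
 *        err = iptunnel_handle_offloads(skb, csum, SKB_GSO_UDP_TUNNEL);
 *        if (err)
 *                return err;
 *        #endif
 */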

#define iptunnel_xmit rpl_iptunnel_xmit
void rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                       __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
                       __be16 df, bool xnet);
#define ip_tunnel_xmit rpl_ip_tunnel_xmit
void rpl_ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        const struct iphdr *tnl_params, const u8 protocol);

#ifndef TUNNEL_CSUM
#define TUNNEL_CSUM             __cpu_to_be16(0x01)
#define TUNNEL_ROUTING          __cpu_to_be16(0x02)
#define TUNNEL_KEY              __cpu_to_be16(0x04)
#define TUNNEL_SEQ              __cpu_to_be16(0x08)
#define TUNNEL_STRICT           __cpu_to_be16(0x10)
#define TUNNEL_REC              __cpu_to_be16(0x20)
#define TUNNEL_VERSION          __cpu_to_be16(0x40)
#define TUNNEL_NO_KEY           __cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
#define TUNNEL_OAM              __cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT         __cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT       __cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT        __cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE          __cpu_to_be16(0x2000)
#define TUNNEL_ERSPAN_OPT       __cpu_to_be16(0x4000)

#undef TUNNEL_OPTIONS_PRESENT
#define TUNNEL_OPTIONS_PRESENT \
        (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)

struct tnl_ptk_info {
        __be16 flags;
        __be16 proto;
        __be32 key;
        __be32 seq;
        int hdr_len;
};

#define PACKET_RCVD     0
#define PACKET_REJECT   1
#define PACKET_NEXT     2
#endif
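
/*
 * Illustrative sketch (not part of this header): the TUNNEL_* values are
 * big-endian bit flags carried in tnl_ptk_info.flags, so protocol handlers
 * test them with bitwise AND and return one of the PACKET_* codes. The
 * handler below is hypothetical and simply rejects keyed packets:
 *
 *        static int dummy_rcv(struct sk_buff *skb,
 *                             const struct tnl_ptk_info *tpi)
 *        {
 *                if (tpi->flags & TUNNEL_KEY)
 *                        return PACKET_REJECT;
 *                return PACKET_RCVD;
 *        }
 */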

#define IP_TNL_HASH_BITS 7
#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
        (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
         FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
        __be64 tun_id;
        union {
                struct {
                        __be32 src;
                        __be32 dst;
                } ipv4;
                struct {
                        struct in6_addr src;
                        struct in6_addr dst;
                } ipv6;
        } u;
        __be16 tun_flags;
        u8 tos;                 /* TOS for IPv4, TC for IPv6 */
        u8 ttl;                 /* TTL for IPv4, HL for IPv6 */
        __be32 label;           /* Flow Label for IPv6 */
        __be16 tp_src;
        __be16 tp_dst;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX   0x01        /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6 0x02        /* key contains IPv6 addresses */

struct ip_tunnel_info {
        struct ip_tunnel_key key;
        struct dst_cache dst_cache;
        u8 options_len;
        u8 mode;
};

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
        struct in6_addr prefix;
        __be32 relay_prefix;
        u16 prefixlen;
        u16 relay_prefixlen;
};
#endif

struct ip_tunnel_encap {
        u16 type;
        u16 flags;
        __be16 sport;
        __be16 dport;
};

struct ip_tunnel_prl_entry {
        struct ip_tunnel_prl_entry __rcu *next;
        __be32 addr;
        u16 flags;
        struct rcu_head rcu_head;
};

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info)
{
        return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
        return info + 1;
}

static inline void ip_tunnel_info_opts_get(void *to,
                                           const struct ip_tunnel_info *info)
{
        memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
                                           const void *from, int len)
{
        memcpy(ip_tunnel_info_opts(info), from, len);
        info->options_len = len;
}
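
/*
 * Illustrative sketch (not part of this header): tunnel options (e.g.
 * Geneve TLVs) are stored immediately after the ip_tunnel_info structure,
 * so the backing allocation must reserve room for them. The 8-byte option
 * buffer below is hypothetical:
 *
 *        u8 opts[8] = { 0 };
 *        struct ip_tunnel_info *info;
 *
 *        info = kzalloc(sizeof(*info) + sizeof(opts), GFP_KERNEL);
 *        if (info)
 *                ip_tunnel_info_opts_set(info, opts, sizeof(opts));
 */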

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
                                      __be32 saddr, __be32 daddr,
                                      u8 tos, u8 ttl, __be32 label,
                                      __be16 tp_src, __be16 tp_dst,
                                      __be64 tun_id, __be16 tun_flags)
{
        key->tun_id = tun_id;
        key->u.ipv4.src = saddr;
        key->u.ipv4.dst = daddr;
        memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
               0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
        key->tos = tos;
        key->ttl = ttl;
        key->label = label;
        key->tun_flags = tun_flags;

        /* For tunnel types on top of IPsec, the tp_src and tp_dst of
         * the upper tunnel are used.
         * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
         */
        key->tp_src = tp_src;
        key->tp_dst = tp_dst;

        /* Clear struct padding. */
        if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
                memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
                       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
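
/*
 * Illustrative sketch (not part of this header): a transmit path would fill
 * a stack-allocated key like this. The addresses, the VXLAN port 4789, and
 * the tunnel id 42 are hypothetical:
 *
 *        struct ip_tunnel_key key;
 *        __be32 saddr = htonl(0xc0a80001);
 *        __be32 daddr = htonl(0xc0a80002);
 *
 *        ip_tunnel_key_init(&key, saddr, daddr, 0, 64, 0,
 *                           0, htons(4789), cpu_to_be64(42), TUNNEL_KEY);
 */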

#define ip_tunnel_collect_metadata() true

#undef TUNNEL_NOCACHE
#define TUNNEL_NOCACHE 0

static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
                           const struct ip_tunnel_info *info)
{
        if (skb->mark)
                return false;
        if (!info)
                return true;
        if (info->key.tun_flags & TUNNEL_NOCACHE)
                return false;

        return true;
}
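
/*
 * Illustrative sketch (not part of this header): transmit paths consult
 * this helper before touching the per-tunnel destination cache, e.g. with
 * the dst_cache API from <net/dst_cache.h>. info and saddr stand for a
 * caller-provided ip_tunnel_info and a __be32 source address:
 *
 *        struct rtable *rt = NULL;
 *
 *        if (ip_tunnel_dst_cache_usable(skb, info))
 *                rt = dst_cache_get_ip4(&info->dst_cache, &saddr);
 */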

#define ip_tunnel_dst rpl_ip_tunnel_dst
struct rpl_ip_tunnel_dst {
        struct dst_entry __rcu *dst;
        __be32 saddr;
};

#define ip_tunnel rpl_ip_tunnel
struct rpl_ip_tunnel {
        struct ip_tunnel __rcu *next;
        struct hlist_node hash_node;
        struct net_device *dev;
        struct net *net;        /* netns for packet i/o */

        unsigned long err_time; /* Time when the last ICMP error
                                 * arrived */
        int err_count;          /* Number of arrived ICMP errors */

        /* These three fields are used only by GRE */
        u32 i_seqno;            /* The last seen seqno */
        u32 o_seqno;            /* The last output seqno */
        int tun_hlen;           /* Precalculated header length */

        /* These four fields are used only by ERSPAN */
        u32 index;              /* ERSPAN type II index */
        u8 erspan_ver;          /* ERSPAN version */
        u8 dir;                 /* ERSPAN direction */
        u16 hwid;               /* ERSPAN hardware ID */

        struct dst_cache dst_cache;

        struct ip_tunnel_parm parms;

        int mlink;
        int encap_hlen;         /* Encap header length (FOU,GUE) */
        int hlen;               /* tun_hlen + encap_hlen */
        struct ip_tunnel_encap encap;

        /* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
        struct ip_tunnel_6rd_parm ip6rd;
#endif
        struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
        unsigned int prl_count;                /* # of entries in PRL */
        unsigned int ip_tnl_net_id;
        struct gro_cells gro_cells;
        __u32 fwmark;
        bool collect_md;
        bool ignore_df;
};

#define ip_tunnel_net rpl_ip_tunnel_net
struct rpl_ip_tunnel_net {
        struct net_device *fb_tunnel_dev;
        struct hlist_head tunnels[IP_TNL_HASH_SIZE];
        struct ip_tunnel __rcu *collect_md_tun;
};


struct ip_tunnel_encap_ops {
        size_t (*encap_hlen)(struct ip_tunnel_encap *e);
        int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
                            const u8 *protocol, struct flowi4 *fl4);
};

extern const struct ip_tunnel_encap_ops __rcu *
                rpl_iptun_encaps[MAX_IPTUN_ENCAP_OPS];
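
/*
 * Illustrative sketch (not part of this header): an encapsulation backend
 * (FOU/GUE style) would fill an ops table and publish it in the slot for
 * its type. dummy_encap_hlen(), dummy_ops, and the use of
 * rcu_assign_pointer() for registration are all hypothetical here:
 *
 *        static size_t dummy_encap_hlen(struct ip_tunnel_encap *e)
 *        {
 *                return sizeof(struct udphdr);
 *        }
 *
 *        static const struct ip_tunnel_encap_ops dummy_ops = {
 *                .encap_hlen = dummy_encap_hlen,
 *        };
 *
 *        rcu_assign_pointer(rpl_iptun_encaps[TUNNEL_ENCAP_FOU], &dummy_ops);
 */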

#define ip_encap_hlen rpl_ip_encap_hlen
static inline int rpl_ip_encap_hlen(struct ip_tunnel_encap *e)
{
        const struct ip_tunnel_encap_ops *ops;
        int hlen = -EINVAL;

        if (e->type == TUNNEL_ENCAP_NONE)
                return 0;

        if (e->type >= MAX_IPTUN_ENCAP_OPS)
                return -EINVAL;

        rcu_read_lock();
        ops = rcu_dereference(rpl_iptun_encaps[e->type]);
        if (likely(ops && ops->encap_hlen))
                hlen = ops->encap_hlen(e);
        rcu_read_unlock();

        return hlen;
}

static inline int ovs_ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
                                      const u8 *protocol, struct flowi4 *fl4)
{
        const struct ip_tunnel_encap_ops *ops;
        int ret = -EINVAL;

        if (t->encap.type == TUNNEL_ENCAP_NONE)
                return 0;

        if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
                return -EINVAL;

        rcu_read_lock();
        ops = rcu_dereference(rpl_iptun_encaps[t->encap.type]);
        if (likely(ops && ops->build_header))
                ret = ops->build_header(skb, &t->encap, protocol, fl4);
        rcu_read_unlock();

        return ret;
}

#define ip_tunnel_get_stats64 rpl_ip_tunnel_get_stats64
#if !defined(HAVE_VOID_NDO_GET_STATS64) && !defined(HAVE_RHEL7_MAX_MTU)
struct rtnl_link_stats64 *rpl_ip_tunnel_get_stats64(struct net_device *dev,
                                                    struct rtnl_link_stats64 *tot);
#else
void rpl_ip_tunnel_get_stats64(struct net_device *dev,
                               struct rtnl_link_stats64 *tot);
#endif
#define ip_tunnel_get_dsfield rpl_ip_tunnel_get_dsfield
static inline u8 rpl_ip_tunnel_get_dsfield(const struct iphdr *iph,
                                           const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return iph->tos;
        else if (skb->protocol == htons(ETH_P_IPV6))
                return ipv6_get_dsfield((const struct ipv6hdr *)iph);
        else
                return 0;
}

#define ip_tunnel_ecn_encap rpl_ip_tunnel_ecn_encap
static inline u8 rpl_ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
                                         const struct sk_buff *skb)
{
        u8 inner = ip_tunnel_get_dsfield(iph, skb);

        return INET_ECN_encapsulate(tos, inner);
}
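
/*
 * Illustrative sketch (not part of this header): on transmit, the outer TOS
 * is derived from the configured TOS and the inner header's ECN bits before
 * the outer IP header is built. key and inner_iph stand for the caller's
 * tunnel key and inner IP header; both names are hypothetical:
 *
 *        u8 tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);
 */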

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
        if (pkt_len > 0) {
                struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

                u64_stats_update_begin(&tstats->syncp);
                tstats->tx_bytes += pkt_len;
                tstats->tx_packets++;
                u64_stats_update_end(&tstats->syncp);
                put_cpu_ptr(tstats);
        } else {
                struct net_device_stats *err_stats = &dev->stats;

                if (pkt_len < 0) {
                        err_stats->tx_errors++;
                        err_stats->tx_aborted_errors++;
                } else {
                        err_stats->tx_dropped++;
                }
        }
}
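
/*
 * Illustrative sketch (not part of this header): the argument is a signed
 * byte count, so a transmit routine that returns the packet length on
 * success and a negative errno on failure can feed its result straight in.
 * dummy_xmit() is hypothetical:
 *
 *        int pkt_len = dummy_xmit(skb);
 *
 *        iptunnel_xmit_stats(dev, pkt_len);
 */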

static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
        return (__force __be64)key;
#else
        return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)tun_id;
#else
        return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
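
/*
 * Illustrative example (not part of this header): both helpers preserve
 * on-wire byte order, so converting a 32-bit key to a 64-bit tunnel id and
 * back is lossless on either endianness:
 *
 *        __be32 key = htonl(42);
 *        __be64 id  = key32_to_tunnel_id(key);   (equals cpu_to_be64(42))
 *        __be32 out = tunnel_id_to_key32(id);    (equals htonl(42))
 */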

#define ip_tunnel_init rpl_ip_tunnel_init
int rpl_ip_tunnel_init(struct net_device *dev);

#define ip_tunnel_uninit rpl_ip_tunnel_uninit
void rpl_ip_tunnel_uninit(struct net_device *dev);

#define ip_tunnel_change_mtu rpl_ip_tunnel_change_mtu
int rpl_ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

#define ip_tunnel_newlink rpl_ip_tunnel_newlink
int rpl_ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                          struct ip_tunnel_parm *p);

#define ip_tunnel_dellink rpl_ip_tunnel_dellink
void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head);

#define ip_tunnel_init_net rpl_ip_tunnel_init_net
int rpl_ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                           struct rtnl_link_ops *ops, char *devname);

#define ip_tunnel_delete_net rpl_ip_tunnel_delete_net
void rpl_ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);

#define ip_tunnel_setup rpl_ip_tunnel_setup
void rpl_ip_tunnel_setup(struct net_device *dev, int net_id);

#define ip_tunnel_get_iflink rpl_ip_tunnel_get_iflink
int rpl_ip_tunnel_get_iflink(const struct net_device *dev);

#define ip_tunnel_get_link_net rpl_ip_tunnel_get_link_net
struct net *rpl_ip_tunnel_get_link_net(const struct net_device *dev);

#define __ip_tunnel_change_mtu rpl___ip_tunnel_change_mtu
int rpl___ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);

#define ip_tunnel_lookup rpl_ip_tunnel_lookup
struct ip_tunnel *rpl_ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                       int link, __be16 flags,
                                       __be32 remote, __be32 local,
                                       __be32 key);

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                int err;

                err = skb_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
                        return err;
                skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
                                               NETIF_F_GSO_SHIFT);
        }

        skb->encapsulation = 0;
        return 0;
}
#endif /* USE_UPSTREAM_TUNNEL */

#define skb_is_encapsulated ovs_skb_is_encapsulated
bool ovs_skb_is_encapsulated(struct sk_buff *skb);

#endif /* __NET_IP_TUNNELS_WRAPPER_H */