#ifndef __NET_IP_TUNNELS_WRAPPER_H
#define __NET_IP_TUNNELS_WRAPPER_H 1

#include <linux/version.h>

#ifdef HAVE_METADATA_DST
/* Block all ip_tunnel functions.
 * Only functions that do not depend on the ip_tunnel structure can
 * be used.  Those need to be explicitly defined in this header file. */
#include_next <net/ip_tunnels.h>
#endif

#include <linux/if_tunnel.h>
#include <linux/types.h>
#include <net/dsfield.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/rtnetlink.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
                                             bool csum_help, int gso_type_mask,
                                             void (*fix_segment)(struct sk_buff *));

#define iptunnel_xmit rpl_iptunnel_xmit
int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                      __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
                      __be16 df, bool xnet);

#define iptunnel_pull_header rpl_iptunnel_pull_header
int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);

#else

#define ovs_iptunnel_handle_offloads(skb, csum_help, gso_type_mask, fix_segment) \
        iptunnel_handle_offloads(skb, csum_help, gso_type_mask)

/* This macro keeps the OVS build happy about the declared function name. */
#define rpl_iptunnel_pull_header iptunnel_pull_header
int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);

#define rpl_iptunnel_xmit iptunnel_xmit
int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                      __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
                      __be16 df, bool xnet);

#endif /* 3.18 */

/* This is not required for OVS on kernels older than 3.18, but the gre.h
 * header file needs this declaration for gre_handle_offloads(), so it is
 * defined for all kernel versions.
 */
#define rpl_iptunnel_handle_offloads iptunnel_handle_offloads
struct sk_buff *rpl_iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
                                             int gso_type_mask);
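
/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * GRE-style sender would typically prepare the skb for offloads before
 * pushing the outer header.  The tnl_params pointer below is hypothetical.
 *
 *      skb = rpl_iptunnel_handle_offloads(skb,
 *                                         tnl_params->o_flags & TUNNEL_CSUM,
 *                                         SKB_GSO_GRE);
 *      if (IS_ERR(skb))
 *              ... drop the packet and account a tx error ...
 */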

#ifndef TUNNEL_CSUM
#define TUNNEL_CSUM      __cpu_to_be16(0x01)
#define TUNNEL_ROUTING   __cpu_to_be16(0x02)
#define TUNNEL_KEY       __cpu_to_be16(0x04)
#define TUNNEL_SEQ       __cpu_to_be16(0x08)
#define TUNNEL_STRICT    __cpu_to_be16(0x10)
#define TUNNEL_REC       __cpu_to_be16(0x20)
#define TUNNEL_VERSION   __cpu_to_be16(0x40)
#define TUNNEL_NO_KEY    __cpu_to_be16(0x80)

struct tnl_ptk_info {
        __be16 flags;
        __be16 proto;
        __be32 key;
        __be32 seq;
};

#define PACKET_RCVD    0
#define PACKET_REJECT  1
#endif
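
/*
 * Usage sketch (illustrative, not defined here): a tunnel receive path
 * typically fills struct tnl_ptk_info from the parsed outer header and
 * returns PACKET_RCVD or PACKET_REJECT.  The parse_gre_header() helper
 * below is hypothetical.
 *
 *      struct tnl_ptk_info tpi;
 *
 *      if (parse_gre_header(skb, &tpi) < 0)
 *              return PACKET_REJECT;
 *      if (tpi.flags & TUNNEL_KEY)
 *              ... match the flow on tpi.key ...
 *      return PACKET_RCVD;
 */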

#ifndef TUNNEL_DONT_FRAGMENT
#define TUNNEL_DONT_FRAGMENT  __cpu_to_be16(0x0100)
#endif

#ifndef TUNNEL_OAM
#define TUNNEL_OAM       __cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT  __cpu_to_be16(0x0400)
#endif

#ifndef TUNNEL_GENEVE_OPT
#define TUNNEL_GENEVE_OPT  __cpu_to_be16(0x0800)
#endif

#ifndef TUNNEL_VXLAN_OPT
#define TUNNEL_VXLAN_OPT  __cpu_to_be16(0x1000)
#endif

/* Older kernels defined TUNNEL_OPTIONS_PRESENT to cover Geneve options only. */
#undef TUNNEL_OPTIONS_PRESENT
#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
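
/*
 * Usage sketch (illustrative): with the widened definition, a single test
 * covers both Geneve and VXLAN metadata options.  The tun_key pointer below
 * is hypothetical.
 *
 *      if (tun_key->tun_flags & TUNNEL_OPTIONS_PRESENT)
 *              ... copy the tunnel options into the flow key ...
 */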

#define skb_is_encapsulated ovs_skb_is_encapsulated
bool ovs_skb_is_encapsulated(struct sk_buff *skb);

#ifndef HAVE_METADATA_DST
/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN                      \
        (FIELD_SIZEOF(struct ip_tunnel_key, u) -        \
         FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
        __be64 tun_id;
        union {
                struct {
                        __be32 src;
                        __be32 dst;
                } ipv4;
                struct {
                        struct in6_addr src;
                        struct in6_addr dst;
                } ipv6;
        } u;
        __be16 tun_flags;
        u8 tos;         /* TOS for IPv4, TC for IPv6 */
        u8 ttl;         /* TTL for IPv4, HL for IPv6 */
        __be16 tp_src;
        __be16 tp_dst;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX    0x01  /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6  0x02  /* key contains IPv6 addresses */

struct ip_tunnel_info {
        struct ip_tunnel_key key;
        u8 options_len;
        u8 mode;
};

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info)
{
        return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
        return info + 1;
}

static inline void ip_tunnel_info_opts_get(void *to,
                                           const struct ip_tunnel_info *info)
{
        memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
                                           const void *from, int len)
{
        memcpy(ip_tunnel_info_opts(info), from, len);
        info->options_len = len;
}
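
/*
 * Usage sketch (illustrative): tunnel metadata options live directly after
 * struct ip_tunnel_info in memory, so callers copy them in and out with the
 * helpers above.  The info pointer and group_policy_id value below are
 * hypothetical.
 *
 *      struct vxlan_metadata md = { .gbp = group_policy_id };
 *
 *      ip_tunnel_info_opts_set(info, &md, sizeof(md));
 *      info->key.tun_flags |= TUNNEL_VXLAN_OPT;
 */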

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
                                      __be32 saddr, __be32 daddr,
                                      u8 tos, u8 ttl,
                                      __be16 tp_src, __be16 tp_dst,
                                      __be64 tun_id, __be16 tun_flags)
{
        key->tun_id = tun_id;
        key->u.ipv4.src = saddr;
        key->u.ipv4.dst = daddr;
        memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
               0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
        key->tos = tos;
        key->ttl = ttl;
        key->tun_flags = tun_flags;

        /* For tunnel types on top of IPsec, the tp_src and tp_dst of the
         * upper tunnel are used.
         * E.g., for GRE over IPsec, tp_src and tp_dst are zero.
         */
        key->tp_src = tp_src;
        key->tp_dst = tp_dst;

        /* Clear struct padding. */
        if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
                memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
                       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
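
/*
 * Usage sketch (illustrative): a transmit path with an IPv4 inner flow can
 * fill the metadata key in one call.  The iph, dst_port and tun_id values
 * below are hypothetical.
 *
 *      struct ip_tunnel_info info;
 *
 *      ip_tunnel_key_init(&info.key, iph->saddr, iph->daddr, iph->tos,
 *                         iph->ttl, 0, htons(dst_port), tun_id,
 *                         TUNNEL_KEY | TUNNEL_CSUM);
 */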

#define ip_tunnel_collect_metadata() true

#define ip_tunnel rpl_ip_tunnel

struct ip_tunnel {
        struct net_device *dev;
        struct net *net;        /* netns for packet i/o */

        int err_count;          /* Number of arrived ICMP errors */
        unsigned long err_time; /* Time when the last ICMP error
                                 * arrived
                                 */

        /* These four fields are used only by GRE. */
        u32 i_seqno;    /* The last seen seqno */
        u32 o_seqno;    /* The last output seqno */
        int tun_hlen;   /* Precalculated header length */
        int mlink;

        struct ip_tunnel_parm parms;

        int encap_hlen; /* Encap header length (FOU, GUE) */
        int hlen;       /* tun_hlen + encap_hlen */

        int ip_tnl_net_id;
        bool collect_md;
};

#define ip_tunnel_net rpl_ip_tunnel_net
struct ip_tunnel_net {
        struct ip_tunnel __rcu *collect_md_tun;
        struct rtnl_link_ops *rtnl_ops;
};

#ifndef HAVE_PCPU_SW_NETSTATS
#define ip_tunnel_get_stats64 rpl_ip_tunnel_get_stats64
#else
#define rpl_ip_tunnel_get_stats64 ip_tunnel_get_stats64
#endif
struct rtnl_link_stats64 *rpl_ip_tunnel_get_stats64(struct net_device *dev,
                                                    struct rtnl_link_stats64 *tot);

#define ip_tunnel_get_dsfield rpl_ip_tunnel_get_dsfield
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
                                       const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return iph->tos;
        else if (skb->protocol == htons(ETH_P_IPV6))
                return ipv6_get_dsfield((const struct ipv6hdr *)iph);
        else
                return 0;
}

#define ip_tunnel_ecn_encap rpl_ip_tunnel_ecn_encap
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
                                     const struct sk_buff *skb)
{
        u8 inner = ip_tunnel_get_dsfield(iph, skb);

        return INET_ECN_encapsulate(tos, inner);
}
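
/*
 * Usage sketch (illustrative): on transmit, the outer TOS is derived from
 * the configured value and the inner header so that ECN marks survive
 * encapsulation.  The key and inner_iph names below are hypothetical.
 *
 *      u8 tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);
 *
 * The result is then written into the outer IPv4 header that the tunnel
 * pushes in front of the original packet.
 */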

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
#define iptunnel_xmit_stats(err, stats, dummy)          \
do {                                                    \
        if (err > 0) {                                  \
                (stats)->tx_bytes += err;               \
                (stats)->tx_packets++;                  \
        } else if (err < 0) {                           \
                (stats)->tx_errors++;                   \
                (stats)->tx_aborted_errors++;           \
        } else {                                        \
                (stats)->tx_dropped++;                  \
        }                                               \
} while (0)

#else
#define iptunnel_xmit_stats rpl_iptunnel_xmit_stats
static inline void iptunnel_xmit_stats(int err,
                                       struct net_device_stats *err_stats,
                                       struct pcpu_sw_netstats __percpu *stats)
{
        if (err > 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);

                u64_stats_update_begin(&tstats->syncp);
                tstats->tx_bytes += err;
                tstats->tx_packets++;
                u64_stats_update_end(&tstats->syncp);
        } else if (err < 0) {
                err_stats->tx_errors++;
                err_stats->tx_aborted_errors++;
        } else {
                err_stats->tx_dropped++;
        }
}
#endif
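
/*
 * Usage sketch (illustrative): a tunnel xmit path feeds the return value of
 * iptunnel_xmit() straight into the stats helper.  The rt, fl, key, tos,
 * ttl and df variables below are hypothetical.
 *
 *      err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst,
 *                          IPPROTO_GRE, tos, ttl, df, false);
 *      iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 */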

#define ip_tunnel_init rpl_ip_tunnel_init
int rpl_ip_tunnel_init(struct net_device *dev);

#define ip_tunnel_uninit rpl_ip_tunnel_uninit
void rpl_ip_tunnel_uninit(struct net_device *dev);

#define ip_tunnel_change_mtu rpl_ip_tunnel_change_mtu
int rpl_ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

#define ip_tunnel_newlink rpl_ip_tunnel_newlink
int rpl_ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                          struct ip_tunnel_parm *p);

#define ip_tunnel_dellink rpl_ip_tunnel_dellink
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
#else
void rpl_ip_tunnel_dellink(struct net_device *dev);
#endif

#define ip_tunnel_init_net rpl_ip_tunnel_init_net
int rpl_ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                           struct rtnl_link_ops *ops, char *devname);

#define ip_tunnel_delete_net rpl_ip_tunnel_delete_net
void rpl_ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);

#define ip_tunnel_setup rpl_ip_tunnel_setup
void rpl_ip_tunnel_setup(struct net_device *dev, int net_id);

#define ip_tunnel_get_iflink rpl_ip_tunnel_get_iflink
int rpl_ip_tunnel_get_iflink(const struct net_device *dev);

#define ip_tunnel_get_link_net rpl_ip_tunnel_get_link_net
struct net *rpl_ip_tunnel_get_link_net(const struct net_device *dev);
#endif /* HAVE_METADATA_DST */

#ifndef HAVE___IP_TUNNEL_CHANGE_MTU
#define __ip_tunnel_change_mtu rpl___ip_tunnel_change_mtu
int rpl___ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
#endif
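
/*
 * Usage sketch (illustrative): a tunnel device's .ndo_change_mtu callback
 * can forward to the compat helper; with strict = false an oversized
 * request is typically clamped to the largest MTU the tunnel can carry
 * rather than rejected.  The ovs_vport_change_mtu() wrapper name below is
 * hypothetical.
 *
 *      static int ovs_vport_change_mtu(struct net_device *dev, int new_mtu)
 *      {
 *              return __ip_tunnel_change_mtu(dev, new_mtu, false);
 *      }
 */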

#endif /* __NET_IP_TUNNELS_WRAPPER_H */