/* datapath/linux/compat/include/net/ip_tunnels.h ("datapath: Add support for lwtunnel") */
#ifndef __NET_IP_TUNNELS_WRAPPER_H
#define __NET_IP_TUNNELS_WRAPPER_H 1

#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
#include_next <net/ip_tunnels.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/dsfield.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/rtnetlink.h>

struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
                                             bool csum_help, int gso_type_mask,
                                             void (*fix_segment)(struct sk_buff *));

#define iptunnel_xmit rpl_iptunnel_xmit
int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                      __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
                      __be16 df, bool xnet);

#define iptunnel_pull_header rpl_iptunnel_pull_header
int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);

#else

#define ovs_iptunnel_handle_offloads(skb, csum_help, gso_type_mask, fix_segment) \
        iptunnel_handle_offloads(skb, csum_help, gso_type_mask)

#endif /* 3.18 */
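
/*
 * Usage sketch (illustrative only, not part of this header): a tunnel xmit
 * path in this compat layer can call the wrapper above on any kernel; on
 * >= 3.18 it expands to the native iptunnel_handle_offloads().  The GSO type
 * and the fix_segment callback shown here are hypothetical, and the error
 * check assumes the upstream convention of returning an ERR_PTR() on failure.
 *
 *        skb = ovs_iptunnel_handle_offloads(skb, need_csum, SKB_GSO_UDP_TUNNEL,
 *                                           my_fix_segment);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 */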

#ifndef TUNNEL_CSUM
#define TUNNEL_CSUM     __cpu_to_be16(0x01)
#define TUNNEL_ROUTING  __cpu_to_be16(0x02)
#define TUNNEL_KEY      __cpu_to_be16(0x04)
#define TUNNEL_SEQ      __cpu_to_be16(0x08)
#define TUNNEL_STRICT   __cpu_to_be16(0x10)
#define TUNNEL_REC      __cpu_to_be16(0x20)
#define TUNNEL_VERSION  __cpu_to_be16(0x40)
#define TUNNEL_NO_KEY   __cpu_to_be16(0x80)

struct tnl_ptk_info {
        __be16 flags;
        __be16 proto;
        __be32 key;
        __be32 seq;
};

#define PACKET_RCVD     0
#define PACKET_REJECT   1
#endif
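
/*
 * Usage sketch (illustrative only): a tunnel receive handler typically fills
 * a struct tnl_ptk_info from the parsed outer header and returns PACKET_RCVD
 * on success or PACKET_REJECT to have the packet dropped, e.g.:
 *
 *        if (tpi->flags & TUNNEL_KEY)
 *                tun_id = tpi->key;
 *        return PACKET_RCVD;
 */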

#ifndef TUNNEL_DONT_FRAGMENT
#define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
#endif

#ifndef TUNNEL_OAM
#define TUNNEL_OAM      __cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
#endif

#ifndef TUNNEL_GENEVE_OPT
#define TUNNEL_GENEVE_OPT       __cpu_to_be16(0x0800)
#endif

#ifndef TUNNEL_VXLAN_OPT
#define TUNNEL_VXLAN_OPT        __cpu_to_be16(0x1000)
#endif

/* Older kernels defined TUNNEL_OPTIONS_PRESENT to cover GENEVE options only. */
#undef TUNNEL_OPTIONS_PRESENT
#define TUNNEL_OPTIONS_PRESENT  (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
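
/*
 * Usage sketch (illustrative only): callers that only care whether any
 * metadata options (GENEVE or VXLAN) accompany a tunnel key can test the
 * combined mask:
 *
 *        if (key->tun_flags & TUNNEL_OPTIONS_PRESENT)
 *                opts_len = info->options_len;
 */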

#define skb_is_encapsulated ovs_skb_is_encapsulated
bool ovs_skb_is_encapsulated(struct sk_buff *skb);

#ifndef HAVE_METADATA_DST
/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE      offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD  offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
        (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
         FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
        __be64 tun_id;
        union {
                struct {
                        __be32 src;
                        __be32 dst;
                } ipv4;
                struct {
                        struct in6_addr src;
                        struct in6_addr dst;
                } ipv6;
        } u;
        __be16 tun_flags;
        u8 tos;                 /* TOS for IPv4, TC for IPv6 */
        u8 ttl;                 /* TTL for IPv4, HL for IPv6 */
        __be16 tp_src;
        __be16 tp_dst;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX       0x01    /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6     0x02    /* key contains IPv6 addresses */

struct ip_tunnel_info {
        struct ip_tunnel_key key;
        u8 options_len;
        u8 mode;
};

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info)
{
        return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
        return info + 1;
}

static inline void ip_tunnel_info_opts_get(void *to,
                                           const struct ip_tunnel_info *info)
{
        memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
                                           const void *from, int len)
{
        memcpy(ip_tunnel_info_opts(info), from, len);
        info->options_len = len;
}
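
/*
 * Usage sketch (illustrative only): tunnel metadata options are stored
 * directly after the struct ip_tunnel_info (see ip_tunnel_info_opts() above),
 * so the caller must have allocated the info with room for them.  A
 * hypothetical GENEVE receive path might do:
 *
 *        ip_tunnel_info_opts_set(info, geneve_opts, opts_len);
 *        info->key.tun_flags |= TUNNEL_GENEVE_OPT;
 */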

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
                                      __be32 saddr, __be32 daddr,
                                      u8 tos, u8 ttl,
                                      __be16 tp_src, __be16 tp_dst,
                                      __be64 tun_id, __be16 tun_flags)
{
        key->tun_id = tun_id;
        key->u.ipv4.src = saddr;
        key->u.ipv4.dst = daddr;
        memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
               0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
        key->tos = tos;
        key->ttl = ttl;
        key->tun_flags = tun_flags;

        /* For tunnel types layered on top of IPsec, the tp_src and tp_dst of
         * the upper tunnel are used.
         * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
         */
        key->tp_src = tp_src;
        key->tp_dst = tp_dst;

        /* Clear struct padding. */
        if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
                memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
                       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
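
/*
 * Usage sketch (illustrative only): building a metadata key for a received
 * IPv4 tunnel packet.  'iph' and 'udph' point at the outer headers and 'vni'
 * is a hypothetical tunnel id already in network byte order.
 *
 *        ip_tunnel_key_init(&info->key, iph->saddr, iph->daddr,
 *                           iph->tos, iph->ttl,
 *                           udph->source, udph->dest,
 *                           vni, TUNNEL_KEY);
 */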

#define ip_tunnel_collect_metadata() true

#define ip_tunnel rpl_ip_tunnel

struct ip_tunnel {
        struct net_device *dev;
        struct net *net;        /* netns for packet i/o */

        int err_count;          /* Number of arrived ICMP errors */
        unsigned long err_time; /* Time when the last ICMP error arrived */

        /* These four fields used only by GRE */
        u32 i_seqno;            /* The last seen seqno */
        u32 o_seqno;            /* The last output seqno */
        int tun_hlen;           /* Precalculated header length */
        int mlink;

        struct ip_tunnel_parm parms;

        int encap_hlen;         /* Encap header length (FOU, GUE) */
        int hlen;               /* tun_hlen + encap_hlen */

        int ip_tnl_net_id;
        bool collect_md;
};

#define ip_tunnel_net rpl_ip_tunnel_net
struct ip_tunnel_net {
        struct ip_tunnel __rcu *collect_md_tun;
        struct rtnl_link_ops *rtnl_ops;
};

#ifndef HAVE_PCPU_SW_NETSTATS
#define ip_tunnel_get_stats64 rpl_ip_tunnel_get_stats64
struct rtnl_link_stats64 *rpl_ip_tunnel_get_stats64(struct net_device *dev,
                                                    struct rtnl_link_stats64 *tot);
#endif

#define ip_tunnel_get_dsfield rpl_ip_tunnel_get_dsfield
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
                                       const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return iph->tos;
        else if (skb->protocol == htons(ETH_P_IPV6))
                return ipv6_get_dsfield((const struct ipv6hdr *)iph);
        else
                return 0;
}

#define ip_tunnel_ecn_encap rpl_ip_tunnel_ecn_encap
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
                                     const struct sk_buff *skb)
{
        u8 inner = ip_tunnel_get_dsfield(iph, skb);

        return INET_ECN_encapsulate(tos, inner);
}
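
/*
 * Usage sketch (illustrative only): on encapsulation the outer TOS keeps the
 * DSCP from the tunnel configuration while the ECN bits are derived from the
 * inner header.  'inner_iph' here is a hypothetical pointer to the inner
 * (pre-encapsulation) IP header:
 *
 *        tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);
 */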

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
#define iptunnel_xmit_stats(err, stats, dummy)          \
do {                                                    \
        if (err > 0) {                                  \
                (stats)->tx_bytes += err;               \
                (stats)->tx_packets++;                  \
        } else if (err < 0) {                           \
                (stats)->tx_errors++;                   \
                (stats)->tx_aborted_errors++;           \
        } else {                                        \
                (stats)->tx_dropped++;                  \
        }                                               \
} while (0)

#else
#define iptunnel_xmit_stats rpl_iptunnel_xmit_stats
static inline void iptunnel_xmit_stats(int err,
                                       struct net_device_stats *err_stats,
                                       struct pcpu_sw_netstats __percpu *stats)
{
        if (err > 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);

                u64_stats_update_begin(&tstats->syncp);
                tstats->tx_bytes += err;
                tstats->tx_packets++;
                u64_stats_update_end(&tstats->syncp);
        } else if (err < 0) {
                err_stats->tx_errors++;
                err_stats->tx_aborted_errors++;
        } else {
                err_stats->tx_dropped++;
        }
}
#endif
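
/*
 * Usage sketch (illustrative only): after pushing the outer IP header, the
 * transmit return value is fed to iptunnel_xmit_stats() so the tx
 * byte/packet/error counters stay consistent.  'dev' is the tunnel
 * net_device and 'tstats' its hypothetical per-cpu stats storage.
 *
 *        err = iptunnel_xmit(skb->sk, rt, skb, saddr, daddr, IPPROTO_GRE,
 *                            tos, ttl, df, false);
 *        iptunnel_xmit_stats(err, &dev->stats, tstats);
 */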

#define ip_tunnel_init rpl_ip_tunnel_init
int rpl_ip_tunnel_init(struct net_device *dev);

#define ip_tunnel_uninit rpl_ip_tunnel_uninit
void rpl_ip_tunnel_uninit(struct net_device *dev);

#define ip_tunnel_change_mtu rpl_ip_tunnel_change_mtu
int rpl_ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

#define ip_tunnel_newlink rpl_ip_tunnel_newlink
int rpl_ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                          struct ip_tunnel_parm *p);

#define ip_tunnel_dellink rpl_ip_tunnel_dellink
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
#else
void rpl_ip_tunnel_dellink(struct net_device *dev);
#endif

#define ip_tunnel_init_net rpl_ip_tunnel_init_net
int rpl_ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                           struct rtnl_link_ops *ops, char *devname);

#define ip_tunnel_delete_net rpl_ip_tunnel_delete_net
void rpl_ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);

#define ip_tunnel_setup rpl_ip_tunnel_setup
void rpl_ip_tunnel_setup(struct net_device *dev, int net_id);

#endif /* HAVE_METADATA_DST */
#endif /* __NET_IP_TUNNELS_WRAPPER_H */