/*
 * Compat wrapper for <net/ip.h>: backports IP fragmentation/defragmentation
 * helpers for older Linux kernels (Open vSwitch datapath compat layer).
 */
1 | #ifndef __NET_IP_WRAPPER_H |
2 | #define __NET_IP_WRAPPER_H 1 | |
3 | ||
4 | #include_next <net/ip.h> | |
5 | ||
213e1f54 | 6 | #include <net/route.h> |
02059c09 | 7 | #include <linux/version.h> |
02059c09 | 8 | |
#ifndef HAVE_INET_GET_LOCAL_PORT_RANGE_USING_NET
/* Compat shim: older kernels' inet_get_local_port_range() does not take a
 * struct net argument, so the 'net' parameter is accepted and ignored. */
static inline void rpl_inet_get_local_port_range(struct net *net, int *low,
						 int *high)
{
	inet_get_local_port_range(low, high);
}
#define inet_get_local_port_range rpl_inet_get_local_port_range

#endif

/* Flag bit marking reassembled skbs whose fragments were limited by PMTU;
 * provide it on kernels that predate the upstream definition. */
#ifndef IPSKB_FRAG_PMTU
#define IPSKB_FRAG_PMTU BIT(6)
#endif

/* IPv4 datagram length is stored into 16bit field (tot_len) */
#ifndef IP_MAX_MTU
#define IP_MAX_MTU 0xFFFFU
#endif

28 | #ifndef HAVE_IP_SKB_DST_MTU | |
29 | static inline bool rpl_ip_sk_use_pmtu(const struct sock *sk) | |
30 | { | |
31 | return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; | |
32 | } | |
33 | #define ip_sk_use_pmtu rpl_ip_sk_use_pmtu | |
34 | ||
35 | static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, | |
36 | bool forwarding) | |
37 | { | |
38 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) | |
39 | struct net *net = dev_net(dst->dev); | |
40 | ||
41 | if (net->ipv4.sysctl_ip_fwd_use_pmtu || | |
42 | dst_metric_locked(dst, RTAX_MTU) || | |
43 | !forwarding) | |
44 | return dst_mtu(dst); | |
45 | #endif | |
46 | ||
47 | return min(dst->dev->mtu, IP_MAX_MTU); | |
48 | } | |
49 | ||
50 | static inline unsigned int rpl_ip_skb_dst_mtu(const struct sk_buff *skb) | |
51 | { | |
52 | if (!skb->sk || ip_sk_use_pmtu(skb->sk)) { | |
53 | bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; | |
54 | return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); | |
55 | } else { | |
56 | return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); | |
57 | } | |
58 | } | |
59 | #define ip_skb_dst_mtu rpl_ip_skb_dst_mtu | |
60 | #endif /* HAVE_IP_SKB_DST_MTU */ | |
61 | ||
/* The parameter list of the output callback passed to the kernel's IP
 * fragmentation code changed across kernel versions; select the matching
 * signature for vport output functions. */
#ifdef HAVE_IP_FRAGMENT_TAKES_SOCK
#ifdef HAVE_IP_LOCAL_OUT_TAKES_NET
#define OVS_VPORT_OUTPUT_PARAMS struct net *net, struct sock *sock, struct sk_buff *skb
#else
#define OVS_VPORT_OUTPUT_PARAMS struct sock *sock, struct sk_buff *skb
#endif
#else
#define OVS_VPORT_OUTPUT_PARAMS struct sk_buff *skb
#endif

0f09d6e3 JS |
72 | /* Prior to upstream commit d6b915e29f4a ("ip_fragment: don't forward |
73 | * defragmented DF packet"), IPCB(skb)->frag_max_size was not always populated | |
74 | * correctly, which would lead to reassembled packets not being refragmented. | |
75 | * So, we backport all of ip_defrag() in these cases. | |
76 | */ | |
3cdc5697 | 77 | #ifndef HAVE_CORRECT_MRU_HANDLING |
213e1f54 JS |
78 | |
79 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0) | |
80 | static inline bool ip_defrag_user_in_between(u32 user, | |
81 | enum ip_defrag_users lower_bond, | |
82 | enum ip_defrag_users upper_bond) | |
83 | { | |
84 | return user >= lower_bond && user <= upper_bond; | |
85 | } | |
0f09d6e3 | 86 | #endif /* < v4.2 */ |
213e1f54 | 87 | |
ea2bad6b JS |
88 | int rpl_ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, |
89 | int (*output)(OVS_VPORT_OUTPUT_PARAMS)); | |
213e1f54 | 90 | #define ip_do_fragment rpl_ip_do_fragment |
595e069a | 91 | |
9323abed JS |
92 | /* If backporting IP defrag, then init/exit functions need to be called from |
93 | * compat_{in,ex}it() to prepare the backported fragmentation cache. In this | |
94 | * case we declare the functions which are defined in | |
95 | * datapath/linux/compat/ip_fragment.c. */ | |
39c0ff22 | 96 | int rpl_ip_defrag(struct net *net, struct sk_buff *skb, u32 user); |
595e069a | 97 | #define ip_defrag rpl_ip_defrag |
595e069a JS |
98 | int __init rpl_ipfrag_init(void); |
99 | void rpl_ipfrag_fini(void); | |
7f4a5d68 | 100 | void ovs_netns_frags_init(struct net *net); |
101 | void ovs_netns_frags_exit(struct net *net); | |
3cdc5697 PS |
102 | |
103 | #else /* HAVE_CORRECT_MRU_HANDLING */ | |
792e5ed7 | 104 | |
0374bcbe EB |
105 | #ifndef HAVE_IP_DO_FRAGMENT_TAKES_NET |
106 | static inline int rpl_ip_do_fragment(struct net *net, struct sock *sk, | |
107 | struct sk_buff *skb, | |
108 | int (*output)(OVS_VPORT_OUTPUT_PARAMS)) | |
109 | { | |
110 | return ip_do_fragment(sk, skb, output); | |
111 | } | |
112 | #define ip_do_fragment rpl_ip_do_fragment | |
113 | #endif /* IP_DO_FRAGMENT_TAKES_NET */ | |
114 | ||
792e5ed7 JS |
115 | /* We have no good way to detect the presence of upstream commit 8282f27449bf |
116 | * ("inet: frag: Always orphan skbs inside ip_defrag()"), but it should be | |
117 | * always included in kernels 4.5+. */ | |
118 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) | |
39c0ff22 | 119 | static inline int rpl_ip_defrag(struct net *net, struct sk_buff *skb, u32 user) |
792e5ed7 JS |
120 | { |
121 | skb_orphan(skb); | |
39c0ff22 | 122 | #ifndef HAVE_IP_DEFRAG_TAKES_NET |
3f82513a | 123 | return ip_defrag(skb, user); |
39c0ff22 JS |
124 | #else |
125 | return ip_defrag(net, skb, user); | |
126 | #endif | |
792e5ed7 JS |
127 | } |
128 | #define ip_defrag rpl_ip_defrag | |
129 | #endif | |
130 | ||
9323abed JS |
131 | /* If we can use upstream defrag then we can rely on the upstream |
132 | * defrag module to init/exit correctly. In this case the calls in | |
133 | * compat_{in,ex}it() can be no-ops. */ | |
595e069a JS |
134 | static inline int rpl_ipfrag_init(void) { return 0; } |
135 | static inline void rpl_ipfrag_fini(void) { } | |
7f4a5d68 | 136 | static inline void ovs_netns_frags_init(struct net *net) { } |
137 | static inline void ovs_netns_frags_exit(struct net *net) { } | |
3cdc5697 PS |
138 | #endif /* HAVE_CORRECT_MRU_HANDLING */ |
139 | ||
595e069a JS |
140 | #define ipfrag_init rpl_ipfrag_init |
141 | #define ipfrag_fini rpl_ipfrag_fini | |
142 | ||
02059c09 | 143 | #endif |