1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/types.h>
4 #include <linux/atomic.h>
5 #include <linux/inetdevice.h>
6 #include <linux/netfilter.h>
7 #include <linux/netfilter_ipv4.h>
8 #include <linux/netfilter_ipv6.h>
10 #include <net/netfilter/ipv4/nf_nat_masquerade.h>
11 #include <net/netfilter/ipv6/nf_nat_masquerade.h>
13 static DEFINE_MUTEX(masq_mutex
);
14 static unsigned int masq_refcnt __read_mostly
;
17 nf_nat_masquerade_ipv4(struct sk_buff
*skb
, unsigned int hooknum
,
18 const struct nf_nat_range2
*range
,
19 const struct net_device
*out
)
22 struct nf_conn_nat
*nat
;
23 enum ip_conntrack_info ctinfo
;
24 struct nf_nat_range2 newrange
;
25 const struct rtable
*rt
;
28 WARN_ON(hooknum
!= NF_INET_POST_ROUTING
);
30 ct
= nf_ct_get(skb
, &ctinfo
);
32 WARN_ON(!(ct
&& (ctinfo
== IP_CT_NEW
|| ctinfo
== IP_CT_RELATED
||
33 ctinfo
== IP_CT_RELATED_REPLY
)));
35 /* Source address is 0.0.0.0 - locally generated packet that is
36 * probably not supposed to be masqueraded.
38 if (ct
->tuplehash
[IP_CT_DIR_ORIGINAL
].tuple
.src
.u3
.ip
== 0)
42 nh
= rt_nexthop(rt
, ip_hdr(skb
)->daddr
);
43 newsrc
= inet_select_addr(out
, nh
, RT_SCOPE_UNIVERSE
);
45 pr_info("%s ate my IP address\n", out
->name
);
49 nat
= nf_ct_nat_ext_add(ct
);
51 nat
->masq_index
= out
->ifindex
;
53 /* Transfer from original range. */
54 memset(&newrange
.min_addr
, 0, sizeof(newrange
.min_addr
));
55 memset(&newrange
.max_addr
, 0, sizeof(newrange
.max_addr
));
56 newrange
.flags
= range
->flags
| NF_NAT_RANGE_MAP_IPS
;
57 newrange
.min_addr
.ip
= newsrc
;
58 newrange
.max_addr
.ip
= newsrc
;
59 newrange
.min_proto
= range
->min_proto
;
60 newrange
.max_proto
= range
->max_proto
;
62 /* Hand modified range to generic setup. */
63 return nf_nat_setup_info(ct
, &newrange
, NF_NAT_MANIP_SRC
);
65 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4
);
67 static int device_cmp(struct nf_conn
*i
, void *ifindex
)
69 const struct nf_conn_nat
*nat
= nfct_nat(i
);
73 return nat
->masq_index
== (int)(long)ifindex
;
76 static int masq_device_event(struct notifier_block
*this,
80 const struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
81 struct net
*net
= dev_net(dev
);
83 if (event
== NETDEV_DOWN
) {
84 /* Device was downed. Search entire table for
85 * conntracks which were associated with that device,
89 nf_ct_iterate_cleanup_net(net
, device_cmp
,
90 (void *)(long)dev
->ifindex
, 0, 0);
96 static int inet_cmp(struct nf_conn
*ct
, void *ptr
)
98 struct in_ifaddr
*ifa
= (struct in_ifaddr
*)ptr
;
99 struct net_device
*dev
= ifa
->ifa_dev
->dev
;
100 struct nf_conntrack_tuple
*tuple
;
102 if (!device_cmp(ct
, (void *)(long)dev
->ifindex
))
105 tuple
= &ct
->tuplehash
[IP_CT_DIR_REPLY
].tuple
;
107 return ifa
->ifa_address
== tuple
->dst
.u3
.ip
;
110 static int masq_inet_event(struct notifier_block
*this,
114 struct in_device
*idev
= ((struct in_ifaddr
*)ptr
)->ifa_dev
;
115 struct net
*net
= dev_net(idev
->dev
);
117 /* The masq_dev_notifier will catch the case of the device going
118 * down. So if the inetdev is dead and being destroyed we have
119 * no work to do. Otherwise this is an individual address removal
120 * and we have to perform the flush.
125 if (event
== NETDEV_DOWN
)
126 nf_ct_iterate_cleanup_net(net
, inet_cmp
, ptr
, 0, 0);
131 static struct notifier_block masq_dev_notifier
= {
132 .notifier_call
= masq_device_event
,
135 static struct notifier_block masq_inet_notifier
= {
136 .notifier_call
= masq_inet_event
,
139 int nf_nat_masquerade_ipv4_register_notifier(void)
143 mutex_lock(&masq_mutex
);
144 /* check if the notifier was already set */
145 if (++masq_refcnt
> 1)
148 /* Register for device down reports */
149 ret
= register_netdevice_notifier(&masq_dev_notifier
);
152 /* Register IP address change reports */
153 ret
= register_inetaddr_notifier(&masq_inet_notifier
);
157 mutex_unlock(&masq_mutex
);
161 unregister_netdevice_notifier(&masq_dev_notifier
);
165 mutex_unlock(&masq_mutex
);
168 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier
);
170 void nf_nat_masquerade_ipv4_unregister_notifier(void)
172 mutex_lock(&masq_mutex
);
173 /* check if the notifier still has clients */
174 if (--masq_refcnt
> 0)
177 unregister_netdevice_notifier(&masq_dev_notifier
);
178 unregister_inetaddr_notifier(&masq_inet_notifier
);
180 mutex_unlock(&masq_mutex
);
182 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier
);
184 #if IS_ENABLED(CONFIG_IPV6)
185 static atomic_t v6_worker_count __read_mostly
;
/* Thin wrapper around ipv6_dev_get_saddr(): when IPv6 is built as a module
 * we must go through the nf_ipv6_ops indirection, which may be absent.
 * Returns 0 on success or a negative errno (-EHOSTUNREACH if IPv6 is not
 * loaded).
 */
static int
nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
		       const struct in6_addr *daddr, unsigned int srcprefs,
		       struct in6_addr *saddr)
{
#ifdef CONFIG_IPV6_MODULE
	const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();

	if (!v6_ops)
		return -EHOSTUNREACH;

	return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr);
#else
	return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr);
#endif
}
205 nf_nat_masquerade_ipv6(struct sk_buff
*skb
, const struct nf_nat_range2
*range
,
206 const struct net_device
*out
)
208 enum ip_conntrack_info ctinfo
;
209 struct nf_conn_nat
*nat
;
212 struct nf_nat_range2 newrange
;
214 ct
= nf_ct_get(skb
, &ctinfo
);
215 WARN_ON(!(ct
&& (ctinfo
== IP_CT_NEW
|| ctinfo
== IP_CT_RELATED
||
216 ctinfo
== IP_CT_RELATED_REPLY
)));
218 if (nat_ipv6_dev_get_saddr(nf_ct_net(ct
), out
,
219 &ipv6_hdr(skb
)->daddr
, 0, &src
) < 0)
222 nat
= nf_ct_nat_ext_add(ct
);
224 nat
->masq_index
= out
->ifindex
;
226 newrange
.flags
= range
->flags
| NF_NAT_RANGE_MAP_IPS
;
227 newrange
.min_addr
.in6
= src
;
228 newrange
.max_addr
.in6
= src
;
229 newrange
.min_proto
= range
->min_proto
;
230 newrange
.max_proto
= range
->max_proto
;
232 return nf_nat_setup_info(ct
, &newrange
, NF_NAT_MANIP_SRC
);
234 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6
);
236 struct masq_dev_work
{
237 struct work_struct work
;
239 struct in6_addr addr
;
243 static int inet6_cmp(struct nf_conn
*ct
, void *work
)
245 struct masq_dev_work
*w
= (struct masq_dev_work
*)work
;
246 struct nf_conntrack_tuple
*tuple
;
248 if (!device_cmp(ct
, (void *)(long)w
->ifindex
))
251 tuple
= &ct
->tuplehash
[IP_CT_DIR_REPLY
].tuple
;
253 return ipv6_addr_equal(&w
->addr
, &tuple
->dst
.u3
.in6
);
256 static void iterate_cleanup_work(struct work_struct
*work
)
258 struct masq_dev_work
*w
;
260 w
= container_of(work
, struct masq_dev_work
, work
);
262 nf_ct_iterate_cleanup_net(w
->net
, inet6_cmp
, (void *)w
, 0, 0);
266 atomic_dec(&v6_worker_count
);
267 module_put(THIS_MODULE
);
270 /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
272 * Defer it to the system workqueue.
274 * As we can have 'a lot' of inet_events (depending on amount of ipv6
275 * addresses being deleted), we also need to limit work item queue.
277 static int masq_inet6_event(struct notifier_block
*this,
278 unsigned long event
, void *ptr
)
280 struct inet6_ifaddr
*ifa
= ptr
;
281 const struct net_device
*dev
;
282 struct masq_dev_work
*w
;
285 if (event
!= NETDEV_DOWN
|| atomic_read(&v6_worker_count
) >= 16)
288 dev
= ifa
->idev
->dev
;
289 net
= maybe_get_net(dev_net(dev
));
293 if (!try_module_get(THIS_MODULE
))
296 w
= kmalloc(sizeof(*w
), GFP_ATOMIC
);
298 atomic_inc(&v6_worker_count
);
300 INIT_WORK(&w
->work
, iterate_cleanup_work
);
301 w
->ifindex
= dev
->ifindex
;
304 schedule_work(&w
->work
);
309 module_put(THIS_MODULE
);
315 static struct notifier_block masq_inet6_notifier
= {
316 .notifier_call
= masq_inet6_event
,
319 int nf_nat_masquerade_ipv6_register_notifier(void)
323 mutex_lock(&masq_mutex
);
324 /* check if the notifier is already set */
325 if (++masq_refcnt
> 1)
328 ret
= register_netdevice_notifier(&masq_dev_notifier
);
332 ret
= register_inet6addr_notifier(&masq_inet6_notifier
);
336 mutex_unlock(&masq_mutex
);
340 unregister_netdevice_notifier(&masq_dev_notifier
);
344 mutex_unlock(&masq_mutex
);
347 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier
);
349 void nf_nat_masquerade_ipv6_unregister_notifier(void)
351 mutex_lock(&masq_mutex
);
352 /* check if the notifier still has clients */
353 if (--masq_refcnt
> 0)
356 unregister_inet6addr_notifier(&masq_inet6_notifier
);
357 unregister_netdevice_notifier(&masq_dev_notifier
);
359 mutex_unlock(&masq_mutex
);
361 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier
);