// net/netfilter/nf_nat_masquerade.c — shared IPv4/IPv6 MASQUERADE NAT helpers
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/types.h>
4 #include <linux/atomic.h>
5 #include <linux/inetdevice.h>
6 #include <linux/netfilter.h>
7 #include <linux/netfilter_ipv4.h>
8 #include <linux/netfilter_ipv6.h>
9
10 #include <net/netfilter/ipv4/nf_nat_masquerade.h>
11 #include <net/netfilter/ipv6/nf_nat_masquerade.h>
12
/* Serializes notifier (un)registration below and protects masq_refcnt. */
static DEFINE_MUTEX(masq_mutex);
/* Count of masquerade users (IPv4 + IPv6 share it): the notifiers are
 * registered on the 0->1 transition and removed on the 1->0 transition.
 */
static unsigned int masq_refcnt __read_mostly;
15
/* Set up source NAT ("masquerade") for a packet leaving @out: replace the
 * connection's source with the primary address of the outgoing interface.
 *
 * @skb:     packet being NATed; must carry a conntrack entry
 * @hooknum: invoking hook; only NF_INET_POST_ROUTING is valid here
 * @range:   caller-supplied range; only flags and proto min/max are used,
 *           the address part is overridden with the interface address
 * @out:     outgoing device whose address becomes the new source
 *
 * Returns an NF_* verdict: NF_ACCEPT (0.0.0.0 source left alone), NF_DROP
 * (no usable address on @out), or the result of nf_nat_setup_info().
 */
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
		       const struct nf_nat_range2 *range,
		       const struct net_device *out)
{
	struct nf_conn *ct;
	struct nf_conn_nat *nat;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_range2 newrange;
	const struct rtable *rt;
	__be32 newsrc, nh;

	WARN_ON(hooknum != NF_INET_POST_ROUTING);

	ct = nf_ct_get(skb, &ctinfo);

	/* NAT bindings are only set up on new or related connections. */
	WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			 ctinfo == IP_CT_RELATED_REPLY)));

	/* Source address is 0.0.0.0 - locally generated packet that is
	 * probably not supposed to be masqueraded.
	 */
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
		return NF_ACCEPT;

	/* Pick the address @out would use toward the packet's next hop. */
	rt = skb_rtable(skb);
	nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
	newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE);
	if (!newsrc) {
		pr_info("%s ate my IP address\n", out->name);
		return NF_DROP;
	}

	/* Record the interface so device_cmp() can flush this entry when
	 * the device goes down (see masq_device_event()).
	 */
	nat = nf_ct_nat_ext_add(ct);
	if (nat)
		nat->masq_index = out->ifindex;

	/* Transfer from original range. */
	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = newsrc;
	newrange.max_addr.ip = newsrc;
	newrange.min_proto = range->min_proto;
	newrange.max_proto = range->max_proto;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
66
67 static int device_cmp(struct nf_conn *i, void *ifindex)
68 {
69 const struct nf_conn_nat *nat = nfct_nat(i);
70
71 if (!nat)
72 return 0;
73 return nat->masq_index == (int)(long)ifindex;
74 }
75
76 static int masq_device_event(struct notifier_block *this,
77 unsigned long event,
78 void *ptr)
79 {
80 const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
81 struct net *net = dev_net(dev);
82
83 if (event == NETDEV_DOWN) {
84 /* Device was downed. Search entire table for
85 * conntracks which were associated with that device,
86 * and forget them.
87 */
88
89 nf_ct_iterate_cleanup_net(net, device_cmp,
90 (void *)(long)dev->ifindex, 0, 0);
91 }
92
93 return NOTIFY_DONE;
94 }
95
96 static int inet_cmp(struct nf_conn *ct, void *ptr)
97 {
98 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
99 struct net_device *dev = ifa->ifa_dev->dev;
100 struct nf_conntrack_tuple *tuple;
101
102 if (!device_cmp(ct, (void *)(long)dev->ifindex))
103 return 0;
104
105 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
106
107 return ifa->ifa_address == tuple->dst.u3.ip;
108 }
109
110 static int masq_inet_event(struct notifier_block *this,
111 unsigned long event,
112 void *ptr)
113 {
114 struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
115 struct net *net = dev_net(idev->dev);
116
117 /* The masq_dev_notifier will catch the case of the device going
118 * down. So if the inetdev is dead and being destroyed we have
119 * no work to do. Otherwise this is an individual address removal
120 * and we have to perform the flush.
121 */
122 if (idev->dead)
123 return NOTIFY_DONE;
124
125 if (event == NETDEV_DOWN)
126 nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
127
128 return NOTIFY_DONE;
129 }
130
/* Reacts to NETDEV_DOWN on any interface (IPv4 and IPv6 share this). */
static struct notifier_block masq_dev_notifier = {
	.notifier_call = masq_device_event,
};

/* Reacts to individual IPv4 address removals. */
static struct notifier_block masq_inet_notifier = {
	.notifier_call = masq_inet_event,
};
138
139 int nf_nat_masquerade_ipv4_register_notifier(void)
140 {
141 int ret = 0;
142
143 mutex_lock(&masq_mutex);
144 /* check if the notifier was already set */
145 if (++masq_refcnt > 1)
146 goto out_unlock;
147
148 /* Register for device down reports */
149 ret = register_netdevice_notifier(&masq_dev_notifier);
150 if (ret)
151 goto err_dec;
152 /* Register IP address change reports */
153 ret = register_inetaddr_notifier(&masq_inet_notifier);
154 if (ret)
155 goto err_unregister;
156
157 mutex_unlock(&masq_mutex);
158 return ret;
159
160 err_unregister:
161 unregister_netdevice_notifier(&masq_dev_notifier);
162 err_dec:
163 masq_refcnt--;
164 out_unlock:
165 mutex_unlock(&masq_mutex);
166 return ret;
167 }
168 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
169
170 void nf_nat_masquerade_ipv4_unregister_notifier(void)
171 {
172 mutex_lock(&masq_mutex);
173 /* check if the notifier still has clients */
174 if (--masq_refcnt > 0)
175 goto out_unlock;
176
177 unregister_netdevice_notifier(&masq_dev_notifier);
178 unregister_inetaddr_notifier(&masq_inet_notifier);
179 out_unlock:
180 mutex_unlock(&masq_mutex);
181 }
182 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
183
#if IS_ENABLED(CONFIG_IPV6)
/* Number of in-flight deferred cleanup work items; masq_inet6_event()
 * refuses to queue new work once this reaches 16.
 */
static atomic_t v6_worker_count __read_mostly;
186
/* Select an IPv6 source address on @dev suitable for reaching @daddr,
 * storing it in @saddr.  When IPv6 is built as a module this has to go
 * through the nf_ipv6_ops indirection; -EHOSTUNREACH is returned if the
 * IPv6 module is not loaded.  Returns 0 or a negative errno.
 */
static int
nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
		       const struct in6_addr *daddr, unsigned int srcprefs,
		       struct in6_addr *saddr)
{
#ifdef CONFIG_IPV6_MODULE
	const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();

	return v6_ops ? v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr)
		      : -EHOSTUNREACH;
#else
	return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr);
#endif
}
203
/* Set up source NAT ("masquerade") for an IPv6 packet leaving @out:
 * replace the connection's source with an address selected on @out.
 *
 * @skb:   packet being NATed; must carry a conntrack entry
 * @range: caller-supplied range; only flags and proto min/max are used,
 *         the address part is overridden with the selected source
 * @out:   outgoing device used for source-address selection
 *
 * Returns an NF_* verdict: NF_DROP when no source address could be
 * selected, otherwise the result of nf_nat_setup_info().
 */
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
		       const struct net_device *out)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	struct in6_addr src;
	struct nf_conn *ct;
	struct nf_nat_range2 newrange;

	ct = nf_ct_get(skb, &ctinfo);
	/* NAT bindings are only set up on new or related connections. */
	WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			 ctinfo == IP_CT_RELATED_REPLY)));

	if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out,
				   &ipv6_hdr(skb)->daddr, 0, &src) < 0)
		return NF_DROP;

	/* Record the interface so device_cmp() can flush this entry when
	 * the device goes down (see masq_device_event()).
	 */
	nat = nf_ct_nat_ext_add(ct);
	if (nat)
		nat->masq_index = out->ifindex;

	/* Transfer flags and proto limits from the original range.
	 * NOTE(review): unlike the IPv4 path there is no memset of
	 * min_addr/max_addr here - presumably the .in6 assignment covers
	 * the whole nf_inet_addr union; confirm against its definition.
	 */
	newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.in6 = src;
	newrange.max_addr.in6 = src;
	newrange.min_proto = range->min_proto;
	newrange.max_proto = range->max_proto;

	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
235
/* Deferred per-address cleanup request, allocated and queued from the
 * atomic masq_inet6_event() context and consumed by
 * iterate_cleanup_work().
 */
struct masq_dev_work {
	struct work_struct work;
	struct net *net;	/* netns reference, released by the worker */
	struct in6_addr addr;	/* the IPv6 address that was removed */
	int ifindex;		/* device the address lived on */
};
242
243 static int inet6_cmp(struct nf_conn *ct, void *work)
244 {
245 struct masq_dev_work *w = (struct masq_dev_work *)work;
246 struct nf_conntrack_tuple *tuple;
247
248 if (!device_cmp(ct, (void *)(long)w->ifindex))
249 return 0;
250
251 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
252
253 return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
254 }
255
/* Work handler: flush conntrack entries masqueraded to the removed
 * address, then release everything masq_inet6_event() acquired
 * (netns reference, worker-count slot, module reference).
 */
static void iterate_cleanup_work(struct work_struct *work)
{
	struct masq_dev_work *w;

	w = container_of(work, struct masq_dev_work, work);

	nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);

	put_net(w->net);
	kfree(w);
	atomic_dec(&v6_worker_count);
	/* module_put() last: no code of this module runs afterwards. */
	module_put(THIS_MODULE);
}
269
/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
 *
 * Defer it to the system workqueue.
 *
 * As we can have 'a lot' of inet_events (depending on amount of ipv6
 * addresses being deleted), we also need to limit work item queue.
 */
static int masq_inet6_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = ptr;
	const struct net_device *dev;
	struct masq_dev_work *w;
	struct net *net;

	/* Silently drop events beyond 16 pending work items; the skipped
	 * entries will only go away through normal conntrack timeout.
	 */
	if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
		return NOTIFY_DONE;

	dev = ifa->idev->dev;
	/* Skip a netns that is already being torn down; otherwise hold a
	 * reference so it stays alive until the worker runs.
	 */
	net = maybe_get_net(dev_net(dev));
	if (!net)
		return NOTIFY_DONE;

	/* Pin the module so it cannot unload while work is queued. */
	if (!try_module_get(THIS_MODULE))
		goto err_module;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (w) {
		atomic_inc(&w_worker_count_placeholder); /* see below */
		INIT_WORK(&w->work, iterate_cleanup_work);
		w->ifindex = dev->ifindex;
		w->net = net;	/* ownership transferred to the worker */
		w->addr = ifa->addr;
		schedule_work(&w->work);

		return NOTIFY_DONE;
	}

	/* Allocation failed: undo the references taken above. */
	module_put(THIS_MODULE);
err_module:
	put_net(net);
	return NOTIFY_DONE;
}
314
/* Reacts to individual IPv6 address removals (atomic context). */
static struct notifier_block masq_inet6_notifier = {
	.notifier_call = masq_inet6_event,
};
318
319 int nf_nat_masquerade_ipv6_register_notifier(void)
320 {
321 int ret = 0;
322
323 mutex_lock(&masq_mutex);
324 /* check if the notifier is already set */
325 if (++masq_refcnt > 1)
326 goto out_unlock;
327
328 ret = register_netdevice_notifier(&masq_dev_notifier);
329 if (ret)
330 goto err_dec;
331
332 ret = register_inet6addr_notifier(&masq_inet6_notifier);
333 if (ret)
334 goto err_unregister;
335
336 mutex_unlock(&masq_mutex);
337 return ret;
338
339 err_unregister:
340 unregister_netdevice_notifier(&masq_dev_notifier);
341 err_dec:
342 masq_refcnt--;
343 out_unlock:
344 mutex_unlock(&masq_mutex);
345 return ret;
346 }
347 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
348
349 void nf_nat_masquerade_ipv6_unregister_notifier(void)
350 {
351 mutex_lock(&masq_mutex);
352 /* check if the notifier still has clients */
353 if (--masq_refcnt > 0)
354 goto out_unlock;
355
356 unregister_inet6addr_notifier(&masq_inet6_notifier);
357 unregister_netdevice_notifier(&masq_dev_notifier);
358 out_unlock:
359 mutex_unlock(&masq_mutex);
360 }
361 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
362 #endif