/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

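/* Each hook blob is a single kvzalloc'd allocation.  The layout reserved
 * below is what nf_hook_entries_get_hook_ops() and nf_hook_entries_free()
 * rely on:
 *
 *	struct nf_hook_entries            header (num_hook_entries)
 *	struct nf_hook_entry[num]         hook function + priv (packet hot path)
 *	struct nf_hook_ops *[num]         registration info (slow path only)
 *	struct nf_hook_entries_rcu_head   used to kvfree the blob after a
 *	                                  grace period
 */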
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL);
	if (e)
		e->num_hook_entries = num;
	return e;
}

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

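/* Build a new blob containing all live (non-dummy) hooks of @old plus @reg,
 * keeping entries sorted by ascending ->priority (lowest value runs first).
 * The old blob is left untouched; the caller publishes the new one with
 * rcu_assign_pointer() and frees the old blob afterwards.
 */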
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
			       const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that simply moves on to the next hook.
 *
 * This counts the current dummy hooks, attempts to allocate a new blob,
 * copies the live hooks into it, then publishes the new blob and
 * discards the old one.
 *
 * Returns the address of the blob to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
#if IS_ENABLED(CONFIG_DECNET)
	case NFPROTO_DECNET:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
			return NULL;
		return net->nf.hooks_decnet + hooknum;
#endif
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

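/* NFPROTO_INET/NF_INET_INGRESS hooks are attached to the per-device netdev
 * ingress hook list (see nf_hook_entry_head()), so they are accounted
 * against the NFPROTO_NETDEV/NF_NETDEV_INGRESS static key.
 */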
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}

static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
		err = nf_ingress_check(net, reg, NF_NETDEV_INGRESS);
		if (err < 0)
			return err;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks))
		rcu_assign_pointer(*pp, new_hooks);

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from a blob
 *
 * @old: current hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail; hook unregistration must always succeed.
 * Therefore the to-be-removed hook is replaced with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);
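
/* Typical caller-side usage (illustrative sketch only; my_hookfn and my_ops
 * are hypothetical names, not part of this file).  A hook function inspects
 * the skb and returns a verdict such as NF_ACCEPT or NF_DROP:
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 * followed by nf_register_net_hook(net, &my_ops) for each namespace of
 * interest and a matching nf_unregister_net_hook(net, &my_ops) on teardown.
 */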

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
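/* The low bits of a verdict (NF_VERDICT_MASK) select the action; the upper
 * bits may carry an errno for NF_DROP (see NF_DROP_GETERR) or a queue
 * number for NF_QUEUE.
 */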
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb(skb);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non-conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;
	int ret;

	INIT_LIST_HEAD(&sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if connection
   tracking is in use: without this, the connection may not be in the hash table,
   and hence manufactured ICMP or RST packets will not be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->_nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	BUG_ON(ct_hook == NULL);
	ct_hook->destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id = NF_CT_DEFAULT_ZONE_ID,
	.dir = NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#if IS_ENABLED(CONFIG_DECNET)
	__netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
#endif

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}