/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
10 #include <linux/kernel.h>
11 #include <linux/netfilter.h>
12 #include <net/protocol.h>
13 #include <linux/init.h>
14 #include <linux/skbuff.h>
15 #include <linux/wait.h>
16 #include <linux/module.h>
17 #include <linux/interrupt.h>
19 #include <linux/netdevice.h>
20 #include <linux/netfilter_ipv6.h>
21 #include <linux/inetdevice.h>
22 #include <linux/proc_fs.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <net/net_namespace.h>
28 #include "nf_internals.h"
30 static DEFINE_MUTEX(afinfo_mutex
);
32 const struct nf_afinfo __rcu
*nf_afinfo
[NFPROTO_NUMPROTO
] __read_mostly
;
33 EXPORT_SYMBOL(nf_afinfo
);
34 const struct nf_ipv6_ops __rcu
*nf_ipv6_ops __read_mostly
;
35 EXPORT_SYMBOL_GPL(nf_ipv6_ops
);
37 DEFINE_PER_CPU(bool, nf_skb_duplicated
);
38 EXPORT_SYMBOL_GPL(nf_skb_duplicated
);
40 int nf_register_afinfo(const struct nf_afinfo
*afinfo
)
42 mutex_lock(&afinfo_mutex
);
43 RCU_INIT_POINTER(nf_afinfo
[afinfo
->family
], afinfo
);
44 mutex_unlock(&afinfo_mutex
);
47 EXPORT_SYMBOL_GPL(nf_register_afinfo
);
49 void nf_unregister_afinfo(const struct nf_afinfo
*afinfo
)
51 mutex_lock(&afinfo_mutex
);
52 RCU_INIT_POINTER(nf_afinfo
[afinfo
->family
], NULL
);
53 mutex_unlock(&afinfo_mutex
);
56 EXPORT_SYMBOL_GPL(nf_unregister_afinfo
);
#ifdef HAVE_JUMP_LABEL
/* One static key per (pf, hooknum) pair, bumped on hook (un)registration
 * below (NOTE: presumably lets the hook entry fast-path patch itself out
 * when a chain is empty — confirm at the nf_hook_thresh() call sites). */
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif /* HAVE_JUMP_LABEL */

/* Protects the per-netns and per-device hook lists during (un)registration. */
static DEFINE_MUTEX(nf_hook_mutex);
65 static struct list_head
*find_nf_hook_list(struct net
*net
,
66 const struct nf_hook_ops
*reg
)
68 struct list_head
*nf_hook_list
= NULL
;
70 if (reg
->pf
!= NFPROTO_NETDEV
)
71 nf_hook_list
= &net
->nf
.hooks
[reg
->pf
][reg
->hooknum
];
72 else if (reg
->hooknum
== NF_NETDEV_INGRESS
) {
73 #ifdef CONFIG_NETFILTER_INGRESS
74 if (reg
->dev
&& dev_net(reg
->dev
) == net
)
75 nf_hook_list
= ®
->dev
->nf_hooks_ingress
;
81 int nf_register_net_hook(struct net
*net
, const struct nf_hook_ops
*reg
)
83 struct list_head
*nf_hook_list
;
84 struct nf_hook_ops
*elem
, *new;
86 new = kzalloc(sizeof(*new), GFP_KERNEL
);
90 new->hook
= reg
->hook
;
92 new->owner
= reg
->owner
;
93 new->priv
= reg
->priv
;
95 new->hooknum
= reg
->hooknum
;
96 new->priority
= reg
->priority
;
98 nf_hook_list
= find_nf_hook_list(net
, reg
);
102 mutex_lock(&nf_hook_mutex
);
103 list_for_each_entry(elem
, nf_hook_list
, list
) {
104 if (reg
->priority
< elem
->priority
)
107 list_add_rcu(&new->list
, elem
->list
.prev
);
108 mutex_unlock(&nf_hook_mutex
);
109 #ifdef CONFIG_NETFILTER_INGRESS
110 if (reg
->pf
== NFPROTO_NETDEV
&& reg
->hooknum
== NF_NETDEV_INGRESS
)
111 net_inc_ingress_queue();
113 #ifdef HAVE_JUMP_LABEL
114 static_key_slow_inc(&nf_hooks_needed
[reg
->pf
][reg
->hooknum
]);
118 EXPORT_SYMBOL(nf_register_net_hook
);
120 void nf_unregister_net_hook(struct net
*net
, const struct nf_hook_ops
*reg
)
122 struct list_head
*nf_hook_list
;
123 struct nf_hook_ops
*elem
;
125 nf_hook_list
= find_nf_hook_list(net
, reg
);
129 mutex_lock(&nf_hook_mutex
);
130 list_for_each_entry(elem
, nf_hook_list
, list
) {
131 if ((reg
->hook
== elem
->hook
) &&
132 (reg
->dev
== elem
->dev
) &&
133 (reg
->owner
== elem
->owner
) &&
134 (reg
->priv
== elem
->priv
) &&
135 (reg
->pf
== elem
->pf
) &&
136 (reg
->hooknum
== elem
->hooknum
) &&
137 (reg
->priority
== elem
->priority
)) {
138 list_del_rcu(&elem
->list
);
142 mutex_unlock(&nf_hook_mutex
);
143 if (&elem
->list
== nf_hook_list
) {
144 WARN(1, "nf_unregister_net_hook: hook not found!\n");
147 #ifdef CONFIG_NETFILTER_INGRESS
148 if (reg
->pf
== NFPROTO_NETDEV
&& reg
->hooknum
== NF_NETDEV_INGRESS
)
149 net_dec_ingress_queue();
151 #ifdef HAVE_JUMP_LABEL
152 static_key_slow_dec(&nf_hooks_needed
[reg
->pf
][reg
->hooknum
]);
155 nf_queue_nf_hook_drop(elem
);
158 EXPORT_SYMBOL(nf_unregister_net_hook
);
160 int nf_register_net_hooks(struct net
*net
, const struct nf_hook_ops
*reg
,
166 for (i
= 0; i
< n
; i
++) {
167 err
= nf_register_net_hook(net
, ®
[i
]);
175 nf_unregister_net_hooks(net
, reg
, i
);
178 EXPORT_SYMBOL(nf_register_net_hooks
);
180 void nf_unregister_net_hooks(struct net
*net
, const struct nf_hook_ops
*reg
,
184 nf_unregister_net_hook(net
, ®
[n
]);
186 EXPORT_SYMBOL(nf_unregister_net_hooks
);
/* Hooks registered via the legacy (non-netns) API; replayed into every
 * new namespace by nf_register_hook_list().  Guarded by the rtnl lock. */
static LIST_HEAD(nf_hook_list);
190 int nf_register_hook(struct nf_hook_ops
*reg
)
192 struct net
*net
, *last
;
197 ret
= nf_register_net_hook(net
, reg
);
198 if (ret
&& ret
!= -ENOENT
)
201 list_add_tail(®
->list
, &nf_hook_list
);
210 nf_unregister_net_hook(net
, reg
);
215 EXPORT_SYMBOL(nf_register_hook
);
217 void nf_unregister_hook(struct nf_hook_ops
*reg
)
222 list_del(®
->list
);
224 nf_unregister_net_hook(net
, reg
);
227 EXPORT_SYMBOL(nf_unregister_hook
);
229 int nf_register_hooks(struct nf_hook_ops
*reg
, unsigned int n
)
234 for (i
= 0; i
< n
; i
++) {
235 err
= nf_register_hook(®
[i
]);
243 nf_unregister_hooks(reg
, i
);
246 EXPORT_SYMBOL(nf_register_hooks
);
248 void nf_unregister_hooks(struct nf_hook_ops
*reg
, unsigned int n
)
251 nf_unregister_hook(®
[n
]);
253 EXPORT_SYMBOL(nf_unregister_hooks
);
255 unsigned int nf_iterate(struct list_head
*head
,
257 struct nf_hook_state
*state
,
258 struct nf_hook_ops
**elemp
)
260 unsigned int verdict
;
263 * The caller must not block between calls to this
264 * function because of risk of continuing from deleted element.
266 list_for_each_entry_continue_rcu((*elemp
), head
, list
) {
267 if (state
->thresh
> (*elemp
)->priority
)
270 /* Optimization: we don't need to hold module
271 reference here, since function can't sleep. --RR */
273 verdict
= (*elemp
)->hook(*elemp
, skb
, state
);
274 if (verdict
!= NF_ACCEPT
) {
275 #ifdef CONFIG_NETFILTER_DEBUG
276 if (unlikely((verdict
& NF_VERDICT_MASK
)
278 NFDEBUG("Evil return from %p(%u).\n",
279 (*elemp
)->hook
, state
->hook
);
283 if (verdict
!= NF_REPEAT
)
292 /* Returns 1 if okfn() needs to be executed by the caller,
293 * -EPERM for NF_DROP, 0 otherwise. */
294 int nf_hook_slow(struct sk_buff
*skb
, struct nf_hook_state
*state
)
296 struct nf_hook_ops
*elem
;
297 unsigned int verdict
;
300 /* We may already have this, but read-locks nest anyway */
303 elem
= list_entry_rcu(state
->hook_list
, struct nf_hook_ops
, list
);
305 verdict
= nf_iterate(state
->hook_list
, skb
, state
, &elem
);
306 if (verdict
== NF_ACCEPT
|| verdict
== NF_STOP
) {
308 } else if ((verdict
& NF_VERDICT_MASK
) == NF_DROP
) {
310 ret
= NF_DROP_GETERR(verdict
);
313 } else if ((verdict
& NF_VERDICT_MASK
) == NF_QUEUE
) {
314 int err
= nf_queue(skb
, elem
, state
,
315 verdict
>> NF_VERDICT_QBITS
);
317 if (err
== -ECANCELED
)
320 (verdict
& NF_VERDICT_FLAG_QUEUE_BYPASS
))
328 EXPORT_SYMBOL(nf_hook_slow
);
331 int skb_make_writable(struct sk_buff
*skb
, unsigned int writable_len
)
333 if (writable_len
> skb
->len
)
336 /* Not exclusive use of packet? Must copy. */
337 if (!skb_cloned(skb
)) {
338 if (writable_len
<= skb_headlen(skb
))
340 } else if (skb_clone_writable(skb
, writable_len
))
343 if (writable_len
<= skb_headlen(skb
))
346 writable_len
-= skb_headlen(skb
);
348 return !!__pskb_pull_tail(skb
, writable_len
);
350 EXPORT_SYMBOL(skb_make_writable
);
352 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
353 /* This does not belong here, but locally generated errors need it if connection
354 tracking in use: without this, connection may not be in hash table, and hence
355 manufactured ICMP or RST packets will not be associated with it. */
356 void (*ip_ct_attach
)(struct sk_buff
*, const struct sk_buff
*)
358 EXPORT_SYMBOL(ip_ct_attach
);
360 void nf_ct_attach(struct sk_buff
*new, const struct sk_buff
*skb
)
362 void (*attach
)(struct sk_buff
*, const struct sk_buff
*);
366 attach
= rcu_dereference(ip_ct_attach
);
372 EXPORT_SYMBOL(nf_ct_attach
);
374 void (*nf_ct_destroy
)(struct nf_conntrack
*) __rcu __read_mostly
;
375 EXPORT_SYMBOL(nf_ct_destroy
);
377 void nf_conntrack_destroy(struct nf_conntrack
*nfct
)
379 void (*destroy
)(struct nf_conntrack
*);
382 destroy
= rcu_dereference(nf_ct_destroy
);
383 BUG_ON(destroy
== NULL
);
387 EXPORT_SYMBOL(nf_conntrack_destroy
);
389 struct nfq_ct_hook __rcu
*nfq_ct_hook __read_mostly
;
390 EXPORT_SYMBOL_GPL(nfq_ct_hook
);
392 struct nfq_ct_nat_hook __rcu
*nfq_ct_nat_hook __read_mostly
;
393 EXPORT_SYMBOL_GPL(nfq_ct_nat_hook
);
395 #endif /* CONFIG_NF_CONNTRACK */
#ifdef CONFIG_NF_NAT_NEEDED
/* Installed by the NAT core; lets flow lookup undo NAT on the flowi key. */
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
402 static int nf_register_hook_list(struct net
*net
)
404 struct nf_hook_ops
*elem
;
408 list_for_each_entry(elem
, &nf_hook_list
, list
) {
409 ret
= nf_register_net_hook(net
, elem
);
410 if (ret
&& ret
!= -ENOENT
)
417 list_for_each_entry_continue_reverse(elem
, &nf_hook_list
, list
)
418 nf_unregister_net_hook(net
, elem
);
423 static void nf_unregister_hook_list(struct net
*net
)
425 struct nf_hook_ops
*elem
;
428 list_for_each_entry(elem
, &nf_hook_list
, list
)
429 nf_unregister_net_hook(net
, elem
);
433 static int __net_init
netfilter_net_init(struct net
*net
)
437 for (i
= 0; i
< ARRAY_SIZE(net
->nf
.hooks
); i
++) {
438 for (h
= 0; h
< NF_MAX_HOOKS
; h
++)
439 INIT_LIST_HEAD(&net
->nf
.hooks
[i
][h
]);
442 #ifdef CONFIG_PROC_FS
443 net
->nf
.proc_netfilter
= proc_net_mkdir(net
, "netfilter",
445 if (!net
->nf
.proc_netfilter
) {
446 if (!net_eq(net
, &init_net
))
447 pr_err("cannot create netfilter proc entry");
452 ret
= nf_register_hook_list(net
);
454 remove_proc_entry("netfilter", net
->proc_net
);
459 static void __net_exit
netfilter_net_exit(struct net
*net
)
461 nf_unregister_hook_list(net
);
462 remove_proc_entry("netfilter", net
->proc_net
);
465 static struct pernet_operations netfilter_net_ops
= {
466 .init
= netfilter_net_init
,
467 .exit
= netfilter_net_exit
,
470 int __init
netfilter_init(void)
474 ret
= register_pernet_subsys(&netfilter_net_ops
);
478 ret
= netfilter_log_init();
484 unregister_pernet_subsys(&netfilter_net_ops
);