// SPDX-License-Identifier: GPL-2.0-only
/* Event cache for netfilter.
 *
 * (C) 2005 Harald Welte <laforge@gnumonks.org>
 * (C) 2005 Patrick McHardy <kaber@trash.net>
 * (C) 2005-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2005 USAGI/WIDE Project <http://www.linux-ipv6.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_ecache_mutex);
#define ECACHE_RETRY_WAIT (HZ/10)
#define ECACHE_STACK_ALLOC (256 / sizeof(void *))
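/* Delivery of a destroy event can fail when the netlink receiver is
 * congested. ECACHE_RETRY_WAIT is how long the eviction worker waits
 * before retrying, and ECACHE_STACK_ALLOC caps the number of conntrack
 * references collected on the stack per eviction pass.
 */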
enum retry_state {
	STATE_CONGESTED,
	STATE_RESTART,
	STATE_DONE,
};

static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
{
	struct nf_conn *refs[ECACHE_STACK_ALLOC];
	enum retry_state ret = STATE_DONE;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int evicted = 0;

	spin_lock(&pcpu->lock);

	hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
		struct nf_conntrack_ecache *e;

		if (!nf_ct_is_confirmed(ct))
			continue;

		/* This ecache access is safe because the ct is on the
		 * pcpu dying list and we hold the spinlock -- the entry
		 * cannot be freed until after the lock is released.
		 *
		 * This is true even if ct has a refcount of 0: the
		 * cpu that is about to free the entry must remove it
		 * from the dying list and needs the lock to do so.
		 */
		e = nf_ct_ecache_find(ct);
		if (!e || e->state != NFCT_ECACHE_DESTROY_FAIL)
			continue;

		/* ct is in NFCT_ECACHE_DESTROY_FAIL state, this means
		 * the worker owns this entry: the ct will remain valid
		 * until the worker puts its ct reference.
		 */
		if (nf_conntrack_event(IPCT_DESTROY, ct)) {
			ret = STATE_CONGESTED;
			break;
		}

		e->state = NFCT_ECACHE_DESTROY_SENT;
		refs[evicted] = ct;

		if (++evicted >= ARRAY_SIZE(refs)) {
			ret = STATE_RESTART;
			break;
		}
	}

	spin_unlock(&pcpu->lock);

	/* can't _put while holding lock */
	while (evicted)
		nf_ct_put(refs[--evicted]);

	return ret;
}
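/* Walk the dying list of each possible CPU and resend the destroy events
 * that could not be delivered earlier. On congestion the scan stops and
 * the worker is rescheduled after ECACHE_RETRY_WAIT; if a per-cpu pass
 * filled its on-stack reference array, the worker is rescheduled
 * immediately; once all lists are drained, it is not rescheduled at all.
 */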
static void ecache_work(struct work_struct *work)
{
	struct netns_ct *ctnet =
		container_of(work, struct netns_ct, ecache_dwork.work);
	int cpu, delay = -1;
	struct ct_pcpu *pcpu;

	local_bh_disable();

	for_each_possible_cpu(cpu) {
		enum retry_state ret;

		pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);

		ret = ecache_work_evict_list(pcpu);

		switch (ret) {
		case STATE_CONGESTED:
			delay = ECACHE_RETRY_WAIT;
			goto out;
		case STATE_RESTART:
			delay = 0;
			break;
		case STATE_DONE:
			break;
		}
	}

out:
	local_bh_enable();

	ctnet->ecache_dwork_pending = delay > 0;
	if (delay >= 0)
		schedule_delayed_work(&ctnet->ecache_dwork, delay);
}
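/* Deliver the events in eventmask to the registered notifier right away.
 * If delivery fails, a destroy event is parked in NFCT_ECACHE_DESTROY_FAIL
 * state for the eviction worker to retransmit; any other event is added to
 * the "missed" bitmask and redelivered with the next event for this entry.
 */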
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
				  u32 portid, int report)
{
	int ret = 0;
	struct net *net = nf_ct_net(ct);
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (!notify)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (!e)
		goto out_unlock;

	if (nf_ct_is_confirmed(ct)) {
		struct nf_ct_event item = {
			.ct	= ct,
			.portid	= e->portid ? e->portid : portid,
			.report = report
		};
		/* Is this a resend of a destroy event? If so, skip missed */
		unsigned long missed = e->portid ? 0 : e->missed;

		if (!((eventmask | missed) & e->ctmask))
			goto out_unlock;

		ret = notify->fcn(eventmask | missed, &item);
		if (unlikely(ret < 0 || missed)) {
			spin_lock_bh(&ct->lock);
			if (ret < 0) {
				/* This is a destroy event that has been
				 * triggered by a process, we store the PORTID
				 * to include it in the retransmission.
				 */
				if (eventmask & (1 << IPCT_DESTROY)) {
					if (e->portid == 0 && portid != 0)
						e->portid = portid;
					e->state = NFCT_ECACHE_DESTROY_FAIL;
				} else {
					e->missed |= eventmask;
				}
			} else {
				e->missed &= ~missed;
			}
			spin_unlock_bh(&ct->lock);
		}
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
/* Deliver cached events and clear the cache entry - must be called with
 * locally disabled softirqs.
 */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned long events, missed;
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	int ret;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (!notify)
		goto out_unlock;

	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct))
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (!e)
		goto out_unlock;

	events = xchg(&e->cache, 0);

	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice. However,
	 * this causes no harm and happens very rarely.
	 */
	missed = e->missed;

	if (!((events | missed) & e->ctmask))
		goto out_unlock;

	item.ct = ct;
	item.portid = 0;
	item.report = 0;

	ret = notify->fcn(events | missed, &item);

	if (likely(ret == 0 && !missed))
		goto out_unlock;

	spin_lock_bh(&ct->lock);
	if (ret < 0)
		e->missed |= events;
	else
		e->missed &= ~missed;
	spin_unlock_bh(&ct->lock);

out_unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
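/* Expectation events are best effort: they are reported once if the
 * master conntrack's ecache extension allows them; there is no missed
 * bitmask or retransmission for them.
 */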
void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
			       struct nf_conntrack_expect *exp,
			       u32 portid, int report)
{
	struct net *net = nf_ct_exp_net(exp);
	struct nf_exp_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_expect_event_cb);
	if (!notify)
		goto out_unlock;

	e = nf_ct_ecache_find(exp->master);
	if (!e)
		goto out_unlock;

	if (e->expmask & (1 << event)) {
		struct nf_exp_event item = {
			.exp	= exp,
			.portid	= portid,
			.report = report
		};
		notify->fcn(1 << event, &item);
	}
out_unlock:
	rcu_read_unlock();
}
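/* Only one conntrack event notifier can be registered per network
 * namespace; in-tree that is ctnetlink. Registration fails with -EBUSY
 * while another notifier is installed. A minimal usage sketch (the
 * my_event_cb and my_notifier names are hypothetical):
 *
 *	static struct nf_ct_event_notifier my_notifier = {
 *		.fcn = my_event_cb,	// int (*)(unsigned int, struct nf_ct_event *)
 *	};
 *	err = nf_conntrack_register_notifier(net, &my_notifier);
 *	...
 *	nf_conntrack_unregister_notifier(net, &my_notifier);
 */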
int nf_conntrack_register_notifier(struct net *net,
				   struct nf_ct_event_notifier *new)
{
	int ret;
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
void nf_conntrack_unregister_notifier(struct net *net,
				      struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
	/* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
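/* The expectation notifier API below mirrors the conntrack one: a single
 * notifier per netns, protected by nf_ct_ecache_mutex.
 */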
int nf_ct_expect_register_notifier(struct net *net,
				   struct nf_exp_event_notifier *new)
{
	int ret;
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
void nf_ct_expect_unregister_notifier(struct net *net,
				      struct nf_exp_event_notifier *new)
{
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
	/* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
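/* nf_ct_events is the compile-time default for the per-netns event
 * switch; nf_conntrack_ecache_pernet_init() below copies it into
 * net->ct.sysctl_events.
 */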
static const struct nf_ct_ext_type event_extend = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};
void nf_conntrack_ecache_pernet_init(struct net *net)
{
	net->ct.sysctl_events = nf_ct_events;
	INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work);
}
void nf_conntrack_ecache_pernet_fini(struct net *net)
{
	cancel_delayed_work_sync(&net->ct.ecache_dwork);
}
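/* Register the ecache conntrack extension so each conntrack entry can
 * carry a struct nf_conntrack_ecache.
 */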
int nf_conntrack_ecache_init(void)
{
	int ret = nf_ct_extend_register(&event_extend);

	if (ret < 0)
		pr_err("Unable to register event extension\n");

	BUILD_BUG_ON(__IPCT_MAX >= 16);	/* ctmask, missed use u16 */

	return ret;
}
void nf_conntrack_ecache_fini(void)
{
	nf_ct_extend_unregister(&event_extend);
}