/* Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

static DEFINE_MUTEX(nf_ct_ecache_mutex);

struct nf_ct_event_notifier *nf_conntrack_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);

struct nf_exp_event_notifier *nf_expect_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_expect_event_cb);

/* Deliver cached events and clear the cache entry - must be called with
 * softirqs disabled locally (see the illustrative sketch below). */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
        unsigned long events;
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;

        rcu_read_lock();
        notify = rcu_dereference(nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;

        e = nf_ct_ecache_find(ct);
        if (e == NULL)
                goto out_unlock;

        events = xchg(&e->cache, 0);

        if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
                struct nf_ct_event item = {
                        .ct = ct,
                        .pid = 0,
                        .report = 0
                };
                int ret;
                /* We make a copy of the missed event cache without taking
                 * the lock, thus we may send missed events twice. However,
                 * this does not harm and it happens very rarely. */
                unsigned long missed = e->missed;

                ret = notify->fcn(events | missed, &item);
                if (unlikely(ret < 0 || missed)) {
                        spin_lock_bh(&ct->lock);
                        if (ret < 0)
                                e->missed |= events;
                        else
                                e->missed &= ~missed;
                        spin_unlock_bh(&ct->lock);
                }
        }

out_unlock:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
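
/*
 * Illustrative sketch, not part of the original file: the usual pattern in
 * the conntrack core is to mark events on an entry as state changes happen
 * and to flush the cache once packet processing is done.  The helpers used
 * below (nf_conntrack_event_cache(), IPCT_PROTOINFO) are assumed to be the
 * ones declared in <net/netfilter/nf_conntrack_ecache.h>.
 */
#if 0
static void example_update_and_flush(struct nf_conn *ct)
{
        /* Record that protocol state changed.  This only sets a bit in the
         * per-conntrack event cache; no notifier runs yet. */
        nf_conntrack_event_cache(IPCT_PROTOINFO, ct);

        /* Flush the cache with softirqs disabled locally, as required by
         * nf_ct_deliver_cached_events().  Callers already running in
         * softirq context (packet processing) can skip the explicit
         * local_bh_disable()/local_bh_enable() pair. */
        local_bh_disable();
        nf_ct_deliver_cached_events(ct);
        local_bh_enable();
}
#endif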

/* Register the (single) conntrack event notifier.  Only one notifier may be
 * registered at a time; a second registration fails with -EBUSY. */
int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
{
        int ret = 0;
        struct nf_ct_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference(nf_conntrack_event_cb);
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
        rcu_assign_pointer(nf_conntrack_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;

out_unlock:
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
{
        struct nf_ct_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference(nf_conntrack_event_cb);
        BUG_ON(notify != new);
        rcu_assign_pointer(nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
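
/*
 * Illustrative sketch, not part of the original file: how a subscriber such
 * as ctnetlink hooks into the event cache.  The callback type follows
 * struct nf_ct_event_notifier as declared in
 * <net/netfilter/nf_conntrack_ecache.h>; the example_* names are made up
 * for illustration.
 */
#if 0
static int example_ct_event(unsigned int events, struct nf_ct_event *item)
{
        /* item->ct is the conntrack entry the event bits refer to. */
        if (events & (1 << IPCT_DESTROY))
                pr_debug("conntrack %p destroyed\n", item->ct);

        /* A negative return value makes the core remember the events in
         * e->missed and retry delivery later. */
        return 0;
}

static struct nf_ct_event_notifier example_notifier = {
        .fcn = example_ct_event,
};

static int __init example_init(void)
{
        /* Fails with -EBUSY if another subscriber is already registered. */
        return nf_conntrack_register_notifier(&example_notifier);
}

static void __exit example_exit(void)
{
        nf_conntrack_unregister_notifier(&example_notifier);
}
#endif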

/* Expectation events use a separate notifier slot with the same
 * single-subscriber semantics as the conntrack notifier above. */
int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
{
        int ret = 0;
        struct nf_exp_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference(nf_expect_event_cb);
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
        rcu_assign_pointer(nf_expect_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;

out_unlock:
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
{
        struct nf_exp_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference(nf_expect_event_cb);
        BUG_ON(notify != new);
        rcu_assign_pointer(nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
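
/*
 * Illustrative sketch, not part of the original file: an expectation event
 * subscriber looks just like the conntrack one, but receives a
 * struct nf_exp_event.  The example_* names are made up for illustration.
 */
#if 0
static int example_exp_event(unsigned int events, struct nf_exp_event *item)
{
        if (events & (1 << IPEXP_NEW))
                pr_debug("new expectation %p\n", item->exp);
        return 0;
}

static struct nf_exp_event_notifier example_exp_notifier = {
        .fcn = example_exp_event,
};
#endif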

#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
        {
                .procname = "nf_conntrack_events",
                .data = &init_net.ct.sysctl_events,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "nf_conntrack_events_retry_timeout",
                .data = &init_net.ct.sysctl_events_retry_timeout,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_jiffies,
        },
        {}
};
#endif /* CONFIG_SYSCTL */

static struct nf_ct_ext_type event_extend __read_mostly = {
        .len = sizeof(struct nf_conntrack_ecache),
        .align = __alignof__(struct nf_conntrack_ecache),
        .id = NF_CT_EXT_ECACHE,
};

#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
        struct ctl_table *table;

        table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
                        GFP_KERNEL);
        if (!table)
                goto out;

        table[0].data = &net->ct.sysctl_events;
        table[1].data = &net->ct.sysctl_events_retry_timeout;

        net->ct.event_sysctl_header =
                register_net_sysctl_table(net,
                                          nf_net_netfilter_sysctl_path, table);
        if (!net->ct.event_sysctl_header) {
                printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
                goto out_register;
        }
        return 0;

out_register:
        kfree(table);
out:
        return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
        struct ctl_table *table;

        table = net->ct.event_sysctl_header->ctl_table_arg;
        unregister_net_sysctl_table(net->ct.event_sysctl_header);
        kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
        return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

int nf_conntrack_ecache_init(struct net *net)
{
        int ret;

        net->ct.sysctl_events = nf_ct_events;
        net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;

        if (net_eq(net, &init_net)) {
                ret = nf_ct_extend_register(&event_extend);
                if (ret < 0) {
                        printk(KERN_ERR "nf_ct_event: Unable to register "
                                        "event extension.\n");
                        goto out_extend_register;
                }
        }

        ret = nf_conntrack_event_init_sysctl(net);
        if (ret < 0)
                goto out_sysctl;

        return 0;

out_sysctl:
        if (net_eq(net, &init_net))
                nf_ct_extend_unregister(&event_extend);
out_extend_register:
        return ret;
}

void nf_conntrack_ecache_fini(struct net *net)
{
        nf_conntrack_event_fini_sysctl(net);
        if (net_eq(net, &init_net))
                nf_ct_extend_unregister(&event_extend);
}