net/netfilter/nf_conntrack_ecache.c
/* Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

static DEFINE_MUTEX(nf_ct_ecache_mutex);

struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);

struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_expect_event_cb);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	unsigned long events;
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(nf_conntrack_event_cb);
	if (notify == NULL)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	events = xchg(&e->cache, 0);

	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
		struct nf_ct_event item = {
			.ct	= ct,
			.pid	= 0,
			.report	= 0
		};
		int ret;
		/* We make a copy of the missed event cache without taking
		 * the lock, thus we may send missed events twice. However,
		 * this does no harm and it happens only very rarely. */
		unsigned long missed = e->missed;

		if (!((events | missed) & e->ctmask))
			goto out_unlock;

		ret = notify->fcn(events | missed, &item);
		if (unlikely(ret < 0 || missed)) {
			spin_lock_bh(&ct->lock);
			if (ret < 0)
				e->missed |= events;
			else
				e->missed &= ~missed;
			spin_unlock_bh(&ct->lock);
		}
	}

out_unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
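
/*
 * Illustrative sketch, not part of this file: events are queued per
 * conntrack with nf_conntrack_event_cache() (assumed to be the helper
 * declared in <net/netfilter/nf_conntrack_ecache.h> for this kernel) and
 * flushed in one batch by nf_ct_deliver_cached_events(), typically from
 * the confirm path once the hooks have run.  The function below is
 * hypothetical and only shows the intended call order.
 */
static void example_mark_assured(struct nf_conn *ct)
{
	/* record the state change in e->cache; no notifier runs yet */
	nf_conntrack_event_cache(IPCT_ASSURED, ct);

	/* hand all cached IPCT_* bits to the registered notifier */
	nf_ct_deliver_cached_events(ct);
}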

int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
{
	int ret = 0;
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	RCU_INIT_POINTER(nf_conntrack_event_cb, new);
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
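
/*
 * Illustrative sketch, not part of this file: how a single consumer such
 * as ctnetlink hooks in.  Only one notifier can be registered at a time,
 * hence the -EBUSY above.  The names below (example_ct_event,
 * example_ct_notifier, example_init/example_exit) are hypothetical; the
 * expectation notifier registered via nf_ct_expect_register_notifier()
 * below is used in exactly the same way.
 */
static int example_ct_event(unsigned int events, struct nf_ct_event *item)
{
	/* events is a bitmask of IPCT_* values describing item->ct;
	 * returning a negative value asks the cache to keep them as
	 * "missed" events and redeliver them later */
	return 0;
}

static struct nf_ct_event_notifier example_ct_notifier = {
	.fcn	= example_ct_event,
};

static int __init example_init(void)
{
	return nf_conntrack_register_notifier(&example_ct_notifier);
}

static void __exit example_exit(void)
{
	nf_conntrack_unregister_notifier(&example_ct_notifier);
}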

int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
{
	int ret = 0;
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	RCU_INIT_POINTER(nf_expect_event_cb, new);
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
{
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(nf_expect_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);

#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_events_retry_timeout",
		.data		= &init_net.ct.sysctl_events_retry_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{}
};
#endif /* CONFIG_SYSCTL */

static struct nf_ct_ext_type event_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};
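
/*
 * Illustrative sketch, not part of this file: once event_extend is
 * registered, a per-conntrack struct nf_conntrack_ecache is attached
 * through the extension infrastructure, roughly as the conntrack core
 * does at allocation time.  nf_ct_ecache_ext_add() is assumed to be the
 * helper from <net/netfilter/nf_conntrack_ecache.h>; the mask values and
 * GFP flag below are arbitrary and the function itself is hypothetical.
 */
static void example_attach_ecache(struct nf_conn *ct)
{
	struct nf_conntrack_ecache *e;

	/* ctmask/expmask of ~0 asks for every event type */
	e = nf_ct_ecache_ext_add(ct, ~0, ~0, GFP_ATOMIC);
	if (!e)
		return;	/* no extension attached: no events for this ct */

	/* nf_ct_deliver_cached_events() above will now consult e->cache,
	 * e->missed and e->ctmask for this conntrack */
}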

#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	table[0].data = &net->ct.sysctl_events;
	table[1].data = &net->ct.sysctl_events_retry_timeout;

	net->ct.event_sysctl_header =
		register_net_sysctl_table(net,
					  nf_net_netfilter_sysctl_path, table);
	if (!net->ct.event_sysctl_header) {
		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.event_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

int nf_conntrack_ecache_init(struct net *net)
{
	int ret;

	net->ct.sysctl_events = nf_ct_events;
	net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;

	if (net_eq(net, &init_net)) {
		ret = nf_ct_extend_register(&event_extend);
		if (ret < 0) {
			printk(KERN_ERR "nf_ct_event: Unable to register "
					"event extension.\n");
			goto out_extend_register;
		}
	}

	ret = nf_conntrack_event_init_sysctl(net);
	if (ret < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
out_extend_register:
	return ret;
}

void nf_conntrack_ecache_fini(struct net *net)
{
	nf_conntrack_event_fini_sysctl(net);
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
}
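
/*
 * Illustrative sketch, not part of this file: nf_conntrack_ecache_init()
 * and nf_conntrack_ecache_fini() are assumed to be driven from the
 * conntrack per-netns setup in nf_conntrack_core.c; the pernet functions
 * below are hypothetical and only show the expected ordering.
 */
static int example_pernet_init(struct net *net)
{
	int err;

	err = nf_conntrack_ecache_init(net);	/* sysctls + extension */
	if (err < 0)
		return err;
	/* ... remaining per-netns conntrack initialisation ... */
	return 0;
}

static void example_pernet_exit(struct net *net)
{
	/* ... remaining per-netns conntrack teardown ... */
	nf_conntrack_ecache_fini(net);
}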