net/netfilter/nfnetlink.c (mirror_ubuntu-artful-kernel.git)
/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>

#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);

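/*
 * Return the registered subsystem for @id; only legal while the
 * corresponding per-subsystem mutex is held (checked via
 * lockdep_nfnl_is_held() when lockdep is enabled).
 */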
#define nfnl_dereference_protected(id) \
        rcu_dereference_protected(table[(id)].subsys, \
                                  lockdep_nfnl_is_held((id)))

static char __initdata nfversion[] = "0.30";

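/*
 * Per-subsystem state: the mutex serializes (un)registration and the
 * non-RCU message handlers, while the subsys pointer itself is
 * RCU-protected so the receive path can look it up locklessly.
 */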
static struct {
        struct mutex mutex;
        const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

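/*
 * Map each nfnetlink multicast group to the subsystem that serves it;
 * used by nfnetlink_bind() to autoload the right module when userspace
 * subscribes to a group.
 */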
static const int nfnl_group2type[NFNLGRP_MAX+1] = {
        [NFNLGRP_CONNTRACK_NEW]         = NFNL_SUBSYS_CTNETLINK,
        [NFNLGRP_CONNTRACK_UPDATE]      = NFNL_SUBSYS_CTNETLINK,
        [NFNLGRP_CONNTRACK_DESTROY]     = NFNL_SUBSYS_CTNETLINK,
        [NFNLGRP_CONNTRACK_EXP_NEW]     = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_CONNTRACK_EXP_UPDATE]  = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_NFTABLES]              = NFNL_SUBSYS_NFTABLES,
        [NFNLGRP_ACCT_QUOTA]            = NFNL_SUBSYS_ACCT,
        [NFNLGRP_NFTRACE]               = NFNL_SUBSYS_NFTABLES,
};

void nfnl_lock(__u8 subsys_id)
{
        mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
        mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
        return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

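/*
 * Register a message family with nfnetlink.  Each subsystem id can be
 * claimed exactly once; a second registration fails with -EBUSY until
 * the current owner unregisters.
 *
 * Illustrative sketch only (identifiers below are hypothetical, not
 * taken from this file):
 *
 *      static const struct nfnl_callback my_cb[MY_MSG_COUNT] = { ... };
 *      static const struct nfnetlink_subsystem my_subsys = {
 *              .name      = "my_subsys",
 *              .subsys_id = MY_SUBSYS_ID,
 *              .cb_count  = MY_MSG_COUNT,
 *              .cb        = my_cb,
 *      };
 *      err = nfnetlink_subsys_register(&my_subsys);
 */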
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
        nfnl_lock(n->subsys_id);
        if (table[n->subsys_id].subsys) {
                nfnl_unlock(n->subsys_id);
                return -EBUSY;
        }
        rcu_assign_pointer(table[n->subsys_id].subsys, n);
        nfnl_unlock(n->subsys_id);

        return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
        nfnl_lock(n->subsys_id);
        table[n->subsys_id].subsys = NULL;
        nfnl_unlock(n->subsys_id);
        synchronize_rcu();
        return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

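/*
 * Look up the subsystem that owns a netlink message type.  The subsystem
 * id lives in the high byte of the type; callers must hold either
 * rcu_read_lock() or the corresponding nfnl mutex.
 */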
static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
        u8 subsys_id = NFNL_SUBSYS_ID(type);

        if (subsys_id >= NFNL_SUBSYS_COUNT)
                return NULL;

        return rcu_dereference(table[subsys_id].subsys);
}

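/*
 * Map the low byte of the message type to the subsystem's callback entry,
 * or return NULL if the type is out of range for this subsystem.
 */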
static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
        u8 cb_id = NFNL_MSG_TYPE(type);

        if (cb_id >= ss->cb_count)
                return NULL;

        return &ss->cb[cb_id];
}

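/*
 * Thin wrappers around the generic netlink helpers, operating on the
 * per-namespace NETLINK_NETFILTER socket (net->nfnl).
 */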
int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
        return netlink_has_listeners(net->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
                   unsigned int group, int echo, gfp_t flags)
{
        return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
        return netlink_set_err(net->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
                      int flags)
{
        return netlink_unicast(net->nfnl, skb, portid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

/* Process one complete nfnetlink message. */
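/*
 * The subsystem is looked up under rcu_read_lock(), autoloading its module
 * if necessary.  Handlers that provide ->call_rcu run entirely under RCU;
 * otherwise RCU is dropped, the per-subsystem mutex is taken and the lookup
 * is revalidated, replaying the message if the subsystem changed underneath
 * us (-EAGAIN).
 */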
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        const struct nfnl_callback *nc;
        const struct nfnetlink_subsystem *ss;
        int type, err;

        /* All the messages must at least contain nfgenmsg */
        if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
                return 0;

        type = nlh->nlmsg_type;
replay:
        rcu_read_lock();
        ss = nfnetlink_get_subsys(type);
        if (!ss) {
#ifdef CONFIG_MODULES
                rcu_read_unlock();
                request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
                rcu_read_lock();
                ss = nfnetlink_get_subsys(type);
                if (!ss)
#endif
                {
                        rcu_read_unlock();
                        return -EINVAL;
                }
        }

        nc = nfnetlink_find_client(type, ss);
        if (!nc) {
                rcu_read_unlock();
                return -EINVAL;
        }

        {
                int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
                u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
                struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
                struct nlattr *attr = (void *)nlh + min_len;
                int attrlen = nlh->nlmsg_len - min_len;
                __u8 subsys_id = NFNL_SUBSYS_ID(type);

                err = nla_parse(cda, ss->cb[cb_id].attr_count, attr, attrlen,
                                ss->cb[cb_id].policy, extack);
                if (err < 0) {
                        rcu_read_unlock();
                        return err;
                }

                if (nc->call_rcu) {
                        err = nc->call_rcu(net, net->nfnl, skb, nlh,
                                           (const struct nlattr **)cda,
                                           extack);
                        rcu_read_unlock();
                } else {
                        rcu_read_unlock();
                        nfnl_lock(subsys_id);
                        if (nfnl_dereference_protected(subsys_id) != ss ||
                            nfnetlink_find_client(type, ss) != nc)
                                err = -EAGAIN;
                        else if (nc->call)
                                err = nc->call(net, net->nfnl, skb, nlh,
                                               (const struct nlattr **)cda,
                                               extack);
                        else
                                err = -EINVAL;
                        nfnl_unlock(subsys_id);
                }
                if (err == -EAGAIN)
                        goto replay;
                return err;
        }
}

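/*
 * Errors hit while walking a batch are queued on a list and only acked
 * to userspace once the whole batch has been processed, so a replayed
 * batch does not report the same error twice.
 */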
struct nfnl_err {
        struct list_head head;
        struct nlmsghdr *nlh;
        int err;
        struct netlink_ext_ack extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
                        const struct netlink_ext_ack *extack)
{
        struct nfnl_err *nfnl_err;

        nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
        if (nfnl_err == NULL)
                return -ENOMEM;

        nfnl_err->nlh = nlh;
        nfnl_err->err = err;
        nfnl_err->extack = *extack;
        list_add_tail(&nfnl_err->head, list);

        return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
        list_del(&nfnl_err->head);
        kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
        struct nfnl_err *nfnl_err, *next;

        list_for_each_entry_safe(nfnl_err, next, err_list, head)
                nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
        struct nfnl_err *nfnl_err, *next;

        list_for_each_entry_safe(nfnl_err, next, err_list, head) {
                netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
                            &nfnl_err->extack);
                nfnl_err_del(nfnl_err);
        }
}

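/* Outcome flags OR'ed into 'status' while a batch is being processed. */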
enum {
        NFNL_BATCH_FAILURE      = (1 << 0),
        NFNL_BATCH_DONE         = (1 << 1),
        NFNL_BATCH_REPLAY       = (1 << 2),
};

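/*
 * Process a transaction batch: everything between NFNL_MSG_BATCH_BEGIN and
 * NFNL_MSG_BATCH_END is parsed and handed to the subsystem's ->call_batch
 * handlers on a clone of the original skb.  If the end of the batch is seen
 * and nothing failed, ->commit is called; otherwise ->abort rolls it back.
 * A handler returning -EAGAIN (the lock was dropped to autoload a module)
 * makes us abort and replay the batch from the original skb.  Per-message
 * errors are queued and delivered only after the batch is done.
 */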
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
                                u16 subsys_id, u32 genid)
{
        struct sk_buff *oskb = skb;
        struct net *net = sock_net(skb->sk);
        const struct nfnetlink_subsystem *ss;
        const struct nfnl_callback *nc;
        struct netlink_ext_ack extack;
        LIST_HEAD(err_list);
        u32 status;
        int err;

        if (subsys_id >= NFNL_SUBSYS_COUNT)
                return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
        status = 0;

        skb = netlink_skb_clone(oskb, GFP_KERNEL);
        if (!skb)
                return netlink_ack(oskb, nlh, -ENOMEM, NULL);

        nfnl_lock(subsys_id);
        ss = nfnl_dereference_protected(subsys_id);
        if (!ss) {
#ifdef CONFIG_MODULES
                nfnl_unlock(subsys_id);
                request_module("nfnetlink-subsys-%d", subsys_id);
                nfnl_lock(subsys_id);
                ss = nfnl_dereference_protected(subsys_id);
                if (!ss)
#endif
                {
                        nfnl_unlock(subsys_id);
                        netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
                        return kfree_skb(skb);
                }
        }

        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
                netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
                return kfree_skb(skb);
        }

        if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) {
                nfnl_unlock(subsys_id);
                netlink_ack(oskb, nlh, -ERESTART, NULL);
                return kfree_skb(skb);
        }

        while (skb->len >= nlmsg_total_size(0)) {
                int msglen, type;

                memset(&extack, 0, sizeof(extack));
                nlh = nlmsg_hdr(skb);
                err = 0;

                if (nlh->nlmsg_len < NLMSG_HDRLEN ||
                    skb->len < nlh->nlmsg_len ||
                    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
                        nfnl_err_reset(&err_list);
                        status |= NFNL_BATCH_FAILURE;
                        goto done;
                }

                /* Only requests are handled by the kernel */
                if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
                        err = -EINVAL;
                        goto ack;
                }

                type = nlh->nlmsg_type;
                if (type == NFNL_MSG_BATCH_BEGIN) {
                        /* Malformed: Batch begin twice */
                        nfnl_err_reset(&err_list);
                        status |= NFNL_BATCH_FAILURE;
                        goto done;
                } else if (type == NFNL_MSG_BATCH_END) {
                        status |= NFNL_BATCH_DONE;
                        goto done;
                } else if (type < NLMSG_MIN_TYPE) {
                        err = -EINVAL;
                        goto ack;
                }

                /* We only accept a batch with messages for the same
                 * subsystem.
                 */
                if (NFNL_SUBSYS_ID(type) != subsys_id) {
                        err = -EINVAL;
                        goto ack;
                }

                nc = nfnetlink_find_client(type, ss);
                if (!nc) {
                        err = -EINVAL;
                        goto ack;
                }

                {
                        int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
                        u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
                        struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
                        struct nlattr *attr = (void *)nlh + min_len;
                        int attrlen = nlh->nlmsg_len - min_len;

                        err = nla_parse(cda, ss->cb[cb_id].attr_count, attr,
                                        attrlen, ss->cb[cb_id].policy, NULL);
                        if (err < 0)
                                goto ack;

                        if (nc->call_batch) {
                                err = nc->call_batch(net, net->nfnl, skb, nlh,
                                                     (const struct nlattr **)cda,
                                                     &extack);
                        }

                        /* The lock was released to autoload some module, we
                         * have to abort and start from scratch using the
                         * original skb.
                         */
                        if (err == -EAGAIN) {
                                status |= NFNL_BATCH_REPLAY;
                                goto next;
                        }
                }
ack:
                if (nlh->nlmsg_flags & NLM_F_ACK || err) {
                        /* Errors are delivered once the full batch has been
                         * processed, this avoids that the same error is
                         * reported several times when replaying the batch.
                         */
                        if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
                                /* We failed to enqueue an error, reset the
                                 * list of errors and send OOM to userspace
                                 * pointing to the batch header.
                                 */
                                nfnl_err_reset(&err_list);
                                netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
                                            NULL);
                                status |= NFNL_BATCH_FAILURE;
                                goto done;
                        }
                        /* We don't stop processing the batch on errors, thus,
                         * userspace gets all the errors that the batch
                         * triggers.
                         */
                        if (err)
                                status |= NFNL_BATCH_FAILURE;
                }
next:
                msglen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (msglen > skb->len)
                        msglen = skb->len;
                skb_pull(skb, msglen);
        }
done:
        if (status & NFNL_BATCH_REPLAY) {
                ss->abort(net, oskb);
                nfnl_err_reset(&err_list);
                nfnl_unlock(subsys_id);
                kfree_skb(skb);
                goto replay;
        } else if (status == NFNL_BATCH_DONE) {
                ss->commit(net, oskb);
        } else {
                ss->abort(net, oskb);
        }

        nfnl_err_deliver(&err_list, oskb);
        nfnl_unlock(subsys_id);
        kfree_skb(skb);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
        [NFNL_BATCH_GENID]      = { .type = NLA_U32 },
};

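/*
 * Handle an NFNL_MSG_BATCH_BEGIN message: validate the header, pull the
 * optional generation id (NFNL_BATCH_GENID) and the target subsystem from
 * res_id (working around old nft binaries that sent it in host byte order),
 * then feed the rest of the skb to nfnetlink_rcv_batch().
 */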
static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
        struct nlattr *attr = (void *)nlh + min_len;
        struct nlattr *cda[NFNL_BATCH_MAX + 1];
        int attrlen = nlh->nlmsg_len - min_len;
        struct nfgenmsg *nfgenmsg;
        int msglen, err;
        u32 gen_id = 0;
        u16 res_id;

        msglen = NLMSG_ALIGN(nlh->nlmsg_len);
        if (msglen > skb->len)
                msglen = skb->len;

        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
            skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
                return;

        err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
                        NULL);
        if (err < 0) {
                netlink_ack(skb, nlh, err, NULL);
                return;
        }
        if (cda[NFNL_BATCH_GENID])
                gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

        nfgenmsg = nlmsg_data(nlh);
        skb_pull(skb, msglen);
        /* Work around old nft using host byte order */
        if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
                res_id = NFNL_SUBSYS_NFTABLES;
        else
                res_id = ntohs(nfgenmsg->res_id);

        nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

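/*
 * Input path for the NETLINK_NETFILTER socket.  Every message must come
 * from a sender with CAP_NET_ADMIN (netlink_net_capable()); batch-begin
 * messages take the transaction path, everything else goes through
 * netlink_rcv_skb().
 */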
static void nfnetlink_rcv(struct sk_buff *skb)
{
        struct nlmsghdr *nlh = nlmsg_hdr(skb);

        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
            skb->len < nlh->nlmsg_len)
                return;

        if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
                netlink_ack(skb, nlh, -EPERM, NULL);
                return;
        }

        if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
                nfnetlink_rcv_skb_batch(skb, nlh);
        else
                netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}

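/*
 * When userspace subscribes to an nfnetlink multicast group, try to load
 * the module that provides the corresponding subsystem so the events
 * actually get generated.
 */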
#ifdef CONFIG_MODULES
static int nfnetlink_bind(struct net *net, int group)
{
        const struct nfnetlink_subsystem *ss;
        int type;

        if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
                return 0;

        type = nfnl_group2type[group];

        rcu_read_lock();
        ss = nfnetlink_get_subsys(type << 8);
        rcu_read_unlock();
        if (!ss)
                request_module("nfnetlink-subsys-%d", type);
        return 0;
}
#endif

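/*
 * Create the per-namespace NETLINK_NETFILTER kernel socket and publish it
 * through net->nfnl once it is fully set up.
 */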
static int __net_init nfnetlink_net_init(struct net *net)
{
        struct sock *nfnl;
        struct netlink_kernel_cfg cfg = {
                .groups = NFNLGRP_MAX,
                .input  = nfnetlink_rcv,
#ifdef CONFIG_MODULES
                .bind   = nfnetlink_bind,
#endif
        };

        nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
        if (!nfnl)
                return -ENOMEM;
        net->nfnl_stash = nfnl;
        rcu_assign_pointer(net->nfnl, nfnl);
        return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
        struct net *net;

        list_for_each_entry(net, net_exit_list, exit_list)
                RCU_INIT_POINTER(net->nfnl, NULL);
        synchronize_net();
        list_for_each_entry(net, net_exit_list, exit_list)
                netlink_kernel_release(net->nfnl_stash);
}

static struct pernet_operations nfnetlink_net_ops = {
        .init           = nfnetlink_net_init,
        .exit_batch     = nfnetlink_net_exit_batch,
};

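/*
 * Module init: sanity-check that every multicast group maps to a real
 * subsystem, initialize the per-subsystem mutexes and register the
 * per-netns operations.
 */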
static int __init nfnetlink_init(void)
{
        int i;

        for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
                BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

        for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
                mutex_init(&table[i].mutex);

        pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
        return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
        pr_info("Removing netfilter NETLINK layer.\n");
        unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);