1 /* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick McHardy <kaber@trash.net>
7 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8 *
9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11 *
12 * Further development of this code funded by Astaro AG (http://www.astaro.com)
13 *
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
16 */
17
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32
33 #include <linux/netfilter.h>
34 #include <net/netlink.h>
35 #include <net/sock.h>
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_core.h>
38 #include <net/netfilter/nf_conntrack_expect.h>
39 #include <net/netfilter/nf_conntrack_helper.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_tuple.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_zones.h>
45 #include <net/netfilter/nf_conntrack_timestamp.h>
46 #include <net/netfilter/nf_conntrack_labels.h>
47 #ifdef CONFIG_NF_NAT_NEEDED
48 #include <net/netfilter/nf_nat_core.h>
49 #include <net/netfilter/nf_nat_l4proto.h>
50 #include <net/netfilter/nf_nat_helper.h>
51 #endif
52
53 #include <linux/netfilter/nfnetlink.h>
54 #include <linux/netfilter/nfnetlink_conntrack.h>
55
56 MODULE_LICENSE("GPL");
57
58 static char __initdata version[] = "0.93";
59
60 static inline int
61 ctnetlink_dump_tuples_proto(struct sk_buff *skb,
62 const struct nf_conntrack_tuple *tuple,
63 struct nf_conntrack_l4proto *l4proto)
64 {
65 int ret = 0;
66 struct nlattr *nest_parms;
67
68 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
69 if (!nest_parms)
70 goto nla_put_failure;
71 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
72 goto nla_put_failure;
73
74 if (likely(l4proto->tuple_to_nlattr))
75 ret = l4proto->tuple_to_nlattr(skb, tuple);
76
77 nla_nest_end(skb, nest_parms);
78
79 return ret;
80
81 nla_put_failure:
82 return -1;
83 }
84
85 static inline int
86 ctnetlink_dump_tuples_ip(struct sk_buff *skb,
87 const struct nf_conntrack_tuple *tuple,
88 struct nf_conntrack_l3proto *l3proto)
89 {
90 int ret = 0;
91 struct nlattr *nest_parms;
92
93 nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
94 if (!nest_parms)
95 goto nla_put_failure;
96
97 if (likely(l3proto->tuple_to_nlattr))
98 ret = l3proto->tuple_to_nlattr(skb, tuple);
99
100 nla_nest_end(skb, nest_parms);
101
102 return ret;
103
104 nla_put_failure:
105 return -1;
106 }
107
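/* Dump the L3 and L4 parts of a tuple as nested CTA_TUPLE_IP and
 * CTA_TUPLE_PROTO attributes.  The l3proto/l4proto handlers are looked up
 * under rcu_read_lock() since they may go away on module unload.
 */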
108 static int
109 ctnetlink_dump_tuples(struct sk_buff *skb,
110 const struct nf_conntrack_tuple *tuple)
111 {
112 int ret;
113 struct nf_conntrack_l3proto *l3proto;
114 struct nf_conntrack_l4proto *l4proto;
115
116 rcu_read_lock();
117 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
118 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
119
120 if (ret >= 0) {
121 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
122 tuple->dst.protonum);
123 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
124 }
125 rcu_read_unlock();
126 return ret;
127 }
128
129 static inline int
130 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
131 {
132 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
133 goto nla_put_failure;
134 return 0;
135
136 nla_put_failure:
137 return -1;
138 }
139
140 static inline int
141 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
142 {
143 long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
144
145 if (timeout < 0)
146 timeout = 0;
147
148 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
149 goto nla_put_failure;
150 return 0;
151
152 nla_put_failure:
153 return -1;
154 }
155
156 static inline int
157 ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
158 {
159 struct nf_conntrack_l4proto *l4proto;
160 struct nlattr *nest_proto;
161 int ret;
162
163 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
164 if (!l4proto->to_nlattr)
165 return 0;
166
167 nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
168 if (!nest_proto)
169 goto nla_put_failure;
170
171 ret = l4proto->to_nlattr(skb, nest_proto, ct);
172
173 nla_nest_end(skb, nest_proto);
174
175 return ret;
176
177 nla_put_failure:
178 return -1;
179 }
180
181 static inline int
182 ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
183 {
184 struct nlattr *nest_helper;
185 const struct nf_conn_help *help = nfct_help(ct);
186 struct nf_conntrack_helper *helper;
187
188 if (!help)
189 return 0;
190
191 helper = rcu_dereference(help->helper);
192 if (!helper)
193 goto out;
194
195 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
196 if (!nest_helper)
197 goto nla_put_failure;
198 if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
199 goto nla_put_failure;
200
201 if (helper->to_nlattr)
202 helper->to_nlattr(skb, ct);
203
204 nla_nest_end(skb, nest_helper);
205 out:
206 return 0;
207
208 nla_put_failure:
209 return -1;
210 }
211
212 static int
213 dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
214 enum ip_conntrack_dir dir)
215 {
216 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
217 struct nlattr *nest_count;
218
219 nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
220 if (!nest_count)
221 goto nla_put_failure;
222
223 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
224 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
225 goto nla_put_failure;
226
227 nla_nest_end(skb, nest_count);
228
229 return 0;
230
231 nla_put_failure:
232 return -1;
233 }
234
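/* Dump per-direction accounting counters.  For IPCTNL_MSG_CT_GET_CTRZERO
 * requests the counters are read and reset atomically, otherwise they are
 * only read.
 */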
235 static int
236 ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
237 enum ip_conntrack_dir dir, int type)
238 {
239 struct nf_conn_counter *acct;
240 u64 pkts, bytes;
241
242 acct = nf_conn_acct_find(ct);
243 if (!acct)
244 return 0;
245
246 if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
247 pkts = atomic64_xchg(&acct[dir].packets, 0);
248 bytes = atomic64_xchg(&acct[dir].bytes, 0);
249 } else {
250 pkts = atomic64_read(&acct[dir].packets);
251 bytes = atomic64_read(&acct[dir].bytes);
252 }
253 return dump_counters(skb, pkts, bytes, dir);
254 }
255
256 static int
257 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
258 {
259 struct nlattr *nest_count;
260 const struct nf_conn_tstamp *tstamp;
261
262 tstamp = nf_conn_tstamp_find(ct);
263 if (!tstamp)
264 return 0;
265
266 nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
267 if (!nest_count)
268 goto nla_put_failure;
269
270 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
271 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
272 cpu_to_be64(tstamp->stop))))
273 goto nla_put_failure;
274 nla_nest_end(skb, nest_count);
275
276 return 0;
277
278 nla_put_failure:
279 return -1;
280 }
281
282 #ifdef CONFIG_NF_CONNTRACK_MARK
283 static inline int
284 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
285 {
286 if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
287 goto nla_put_failure;
288 return 0;
289
290 nla_put_failure:
291 return -1;
292 }
293 #else
294 #define ctnetlink_dump_mark(a, b) (0)
295 #endif
296
297 #ifdef CONFIG_NF_CONNTRACK_SECMARK
298 static inline int
299 ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
300 {
301 struct nlattr *nest_secctx;
302 int len, ret;
303 char *secctx;
304
305 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
306 if (ret)
307 return 0;
308
309 ret = -1;
310 nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
311 if (!nest_secctx)
312 goto nla_put_failure;
313
314 if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
315 goto nla_put_failure;
316 nla_nest_end(skb, nest_secctx);
317
318 ret = 0;
319 nla_put_failure:
320 security_release_secctx(secctx, len);
321 return ret;
322 }
323 #else
324 #define ctnetlink_dump_secctx(a, b) (0)
325 #endif
326
327 #ifdef CONFIG_NF_CONNTRACK_LABELS
328 static int ctnetlink_label_size(const struct nf_conn *ct)
329 {
330 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
331
332 if (!labels)
333 return 0;
334 return nla_total_size(labels->words * sizeof(long));
335 }
336
337 static int
338 ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
339 {
340 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
341 unsigned int len, i;
342
343 if (!labels)
344 return 0;
345
346 len = labels->words * sizeof(long);
347 i = 0;
348 do {
349 if (labels->bits[i] != 0)
350 return nla_put(skb, CTA_LABELS, len, labels->bits);
351 i++;
352 } while (i < labels->words);
353
354 return 0;
355 }
356 #else
357 #define ctnetlink_dump_labels(a, b) (0)
358 #define ctnetlink_label_size(a) (0)
359 #endif
360
361 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
362
363 static inline int
364 ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
365 {
366 struct nlattr *nest_parms;
367
368 if (!(ct->status & IPS_EXPECTED))
369 return 0;
370
371 nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
372 if (!nest_parms)
373 goto nla_put_failure;
374 if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
375 goto nla_put_failure;
376 nla_nest_end(skb, nest_parms);
377
378 return 0;
379
380 nla_put_failure:
381 return -1;
382 }
383
384 #ifdef CONFIG_NF_NAT_NEEDED
385 static int
386 dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
387 {
388 struct nlattr *nest_parms;
389
390 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
391 if (!nest_parms)
392 goto nla_put_failure;
393
394 if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
395 htonl(natseq->correction_pos)) ||
396 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
397 htonl(natseq->offset_before)) ||
398 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
399 htonl(natseq->offset_after)))
400 goto nla_put_failure;
401
402 nla_nest_end(skb, nest_parms);
403
404 return 0;
405
406 nla_put_failure:
407 return -1;
408 }
409
410 static inline int
411 ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
412 {
413 struct nf_nat_seq *natseq;
414 struct nf_conn_nat *nat = nfct_nat(ct);
415
416 if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
417 return 0;
418
419 natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
420 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
421 return -1;
422
423 natseq = &nat->seq[IP_CT_DIR_REPLY];
424 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
425 return -1;
426
427 return 0;
428 }
429 #else
430 #define ctnetlink_dump_nat_seq_adj(a, b) (0)
431 #endif
432
433 static inline int
434 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
435 {
436 if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
437 goto nla_put_failure;
438 return 0;
439
440 nla_put_failure:
441 return -1;
442 }
443
444 static inline int
445 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
446 {
447 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
448 goto nla_put_failure;
449 return 0;
450
451 nla_put_failure:
452 return -1;
453 }
454
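/* Build a full conntrack message: both tuples, zone, status, timeout,
 * counters, timestamps, protocol and helper info, mark, secctx, labels,
 * id, use count, master tuple and NAT sequence adjustments.  Returns
 * skb->len on success or -1 if the attributes did not fit, in which case
 * the partially built message is cancelled.
 */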
455 static int
456 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
457 struct nf_conn *ct)
458 {
459 struct nlmsghdr *nlh;
460 struct nfgenmsg *nfmsg;
461 struct nlattr *nest_parms;
462 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
463
464 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
465 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
466 if (nlh == NULL)
467 goto nlmsg_failure;
468
469 nfmsg = nlmsg_data(nlh);
470 nfmsg->nfgen_family = nf_ct_l3num(ct);
471 nfmsg->version = NFNETLINK_V0;
472 nfmsg->res_id = 0;
473
474 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
475 if (!nest_parms)
476 goto nla_put_failure;
477 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
478 goto nla_put_failure;
479 nla_nest_end(skb, nest_parms);
480
481 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
482 if (!nest_parms)
483 goto nla_put_failure;
484 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
485 goto nla_put_failure;
486 nla_nest_end(skb, nest_parms);
487
488 if (nf_ct_zone(ct) &&
489 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
490 goto nla_put_failure;
491
492 if (ctnetlink_dump_status(skb, ct) < 0 ||
493 ctnetlink_dump_timeout(skb, ct) < 0 ||
494 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
495 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
496 ctnetlink_dump_timestamp(skb, ct) < 0 ||
497 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
498 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
499 ctnetlink_dump_mark(skb, ct) < 0 ||
500 ctnetlink_dump_secctx(skb, ct) < 0 ||
501 ctnetlink_dump_labels(skb, ct) < 0 ||
502 ctnetlink_dump_id(skb, ct) < 0 ||
503 ctnetlink_dump_use(skb, ct) < 0 ||
504 ctnetlink_dump_master(skb, ct) < 0 ||
505 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
506 goto nla_put_failure;
507
508 nlmsg_end(skb, nlh);
509 return skb->len;
510
511 nlmsg_failure:
512 nla_put_failure:
513 nlmsg_cancel(skb, nlh);
514 return -1;
515 }
516
517 static inline size_t
518 ctnetlink_proto_size(const struct nf_conn *ct)
519 {
520 struct nf_conntrack_l3proto *l3proto;
521 struct nf_conntrack_l4proto *l4proto;
522 size_t len = 0;
523
524 rcu_read_lock();
525 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
526 len += l3proto->nla_size;
527
528 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
529 len += l4proto->nla_size;
530 rcu_read_unlock();
531
532 return len;
533 }
534
535 static inline size_t
536 ctnetlink_counters_size(const struct nf_conn *ct)
537 {
538 if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
539 return 0;
540 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
541 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
542 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
543 ;
544 }
545
546 static inline int
547 ctnetlink_secctx_size(const struct nf_conn *ct)
548 {
549 #ifdef CONFIG_NF_CONNTRACK_SECMARK
550 int len, ret;
551
552 ret = security_secid_to_secctx(ct->secmark, NULL, &len);
553 if (ret)
554 return 0;
555
556 return nla_total_size(0) /* CTA_SECCTX */
557 + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
558 #else
559 return 0;
560 #endif
561 }
562
563 static inline size_t
564 ctnetlink_timestamp_size(const struct nf_conn *ct)
565 {
566 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
567 if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
568 return 0;
569 return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
570 #else
571 return 0;
572 #endif
573 }
574
575 static inline size_t
576 ctnetlink_nlmsg_size(const struct nf_conn *ct)
577 {
578 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
579 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
580 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
581 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
582 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
583 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
584 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
585 + ctnetlink_counters_size(ct)
586 + ctnetlink_timestamp_size(ct)
587 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
588 + nla_total_size(0) /* CTA_PROTOINFO */
589 + nla_total_size(0) /* CTA_HELP */
590 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
591 + ctnetlink_secctx_size(ct)
592 #ifdef CONFIG_NF_NAT_NEEDED
593 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
594 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
595 #endif
596 #ifdef CONFIG_NF_CONNTRACK_MARK
597 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
598 #endif
599 + ctnetlink_proto_size(ct)
600 + ctnetlink_label_size(ct)
601 ;
602 }
603
604 #ifdef CONFIG_NF_CONNTRACK_EVENTS
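/* Conntrack event callback: translate IPCT_* event bits into
 * IPCTNL_MSG_CT_NEW/DELETE notifications and multicast them to the
 * matching NFNLGRP_CONNTRACK_* group.  This may run from packet
 * processing context, hence the GFP_ATOMIC allocation; -ENOBUFS is
 * returned when the notification could not be delivered.
 */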
605 static int
606 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
607 {
608 struct net *net;
609 struct nlmsghdr *nlh;
610 struct nfgenmsg *nfmsg;
611 struct nlattr *nest_parms;
612 struct nf_conn *ct = item->ct;
613 struct sk_buff *skb;
614 unsigned int type;
615 unsigned int flags = 0, group;
616 int err;
617
618 /* ignore our fake conntrack entry */
619 if (nf_ct_is_untracked(ct))
620 return 0;
621
622 if (events & (1 << IPCT_DESTROY)) {
623 type = IPCTNL_MSG_CT_DELETE;
624 group = NFNLGRP_CONNTRACK_DESTROY;
625 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
626 type = IPCTNL_MSG_CT_NEW;
627 flags = NLM_F_CREATE|NLM_F_EXCL;
628 group = NFNLGRP_CONNTRACK_NEW;
629 } else if (events) {
630 type = IPCTNL_MSG_CT_NEW;
631 group = NFNLGRP_CONNTRACK_UPDATE;
632 } else
633 return 0;
634
635 net = nf_ct_net(ct);
636 if (!item->report && !nfnetlink_has_listeners(net, group))
637 return 0;
638
639 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
640 if (skb == NULL)
641 goto errout;
642
643 type |= NFNL_SUBSYS_CTNETLINK << 8;
644 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
645 if (nlh == NULL)
646 goto nlmsg_failure;
647
648 nfmsg = nlmsg_data(nlh);
649 nfmsg->nfgen_family = nf_ct_l3num(ct);
650 nfmsg->version = NFNETLINK_V0;
651 nfmsg->res_id = 0;
652
653 rcu_read_lock();
654 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
655 if (!nest_parms)
656 goto nla_put_failure;
657 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
658 goto nla_put_failure;
659 nla_nest_end(skb, nest_parms);
660
661 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
662 if (!nest_parms)
663 goto nla_put_failure;
664 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
665 goto nla_put_failure;
666 nla_nest_end(skb, nest_parms);
667
668 if (nf_ct_zone(ct) &&
669 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
670 goto nla_put_failure;
671
672 if (ctnetlink_dump_id(skb, ct) < 0)
673 goto nla_put_failure;
674
675 if (ctnetlink_dump_status(skb, ct) < 0)
676 goto nla_put_failure;
677
678 if (events & (1 << IPCT_DESTROY)) {
679 if (ctnetlink_dump_counters(skb, ct,
680 IP_CT_DIR_ORIGINAL, type) < 0 ||
681 ctnetlink_dump_counters(skb, ct,
682 IP_CT_DIR_REPLY, type) < 0 ||
683 ctnetlink_dump_timestamp(skb, ct) < 0)
684 goto nla_put_failure;
685 } else {
686 if (ctnetlink_dump_timeout(skb, ct) < 0)
687 goto nla_put_failure;
688
689 if (events & (1 << IPCT_PROTOINFO)
690 && ctnetlink_dump_protoinfo(skb, ct) < 0)
691 goto nla_put_failure;
692
693 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
694 && ctnetlink_dump_helpinfo(skb, ct) < 0)
695 goto nla_put_failure;
696
697 #ifdef CONFIG_NF_CONNTRACK_SECMARK
698 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
699 && ctnetlink_dump_secctx(skb, ct) < 0)
700 goto nla_put_failure;
701 #endif
702 if (events & (1 << IPCT_LABEL) &&
703 ctnetlink_dump_labels(skb, ct) < 0)
704 goto nla_put_failure;
705
706 if (events & (1 << IPCT_RELATED) &&
707 ctnetlink_dump_master(skb, ct) < 0)
708 goto nla_put_failure;
709
710 if (events & (1 << IPCT_NATSEQADJ) &&
711 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
712 goto nla_put_failure;
713 }
714
715 #ifdef CONFIG_NF_CONNTRACK_MARK
716 if ((events & (1 << IPCT_MARK) || ct->mark)
717 && ctnetlink_dump_mark(skb, ct) < 0)
718 goto nla_put_failure;
719 #endif
720 rcu_read_unlock();
721
722 nlmsg_end(skb, nlh);
723 err = nfnetlink_send(skb, net, item->portid, group, item->report,
724 GFP_ATOMIC);
725 if (err == -ENOBUFS || err == -EAGAIN)
726 return -ENOBUFS;
727
728 return 0;
729
730 nla_put_failure:
731 rcu_read_unlock();
732 nlmsg_cancel(skb, nlh);
733 nlmsg_failure:
734 kfree_skb(skb);
735 errout:
736 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
737 return -ENOBUFS;
738
739 return 0;
740 }
741 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
742
743 static int ctnetlink_done(struct netlink_callback *cb)
744 {
745 if (cb->args[1])
746 nf_ct_put((struct nf_conn *)cb->args[1]);
747 if (cb->data)
748 kfree(cb->data);
749 return 0;
750 }
751
752 struct ctnetlink_dump_filter {
753 struct {
754 u_int32_t val;
755 u_int32_t mask;
756 } mark;
757 };
758
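/* Dump the conntrack hash table.  cb->args[0] is the current bucket and
 * cb->args[1] holds a reference to the last entry dumped, so the walk can
 * resume once the skb is full; ctnetlink_done() releases that reference
 * and the optional mark/mask filter in cb->data.
 */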
759 static int
760 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
761 {
762 struct net *net = sock_net(skb->sk);
763 struct nf_conn *ct, *last;
764 struct nf_conntrack_tuple_hash *h;
765 struct hlist_nulls_node *n;
766 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
767 u_int8_t l3proto = nfmsg->nfgen_family;
768 int res;
769 #ifdef CONFIG_NF_CONNTRACK_MARK
770 const struct ctnetlink_dump_filter *filter = cb->data;
771 #endif
772
773 spin_lock_bh(&nf_conntrack_lock);
774 last = (struct nf_conn *)cb->args[1];
775 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
776 restart:
777 hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
778 hnnode) {
779 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
780 continue;
781 ct = nf_ct_tuplehash_to_ctrack(h);
782 /* Dump entries of a given L3 protocol number.
783 * If it is not specified, i.e. l3proto == 0,
784 * then dump everything. */
785 if (l3proto && nf_ct_l3num(ct) != l3proto)
786 continue;
787 if (cb->args[1]) {
788 if (ct != last)
789 continue;
790 cb->args[1] = 0;
791 }
792 #ifdef CONFIG_NF_CONNTRACK_MARK
793 if (filter && !((ct->mark & filter->mark.mask) ==
794 filter->mark.val)) {
795 continue;
796 }
797 #endif
798 rcu_read_lock();
799 res =
800 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
801 cb->nlh->nlmsg_seq,
802 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
803 ct);
804 rcu_read_unlock();
805 if (res < 0) {
806 nf_conntrack_get(&ct->ct_general);
807 cb->args[1] = (unsigned long)ct;
808 goto out;
809 }
810 }
811 if (cb->args[1]) {
812 cb->args[1] = 0;
813 goto restart;
814 }
815 }
816 out:
817 spin_unlock_bh(&nf_conntrack_lock);
818 if (last)
819 nf_ct_put(last);
820
821 return skb->len;
822 }
823
824 static inline int
825 ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
826 {
827 struct nlattr *tb[CTA_IP_MAX+1];
828 struct nf_conntrack_l3proto *l3proto;
829 int ret = 0;
830
831 ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
832 if (ret < 0)
833 return ret;
834
835 rcu_read_lock();
836 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
837
838 if (likely(l3proto->nlattr_to_tuple)) {
839 ret = nla_validate_nested(attr, CTA_IP_MAX,
840 l3proto->nla_policy);
841 if (ret == 0)
842 ret = l3proto->nlattr_to_tuple(tb, tuple);
843 }
844
845 rcu_read_unlock();
846
847 return ret;
848 }
849
850 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
851 [CTA_PROTO_NUM] = { .type = NLA_U8 },
852 };
853
854 static inline int
855 ctnetlink_parse_tuple_proto(struct nlattr *attr,
856 struct nf_conntrack_tuple *tuple)
857 {
858 struct nlattr *tb[CTA_PROTO_MAX+1];
859 struct nf_conntrack_l4proto *l4proto;
860 int ret = 0;
861
862 ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
863 if (ret < 0)
864 return ret;
865
866 if (!tb[CTA_PROTO_NUM])
867 return -EINVAL;
868 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
869
870 rcu_read_lock();
871 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
872
873 if (likely(l4proto->nlattr_to_tuple)) {
874 ret = nla_validate_nested(attr, CTA_PROTO_MAX,
875 l4proto->nla_policy);
876 if (ret == 0)
877 ret = l4proto->nlattr_to_tuple(tb, tuple);
878 }
879
880 rcu_read_unlock();
881
882 return ret;
883 }
884
885 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
886 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
887 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
888 };
889
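/* Parse a nested CTA_TUPLE_* attribute into a nf_conntrack_tuple.  Both
 * CTA_TUPLE_IP and CTA_TUPLE_PROTO are mandatory; the direction is derived
 * from the attribute type, only CTA_TUPLE_REPLY yields IP_CT_DIR_REPLY.
 */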
890 static int
891 ctnetlink_parse_tuple(const struct nlattr * const cda[],
892 struct nf_conntrack_tuple *tuple,
893 enum ctattr_type type, u_int8_t l3num)
894 {
895 struct nlattr *tb[CTA_TUPLE_MAX+1];
896 int err;
897
898 memset(tuple, 0, sizeof(*tuple));
899
900 err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
901 if (err < 0)
902 return err;
903
904 if (!tb[CTA_TUPLE_IP])
905 return -EINVAL;
906
907 tuple->src.l3num = l3num;
908
909 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
910 if (err < 0)
911 return err;
912
913 if (!tb[CTA_TUPLE_PROTO])
914 return -EINVAL;
915
916 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
917 if (err < 0)
918 return err;
919
920 /* orig and expect tuples get DIR_ORIGINAL */
921 if (type == CTA_TUPLE_REPLY)
922 tuple->dst.dir = IP_CT_DIR_REPLY;
923 else
924 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
925
926 return 0;
927 }
928
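/* CTA_ZONE is only honoured when conntrack zones are compiled in;
 * otherwise asking for a non-default zone fails with -EOPNOTSUPP.
 */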
929 static int
930 ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
931 {
932 if (attr)
933 #ifdef CONFIG_NF_CONNTRACK_ZONES
934 *zone = ntohs(nla_get_be16(attr));
935 #else
936 return -EOPNOTSUPP;
937 #endif
938 else
939 *zone = 0;
940
941 return 0;
942 }
943
944 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
945 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
946 .len = NF_CT_HELPER_NAME_LEN - 1 },
947 };
948
949 static inline int
950 ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
951 struct nlattr **helpinfo)
952 {
953 int err;
954 struct nlattr *tb[CTA_HELP_MAX+1];
955
956 err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
957 if (err < 0)
958 return err;
959
960 if (!tb[CTA_HELP_NAME])
961 return -EINVAL;
962
963 *helper_name = nla_data(tb[CTA_HELP_NAME]);
964
965 if (tb[CTA_HELP_INFO])
966 *helpinfo = tb[CTA_HELP_INFO];
967
968 return 0;
969 }
970
971 #define __CTA_LABELS_MAX_LENGTH ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
972 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
973 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
974 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
975 [CTA_STATUS] = { .type = NLA_U32 },
976 [CTA_PROTOINFO] = { .type = NLA_NESTED },
977 [CTA_HELP] = { .type = NLA_NESTED },
978 [CTA_NAT_SRC] = { .type = NLA_NESTED },
979 [CTA_TIMEOUT] = { .type = NLA_U32 },
980 [CTA_MARK] = { .type = NLA_U32 },
981 [CTA_ID] = { .type = NLA_U32 },
982 [CTA_NAT_DST] = { .type = NLA_NESTED },
983 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
984 [CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
985 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
986 [CTA_ZONE] = { .type = NLA_U16 },
987 [CTA_MARK_MASK] = { .type = NLA_U32 },
988 [CTA_LABELS] = { .type = NLA_BINARY,
989 .len = __CTA_LABELS_MAX_LENGTH },
990 [CTA_LABELS_MASK] = { .type = NLA_BINARY,
991 .len = __CTA_LABELS_MAX_LENGTH },
992 };
993
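/* IPCTNL_MSG_CT_DELETE handler: look up the entry by original or reply
 * tuple (optionally cross-checked against CTA_ID) and delete it, or flush
 * the whole table if no tuple is given.
 */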
994 static int
995 ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
996 const struct nlmsghdr *nlh,
997 const struct nlattr * const cda[])
998 {
999 struct net *net = sock_net(ctnl);
1000 struct nf_conntrack_tuple_hash *h;
1001 struct nf_conntrack_tuple tuple;
1002 struct nf_conn *ct;
1003 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1004 u_int8_t u3 = nfmsg->nfgen_family;
1005 u16 zone;
1006 int err;
1007
1008 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1009 if (err < 0)
1010 return err;
1011
1012 if (cda[CTA_TUPLE_ORIG])
1013 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1014 else if (cda[CTA_TUPLE_REPLY])
1015 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1016 else {
1017 /* Flush the whole table */
1018 nf_conntrack_flush_report(net,
1019 NETLINK_CB(skb).portid,
1020 nlmsg_report(nlh));
1021 return 0;
1022 }
1023
1024 if (err < 0)
1025 return err;
1026
1027 h = nf_conntrack_find_get(net, zone, &tuple);
1028 if (!h)
1029 return -ENOENT;
1030
1031 ct = nf_ct_tuplehash_to_ctrack(h);
1032
1033 if (cda[CTA_ID]) {
1034 u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
1035 if (id != (u32)(unsigned long)ct) {
1036 nf_ct_put(ct);
1037 return -ENOENT;
1038 }
1039 }
1040
1041 if (del_timer(&ct->timeout))
1042 nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1043
1044 nf_ct_put(ct);
1045
1046 return 0;
1047 }
1048
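/* IPCTNL_MSG_CT_GET handler: with NLM_F_DUMP start a table dump,
 * optionally filtered by mark/mask; otherwise look up a single entry by
 * tuple and unicast it back to the requester.
 */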
1049 static int
1050 ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
1051 const struct nlmsghdr *nlh,
1052 const struct nlattr * const cda[])
1053 {
1054 struct net *net = sock_net(ctnl);
1055 struct nf_conntrack_tuple_hash *h;
1056 struct nf_conntrack_tuple tuple;
1057 struct nf_conn *ct;
1058 struct sk_buff *skb2 = NULL;
1059 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1060 u_int8_t u3 = nfmsg->nfgen_family;
1061 u16 zone;
1062 int err;
1063
1064 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1065 struct netlink_dump_control c = {
1066 .dump = ctnetlink_dump_table,
1067 .done = ctnetlink_done,
1068 };
1069 #ifdef CONFIG_NF_CONNTRACK_MARK
1070 if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1071 struct ctnetlink_dump_filter *filter;
1072
1073 filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
1074 GFP_ATOMIC);
1075 if (filter == NULL)
1076 return -ENOMEM;
1077
1078 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
1079 filter->mark.mask =
1080 ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1081 c.data = filter;
1082 }
1083 #endif
1084 return netlink_dump_start(ctnl, skb, nlh, &c);
1085 }
1086
1087 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1088 if (err < 0)
1089 return err;
1090
1091 if (cda[CTA_TUPLE_ORIG])
1092 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1093 else if (cda[CTA_TUPLE_REPLY])
1094 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1095 else
1096 return -EINVAL;
1097
1098 if (err < 0)
1099 return err;
1100
1101 h = nf_conntrack_find_get(net, zone, &tuple);
1102 if (!h)
1103 return -ENOENT;
1104
1105 ct = nf_ct_tuplehash_to_ctrack(h);
1106
1107 err = -ENOMEM;
1108 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1109 if (skb2 == NULL) {
1110 nf_ct_put(ct);
1111 return -ENOMEM;
1112 }
1113
1114 rcu_read_lock();
1115 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1116 NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
1117 rcu_read_unlock();
1118 nf_ct_put(ct);
1119 if (err <= 0)
1120 goto free;
1121
1122 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1123 if (err < 0)
1124 goto out;
1125
1126 return 0;
1127
1128 free:
1129 kfree_skb(skb2);
1130 out:
1131 /* this avoids a loop in nfnetlink. */
1132 return err == -EAGAIN ? -ENOBUFS : err;
1133 }
1134
1135 static int ctnetlink_done_list(struct netlink_callback *cb)
1136 {
1137 if (cb->args[1])
1138 nf_ct_put((struct nf_conn *)cb->args[1]);
1139 return 0;
1140 }
1141
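/* Dump the dying or unconfirmed conntrack lists.  The same resume logic
 * as the hash table dump applies: cb->args[1] keeps a reference to the
 * last dumped entry and cb->args[2] marks completion.
 */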
1142 static int
1143 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb,
1144 struct hlist_nulls_head *list)
1145 {
1146 struct nf_conn *ct, *last;
1147 struct nf_conntrack_tuple_hash *h;
1148 struct hlist_nulls_node *n;
1149 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1150 u_int8_t l3proto = nfmsg->nfgen_family;
1151 int res;
1152
1153 if (cb->args[2])
1154 return 0;
1155
1156 spin_lock_bh(&nf_conntrack_lock);
1157 last = (struct nf_conn *)cb->args[1];
1158 restart:
1159 hlist_nulls_for_each_entry(h, n, list, hnnode) {
1160 ct = nf_ct_tuplehash_to_ctrack(h);
1161 if (l3proto && nf_ct_l3num(ct) != l3proto)
1162 continue;
1163 if (cb->args[1]) {
1164 if (ct != last)
1165 continue;
1166 cb->args[1] = 0;
1167 }
1168 rcu_read_lock();
1169 res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1170 cb->nlh->nlmsg_seq,
1171 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1172 ct);
1173 rcu_read_unlock();
1174 if (res < 0) {
1175 nf_conntrack_get(&ct->ct_general);
1176 cb->args[1] = (unsigned long)ct;
1177 goto out;
1178 }
1179 }
1180 if (cb->args[1]) {
1181 cb->args[1] = 0;
1182 goto restart;
1183 } else
1184 cb->args[2] = 1;
1185 out:
1186 spin_unlock_bh(&nf_conntrack_lock);
1187 if (last)
1188 nf_ct_put(last);
1189
1190 return skb->len;
1191 }
1192
1193 static int
1194 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1195 {
1196 struct net *net = sock_net(skb->sk);
1197
1198 return ctnetlink_dump_list(skb, cb, &net->ct.dying);
1199 }
1200
1201 static int
1202 ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
1203 const struct nlmsghdr *nlh,
1204 const struct nlattr * const cda[])
1205 {
1206 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1207 struct netlink_dump_control c = {
1208 .dump = ctnetlink_dump_dying,
1209 .done = ctnetlink_done_list,
1210 };
1211 return netlink_dump_start(ctnl, skb, nlh, &c);
1212 }
1213
1214 return -EOPNOTSUPP;
1215 }
1216
1217 static int
1218 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1219 {
1220 struct net *net = sock_net(skb->sk);
1221
1222 return ctnetlink_dump_list(skb, cb, &net->ct.unconfirmed);
1223 }
1224
1225 static int
1226 ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
1227 const struct nlmsghdr *nlh,
1228 const struct nlattr * const cda[])
1229 {
1230 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1231 struct netlink_dump_control c = {
1232 .dump = ctnetlink_dump_unconfirmed,
1233 .done = ctnetlink_done_list,
1234 };
1235 return netlink_dump_start(ctnl, skb, nlh, &c);
1236 }
1237
1238 return -EOPNOTSUPP;
1239 }
1240
1241 #ifdef CONFIG_NF_NAT_NEEDED
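/* Hand CTA_NAT_SRC/CTA_NAT_DST over to the NAT module through
 * nfnetlink_parse_nat_setup_hook.  If the hook or the per-family NAT
 * module is not loaded yet, drop the locks, try request_module() and
 * return -EAGAIN so the operation is retried.
 */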
1242 static int
1243 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1244 enum nf_nat_manip_type manip,
1245 const struct nlattr *attr)
1246 {
1247 typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
1248 int err;
1249
1250 parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
1251 if (!parse_nat_setup) {
1252 #ifdef CONFIG_MODULES
1253 rcu_read_unlock();
1254 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1255 if (request_module("nf-nat") < 0) {
1256 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1257 rcu_read_lock();
1258 return -EOPNOTSUPP;
1259 }
1260 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1261 rcu_read_lock();
1262 if (nfnetlink_parse_nat_setup_hook)
1263 return -EAGAIN;
1264 #endif
1265 return -EOPNOTSUPP;
1266 }
1267
1268 err = parse_nat_setup(ct, manip, attr);
1269 if (err == -EAGAIN) {
1270 #ifdef CONFIG_MODULES
1271 rcu_read_unlock();
1272 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1273 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1274 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1275 rcu_read_lock();
1276 return -EOPNOTSUPP;
1277 }
1278 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1279 rcu_read_lock();
1280 #else
1281 err = -EOPNOTSUPP;
1282 #endif
1283 }
1284 return err;
1285 }
1286 #endif
1287
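/* Update ct->status from CTA_STATUS.  EXPECTED/CONFIRMED/DYING are
 * immutable and SEEN_REPLY/ASSURED may only be set, never cleared; the
 * NAT bits are masked out so userspace cannot flip them directly.
 */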
1288 static int
1289 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1290 {
1291 unsigned long d;
1292 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1293 d = ct->status ^ status;
1294
1295 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1296 /* unchangeable */
1297 return -EBUSY;
1298
1299 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1300 /* SEEN_REPLY bit can only be set */
1301 return -EBUSY;
1302
1303 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1304 /* ASSURED bit can only be set */
1305 return -EBUSY;
1306
1307 /* Be careful here, modifying NAT bits can screw up things,
1308 * so don't let users modify them directly if they don't pass
1309 * nf_nat_range. */
1310 ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
1311 return 0;
1312 }
1313
1314 static int
1315 ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1316 {
1317 #ifdef CONFIG_NF_NAT_NEEDED
1318 int ret;
1319
1320 if (cda[CTA_NAT_DST]) {
1321 ret = ctnetlink_parse_nat_setup(ct,
1322 NF_NAT_MANIP_DST,
1323 cda[CTA_NAT_DST]);
1324 if (ret < 0)
1325 return ret;
1326 }
1327 if (cda[CTA_NAT_SRC]) {
1328 ret = ctnetlink_parse_nat_setup(ct,
1329 NF_NAT_MANIP_SRC,
1330 cda[CTA_NAT_SRC]);
1331 if (ret < 0)
1332 return ret;
1333 }
1334 return 0;
1335 #else
1336 return -EOPNOTSUPP;
1337 #endif
1338 }
1339
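/* Replace or clear the helper of an existing conntrack.  An empty helper
 * name detaches the current helper and removes pending expectations;
 * switching to a different helper, or adding one where none is attached,
 * is refused.
 */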
1340 static inline int
1341 ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1342 {
1343 struct nf_conntrack_helper *helper;
1344 struct nf_conn_help *help = nfct_help(ct);
1345 char *helpname = NULL;
1346 struct nlattr *helpinfo = NULL;
1347 int err;
1348
1349 /* don't change helper of sibling connections */
1350 if (ct->master)
1351 return -EBUSY;
1352
1353 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1354 if (err < 0)
1355 return err;
1356
1357 if (!strcmp(helpname, "")) {
1358 if (help && help->helper) {
1359 /* we had a helper before ... */
1360 nf_ct_remove_expectations(ct);
1361 RCU_INIT_POINTER(help->helper, NULL);
1362 }
1363
1364 return 0;
1365 }
1366
1367 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1368 nf_ct_protonum(ct));
1369 if (helper == NULL) {
1370 #ifdef CONFIG_MODULES
1371 spin_unlock_bh(&nf_conntrack_lock);
1372
1373 if (request_module("nfct-helper-%s", helpname) < 0) {
1374 spin_lock_bh(&nf_conntrack_lock);
1375 return -EOPNOTSUPP;
1376 }
1377
1378 spin_lock_bh(&nf_conntrack_lock);
1379 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1380 nf_ct_protonum(ct));
1381 if (helper)
1382 return -EAGAIN;
1383 #endif
1384 return -EOPNOTSUPP;
1385 }
1386
1387 if (help) {
1388 if (help->helper == helper) {
1389 /* update private helper data if allowed. */
1390 if (helper->from_nlattr)
1391 helper->from_nlattr(helpinfo, ct);
1392 return 0;
1393 } else
1394 return -EBUSY;
1395 }
1396
1397 /* we cannot set a helper for an existing conntrack */
1398 return -EOPNOTSUPP;
1399 }
1400
1401 static inline int
1402 ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1403 {
1404 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1405
1406 if (!del_timer(&ct->timeout))
1407 return -ETIME;
1408
1409 ct->timeout.expires = jiffies + timeout * HZ;
1410 add_timer(&ct->timeout);
1411
1412 return 0;
1413 }
1414
1415 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1416 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1417 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1418 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1419 };
1420
1421 static inline int
1422 ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1423 {
1424 const struct nlattr *attr = cda[CTA_PROTOINFO];
1425 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1426 struct nf_conntrack_l4proto *l4proto;
1427 int err = 0;
1428
1429 err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1430 if (err < 0)
1431 return err;
1432
1433 rcu_read_lock();
1434 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1435 if (l4proto->from_nlattr)
1436 err = l4proto->from_nlattr(tb, ct);
1437 rcu_read_unlock();
1438
1439 return err;
1440 }
1441
1442 #ifdef CONFIG_NF_NAT_NEEDED
1443 static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
1444 [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
1445 [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
1446 [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
1447 };
1448
1449 static inline int
1450 change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1451 {
1452 int err;
1453 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1454
1455 err = nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1456 if (err < 0)
1457 return err;
1458
1459 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1460 return -EINVAL;
1461
1462 natseq->correction_pos =
1463 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
1464
1465 if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
1466 return -EINVAL;
1467
1468 natseq->offset_before =
1469 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
1470
1471 if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
1472 return -EINVAL;
1473
1474 natseq->offset_after =
1475 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
1476
1477 return 0;
1478 }
1479
1480 static int
1481 ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
1482 const struct nlattr * const cda[])
1483 {
1484 int ret = 0;
1485 struct nf_conn_nat *nat = nfct_nat(ct);
1486
1487 if (!nat)
1488 return 0;
1489
1490 if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
1491 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
1492 cda[CTA_NAT_SEQ_ADJ_ORIG]);
1493 if (ret < 0)
1494 return ret;
1495
1496 ct->status |= IPS_SEQ_ADJUST;
1497 }
1498
1499 if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1500 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
1501 cda[CTA_NAT_SEQ_ADJ_REPLY]);
1502 if (ret < 0)
1503 return ret;
1504
1505 ct->status |= IPS_SEQ_ADJUST;
1506 }
1507
1508 return 0;
1509 }
1510 #endif
1511
1512 static int
1513 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
1514 {
1515 #ifdef CONFIG_NF_CONNTRACK_LABELS
1516 size_t len = nla_len(cda[CTA_LABELS]);
1517 const void *mask = cda[CTA_LABELS_MASK];
1518
1519 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
1520 return -EINVAL;
1521
1522 if (mask) {
1523 if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
1524 nla_len(cda[CTA_LABELS_MASK]) != len)
1525 return -EINVAL;
1526 mask = nla_data(cda[CTA_LABELS_MASK]);
1527 }
1528
1529 len /= sizeof(u32);
1530
1531 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
1532 #else
1533 return -EOPNOTSUPP;
1534 #endif
1535 }
1536
1537 static int
1538 ctnetlink_change_conntrack(struct nf_conn *ct,
1539 const struct nlattr * const cda[])
1540 {
1541 int err;
1542
1543 /* only allow NAT changes and master assignment for new conntracks */
1544 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1545 return -EOPNOTSUPP;
1546
1547 if (cda[CTA_HELP]) {
1548 err = ctnetlink_change_helper(ct, cda);
1549 if (err < 0)
1550 return err;
1551 }
1552
1553 if (cda[CTA_TIMEOUT]) {
1554 err = ctnetlink_change_timeout(ct, cda);
1555 if (err < 0)
1556 return err;
1557 }
1558
1559 if (cda[CTA_STATUS]) {
1560 err = ctnetlink_change_status(ct, cda);
1561 if (err < 0)
1562 return err;
1563 }
1564
1565 if (cda[CTA_PROTOINFO]) {
1566 err = ctnetlink_change_protoinfo(ct, cda);
1567 if (err < 0)
1568 return err;
1569 }
1570
1571 #if defined(CONFIG_NF_CONNTRACK_MARK)
1572 if (cda[CTA_MARK])
1573 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1574 #endif
1575
1576 #ifdef CONFIG_NF_NAT_NEEDED
1577 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1578 err = ctnetlink_change_nat_seq_adj(ct, cda);
1579 if (err < 0)
1580 return err;
1581 }
1582 #endif
1583 if (cda[CTA_LABELS]) {
1584 err = ctnetlink_attach_labels(ct, cda);
1585 if (err < 0)
1586 return err;
1587 }
1588
1589 return 0;
1590 }
1591
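/* Allocate and insert a new conntrack from netlink attributes.
 * CTA_TIMEOUT is mandatory; helper assignment, NAT setup, status,
 * protocol info, mark and an optional master tuple (for expected
 * connections) are applied before the entry is inserted into the hash
 * table as confirmed.
 */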
1592 static struct nf_conn *
1593 ctnetlink_create_conntrack(struct net *net, u16 zone,
1594 const struct nlattr * const cda[],
1595 struct nf_conntrack_tuple *otuple,
1596 struct nf_conntrack_tuple *rtuple,
1597 u8 u3)
1598 {
1599 struct nf_conn *ct;
1600 int err = -EINVAL;
1601 struct nf_conntrack_helper *helper;
1602 struct nf_conn_tstamp *tstamp;
1603
1604 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1605 if (IS_ERR(ct))
1606 return ERR_PTR(-ENOMEM);
1607
1608 if (!cda[CTA_TIMEOUT])
1609 goto err1;
1610 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1611
1612 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1613
1614 rcu_read_lock();
1615 if (cda[CTA_HELP]) {
1616 char *helpname = NULL;
1617 struct nlattr *helpinfo = NULL;
1618
1619 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1620 if (err < 0)
1621 goto err2;
1622
1623 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1624 nf_ct_protonum(ct));
1625 if (helper == NULL) {
1626 rcu_read_unlock();
1627 #ifdef CONFIG_MODULES
1628 if (request_module("nfct-helper-%s", helpname) < 0) {
1629 err = -EOPNOTSUPP;
1630 goto err1;
1631 }
1632
1633 rcu_read_lock();
1634 helper = __nf_conntrack_helper_find(helpname,
1635 nf_ct_l3num(ct),
1636 nf_ct_protonum(ct));
1637 if (helper) {
1638 err = -EAGAIN;
1639 goto err2;
1640 }
1641 rcu_read_unlock();
1642 #endif
1643 err = -EOPNOTSUPP;
1644 goto err1;
1645 } else {
1646 struct nf_conn_help *help;
1647
1648 help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
1649 if (help == NULL) {
1650 err = -ENOMEM;
1651 goto err2;
1652 }
1653 /* set private helper data if allowed. */
1654 if (helper->from_nlattr)
1655 helper->from_nlattr(helpinfo, ct);
1656
1657 /* not in hash table yet so not strictly necessary */
1658 RCU_INIT_POINTER(help->helper, helper);
1659 }
1660 } else {
1661 /* try an implicit helper assignment */
1662 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1663 if (err < 0)
1664 goto err2;
1665 }
1666
1667 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1668 err = ctnetlink_change_nat(ct, cda);
1669 if (err < 0)
1670 goto err2;
1671 }
1672
1673 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1674 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1675 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1676 nf_ct_labels_ext_add(ct);
1677
1678 /* we must add conntrack extensions before confirmation. */
1679 ct->status |= IPS_CONFIRMED;
1680
1681 if (cda[CTA_STATUS]) {
1682 err = ctnetlink_change_status(ct, cda);
1683 if (err < 0)
1684 goto err2;
1685 }
1686
1687 #ifdef CONFIG_NF_NAT_NEEDED
1688 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1689 err = ctnetlink_change_nat_seq_adj(ct, cda);
1690 if (err < 0)
1691 goto err2;
1692 }
1693 #endif
1694
1695 memset(&ct->proto, 0, sizeof(ct->proto));
1696 if (cda[CTA_PROTOINFO]) {
1697 err = ctnetlink_change_protoinfo(ct, cda);
1698 if (err < 0)
1699 goto err2;
1700 }
1701
1702 #if defined(CONFIG_NF_CONNTRACK_MARK)
1703 if (cda[CTA_MARK])
1704 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1705 #endif
1706
1707 /* setup master conntrack: this is a confirmed expectation */
1708 if (cda[CTA_TUPLE_MASTER]) {
1709 struct nf_conntrack_tuple master;
1710 struct nf_conntrack_tuple_hash *master_h;
1711 struct nf_conn *master_ct;
1712
1713 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
1714 if (err < 0)
1715 goto err2;
1716
1717 master_h = nf_conntrack_find_get(net, zone, &master);
1718 if (master_h == NULL) {
1719 err = -ENOENT;
1720 goto err2;
1721 }
1722 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1723 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1724 ct->master = master_ct;
1725 }
1726 tstamp = nf_conn_tstamp_find(ct);
1727 if (tstamp)
1728 tstamp->start = ktime_to_ns(ktime_get_real());
1729
1730 err = nf_conntrack_hash_check_insert(ct);
1731 if (err < 0)
1732 goto err2;
1733
1734 rcu_read_unlock();
1735
1736 return ct;
1737
1738 err2:
1739 rcu_read_unlock();
1740 err1:
1741 nf_conntrack_free(ct);
1742 return ERR_PTR(err);
1743 }
1744
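/* IPCTNL_MSG_CT_NEW handler: create a new entry if no matching conntrack
 * exists and NLM_F_CREATE is set (both tuples are then required), or
 * update the existing one unless NLM_F_EXCL forbids it.  Either path
 * reports the resulting events to listeners.
 */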
1745 static int
1746 ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1747 const struct nlmsghdr *nlh,
1748 const struct nlattr * const cda[])
1749 {
1750 struct net *net = sock_net(ctnl);
1751 struct nf_conntrack_tuple otuple, rtuple;
1752 struct nf_conntrack_tuple_hash *h = NULL;
1753 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1754 struct nf_conn *ct;
1755 u_int8_t u3 = nfmsg->nfgen_family;
1756 u16 zone;
1757 int err;
1758
1759 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1760 if (err < 0)
1761 return err;
1762
1763 if (cda[CTA_TUPLE_ORIG]) {
1764 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
1765 if (err < 0)
1766 return err;
1767 }
1768
1769 if (cda[CTA_TUPLE_REPLY]) {
1770 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
1771 if (err < 0)
1772 return err;
1773 }
1774
1775 if (cda[CTA_TUPLE_ORIG])
1776 h = nf_conntrack_find_get(net, zone, &otuple);
1777 else if (cda[CTA_TUPLE_REPLY])
1778 h = nf_conntrack_find_get(net, zone, &rtuple);
1779
1780 if (h == NULL) {
1781 err = -ENOENT;
1782 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1783 enum ip_conntrack_events events;
1784
1785 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
1786 return -EINVAL;
1787
1788 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1789 &rtuple, u3);
1790 if (IS_ERR(ct))
1791 return PTR_ERR(ct);
1792
1793 err = 0;
1794 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1795 events = IPCT_RELATED;
1796 else
1797 events = IPCT_NEW;
1798
1799 if (cda[CTA_LABELS] &&
1800 ctnetlink_attach_labels(ct, cda) == 0)
1801 events |= (1 << IPCT_LABEL);
1802
1803 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1804 (1 << IPCT_ASSURED) |
1805 (1 << IPCT_HELPER) |
1806 (1 << IPCT_PROTOINFO) |
1807 (1 << IPCT_NATSEQADJ) |
1808 (1 << IPCT_MARK) | events,
1809 ct, NETLINK_CB(skb).portid,
1810 nlmsg_report(nlh));
1811 nf_ct_put(ct);
1812 }
1813
1814 return err;
1815 }
1816 /* implicit 'else' */
1817
1818 err = -EEXIST;
1819 ct = nf_ct_tuplehash_to_ctrack(h);
1820 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1821 spin_lock_bh(&nf_conntrack_lock);
1822 err = ctnetlink_change_conntrack(ct, cda);
1823 spin_unlock_bh(&nf_conntrack_lock);
1824 if (err == 0) {
1825 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1826 (1 << IPCT_ASSURED) |
1827 (1 << IPCT_HELPER) |
1828 (1 << IPCT_LABEL) |
1829 (1 << IPCT_PROTOINFO) |
1830 (1 << IPCT_NATSEQADJ) |
1831 (1 << IPCT_MARK),
1832 ct, NETLINK_CB(skb).portid,
1833 nlmsg_report(nlh));
1834 }
1835 }
1836
1837 nf_ct_put(ct);
1838 return err;
1839 }
1840
1841 static int
1842 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
1843 __u16 cpu, const struct ip_conntrack_stat *st)
1844 {
1845 struct nlmsghdr *nlh;
1846 struct nfgenmsg *nfmsg;
1847 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1848
1849 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
1850 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1851 if (nlh == NULL)
1852 goto nlmsg_failure;
1853
1854 nfmsg = nlmsg_data(nlh);
1855 nfmsg->nfgen_family = AF_UNSPEC;
1856 nfmsg->version = NFNETLINK_V0;
1857 nfmsg->res_id = htons(cpu);
1858
1859 if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
1860 nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
1861 nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
1862 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
1863 nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
1864 nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
1865 nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
1866 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
1867 nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
1868 htonl(st->insert_failed)) ||
1869 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
1870 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
1871 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
1872 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
1873 htonl(st->search_restart)))
1874 goto nla_put_failure;
1875
1876 nlmsg_end(skb, nlh);
1877 return skb->len;
1878
1879 nla_put_failure:
1880 nlmsg_failure:
1881 nlmsg_cancel(skb, nlh);
1882 return -1;
1883 }
1884
1885 static int
1886 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
1887 {
1888 int cpu;
1889 struct net *net = sock_net(skb->sk);
1890
1891 if (cb->args[0] == nr_cpu_ids)
1892 return 0;
1893
1894 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1895 const struct ip_conntrack_stat *st;
1896
1897 if (!cpu_possible(cpu))
1898 continue;
1899
1900 st = per_cpu_ptr(net->ct.stat, cpu);
1901 if (ctnetlink_ct_stat_cpu_fill_info(skb,
1902 NETLINK_CB(cb->skb).portid,
1903 cb->nlh->nlmsg_seq,
1904 cpu, st) < 0)
1905 break;
1906 }
1907 cb->args[0] = cpu;
1908
1909 return skb->len;
1910 }
1911
1912 static int
1913 ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
1914 const struct nlmsghdr *nlh,
1915 const struct nlattr * const cda[])
1916 {
1917 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1918 struct netlink_dump_control c = {
1919 .dump = ctnetlink_ct_stat_cpu_dump,
1920 };
1921 return netlink_dump_start(ctnl, skb, nlh, &c);
1922 }
1923
1924 return 0;
1925 }
1926
1927 static int
1928 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
1929 struct net *net)
1930 {
1931 struct nlmsghdr *nlh;
1932 struct nfgenmsg *nfmsg;
1933 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1934 unsigned int nr_conntracks = atomic_read(&net->ct.count);
1935
1936 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
1937 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1938 if (nlh == NULL)
1939 goto nlmsg_failure;
1940
1941 nfmsg = nlmsg_data(nlh);
1942 nfmsg->nfgen_family = AF_UNSPEC;
1943 nfmsg->version = NFNETLINK_V0;
1944 nfmsg->res_id = 0;
1945
1946 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
1947 goto nla_put_failure;
1948
1949 nlmsg_end(skb, nlh);
1950 return skb->len;
1951
1952 nla_put_failure:
1953 nlmsg_failure:
1954 nlmsg_cancel(skb, nlh);
1955 return -1;
1956 }
1957
1958 static int
1959 ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
1960 const struct nlmsghdr *nlh,
1961 const struct nlattr * const cda[])
1962 {
1963 struct sk_buff *skb2;
1964 int err;
1965
1966 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1967 if (skb2 == NULL)
1968 return -ENOMEM;
1969
1970 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
1971 nlh->nlmsg_seq,
1972 NFNL_MSG_TYPE(nlh->nlmsg_type),
1973 sock_net(skb->sk));
1974 if (err <= 0)
1975 goto free;
1976
1977 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1978 if (err < 0)
1979 goto out;
1980
1981 return 0;
1982
1983 free:
1984 kfree_skb(skb2);
1985 out:
1986 /* this avoids a loop in nfnetlink. */
1987 return err == -EAGAIN ? -ENOBUFS : err;
1988 }
1989
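/* Hooks used by nfnetlink_queue to attach conntrack information to queued
 * packets: build_size/build serialize an entry into the queue message,
 * parse applies the conntrack attributes carried in a verdict back onto
 * the entry.
 */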
1990 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
1991 static size_t
1992 ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
1993 {
1994 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
1995 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
1996 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
1997 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
1998 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
1999 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2000 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2001 + nla_total_size(0) /* CTA_PROTOINFO */
2002 + nla_total_size(0) /* CTA_HELP */
2003 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2004 + ctnetlink_secctx_size(ct)
2005 #ifdef CONFIG_NF_NAT_NEEDED
2006 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2007 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2008 #endif
2009 #ifdef CONFIG_NF_CONNTRACK_MARK
2010 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2011 #endif
2012 + ctnetlink_proto_size(ct)
2013 ;
2014 }
2015
2016 static int
2017 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
2018 {
2019 struct nlattr *nest_parms;
2020
2021 rcu_read_lock();
2022 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
2023 if (!nest_parms)
2024 goto nla_put_failure;
2025 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2026 goto nla_put_failure;
2027 nla_nest_end(skb, nest_parms);
2028
2029 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
2030 if (!nest_parms)
2031 goto nla_put_failure;
2032 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2033 goto nla_put_failure;
2034 nla_nest_end(skb, nest_parms);
2035
2036 if (nf_ct_zone(ct)) {
2037 if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
2038 goto nla_put_failure;
2039 }
2040
2041 if (ctnetlink_dump_id(skb, ct) < 0)
2042 goto nla_put_failure;
2043
2044 if (ctnetlink_dump_status(skb, ct) < 0)
2045 goto nla_put_failure;
2046
2047 if (ctnetlink_dump_timeout(skb, ct) < 0)
2048 goto nla_put_failure;
2049
2050 if (ctnetlink_dump_protoinfo(skb, ct) < 0)
2051 goto nla_put_failure;
2052
2053 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2054 goto nla_put_failure;
2055
2056 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2057 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2058 goto nla_put_failure;
2059 #endif
2060 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2061 goto nla_put_failure;
2062
2063 if ((ct->status & IPS_SEQ_ADJUST) &&
2064 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
2065 goto nla_put_failure;
2066
2067 #ifdef CONFIG_NF_CONNTRACK_MARK
2068 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2069 goto nla_put_failure;
2070 #endif
2071 if (ctnetlink_dump_labels(skb, ct) < 0)
2072 goto nla_put_failure;
2073 rcu_read_unlock();
2074 return 0;
2075
2076 nla_put_failure:
2077 rcu_read_unlock();
2078 return -ENOSPC;
2079 }
2080
2081 static int
2082 ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2083 {
2084 int err;
2085
2086 if (cda[CTA_TIMEOUT]) {
2087 err = ctnetlink_change_timeout(ct, cda);
2088 if (err < 0)
2089 return err;
2090 }
2091 if (cda[CTA_STATUS]) {
2092 err = ctnetlink_change_status(ct, cda);
2093 if (err < 0)
2094 return err;
2095 }
2096 if (cda[CTA_HELP]) {
2097 err = ctnetlink_change_helper(ct, cda);
2098 if (err < 0)
2099 return err;
2100 }
2101 if (cda[CTA_LABELS]) {
2102 err = ctnetlink_attach_labels(ct, cda);
2103 if (err < 0)
2104 return err;
2105 }
2106 #ifdef CONFIG_NF_CONNTRACK_MARK
2107 if (cda[CTA_MARK])
2108 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
2109 #endif
2110 return 0;
2111 }
2112
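/* Entry point for the nfqueue glue: parse the nested CTA_* attributes
 * and update @ct under nf_conntrack_lock. */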
2113 static int
2114 ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
2115 {
2116 struct nlattr *cda[CTA_MAX+1];
2117 int ret;
2118
2119 ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
2120 if (ret < 0)
2121 return ret;
2122
2123 spin_lock_bh(&nf_conntrack_lock);
2124 ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
2125 spin_unlock_bh(&nf_conntrack_lock);
2126
2127 return ret;
2128 }
2129
2130 static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
2131 .build_size = ctnetlink_nfqueue_build_size,
2132 .build = ctnetlink_nfqueue_build,
2133 .parse = ctnetlink_nfqueue_parse,
2134 };
2135 #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
2136
2137 /***********************************************************************
2138 * EXPECT
2139 ***********************************************************************/
2140
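/* Dump an expectation tuple as a nested attribute of the given @type. */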
2141 static inline int
2142 ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2143 const struct nf_conntrack_tuple *tuple,
2144 enum ctattr_expect type)
2145 {
2146 struct nlattr *nest_parms;
2147
2148 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
2149 if (!nest_parms)
2150 goto nla_put_failure;
2151 if (ctnetlink_dump_tuples(skb, tuple) < 0)
2152 goto nla_put_failure;
2153 nla_nest_end(skb, nest_parms);
2154
2155 return 0;
2156
2157 nla_put_failure:
2158 return -1;
2159 }
2160
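/* Dump the expectation mask as CTA_EXPECT_MASK; the mask is expanded into
 * a full tuple so the per-protocol dump helpers can be reused. */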
2161 static inline int
2162 ctnetlink_exp_dump_mask(struct sk_buff *skb,
2163 const struct nf_conntrack_tuple *tuple,
2164 const struct nf_conntrack_tuple_mask *mask)
2165 {
2166 int ret;
2167 struct nf_conntrack_l3proto *l3proto;
2168 struct nf_conntrack_l4proto *l4proto;
2169 struct nf_conntrack_tuple m;
2170 struct nlattr *nest_parms;
2171
2172 memset(&m, 0xFF, sizeof(m));
2173 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2174 m.src.u.all = mask->src.u.all;
2175 m.dst.protonum = tuple->dst.protonum;
2176
2177 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
2178 if (!nest_parms)
2179 goto nla_put_failure;
2180
2181 rcu_read_lock();
2182 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
2183 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
2184 if (ret >= 0) {
2185 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
2186 tuple->dst.protonum);
2187 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2188 }
2189 rcu_read_unlock();
2190
2191 if (unlikely(ret < 0))
2192 goto nla_put_failure;
2193
2194 nla_nest_end(skb, nest_parms);
2195
2196 return 0;
2197
2198 nla_put_failure:
2199 return -1;
2200 }
2201
2202 static const union nf_inet_addr any_addr;
2203
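/* Dump a single expectation: tuple, mask, master tuple, optional NAT
 * information, timeout, id, flags, class, helper name and expectfn. */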
2204 static int
2205 ctnetlink_exp_dump_expect(struct sk_buff *skb,
2206 const struct nf_conntrack_expect *exp)
2207 {
2208 struct nf_conn *master = exp->master;
2209 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
2210 struct nf_conn_help *help;
2211 #ifdef CONFIG_NF_NAT_NEEDED
2212 struct nlattr *nest_parms;
2213 struct nf_conntrack_tuple nat_tuple = {};
2214 #endif
2215 struct nf_ct_helper_expectfn *expfn;
2216
2217 if (timeout < 0)
2218 timeout = 0;
2219
2220 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
2221 goto nla_put_failure;
2222 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2223 goto nla_put_failure;
2224 if (ctnetlink_exp_dump_tuple(skb,
2225 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2226 CTA_EXPECT_MASTER) < 0)
2227 goto nla_put_failure;
2228
2229 #ifdef CONFIG_NF_NAT_NEEDED
2230 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2231 exp->saved_proto.all) {
2232 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
2233 if (!nest_parms)
2234 goto nla_put_failure;
2235
2236 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2237 goto nla_put_failure;
2238
2239 nat_tuple.src.l3num = nf_ct_l3num(master);
2240 nat_tuple.src.u3 = exp->saved_addr;
2241 nat_tuple.dst.protonum = nf_ct_protonum(master);
2242 nat_tuple.src.u = exp->saved_proto;
2243
2244 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2245 CTA_EXPECT_NAT_TUPLE) < 0)
2246 goto nla_put_failure;
2247 nla_nest_end(skb, nest_parms);
2248 }
2249 #endif
2250 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2251 nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
2252 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2253 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2254 goto nla_put_failure;
2255 help = nfct_help(master);
2256 if (help) {
2257 struct nf_conntrack_helper *helper;
2258
2259 helper = rcu_dereference(help->helper);
2260 if (helper &&
2261 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2262 goto nla_put_failure;
2263 }
2264 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2265 if (expfn != NULL &&
2266 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2267 goto nla_put_failure;
2268
2269 return 0;
2270
2271 nla_put_failure:
2272 return -1;
2273 }
2274
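/* Build a complete expectation message: netlink/nfgenmsg headers plus the
 * attributes dumped by ctnetlink_exp_dump_expect(). */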
2275 static int
2276 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2277 int event, const struct nf_conntrack_expect *exp)
2278 {
2279 struct nlmsghdr *nlh;
2280 struct nfgenmsg *nfmsg;
2281 unsigned int flags = portid ? NLM_F_MULTI : 0;
2282
2283 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2284 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2285 if (nlh == NULL)
2286 goto nlmsg_failure;
2287
2288 nfmsg = nlmsg_data(nlh);
2289 nfmsg->nfgen_family = exp->tuple.src.l3num;
2290 nfmsg->version = NFNETLINK_V0;
2291 nfmsg->res_id = 0;
2292
2293 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2294 goto nla_put_failure;
2295
2296 nlmsg_end(skb, nlh);
2297 return skb->len;
2298
2299 nlmsg_failure:
2300 nla_put_failure:
2301 nlmsg_cancel(skb, nlh);
2302 return -1;
2303 }
2304
2305 #ifdef CONFIG_NF_CONNTRACK_EVENTS
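/* Expectation event callback: broadcast NEW/DESTROY events to the
 * NFNLGRP_CONNTRACK_EXP_* multicast groups. */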
2306 static int
2307 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2308 {
2309 struct nf_conntrack_expect *exp = item->exp;
2310 struct net *net = nf_ct_exp_net(exp);
2311 struct nlmsghdr *nlh;
2312 struct nfgenmsg *nfmsg;
2313 struct sk_buff *skb;
2314 unsigned int type, group;
2315 int flags = 0;
2316
2317 if (events & (1 << IPEXP_DESTROY)) {
2318 type = IPCTNL_MSG_EXP_DELETE;
2319 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2320 } else if (events & (1 << IPEXP_NEW)) {
2321 type = IPCTNL_MSG_EXP_NEW;
2322 flags = NLM_F_CREATE|NLM_F_EXCL;
2323 group = NFNLGRP_CONNTRACK_EXP_NEW;
2324 } else
2325 return 0;
2326
2327 if (!item->report && !nfnetlink_has_listeners(net, group))
2328 return 0;
2329
2330 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2331 if (skb == NULL)
2332 goto errout;
2333
2334 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2335 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2336 if (nlh == NULL)
2337 goto nlmsg_failure;
2338
2339 nfmsg = nlmsg_data(nlh);
2340 nfmsg->nfgen_family = exp->tuple.src.l3num;
2341 nfmsg->version = NFNETLINK_V0;
2342 nfmsg->res_id = 0;
2343
2344 rcu_read_lock();
2345 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2346 goto nla_put_failure;
2347 rcu_read_unlock();
2348
2349 nlmsg_end(skb, nlh);
2350 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2351 return 0;
2352
2353 nla_put_failure:
2354 rcu_read_unlock();
2355 nlmsg_cancel(skb, nlh);
2356 nlmsg_failure:
2357 kfree_skb(skb);
2358 errout:
2359 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2360 return 0;
2361 }
2362 #endif
2363 static int ctnetlink_exp_done(struct netlink_callback *cb)
2364 {
2365 if (cb->args[1])
2366 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2367 return 0;
2368 }
2369
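/* Dump walker for the global expectation hash table; cb->args[0] is the
 * current bucket, cb->args[1] holds the last entry that did not fit. */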
2370 static int
2371 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2372 {
2373 struct net *net = sock_net(skb->sk);
2374 struct nf_conntrack_expect *exp, *last;
2375 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2376 u_int8_t l3proto = nfmsg->nfgen_family;
2377
2378 rcu_read_lock();
2379 last = (struct nf_conntrack_expect *)cb->args[1];
2380 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2381 restart:
2382 hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
2383 hnode) {
2384 if (l3proto && exp->tuple.src.l3num != l3proto)
2385 continue;
2386 if (cb->args[1]) {
2387 if (exp != last)
2388 continue;
2389 cb->args[1] = 0;
2390 }
2391 if (ctnetlink_exp_fill_info(skb,
2392 NETLINK_CB(cb->skb).portid,
2393 cb->nlh->nlmsg_seq,
2394 IPCTNL_MSG_EXP_NEW,
2395 exp) < 0) {
2396 if (!atomic_inc_not_zero(&exp->use))
2397 continue;
2398 cb->args[1] = (unsigned long)exp;
2399 goto out;
2400 }
2401 }
2402 if (cb->args[1]) {
2403 cb->args[1] = 0;
2404 goto restart;
2405 }
2406 }
2407 out:
2408 rcu_read_unlock();
2409 if (last)
2410 nf_ct_expect_put(last);
2411
2412 return skb->len;
2413 }
2414
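/* Dump walker for the expectations attached to a single master conntrack
 * (passed in via cb->data). */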
2415 static int
2416 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2417 {
2418 struct nf_conntrack_expect *exp, *last;
2419 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2420 struct nf_conn *ct = cb->data;
2421 struct nf_conn_help *help = nfct_help(ct);
2422 u_int8_t l3proto = nfmsg->nfgen_family;
2423
2424 if (cb->args[0])
2425 return 0;
2426
2427 rcu_read_lock();
2428 last = (struct nf_conntrack_expect *)cb->args[1];
2429 restart:
2430 hlist_for_each_entry(exp, &help->expectations, lnode) {
2431 if (l3proto && exp->tuple.src.l3num != l3proto)
2432 continue;
2433 if (cb->args[1]) {
2434 if (exp != last)
2435 continue;
2436 cb->args[1] = 0;
2437 }
2438 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
2439 cb->nlh->nlmsg_seq,
2440 IPCTNL_MSG_EXP_NEW,
2441 exp) < 0) {
2442 if (!atomic_inc_not_zero(&exp->use))
2443 continue;
2444 cb->args[1] = (unsigned long)exp;
2445 goto out;
2446 }
2447 }
2448 if (cb->args[1]) {
2449 cb->args[1] = 0;
2450 goto restart;
2451 }
2452 cb->args[0] = 1;
2453 out:
2454 rcu_read_unlock();
2455 if (last)
2456 nf_ct_expect_put(last);
2457
2458 return skb->len;
2459 }
2460
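/* GET with NLM_F_DUMP and CTA_EXPECT_MASTER: dump only the expectations
 * belonging to the given master conntrack. */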
2461 static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
2462 const struct nlmsghdr *nlh,
2463 const struct nlattr * const cda[])
2464 {
2465 int err;
2466 struct net *net = sock_net(ctnl);
2467 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2468 u_int8_t u3 = nfmsg->nfgen_family;
2469 struct nf_conntrack_tuple tuple;
2470 struct nf_conntrack_tuple_hash *h;
2471 struct nf_conn *ct;
2472 u16 zone = 0;
2473 struct netlink_dump_control c = {
2474 .dump = ctnetlink_exp_ct_dump_table,
2475 .done = ctnetlink_exp_done,
2476 };
2477
2478 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2479 if (err < 0)
2480 return err;
2481
2482 if (cda[CTA_EXPECT_ZONE]) {
2483 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2484 if (err < 0)
2485 return err;
2486 }
2487
2488 h = nf_conntrack_find_get(net, zone, &tuple);
2489 if (!h)
2490 return -ENOENT;
2491
2492 ct = nf_ct_tuplehash_to_ctrack(h);
2493 c.data = ct;
2494
2495 err = netlink_dump_start(ctnl, skb, nlh, &c);
2496 nf_ct_put(ct);
2497
2498 return err;
2499 }
2500
2501 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2502 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2503 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2504 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2505 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2506 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2507 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2508 .len = NF_CT_HELPER_NAME_LEN - 1 },
2509 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2510 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2511 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2512 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2513 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2514 };
2515
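/* IPCTNL_MSG_EXP_GET handler: dump the expectation table or look up a
 * single expectation and unicast it to the requester. */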
2516 static int
2517 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2518 const struct nlmsghdr *nlh,
2519 const struct nlattr * const cda[])
2520 {
2521 struct net *net = sock_net(ctnl);
2522 struct nf_conntrack_tuple tuple;
2523 struct nf_conntrack_expect *exp;
2524 struct sk_buff *skb2;
2525 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2526 u_int8_t u3 = nfmsg->nfgen_family;
2527 u16 zone;
2528 int err;
2529
2530 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2531 if (cda[CTA_EXPECT_MASTER])
2532 return ctnetlink_dump_exp_ct(ctnl, skb, nlh, cda);
2533 else {
2534 struct netlink_dump_control c = {
2535 .dump = ctnetlink_exp_dump_table,
2536 .done = ctnetlink_exp_done,
2537 };
2538 return netlink_dump_start(ctnl, skb, nlh, &c);
2539 }
2540 }
2541
2542 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2543 if (err < 0)
2544 return err;
2545
2546 if (cda[CTA_EXPECT_TUPLE])
2547 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2548 else if (cda[CTA_EXPECT_MASTER])
2549 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2550 else
2551 return -EINVAL;
2552
2553 if (err < 0)
2554 return err;
2555
2556 exp = nf_ct_expect_find_get(net, zone, &tuple);
2557 if (!exp)
2558 return -ENOENT;
2559
2560 if (cda[CTA_EXPECT_ID]) {
2561 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2562 if (ntohl(id) != (u32)(unsigned long)exp) {
2563 nf_ct_expect_put(exp);
2564 return -ENOENT;
2565 }
2566 }
2567
2568 err = -ENOMEM;
2569 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2570 if (skb2 == NULL) {
2571 nf_ct_expect_put(exp);
2572 goto out;
2573 }
2574
2575 rcu_read_lock();
2576 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
2577 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
2578 rcu_read_unlock();
2579 nf_ct_expect_put(exp);
2580 if (err <= 0)
2581 goto free;
2582
2583 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2584 if (err < 0)
2585 goto out;
2586
2587 return 0;
2588
2589 free:
2590 kfree_skb(skb2);
2591 out:
2592 /* this avoids a loop in nfnetlink. */
2593 return err == -EAGAIN ? -ENOBUFS : err;
2594 }
2595
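/* IPCTNL_MSG_EXP_DELETE handler: remove a single expectation by tuple,
 * all expectations of a given helper, or flush the whole table. */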
2596 static int
2597 ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2598 const struct nlmsghdr *nlh,
2599 const struct nlattr * const cda[])
2600 {
2601 struct net *net = sock_net(ctnl);
2602 struct nf_conntrack_expect *exp;
2603 struct nf_conntrack_tuple tuple;
2604 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2605 struct hlist_node *next;
2606 u_int8_t u3 = nfmsg->nfgen_family;
2607 unsigned int i;
2608 u16 zone;
2609 int err;
2610
2611 if (cda[CTA_EXPECT_TUPLE]) {
2612 /* delete a single expect by tuple */
2613 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2614 if (err < 0)
2615 return err;
2616
2617 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2618 if (err < 0)
2619 return err;
2620
2621 /* bump usage count to 2 */
2622 exp = nf_ct_expect_find_get(net, zone, &tuple);
2623 if (!exp)
2624 return -ENOENT;
2625
2626 if (cda[CTA_EXPECT_ID]) {
2627 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2628 if (ntohl(id) != (u32)(unsigned long)exp) {
2629 nf_ct_expect_put(exp);
2630 return -ENOENT;
2631 }
2632 }
2633
2634 /* after list removal, usage count == 1 */
2635 spin_lock_bh(&nf_conntrack_lock);
2636 if (del_timer(&exp->timeout)) {
2637 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
2638 nlmsg_report(nlh));
2639 nf_ct_expect_put(exp);
2640 }
2641 spin_unlock_bh(&nf_conntrack_lock);
2642 /* drop the reference taken by nf_ct_expect_find_get() above;
2643 * after this line the usage count is 0 */
2644 nf_ct_expect_put(exp);
2645 } else if (cda[CTA_EXPECT_HELP_NAME]) {
2646 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2647 struct nf_conn_help *m_help;
2648
2649 /* delete all expectations for this helper */
2650 spin_lock_bh(&nf_conntrack_lock);
2651 for (i = 0; i < nf_ct_expect_hsize; i++) {
2652 hlist_for_each_entry_safe(exp, next,
2653 &net->ct.expect_hash[i],
2654 hnode) {
2655 m_help = nfct_help(exp->master);
2656 if (!strcmp(m_help->helper->name, name) &&
2657 del_timer(&exp->timeout)) {
2658 nf_ct_unlink_expect_report(exp,
2659 NETLINK_CB(skb).portid,
2660 nlmsg_report(nlh));
2661 nf_ct_expect_put(exp);
2662 }
2663 }
2664 }
2665 spin_unlock_bh(&nf_conntrack_lock);
2666 } else {
2667 /* Neither a tuple nor a helper name was given: flush all expectations. */
2668 spin_lock_bh(&nf_conntrack_lock);
2669 for (i = 0; i < nf_ct_expect_hsize; i++) {
2670 hlist_for_each_entry_safe(exp, next,
2671 &net->ct.expect_hash[i],
2672 hnode) {
2673 if (del_timer(&exp->timeout)) {
2674 nf_ct_unlink_expect_report(exp,
2675 NETLINK_CB(skb).portid,
2676 nlmsg_report(nlh));
2677 nf_ct_expect_put(exp);
2678 }
2679 }
2680 }
2681 spin_unlock_bh(&nf_conntrack_lock);
2682 }
2683
2684 return 0;
2685 }
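/* Update an existing expectation; only the timeout can be changed. */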
2686 static int
2687 ctnetlink_change_expect(struct nf_conntrack_expect *x,
2688 const struct nlattr * const cda[])
2689 {
2690 if (cda[CTA_EXPECT_TIMEOUT]) {
2691 if (!del_timer(&x->timeout))
2692 return -ETIME;
2693
2694 x->timeout.expires = jiffies +
2695 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2696 add_timer(&x->timeout);
2697 }
2698 return 0;
2699 }
2700
2701 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2702 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
2703 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
2704 };
2705
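/* Parse the CTA_EXPECT_NAT nested attribute into exp->saved_addr,
 * exp->saved_proto and exp->dir; -EOPNOTSUPP without NAT support. */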
2706 static int
2707 ctnetlink_parse_expect_nat(const struct nlattr *attr,
2708 struct nf_conntrack_expect *exp,
2709 u_int8_t u3)
2710 {
2711 #ifdef CONFIG_NF_NAT_NEEDED
2712 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2713 struct nf_conntrack_tuple nat_tuple = {};
2714 int err;
2715
2716 err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2717 if (err < 0)
2718 return err;
2719
2720 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2721 return -EINVAL;
2722
2723 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2724 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2725 if (err < 0)
2726 return err;
2727
2728 exp->saved_addr = nat_tuple.src.u3;
2729 exp->saved_proto = nat_tuple.src.u;
2730 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2731
2732 return 0;
2733 #else
2734 return -EOPNOTSUPP;
2735 #endif
2736 }
2737
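/* Allocate and initialise an expectation from netlink attributes;
 * returns an ERR_PTR() on failure. */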
2738 static struct nf_conntrack_expect *
2739 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
2740 struct nf_conntrack_helper *helper,
2741 struct nf_conntrack_tuple *tuple,
2742 struct nf_conntrack_tuple *mask)
2743 {
2744 u_int32_t class = 0;
2745 struct nf_conntrack_expect *exp;
2746 struct nf_conn_help *help;
2747 int err;
2748
2749 if (cda[CTA_EXPECT_CLASS] && helper) {
2750 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2751 if (class > helper->expect_class_max)
2752 return ERR_PTR(-EINVAL);
2753 }
2754 exp = nf_ct_expect_alloc(ct);
2755 if (!exp)
2756 return ERR_PTR(-ENOMEM);
2757
2758 help = nfct_help(ct);
2759 if (!help) {
2760 if (!cda[CTA_EXPECT_TIMEOUT]) {
2761 err = -EINVAL;
2762 goto err_out;
2763 }
2764 exp->timeout.expires =
2765 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2766
2767 exp->flags = NF_CT_EXPECT_USERSPACE;
2768 if (cda[CTA_EXPECT_FLAGS]) {
2769 exp->flags |=
2770 ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2771 }
2772 } else {
2773 if (cda[CTA_EXPECT_FLAGS]) {
2774 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2775 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2776 } else
2777 exp->flags = 0;
2778 }
2779 if (cda[CTA_EXPECT_FN]) {
2780 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2781 struct nf_ct_helper_expectfn *expfn;
2782
2783 expfn = nf_ct_helper_expectfn_find_by_name(name);
2784 if (expfn == NULL) {
2785 err = -EINVAL;
2786 goto err_out;
2787 }
2788 exp->expectfn = expfn->expectfn;
2789 } else
2790 exp->expectfn = NULL;
2791
2792 exp->class = class;
2793 exp->master = ct;
2794 exp->helper = helper;
2795 exp->tuple = *tuple;
2796 exp->mask.src.u3 = mask->src.u3;
2797 exp->mask.src.u.all = mask->src.u.all;
2798
2799 if (cda[CTA_EXPECT_NAT]) {
2800 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2801 exp, nf_ct_l3num(ct));
2802 if (err < 0)
2803 goto err_out;
2804 }
2805 return exp;
2806 err_out:
2807 nf_ct_expect_put(exp);
2808 return ERR_PTR(err);
2809 }
2810
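/* Create a new expectation: parse tuple, mask and master tuple, find the
 * master conntrack, optionally look up a helper, then register it. */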
2811 static int
2812 ctnetlink_create_expect(struct net *net, u16 zone,
2813 const struct nlattr * const cda[],
2814 u_int8_t u3, u32 portid, int report)
2815 {
2816 struct nf_conntrack_tuple tuple, mask, master_tuple;
2817 struct nf_conntrack_tuple_hash *h = NULL;
2818 struct nf_conntrack_helper *helper = NULL;
2819 struct nf_conntrack_expect *exp;
2820 struct nf_conn *ct;
2821 int err;
2822
2823 /* the caller guarantees that these three CTA_EXPECT_* attributes exist */
2824 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2825 if (err < 0)
2826 return err;
2827 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2828 if (err < 0)
2829 return err;
2830 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2831 if (err < 0)
2832 return err;
2833
2834 /* Look for master conntrack of this expectation */
2835 h = nf_conntrack_find_get(net, zone, &master_tuple);
2836 if (!h)
2837 return -ENOENT;
2838 ct = nf_ct_tuplehash_to_ctrack(h);
2839
2840 if (cda[CTA_EXPECT_HELP_NAME]) {
2841 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2842
2843 helper = __nf_conntrack_helper_find(helpname, u3,
2844 nf_ct_protonum(ct));
2845 if (helper == NULL) {
2846 #ifdef CONFIG_MODULES
2847 if (request_module("nfct-helper-%s", helpname) < 0) {
2848 err = -EOPNOTSUPP;
2849 goto err_ct;
2850 }
2851 helper = __nf_conntrack_helper_find(helpname, u3,
2852 nf_ct_protonum(ct));
2853 if (helper) {
2854 err = -EAGAIN;
2855 goto err_ct;
2856 }
2857 #endif
2858 err = -EOPNOTSUPP;
2859 goto err_ct;
2860 }
2861 }
2862
2863 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
2864 if (IS_ERR(exp)) {
2865 err = PTR_ERR(exp);
2866 goto err_ct;
2867 }
2868
2869 err = nf_ct_expect_related_report(exp, portid, report);
2870 if (err < 0)
2871 goto err_exp;
2872
2873 return 0;
2874 err_exp:
2875 nf_ct_expect_put(exp);
2876 err_ct:
2877 nf_ct_put(ct);
2878 return err;
2879 }
2880
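/* IPCTNL_MSG_EXP_NEW handler: create a new expectation or update an
 * existing one, depending on NLM_F_CREATE/NLM_F_EXCL. */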
2881 static int
2882 ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2883 const struct nlmsghdr *nlh,
2884 const struct nlattr * const cda[])
2885 {
2886 struct net *net = sock_net(ctnl);
2887 struct nf_conntrack_tuple tuple;
2888 struct nf_conntrack_expect *exp;
2889 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2890 u_int8_t u3 = nfmsg->nfgen_family;
2891 u16 zone;
2892 int err;
2893
2894 if (!cda[CTA_EXPECT_TUPLE] ||
2895 !cda[CTA_EXPECT_MASK] ||
2896 !cda[CTA_EXPECT_MASTER])
2897 return -EINVAL;
2898
2899 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2900 if (err < 0)
2901 return err;
2902
2903 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2904 if (err < 0)
2905 return err;
2906
2907 spin_lock_bh(&nf_conntrack_lock);
2908 exp = __nf_ct_expect_find(net, zone, &tuple);
2909
2910 if (!exp) {
2911 spin_unlock_bh(&nf_conntrack_lock);
2912 err = -ENOENT;
2913 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2914 err = ctnetlink_create_expect(net, zone, cda,
2915 u3,
2916 NETLINK_CB(skb).portid,
2917 nlmsg_report(nlh));
2918 }
2919 return err;
2920 }
2921
2922 err = -EEXIST;
2923 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
2924 err = ctnetlink_change_expect(exp, cda);
2925 spin_unlock_bh(&nf_conntrack_lock);
2926
2927 return err;
2928 }
2929
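/* Fill one per-CPU expectation statistics message (new/create/delete
 * counters). */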
2930 static int
2931 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
2932 const struct ip_conntrack_stat *st)
2933 {
2934 struct nlmsghdr *nlh;
2935 struct nfgenmsg *nfmsg;
2936 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2937
2938 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
2939 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2940 if (nlh == NULL)
2941 goto nlmsg_failure;
2942
2943 nfmsg = nlmsg_data(nlh);
2944 nfmsg->nfgen_family = AF_UNSPEC;
2945 nfmsg->version = NFNETLINK_V0;
2946 nfmsg->res_id = htons(cpu);
2947
2948 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
2949 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
2950 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
2951 goto nla_put_failure;
2952
2953 nlmsg_end(skb, nlh);
2954 return skb->len;
2955
2956 nla_put_failure:
2957 nlmsg_failure:
2958 nlmsg_cancel(skb, nlh);
2959 return -1;
2960 }
2961
2962 static int
2963 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2964 {
2965 int cpu;
2966 struct net *net = sock_net(skb->sk);
2967
2968 if (cb->args[0] == nr_cpu_ids)
2969 return 0;
2970
2971 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2972 const struct ip_conntrack_stat *st;
2973
2974 if (!cpu_possible(cpu))
2975 continue;
2976
2977 st = per_cpu_ptr(net->ct.stat, cpu);
2978 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
2979 cb->nlh->nlmsg_seq,
2980 cpu, st) < 0)
2981 break;
2982 }
2983 cb->args[0] = cpu;
2984
2985 return skb->len;
2986 }
2987
2988 static int
2989 ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
2990 const struct nlmsghdr *nlh,
2991 const struct nlattr * const cda[])
2992 {
2993 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2994 struct netlink_dump_control c = {
2995 .dump = ctnetlink_exp_stat_cpu_dump,
2996 };
2997 return netlink_dump_start(ctnl, skb, nlh, &c);
2998 }
2999
3000 return 0;
3001 }
3002
3003 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3004 static struct nf_ct_event_notifier ctnl_notifier = {
3005 .fcn = ctnetlink_conntrack_event,
3006 };
3007
3008 static struct nf_exp_event_notifier ctnl_notifier_exp = {
3009 .fcn = ctnetlink_expect_event,
3010 };
3011 #endif
3012
3013 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3014 [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack,
3015 .attr_count = CTA_MAX,
3016 .policy = ct_nla_policy },
3017 [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack,
3018 .attr_count = CTA_MAX,
3019 .policy = ct_nla_policy },
3020 [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack,
3021 .attr_count = CTA_MAX,
3022 .policy = ct_nla_policy },
3023 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
3024 .attr_count = CTA_MAX,
3025 .policy = ct_nla_policy },
3026 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
3027 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
3028 [IPCTNL_MSG_CT_GET_DYING] = { .call = ctnetlink_get_ct_dying },
3029 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { .call = ctnetlink_get_ct_unconfirmed },
3030 };
3031
3032 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3033 [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect,
3034 .attr_count = CTA_EXPECT_MAX,
3035 .policy = exp_nla_policy },
3036 [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect,
3037 .attr_count = CTA_EXPECT_MAX,
3038 .policy = exp_nla_policy },
3039 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
3040 .attr_count = CTA_EXPECT_MAX,
3041 .policy = exp_nla_policy },
3042 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
3043 };
3044
3045 static const struct nfnetlink_subsystem ctnl_subsys = {
3046 .name = "conntrack",
3047 .subsys_id = NFNL_SUBSYS_CTNETLINK,
3048 .cb_count = IPCTNL_MSG_MAX,
3049 .cb = ctnl_cb,
3050 };
3051
3052 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3053 .name = "conntrack_expect",
3054 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
3055 .cb_count = IPCTNL_MSG_EXP_MAX,
3056 .cb = ctnl_exp_cb,
3057 };
3058
3059 MODULE_ALIAS("ip_conntrack_netlink");
3060 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3061 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
3062
3063 static int __net_init ctnetlink_net_init(struct net *net)
3064 {
3065 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3066 int ret;
3067
3068 ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
3069 if (ret < 0) {
3070 pr_err("ctnetlink_init: cannot register notifier.\n");
3071 goto err_out;
3072 }
3073
3074 ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
3075 if (ret < 0) {
3076 pr_err("ctnetlink_init: cannot register expect notifier.\n");
3077 goto err_unreg_notifier;
3078 }
3079 #endif
3080 return 0;
3081
3082 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3083 err_unreg_notifier:
3084 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3085 err_out:
3086 return ret;
3087 #endif
3088 }
3089
3090 static void ctnetlink_net_exit(struct net *net)
3091 {
3092 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3093 nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
3094 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3095 #endif
3096 }
3097
3098 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
3099 {
3100 struct net *net;
3101
3102 list_for_each_entry(net, net_exit_list, exit_list)
3103 ctnetlink_net_exit(net);
3104 }
3105
3106 static struct pernet_operations ctnetlink_net_ops = {
3107 .init = ctnetlink_net_init,
3108 .exit_batch = ctnetlink_net_exit_batch,
3109 };
3110
3111 static int __init ctnetlink_init(void)
3112 {
3113 int ret;
3114
3115 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
3116 ret = nfnetlink_subsys_register(&ctnl_subsys);
3117 if (ret < 0) {
3118 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3119 goto err_out;
3120 }
3121
3122 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3123 if (ret < 0) {
3124 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3125 goto err_unreg_subsys;
3126 }
3127
3128 ret = register_pernet_subsys(&ctnetlink_net_ops);
3129 if (ret < 0) {
3130 pr_err("ctnetlink_init: cannot register pernet operations.\n");
3131 goto err_unreg_exp_subsys;
3132 }
3133 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3134 /* set up the interaction between nf_queue and nf_conntrack_netlink. */
3135 RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
3136 #endif
3137 return 0;
3138
3139 err_unreg_exp_subsys:
3140 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3141 err_unreg_subsys:
3142 nfnetlink_subsys_unregister(&ctnl_subsys);
3143 err_out:
3144 return ret;
3145 }
3146
3147 static void __exit ctnetlink_exit(void)
3148 {
3149 pr_info("ctnetlink: unregistering from nfnetlink.\n");
3150
3151 unregister_pernet_subsys(&ctnetlink_net_ops);
3152 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3153 nfnetlink_subsys_unregister(&ctnl_subsys);
3154 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3155 RCU_INIT_POINTER(nfq_ct_hook, NULL);
3156 #endif
3157 }
3158
3159 module_init(ctnetlink_init);
3160 module_exit(ctnetlink_exit);