// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
 */

/* Kernel module for IP set management */
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
13 #include <linux/skbuff.h>
14 #include <linux/spinlock.h>
15 #include <linux/rculist.h>
16 #include <net/netlink.h>
17 #include <net/net_namespace.h>
18 #include <net/netns/generic.h>
20 #include <linux/netfilter.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter/nfnetlink.h>
23 #include <linux/netfilter/ipset/ip_set.h>
25 static LIST_HEAD(ip_set_type_list
); /* all registered set types */
26 static DEFINE_MUTEX(ip_set_type_mutex
); /* protects ip_set_type_list */
27 static DEFINE_RWLOCK(ip_set_ref_lock
); /* protects the set refs */
30 struct ip_set
* __rcu
*ip_set_list
; /* all individual sets */
31 ip_set_id_t ip_set_max
; /* max number of sets */
32 bool is_deleted
; /* deleted by ip_set_net_exit */
33 bool is_destroyed
; /* all sets are destroyed */
36 static unsigned int ip_set_net_id __read_mostly
;
38 static struct ip_set_net
*ip_set_pernet(struct net
*net
)
40 return net_generic(net
, ip_set_net_id
);
44 #define STRNCMP(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
46 static unsigned int max_sets
;
48 module_param(max_sets
, int, 0600);
49 MODULE_PARM_DESC(max_sets
, "maximal number of sets");
50 MODULE_LICENSE("GPL");
51 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
52 MODULE_DESCRIPTION("core IP set support");
53 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET
);
55 /* When the nfnl mutex or ip_set_ref_lock is held: */
56 #define ip_set_dereference(p) \
57 rcu_dereference_protected(p, \
58 lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
59 lockdep_is_held(&ip_set_ref_lock))
60 #define ip_set(inst, id) \
61 ip_set_dereference((inst)->ip_set_list)[id]
62 #define ip_set_ref_netlink(inst,id) \
63 rcu_dereference_raw((inst)->ip_set_list)[id]
/* The set types are implemented in modules and registered set types
 * can be found in ip_set_type_list. Adding/deleting types is
 * serialized by ip_set_type_mutex.
 */
71 ip_set_type_lock(void)
73 mutex_lock(&ip_set_type_mutex
);
77 ip_set_type_unlock(void)
79 mutex_unlock(&ip_set_type_mutex
);
82 /* Register and deregister settype */
84 static struct ip_set_type
*
85 find_set_type(const char *name
, u8 family
, u8 revision
)
87 struct ip_set_type
*type
;
89 list_for_each_entry_rcu(type
, &ip_set_type_list
, list
,
90 lockdep_is_held(&ip_set_type_mutex
))
91 if (STRNCMP(type
->name
, name
) &&
92 (type
->family
== family
||
93 type
->family
== NFPROTO_UNSPEC
) &&
94 revision
>= type
->revision_min
&&
95 revision
<= type
->revision_max
)
100 /* Unlock, try to load a set type module and lock again */
102 load_settype(const char *name
)
104 nfnl_unlock(NFNL_SUBSYS_IPSET
);
105 pr_debug("try to load ip_set_%s\n", name
);
106 if (request_module("ip_set_%s", name
) < 0) {
107 pr_warn("Can't find ip_set type %s\n", name
);
108 nfnl_lock(NFNL_SUBSYS_IPSET
);
111 nfnl_lock(NFNL_SUBSYS_IPSET
);
115 /* Find a set type and reference it */
116 #define find_set_type_get(name, family, revision, found) \
117 __find_set_type_get(name, family, revision, found, false)
120 __find_set_type_get(const char *name
, u8 family
, u8 revision
,
121 struct ip_set_type
**found
, bool retry
)
123 struct ip_set_type
*type
;
126 if (retry
&& !load_settype(name
))
127 return -IPSET_ERR_FIND_TYPE
;
130 *found
= find_set_type(name
, family
, revision
);
132 err
= !try_module_get((*found
)->me
) ? -EFAULT
: 0;
135 /* Make sure the type is already loaded
136 * but we don't support the revision
138 list_for_each_entry_rcu(type
, &ip_set_type_list
, list
)
139 if (STRNCMP(type
->name
, name
)) {
140 err
= -IPSET_ERR_FIND_TYPE
;
145 return retry
? -IPSET_ERR_FIND_TYPE
:
146 __find_set_type_get(name
, family
, revision
, found
, true);
/* Find a given set type by name and family.
 * If we succeeded, the supported minimal and maximum revisions are
 * filled out.
 */
157 #define find_set_type_minmax(name, family, min, max) \
158 __find_set_type_minmax(name, family, min, max, false)
161 __find_set_type_minmax(const char *name
, u8 family
, u8
*min
, u8
*max
,
164 struct ip_set_type
*type
;
167 if (retry
&& !load_settype(name
))
168 return -IPSET_ERR_FIND_TYPE
;
170 *min
= 255; *max
= 0;
172 list_for_each_entry_rcu(type
, &ip_set_type_list
, list
)
173 if (STRNCMP(type
->name
, name
) &&
174 (type
->family
== family
||
175 type
->family
== NFPROTO_UNSPEC
)) {
177 if (type
->revision_min
< *min
)
178 *min
= type
->revision_min
;
179 if (type
->revision_max
> *max
)
180 *max
= type
->revision_max
;
186 return retry
? -IPSET_ERR_FIND_TYPE
:
187 __find_set_type_minmax(name
, family
, min
, max
, true);
190 #define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
191 (f) == NFPROTO_IPV6 ? "inet6" : "any")
/* Register a set type structure. The type is identified by
 * the unique triple of name, family and revision.
 */
197 ip_set_type_register(struct ip_set_type
*type
)
201 if (type
->protocol
!= IPSET_PROTOCOL
) {
202 pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n",
203 type
->name
, family_name(type
->family
),
204 type
->revision_min
, type
->revision_max
,
205 type
->protocol
, IPSET_PROTOCOL
);
210 if (find_set_type(type
->name
, type
->family
, type
->revision_min
)) {
212 pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
213 type
->name
, family_name(type
->family
),
215 ip_set_type_unlock();
218 list_add_rcu(&type
->list
, &ip_set_type_list
);
219 pr_debug("type %s, family %s, revision %u:%u registered.\n",
220 type
->name
, family_name(type
->family
),
221 type
->revision_min
, type
->revision_max
);
222 ip_set_type_unlock();
226 EXPORT_SYMBOL_GPL(ip_set_type_register
);
228 /* Unregister a set type. There's a small race with ip_set_create */
230 ip_set_type_unregister(struct ip_set_type
*type
)
233 if (!find_set_type(type
->name
, type
->family
, type
->revision_min
)) {
234 pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
235 type
->name
, family_name(type
->family
),
237 ip_set_type_unlock();
240 list_del_rcu(&type
->list
);
241 pr_debug("type %s, family %s with revision min %u unregistered.\n",
242 type
->name
, family_name(type
->family
), type
->revision_min
);
243 ip_set_type_unlock();
247 EXPORT_SYMBOL_GPL(ip_set_type_unregister
);
249 /* Utility functions */
251 ip_set_alloc(size_t size
)
253 return kvzalloc(size
, GFP_KERNEL_ACCOUNT
);
255 EXPORT_SYMBOL_GPL(ip_set_alloc
);
/* Counterpart of ip_set_alloc(): release memory obtained via kvzalloc. */
void
ip_set_free(void *members)
{
	pr_debug("%p: free with %s\n", members,
		 is_vmalloc_addr(members) ? "vfree" : "kfree");
	kvfree(members);
}
EXPORT_SYMBOL_GPL(ip_set_free);
267 flag_nested(const struct nlattr
*nla
)
269 return nla
->nla_type
& NLA_F_NESTED
;
272 static const struct nla_policy ipaddr_policy
[IPSET_ATTR_IPADDR_MAX
+ 1] = {
273 [IPSET_ATTR_IPADDR_IPV4
] = { .type
= NLA_U32
},
274 [IPSET_ATTR_IPADDR_IPV6
] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr
)),
278 ip_set_get_ipaddr4(struct nlattr
*nla
, __be32
*ipaddr
)
280 struct nlattr
*tb
[IPSET_ATTR_IPADDR_MAX
+ 1];
282 if (unlikely(!flag_nested(nla
)))
283 return -IPSET_ERR_PROTOCOL
;
284 if (nla_parse_nested(tb
, IPSET_ATTR_IPADDR_MAX
, nla
,
285 ipaddr_policy
, NULL
))
286 return -IPSET_ERR_PROTOCOL
;
287 if (unlikely(!ip_set_attr_netorder(tb
, IPSET_ATTR_IPADDR_IPV4
)))
288 return -IPSET_ERR_PROTOCOL
;
290 *ipaddr
= nla_get_be32(tb
[IPSET_ATTR_IPADDR_IPV4
]);
293 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4
);
296 ip_set_get_ipaddr6(struct nlattr
*nla
, union nf_inet_addr
*ipaddr
)
298 struct nlattr
*tb
[IPSET_ATTR_IPADDR_MAX
+ 1];
300 if (unlikely(!flag_nested(nla
)))
301 return -IPSET_ERR_PROTOCOL
;
303 if (nla_parse_nested(tb
, IPSET_ATTR_IPADDR_MAX
, nla
,
304 ipaddr_policy
, NULL
))
305 return -IPSET_ERR_PROTOCOL
;
306 if (unlikely(!ip_set_attr_netorder(tb
, IPSET_ATTR_IPADDR_IPV6
)))
307 return -IPSET_ERR_PROTOCOL
;
309 memcpy(ipaddr
, nla_data(tb
[IPSET_ATTR_IPADDR_IPV6
]),
310 sizeof(struct in6_addr
));
313 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6
);
316 ip_set_timeout_get(const unsigned long *timeout
)
320 if (*timeout
== IPSET_ELEM_PERMANENT
)
323 t
= jiffies_to_msecs(*timeout
- jiffies
) / MSEC_PER_SEC
;
324 /* Zero value in userspace means no timeout */
325 return t
== 0 ? 1 : t
;
/* Return the comment string payload of a netlink attribute. */
static char *
ip_set_comment_uget(struct nlattr *tb)
{
	return nla_data(tb);
}
334 /* Called from uadd only, protected by the set spinlock.
335 * The kadt functions don't use the comment extensions in any way.
338 ip_set_init_comment(struct ip_set
*set
, struct ip_set_comment
*comment
,
339 const struct ip_set_ext
*ext
)
341 struct ip_set_comment_rcu
*c
= rcu_dereference_protected(comment
->c
, 1);
342 size_t len
= ext
->comment
? strlen(ext
->comment
) : 0;
345 set
->ext_size
-= sizeof(*c
) + strlen(c
->str
) + 1;
347 rcu_assign_pointer(comment
->c
, NULL
);
351 if (unlikely(len
> IPSET_MAX_COMMENT_SIZE
))
352 len
= IPSET_MAX_COMMENT_SIZE
;
353 c
= kmalloc(sizeof(*c
) + len
+ 1, GFP_ATOMIC
);
356 strlcpy(c
->str
, ext
->comment
, len
+ 1);
357 set
->ext_size
+= sizeof(*c
) + strlen(c
->str
) + 1;
358 rcu_assign_pointer(comment
->c
, c
);
360 EXPORT_SYMBOL_GPL(ip_set_init_comment
);
362 /* Used only when dumping a set, protected by rcu_read_lock() */
364 ip_set_put_comment(struct sk_buff
*skb
, const struct ip_set_comment
*comment
)
366 struct ip_set_comment_rcu
*c
= rcu_dereference(comment
->c
);
370 return nla_put_string(skb
, IPSET_ATTR_COMMENT
, c
->str
);
373 /* Called from uadd/udel, flush or the garbage collectors protected
374 * by the set spinlock.
375 * Called when the set is destroyed and when there can't be any user
376 * of the set data anymore.
379 ip_set_comment_free(struct ip_set
*set
, void *ptr
)
381 struct ip_set_comment
*comment
= ptr
;
382 struct ip_set_comment_rcu
*c
;
384 c
= rcu_dereference_protected(comment
->c
, 1);
387 set
->ext_size
-= sizeof(*c
) + strlen(c
->str
) + 1;
389 rcu_assign_pointer(comment
->c
, NULL
);
392 typedef void (*destroyer
)(struct ip_set
*, void *);
393 /* ipset data extension types, in size order */
395 const struct ip_set_ext_type ip_set_extensions
[] = {
396 [IPSET_EXT_ID_COUNTER
] = {
397 .type
= IPSET_EXT_COUNTER
,
398 .flag
= IPSET_FLAG_WITH_COUNTERS
,
399 .len
= sizeof(struct ip_set_counter
),
400 .align
= __alignof__(struct ip_set_counter
),
402 [IPSET_EXT_ID_TIMEOUT
] = {
403 .type
= IPSET_EXT_TIMEOUT
,
404 .len
= sizeof(unsigned long),
405 .align
= __alignof__(unsigned long),
407 [IPSET_EXT_ID_SKBINFO
] = {
408 .type
= IPSET_EXT_SKBINFO
,
409 .flag
= IPSET_FLAG_WITH_SKBINFO
,
410 .len
= sizeof(struct ip_set_skbinfo
),
411 .align
= __alignof__(struct ip_set_skbinfo
),
413 [IPSET_EXT_ID_COMMENT
] = {
414 .type
= IPSET_EXT_COMMENT
| IPSET_EXT_DESTROY
,
415 .flag
= IPSET_FLAG_WITH_COMMENT
,
416 .len
= sizeof(struct ip_set_comment
),
417 .align
= __alignof__(struct ip_set_comment
),
418 .destroy
= ip_set_comment_free
,
421 EXPORT_SYMBOL_GPL(ip_set_extensions
);
424 add_extension(enum ip_set_ext_id id
, u32 flags
, struct nlattr
*tb
[])
426 return ip_set_extensions
[id
].flag
?
427 (flags
& ip_set_extensions
[id
].flag
) :
428 !!tb
[IPSET_ATTR_TIMEOUT
];
432 ip_set_elem_len(struct ip_set
*set
, struct nlattr
*tb
[], size_t len
,
435 enum ip_set_ext_id id
;
438 if (tb
[IPSET_ATTR_CADT_FLAGS
])
439 cadt_flags
= ip_set_get_h32(tb
[IPSET_ATTR_CADT_FLAGS
]);
440 if (cadt_flags
& IPSET_FLAG_WITH_FORCEADD
)
441 set
->flags
|= IPSET_CREATE_FLAG_FORCEADD
;
444 for (id
= 0; id
< IPSET_EXT_ID_MAX
; id
++) {
445 if (!add_extension(id
, cadt_flags
, tb
))
447 if (align
< ip_set_extensions
[id
].align
)
448 align
= ip_set_extensions
[id
].align
;
449 len
= ALIGN(len
, ip_set_extensions
[id
].align
);
450 set
->offset
[id
] = len
;
451 set
->extensions
|= ip_set_extensions
[id
].type
;
452 len
+= ip_set_extensions
[id
].len
;
454 return ALIGN(len
, align
);
456 EXPORT_SYMBOL_GPL(ip_set_elem_len
);
459 ip_set_get_extensions(struct ip_set
*set
, struct nlattr
*tb
[],
460 struct ip_set_ext
*ext
)
464 if (unlikely(!ip_set_optattr_netorder(tb
, IPSET_ATTR_TIMEOUT
) ||
465 !ip_set_optattr_netorder(tb
, IPSET_ATTR_PACKETS
) ||
466 !ip_set_optattr_netorder(tb
, IPSET_ATTR_BYTES
) ||
467 !ip_set_optattr_netorder(tb
, IPSET_ATTR_SKBMARK
) ||
468 !ip_set_optattr_netorder(tb
, IPSET_ATTR_SKBPRIO
) ||
469 !ip_set_optattr_netorder(tb
, IPSET_ATTR_SKBQUEUE
)))
470 return -IPSET_ERR_PROTOCOL
;
472 if (tb
[IPSET_ATTR_TIMEOUT
]) {
473 if (!SET_WITH_TIMEOUT(set
))
474 return -IPSET_ERR_TIMEOUT
;
475 ext
->timeout
= ip_set_timeout_uget(tb
[IPSET_ATTR_TIMEOUT
]);
477 if (tb
[IPSET_ATTR_BYTES
] || tb
[IPSET_ATTR_PACKETS
]) {
478 if (!SET_WITH_COUNTER(set
))
479 return -IPSET_ERR_COUNTER
;
480 if (tb
[IPSET_ATTR_BYTES
])
481 ext
->bytes
= be64_to_cpu(nla_get_be64(
482 tb
[IPSET_ATTR_BYTES
]));
483 if (tb
[IPSET_ATTR_PACKETS
])
484 ext
->packets
= be64_to_cpu(nla_get_be64(
485 tb
[IPSET_ATTR_PACKETS
]));
487 if (tb
[IPSET_ATTR_COMMENT
]) {
488 if (!SET_WITH_COMMENT(set
))
489 return -IPSET_ERR_COMMENT
;
490 ext
->comment
= ip_set_comment_uget(tb
[IPSET_ATTR_COMMENT
]);
492 if (tb
[IPSET_ATTR_SKBMARK
]) {
493 if (!SET_WITH_SKBINFO(set
))
494 return -IPSET_ERR_SKBINFO
;
495 fullmark
= be64_to_cpu(nla_get_be64(tb
[IPSET_ATTR_SKBMARK
]));
496 ext
->skbinfo
.skbmark
= fullmark
>> 32;
497 ext
->skbinfo
.skbmarkmask
= fullmark
& 0xffffffff;
499 if (tb
[IPSET_ATTR_SKBPRIO
]) {
500 if (!SET_WITH_SKBINFO(set
))
501 return -IPSET_ERR_SKBINFO
;
502 ext
->skbinfo
.skbprio
=
503 be32_to_cpu(nla_get_be32(tb
[IPSET_ATTR_SKBPRIO
]));
505 if (tb
[IPSET_ATTR_SKBQUEUE
]) {
506 if (!SET_WITH_SKBINFO(set
))
507 return -IPSET_ERR_SKBINFO
;
508 ext
->skbinfo
.skbqueue
=
509 be16_to_cpu(nla_get_be16(tb
[IPSET_ATTR_SKBQUEUE
]));
513 EXPORT_SYMBOL_GPL(ip_set_get_extensions
);
516 ip_set_get_bytes(const struct ip_set_counter
*counter
)
518 return (u64
)atomic64_read(&(counter
)->bytes
);
522 ip_set_get_packets(const struct ip_set_counter
*counter
)
524 return (u64
)atomic64_read(&(counter
)->packets
);
528 ip_set_put_counter(struct sk_buff
*skb
, const struct ip_set_counter
*counter
)
530 return nla_put_net64(skb
, IPSET_ATTR_BYTES
,
531 cpu_to_be64(ip_set_get_bytes(counter
)),
533 nla_put_net64(skb
, IPSET_ATTR_PACKETS
,
534 cpu_to_be64(ip_set_get_packets(counter
)),
539 ip_set_put_skbinfo(struct sk_buff
*skb
, const struct ip_set_skbinfo
*skbinfo
)
541 /* Send nonzero parameters only */
542 return ((skbinfo
->skbmark
|| skbinfo
->skbmarkmask
) &&
543 nla_put_net64(skb
, IPSET_ATTR_SKBMARK
,
544 cpu_to_be64((u64
)skbinfo
->skbmark
<< 32 |
545 skbinfo
->skbmarkmask
),
548 nla_put_net32(skb
, IPSET_ATTR_SKBPRIO
,
549 cpu_to_be32(skbinfo
->skbprio
))) ||
550 (skbinfo
->skbqueue
&&
551 nla_put_net16(skb
, IPSET_ATTR_SKBQUEUE
,
552 cpu_to_be16(skbinfo
->skbqueue
)));
556 ip_set_put_extensions(struct sk_buff
*skb
, const struct ip_set
*set
,
557 const void *e
, bool active
)
559 if (SET_WITH_TIMEOUT(set
)) {
560 unsigned long *timeout
= ext_timeout(e
, set
);
562 if (nla_put_net32(skb
, IPSET_ATTR_TIMEOUT
,
563 htonl(active
? ip_set_timeout_get(timeout
)
567 if (SET_WITH_COUNTER(set
) &&
568 ip_set_put_counter(skb
, ext_counter(e
, set
)))
570 if (SET_WITH_COMMENT(set
) &&
571 ip_set_put_comment(skb
, ext_comment(e
, set
)))
573 if (SET_WITH_SKBINFO(set
) &&
574 ip_set_put_skbinfo(skb
, ext_skbinfo(e
, set
)))
578 EXPORT_SYMBOL_GPL(ip_set_put_extensions
);
581 ip_set_match_counter(u64 counter
, u64 match
, u8 op
)
584 case IPSET_COUNTER_NONE
:
586 case IPSET_COUNTER_EQ
:
587 return counter
== match
;
588 case IPSET_COUNTER_NE
:
589 return counter
!= match
;
590 case IPSET_COUNTER_LT
:
591 return counter
< match
;
592 case IPSET_COUNTER_GT
:
593 return counter
> match
;
599 ip_set_add_bytes(u64 bytes
, struct ip_set_counter
*counter
)
601 atomic64_add((long long)bytes
, &(counter
)->bytes
);
605 ip_set_add_packets(u64 packets
, struct ip_set_counter
*counter
)
607 atomic64_add((long long)packets
, &(counter
)->packets
);
611 ip_set_update_counter(struct ip_set_counter
*counter
,
612 const struct ip_set_ext
*ext
, u32 flags
)
614 if (ext
->packets
!= ULLONG_MAX
&&
615 !(flags
& IPSET_FLAG_SKIP_COUNTER_UPDATE
)) {
616 ip_set_add_bytes(ext
->bytes
, counter
);
617 ip_set_add_packets(ext
->packets
, counter
);
622 ip_set_get_skbinfo(struct ip_set_skbinfo
*skbinfo
,
623 const struct ip_set_ext
*ext
,
624 struct ip_set_ext
*mext
, u32 flags
)
626 mext
->skbinfo
= *skbinfo
;
630 ip_set_match_extensions(struct ip_set
*set
, const struct ip_set_ext
*ext
,
631 struct ip_set_ext
*mext
, u32 flags
, void *data
)
633 if (SET_WITH_TIMEOUT(set
) &&
634 ip_set_timeout_expired(ext_timeout(data
, set
)))
636 if (SET_WITH_COUNTER(set
)) {
637 struct ip_set_counter
*counter
= ext_counter(data
, set
);
639 ip_set_update_counter(counter
, ext
, flags
);
641 if (flags
& IPSET_FLAG_MATCH_COUNTERS
&&
642 !(ip_set_match_counter(ip_set_get_packets(counter
),
643 mext
->packets
, mext
->packets_op
) &&
644 ip_set_match_counter(ip_set_get_bytes(counter
),
645 mext
->bytes
, mext
->bytes_op
)))
648 if (SET_WITH_SKBINFO(set
))
649 ip_set_get_skbinfo(ext_skbinfo(data
, set
),
653 EXPORT_SYMBOL_GPL(ip_set_match_extensions
);
/* Creating/destroying/renaming/swapping affect the existence and
 * the properties of a set. All of these can be executed from userspace
 * only and serialized by the nfnl mutex indirectly from nfnetlink.
 *
 * Sets are identified by their index in ip_set_list and the index
 * is used by the external references (set/SET netfilter modules).
 *
 * The set behind an index may change by swapping only, from userspace.
 */
666 __ip_set_get(struct ip_set
*set
)
668 write_lock_bh(&ip_set_ref_lock
);
670 write_unlock_bh(&ip_set_ref_lock
);
674 __ip_set_put(struct ip_set
*set
)
676 write_lock_bh(&ip_set_ref_lock
);
677 BUG_ON(set
->ref
== 0);
679 write_unlock_bh(&ip_set_ref_lock
);
682 /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
683 * a separate reference counter
686 __ip_set_put_netlink(struct ip_set
*set
)
688 write_lock_bh(&ip_set_ref_lock
);
689 BUG_ON(set
->ref_netlink
== 0);
691 write_unlock_bh(&ip_set_ref_lock
);
/* Add, del and test set entries from kernel.
 *
 * The set behind the index must exist and must be referenced
 * so it can't be destroyed (or changed) under our foot.
 */
700 static struct ip_set
*
701 ip_set_rcu_get(struct net
*net
, ip_set_id_t index
)
704 struct ip_set_net
*inst
= ip_set_pernet(net
);
707 /* ip_set_list itself needs to be protected */
708 set
= rcu_dereference(inst
->ip_set_list
)[index
];
715 ip_set_lock(struct ip_set
*set
)
717 if (!set
->variant
->region_lock
)
718 spin_lock_bh(&set
->lock
);
722 ip_set_unlock(struct ip_set
*set
)
724 if (!set
->variant
->region_lock
)
725 spin_unlock_bh(&set
->lock
);
729 ip_set_test(ip_set_id_t index
, const struct sk_buff
*skb
,
730 const struct xt_action_param
*par
, struct ip_set_adt_opt
*opt
)
732 struct ip_set
*set
= ip_set_rcu_get(xt_net(par
), index
);
736 pr_debug("set %s, index %u\n", set
->name
, index
);
738 if (opt
->dim
< set
->type
->dimension
||
739 !(opt
->family
== set
->family
|| set
->family
== NFPROTO_UNSPEC
))
743 ret
= set
->variant
->kadt(set
, skb
, par
, IPSET_TEST
, opt
);
744 rcu_read_unlock_bh();
746 if (ret
== -EAGAIN
) {
747 /* Type requests element to be completed */
748 pr_debug("element must be completed, ADD is triggered\n");
750 set
->variant
->kadt(set
, skb
, par
, IPSET_ADD
, opt
);
754 /* --return-nomatch: invert matched element */
755 if ((opt
->cmdflags
& IPSET_FLAG_RETURN_NOMATCH
) &&
756 (set
->type
->features
& IPSET_TYPE_NOMATCH
) &&
757 (ret
> 0 || ret
== -ENOTEMPTY
))
761 /* Convert error codes to nomatch */
762 return (ret
< 0 ? 0 : ret
);
764 EXPORT_SYMBOL_GPL(ip_set_test
);
767 ip_set_add(ip_set_id_t index
, const struct sk_buff
*skb
,
768 const struct xt_action_param
*par
, struct ip_set_adt_opt
*opt
)
770 struct ip_set
*set
= ip_set_rcu_get(xt_net(par
), index
);
774 pr_debug("set %s, index %u\n", set
->name
, index
);
776 if (opt
->dim
< set
->type
->dimension
||
777 !(opt
->family
== set
->family
|| set
->family
== NFPROTO_UNSPEC
))
778 return -IPSET_ERR_TYPE_MISMATCH
;
781 ret
= set
->variant
->kadt(set
, skb
, par
, IPSET_ADD
, opt
);
786 EXPORT_SYMBOL_GPL(ip_set_add
);
789 ip_set_del(ip_set_id_t index
, const struct sk_buff
*skb
,
790 const struct xt_action_param
*par
, struct ip_set_adt_opt
*opt
)
792 struct ip_set
*set
= ip_set_rcu_get(xt_net(par
), index
);
796 pr_debug("set %s, index %u\n", set
->name
, index
);
798 if (opt
->dim
< set
->type
->dimension
||
799 !(opt
->family
== set
->family
|| set
->family
== NFPROTO_UNSPEC
))
800 return -IPSET_ERR_TYPE_MISMATCH
;
803 ret
= set
->variant
->kadt(set
, skb
, par
, IPSET_DEL
, opt
);
808 EXPORT_SYMBOL_GPL(ip_set_del
);
/* Find set by name, reference it once. The reference makes sure the
 * thing pointed to, does not go away under our feet.
 */
815 ip_set_get_byname(struct net
*net
, const char *name
, struct ip_set
**set
)
817 ip_set_id_t i
, index
= IPSET_INVALID_ID
;
819 struct ip_set_net
*inst
= ip_set_pernet(net
);
822 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
823 s
= rcu_dereference(inst
->ip_set_list
)[i
];
824 if (s
&& STRNCMP(s
->name
, name
)) {
835 EXPORT_SYMBOL_GPL(ip_set_get_byname
);
/* If the given set pointer points to a valid set, decrement
 * reference count by 1. The caller shall not assume the index
 * to be valid, after calling this function.
 */
844 __ip_set_put_byindex(struct ip_set_net
*inst
, ip_set_id_t index
)
849 set
= rcu_dereference(inst
->ip_set_list
)[index
];
856 ip_set_put_byindex(struct net
*net
, ip_set_id_t index
)
858 struct ip_set_net
*inst
= ip_set_pernet(net
);
860 __ip_set_put_byindex(inst
, index
);
862 EXPORT_SYMBOL_GPL(ip_set_put_byindex
);
/* Get the name of a set behind a set index.
 * Set itself is protected by RCU, but its name isn't: to protect against
 * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
 * name.
 */
870 ip_set_name_byindex(struct net
*net
, ip_set_id_t index
, char *name
)
872 struct ip_set
*set
= ip_set_rcu_get(net
, index
);
876 read_lock_bh(&ip_set_ref_lock
);
877 strncpy(name
, set
->name
, IPSET_MAXNAMELEN
);
878 read_unlock_bh(&ip_set_ref_lock
);
880 EXPORT_SYMBOL_GPL(ip_set_name_byindex
);
882 /* Routines to call by external subsystems, which do not
883 * call nfnl_lock for us.
886 /* Find set by index, reference it once. The reference makes sure the
887 * thing pointed to, does not go away under our feet.
889 * The nfnl mutex is used in the function.
892 ip_set_nfnl_get_byindex(struct net
*net
, ip_set_id_t index
)
895 struct ip_set_net
*inst
= ip_set_pernet(net
);
897 if (index
>= inst
->ip_set_max
)
898 return IPSET_INVALID_ID
;
900 nfnl_lock(NFNL_SUBSYS_IPSET
);
901 set
= ip_set(inst
, index
);
905 index
= IPSET_INVALID_ID
;
906 nfnl_unlock(NFNL_SUBSYS_IPSET
);
910 EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex
);
912 /* If the given set pointer points to a valid set, decrement
913 * reference count by 1. The caller shall not assume the index
914 * to be valid, after calling this function.
916 * The nfnl mutex is used in the function.
919 ip_set_nfnl_put(struct net
*net
, ip_set_id_t index
)
922 struct ip_set_net
*inst
= ip_set_pernet(net
);
924 nfnl_lock(NFNL_SUBSYS_IPSET
);
925 if (!inst
->is_deleted
) { /* already deleted from ip_set_net_exit() */
926 set
= ip_set(inst
, index
);
930 nfnl_unlock(NFNL_SUBSYS_IPSET
);
932 EXPORT_SYMBOL_GPL(ip_set_nfnl_put
);
934 /* Communication protocol with userspace over netlink.
936 * The commands are serialized by the nfnl mutex.
939 static inline u8
protocol(const struct nlattr
* const tb
[])
941 return nla_get_u8(tb
[IPSET_ATTR_PROTOCOL
]);
945 protocol_failed(const struct nlattr
* const tb
[])
947 return !tb
[IPSET_ATTR_PROTOCOL
] || protocol(tb
) != IPSET_PROTOCOL
;
951 protocol_min_failed(const struct nlattr
* const tb
[])
953 return !tb
[IPSET_ATTR_PROTOCOL
] || protocol(tb
) < IPSET_PROTOCOL_MIN
;
957 flag_exist(const struct nlmsghdr
*nlh
)
959 return nlh
->nlmsg_flags
& NLM_F_EXCL
? 0 : IPSET_FLAG_EXIST
;
962 static struct nlmsghdr
*
963 start_msg(struct sk_buff
*skb
, u32 portid
, u32 seq
, unsigned int flags
,
966 struct nlmsghdr
*nlh
;
967 struct nfgenmsg
*nfmsg
;
969 nlh
= nlmsg_put(skb
, portid
, seq
, nfnl_msg_type(NFNL_SUBSYS_IPSET
, cmd
),
970 sizeof(*nfmsg
), flags
);
974 nfmsg
= nlmsg_data(nlh
);
975 nfmsg
->nfgen_family
= NFPROTO_IPV4
;
976 nfmsg
->version
= NFNETLINK_V0
;
984 static const struct nla_policy ip_set_create_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
985 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
986 [IPSET_ATTR_SETNAME
] = { .type
= NLA_NUL_STRING
,
987 .len
= IPSET_MAXNAMELEN
- 1 },
988 [IPSET_ATTR_TYPENAME
] = { .type
= NLA_NUL_STRING
,
989 .len
= IPSET_MAXNAMELEN
- 1},
990 [IPSET_ATTR_REVISION
] = { .type
= NLA_U8
},
991 [IPSET_ATTR_FAMILY
] = { .type
= NLA_U8
},
992 [IPSET_ATTR_DATA
] = { .type
= NLA_NESTED
},
995 static struct ip_set
*
996 find_set_and_id(struct ip_set_net
*inst
, const char *name
, ip_set_id_t
*id
)
998 struct ip_set
*set
= NULL
;
1001 *id
= IPSET_INVALID_ID
;
1002 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
1003 set
= ip_set(inst
, i
);
1004 if (set
&& STRNCMP(set
->name
, name
)) {
1009 return (*id
== IPSET_INVALID_ID
? NULL
: set
);
1012 static inline struct ip_set
*
1013 find_set(struct ip_set_net
*inst
, const char *name
)
1017 return find_set_and_id(inst
, name
, &id
);
1021 find_free_id(struct ip_set_net
*inst
, const char *name
, ip_set_id_t
*index
,
1022 struct ip_set
**set
)
1027 *index
= IPSET_INVALID_ID
;
1028 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
1029 s
= ip_set(inst
, i
);
1031 if (*index
== IPSET_INVALID_ID
)
1033 } else if (STRNCMP(name
, s
->name
)) {
1039 if (*index
== IPSET_INVALID_ID
)
1040 /* No free slot remained */
1041 return -IPSET_ERR_MAX_SETS
;
1045 static int ip_set_none(struct net
*net
, struct sock
*ctnl
, struct sk_buff
*skb
,
1046 const struct nlmsghdr
*nlh
,
1047 const struct nlattr
* const attr
[],
1048 struct netlink_ext_ack
*extack
)
1053 static int ip_set_create(struct net
*net
, struct sock
*ctnl
,
1054 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
1055 const struct nlattr
* const attr
[],
1056 struct netlink_ext_ack
*extack
)
1058 struct ip_set_net
*inst
= ip_set_pernet(net
);
1059 struct ip_set
*set
, *clash
= NULL
;
1060 ip_set_id_t index
= IPSET_INVALID_ID
;
1061 struct nlattr
*tb
[IPSET_ATTR_CREATE_MAX
+ 1] = {};
1062 const char *name
, *typename
;
1063 u8 family
, revision
;
1064 u32 flags
= flag_exist(nlh
);
1067 if (unlikely(protocol_min_failed(attr
) ||
1068 !attr
[IPSET_ATTR_SETNAME
] ||
1069 !attr
[IPSET_ATTR_TYPENAME
] ||
1070 !attr
[IPSET_ATTR_REVISION
] ||
1071 !attr
[IPSET_ATTR_FAMILY
] ||
1072 (attr
[IPSET_ATTR_DATA
] &&
1073 !flag_nested(attr
[IPSET_ATTR_DATA
]))))
1074 return -IPSET_ERR_PROTOCOL
;
1076 name
= nla_data(attr
[IPSET_ATTR_SETNAME
]);
1077 typename
= nla_data(attr
[IPSET_ATTR_TYPENAME
]);
1078 family
= nla_get_u8(attr
[IPSET_ATTR_FAMILY
]);
1079 revision
= nla_get_u8(attr
[IPSET_ATTR_REVISION
]);
1080 pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
1081 name
, typename
, family_name(family
), revision
);
1083 /* First, and without any locks, allocate and initialize
1084 * a normal base set structure.
1086 set
= kzalloc(sizeof(*set
), GFP_KERNEL
);
1089 spin_lock_init(&set
->lock
);
1090 strlcpy(set
->name
, name
, IPSET_MAXNAMELEN
);
1091 set
->family
= family
;
1092 set
->revision
= revision
;
1094 /* Next, check that we know the type, and take
1095 * a reference on the type, to make sure it stays available
1096 * while constructing our new set.
1098 * After referencing the type, we try to create the type
1099 * specific part of the set without holding any locks.
1101 ret
= find_set_type_get(typename
, family
, revision
, &set
->type
);
1105 /* Without holding any locks, create private part. */
1106 if (attr
[IPSET_ATTR_DATA
] &&
1107 nla_parse_nested(tb
, IPSET_ATTR_CREATE_MAX
, attr
[IPSET_ATTR_DATA
],
1108 set
->type
->create_policy
, NULL
)) {
1109 ret
= -IPSET_ERR_PROTOCOL
;
1112 /* Set create flags depending on the type revision */
1113 set
->flags
|= set
->type
->create_flags
[revision
];
1115 ret
= set
->type
->create(net
, set
, tb
, flags
);
1119 /* BTW, ret==0 here. */
1121 /* Here, we have a valid, constructed set and we are protected
1122 * by the nfnl mutex. Find the first free index in ip_set_list
1123 * and check clashing.
1125 ret
= find_free_id(inst
, set
->name
, &index
, &clash
);
1126 if (ret
== -EEXIST
) {
1127 /* If this is the same set and requested, ignore error */
1128 if ((flags
& IPSET_FLAG_EXIST
) &&
1129 STRNCMP(set
->type
->name
, clash
->type
->name
) &&
1130 set
->type
->family
== clash
->type
->family
&&
1131 set
->type
->revision_min
== clash
->type
->revision_min
&&
1132 set
->type
->revision_max
== clash
->type
->revision_max
&&
1133 set
->variant
->same_set(set
, clash
))
1136 } else if (ret
== -IPSET_ERR_MAX_SETS
) {
1137 struct ip_set
**list
, **tmp
;
1138 ip_set_id_t i
= inst
->ip_set_max
+ IP_SET_INC
;
1140 if (i
< inst
->ip_set_max
|| i
== IPSET_INVALID_ID
)
1144 list
= kvcalloc(i
, sizeof(struct ip_set
*), GFP_KERNEL
);
1147 /* nfnl mutex is held, both lists are valid */
1148 tmp
= ip_set_dereference(inst
->ip_set_list
);
1149 memcpy(list
, tmp
, sizeof(struct ip_set
*) * inst
->ip_set_max
);
1150 rcu_assign_pointer(inst
->ip_set_list
, list
);
1151 /* Make sure all current packets have passed through */
1154 index
= inst
->ip_set_max
;
1155 inst
->ip_set_max
= i
;
1162 /* Finally! Add our shiny new set to the list, and be done. */
1163 pr_debug("create: '%s' created with index %u!\n", set
->name
, index
);
1164 ip_set(inst
, index
) = set
;
1169 set
->variant
->destroy(set
);
1171 module_put(set
->type
->me
);
1179 static const struct nla_policy
1180 ip_set_setname_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
1181 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
1182 [IPSET_ATTR_SETNAME
] = { .type
= NLA_NUL_STRING
,
1183 .len
= IPSET_MAXNAMELEN
- 1 },
1187 ip_set_destroy_set(struct ip_set
*set
)
1189 pr_debug("set: %s\n", set
->name
);
1191 /* Must call it without holding any lock */
1192 set
->variant
->destroy(set
);
1193 module_put(set
->type
->me
);
1197 static int ip_set_destroy(struct net
*net
, struct sock
*ctnl
,
1198 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
1199 const struct nlattr
* const attr
[],
1200 struct netlink_ext_ack
*extack
)
1202 struct ip_set_net
*inst
= ip_set_pernet(net
);
1207 if (unlikely(protocol_min_failed(attr
)))
1208 return -IPSET_ERR_PROTOCOL
;
1210 /* Must wait for flush to be really finished in list:set */
1213 /* Commands are serialized and references are
1214 * protected by the ip_set_ref_lock.
1215 * External systems (i.e. xt_set) must call
1216 * ip_set_put|get_nfnl_* functions, that way we
1217 * can safely check references here.
1219 * list:set timer can only decrement the reference
1220 * counter, so if it's already zero, we can proceed
1221 * without holding the lock.
1223 read_lock_bh(&ip_set_ref_lock
);
1224 if (!attr
[IPSET_ATTR_SETNAME
]) {
1225 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
1226 s
= ip_set(inst
, i
);
1227 if (s
&& (s
->ref
|| s
->ref_netlink
)) {
1228 ret
= -IPSET_ERR_BUSY
;
1232 inst
->is_destroyed
= true;
1233 read_unlock_bh(&ip_set_ref_lock
);
1234 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
1235 s
= ip_set(inst
, i
);
1237 ip_set(inst
, i
) = NULL
;
1238 ip_set_destroy_set(s
);
1241 /* Modified by ip_set_destroy() only, which is serialized */
1242 inst
->is_destroyed
= false;
1244 u32 flags
= flag_exist(nlh
);
1245 s
= find_set_and_id(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]),
1248 if (!(flags
& IPSET_FLAG_EXIST
))
1251 } else if (s
->ref
|| s
->ref_netlink
) {
1252 ret
= -IPSET_ERR_BUSY
;
1255 ip_set(inst
, i
) = NULL
;
1256 read_unlock_bh(&ip_set_ref_lock
);
1258 ip_set_destroy_set(s
);
1262 read_unlock_bh(&ip_set_ref_lock
);
/* ip_set_flush_set - empty a single set via its type-specific flush op.
 * NOTE(review): extraction dropped lines around the flush call (presumably
 * the per-set lock acquire/release) — confirm against pristine source.
 */
1269 ip_set_flush_set(struct ip_set
*set
)
1271 pr_debug("set: %s\n", set
->name
);
1274 set
->variant
->flush(set
);
/* IPSET_CMD_FLUSH handler: without IPSET_ATTR_SETNAME flush every
 * existing set, otherwise look up the named set and flush only it.
 * NOTE(review): extraction dropped local declarations (i, s), the
 * not-found error return and braces — restore from pristine source.
 */
1278 static int ip_set_flush(struct net
*net
, struct sock
*ctnl
, struct sk_buff
*skb
,
1279 const struct nlmsghdr
*nlh
,
1280 const struct nlattr
* const attr
[],
1281 struct netlink_ext_ack
*extack
)
1283 struct ip_set_net
*inst
= ip_set_pernet(net
);
1287 if (unlikely(protocol_min_failed(attr
)))
1288 return -IPSET_ERR_PROTOCOL
;
1290 if (!attr
[IPSET_ATTR_SETNAME
]) {
1291 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
1292 s
= ip_set(inst
, i
);
1294 ip_set_flush_set(s
);
1297 s
= find_set(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]));
1301 ip_set_flush_set(s
);
/* Netlink attribute policy for two-name commands (RENAME, SWAP):
 * protocol byte plus two NUL-terminated set names bounded to
 * IPSET_MAXNAMELEN - 1 payload bytes.
 */
1309 static const struct nla_policy
1310 ip_set_setname2_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
1311 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
1312 [IPSET_ATTR_SETNAME
] = { .type
= NLA_NUL_STRING
,
1313 .len
= IPSET_MAXNAMELEN
- 1 },
1314 [IPSET_ATTR_SETNAME2
] = { .type
= NLA_NUL_STRING
,
1315 .len
= IPSET_MAXNAMELEN
- 1 },
/* IPSET_CMD_RENAME handler: rename SETNAME to SETNAME2.
 * Rejects a referenced set (-IPSET_ERR_REFERENCED) and a name collision
 * (-IPSET_ERR_EXIST_SETNAME2); the whole check-and-rename runs under
 * write_lock_bh(&ip_set_ref_lock) so it cannot race with swap/dump.
 * strncpy is safe here: policy guarantees name2 is NUL-terminated and
 * shorter than IPSET_MAXNAMELEN.
 * NOTE(review): extraction dropped locals (i, ret, name2), the not-found
 * return and the out label — restore from pristine source.
 */
1318 static int ip_set_rename(struct net
*net
, struct sock
*ctnl
,
1319 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
1320 const struct nlattr
* const attr
[],
1321 struct netlink_ext_ack
*extack
)
1323 struct ip_set_net
*inst
= ip_set_pernet(net
);
1324 struct ip_set
*set
, *s
;
1329 if (unlikely(protocol_min_failed(attr
) ||
1330 !attr
[IPSET_ATTR_SETNAME
] ||
1331 !attr
[IPSET_ATTR_SETNAME2
]))
1332 return -IPSET_ERR_PROTOCOL
;
1334 set
= find_set(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]));
1338 write_lock_bh(&ip_set_ref_lock
);
1339 if (set
->ref
!= 0 || set
->ref_netlink
!= 0) {
1340 ret
= -IPSET_ERR_REFERENCED
;
1344 name2
= nla_data(attr
[IPSET_ATTR_SETNAME2
]);
1345 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
1346 s
= ip_set(inst
, i
);
1347 if (s
&& STRNCMP(s
->name
, name2
)) {
1348 ret
= -IPSET_ERR_EXIST_SETNAME2
;
1352 strncpy(set
->name
, name2
, IPSET_MAXNAMELEN
);
1355 write_unlock_bh(&ip_set_ref_lock
);
1359 /* Swap two sets so that name/index points to the other.
1360 * References and set names are also swapped.
1362 * The commands are serialized by the nfnl mutex and references are
1363 * protected by the ip_set_ref_lock. The kernel interfaces
1364 * do not hold the mutex but the pointer settings are atomic
1365 * so the ip_set_list always contains valid pointers to the sets.
/* IPSET_CMD_SWAP handler. Both sets must exist, must have identical type
 * features and family (prevents loops with list:set), and neither may be
 * dumped over netlink (ref_netlink). Under ip_set_ref_lock the names, the
 * kernel refcounts and the two ip_set_list slots are exchanged.
 * NOTE(review): extraction dropped the not-found returns for from/to and
 * the -EBUSY return after the ref_netlink check — restore from source.
 */
1368 static int ip_set_swap(struct net
*net
, struct sock
*ctnl
, struct sk_buff
*skb
,
1369 const struct nlmsghdr
*nlh
,
1370 const struct nlattr
* const attr
[],
1371 struct netlink_ext_ack
*extack
)
1373 struct ip_set_net
*inst
= ip_set_pernet(net
);
1374 struct ip_set
*from
, *to
;
1375 ip_set_id_t from_id
, to_id
;
1376 char from_name
[IPSET_MAXNAMELEN
];
1378 if (unlikely(protocol_min_failed(attr
) ||
1379 !attr
[IPSET_ATTR_SETNAME
] ||
1380 !attr
[IPSET_ATTR_SETNAME2
]))
1381 return -IPSET_ERR_PROTOCOL
;
1383 from
= find_set_and_id(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]),
1388 to
= find_set_and_id(inst
, nla_data(attr
[IPSET_ATTR_SETNAME2
]),
1391 return -IPSET_ERR_EXIST_SETNAME2
;
1393 /* Features must not change.
1394 * Not an artifical restriction anymore, as we must prevent
1395 * possible loops created by swapping in setlist type of sets.
1397 if (!(from
->type
->features
== to
->type
->features
&&
1398 from
->family
== to
->family
))
1399 return -IPSET_ERR_TYPE_MISMATCH
;
1401 write_lock_bh(&ip_set_ref_lock
);
1403 if (from
->ref_netlink
|| to
->ref_netlink
) {
1404 write_unlock_bh(&ip_set_ref_lock
);
/* Three-way name exchange via the on-stack from_name buffer. */
1408 strncpy(from_name
, from
->name
, IPSET_MAXNAMELEN
);
1409 strncpy(from
->name
, to
->name
, IPSET_MAXNAMELEN
);
1410 strncpy(to
->name
, from_name
, IPSET_MAXNAMELEN
);
1412 swap(from
->ref
, to
->ref
);
1413 ip_set(inst
, from_id
) = to
;
1414 ip_set(inst
, to_id
) = from
;
1415 write_unlock_bh(&ip_set_ref_lock
);
1420 /* List/save set data */
/* cb->args[IPSET_CB_DUMP] packs the dump state: low 16 bits = dump type
 * (DUMP_ONE/ALL/LAST), high 16 bits = userspace IPSET_FLAG_LIST_* flags. */
1427 #define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF)
1428 #define DUMP_FLAGS(arg) (((u32)(arg)) >> 16)
/* ip_set_put_flags - emit the set's timeout and cadt feature flags
 * (counters, comment, skbinfo, forceadd) as netlink attributes on skb.
 * Returns the nla_put_net32() result for IPSET_ATTR_CADT_FLAGS.
 * NOTE(review): extraction dropped the cadt_flags declaration and the
 * early-return when no flags are set — restore from pristine source.
 */
1431 ip_set_put_flags(struct sk_buff
*skb
, struct ip_set
*set
)
1435 if (SET_WITH_TIMEOUT(set
))
1436 if (unlikely(nla_put_net32(skb
, IPSET_ATTR_TIMEOUT
,
1437 htonl(set
->timeout
))))
1439 if (SET_WITH_COUNTER(set
))
1440 cadt_flags
|= IPSET_FLAG_WITH_COUNTERS
;
1441 if (SET_WITH_COMMENT(set
))
1442 cadt_flags
|= IPSET_FLAG_WITH_COMMENT
;
1443 if (SET_WITH_SKBINFO(set
))
1444 cadt_flags
|= IPSET_FLAG_WITH_SKBINFO
;
1445 if (SET_WITH_FORCEADD(set
))
1446 cadt_flags
|= IPSET_FLAG_WITH_FORCEADD
;
1450 return nla_put_net32(skb
, IPSET_ATTR_CADT_FLAGS
, htonl(cadt_flags
));
1452 EXPORT_SYMBOL_GPL(ip_set_put_flags
);
/* ip_set_dump_done - netlink dump .done callback.
 * If a dump was in progress (cb->args[IPSET_CB_ARG0] set), recover the
 * pernet instance and set index stashed in cb->args, tell the set's uref
 * op the dump is over, and drop the netlink reference taken at dump start.
 * Uses ip_set_ref_netlink (raw dereference): the ref_netlink count itself
 * keeps the slot valid here.
 */
1455 ip_set_dump_done(struct netlink_callback
*cb
)
1457 if (cb
->args
[IPSET_CB_ARG0
]) {
1458 struct ip_set_net
*inst
=
1459 (struct ip_set_net
*)cb
->args
[IPSET_CB_NET
];
1460 ip_set_id_t index
= (ip_set_id_t
)cb
->args
[IPSET_CB_INDEX
];
1461 struct ip_set
*set
= ip_set_ref_netlink(inst
, index
);
1463 if (set
->variant
->uref
)
1464 set
->variant
->uref(set
, cb
, false);
1465 pr_debug("release set %s\n", set
->name
);
1466 __ip_set_put_netlink(set
);
/* dump_attrs - debug-only helper: pr_debug each attribute (type and
 * length) following the nfgenmsg header of a request. */
1472 dump_attrs(struct nlmsghdr
*nlh
)
1474 const struct nlattr
*attr
;
1477 pr_debug("dump nlmsg\n");
1478 nlmsg_for_each_attr(attr
, nlh
, sizeof(struct nfgenmsg
), rem
) {
1479 pr_debug("type: %u, len %u\n", nla_type(attr
), attr
->nla_len
);
/* Policy for LIST/SAVE dump requests: optional set name plus a u32 of
 * IPSET_FLAG_LIST_* modifiers. */
1483 static const struct nla_policy
1484 ip_set_dump_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
1485 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
1486 [IPSET_ATTR_SETNAME
] = { .type
= NLA_NUL_STRING
,
1487 .len
= IPSET_MAXNAMELEN
- 1 },
1488 [IPSET_ATTR_FLAGS
] = { .type
= NLA_U32
},
/* ip_set_dump_start - netlink dump .start callback.
 * Re-parses the request attributes (the dump machinery does not pass the
 * parsed attr[] through), then primes cb->args: the protocol byte, the
 * pernet instance, and the packed dump type/flags word. A SETNAME
 * attribute selects DUMP_ONE with the set's index; otherwise DUMP_ALL.
 * On error, an ACK/errmsg must be produced manually here because the
 * normal nfnetlink path is bypassed for dumps.
 * NOTE(review): extraction dropped locals (set, index, dump_type, ret)
 * and several error branches — restore from pristine source.
 */
1492 ip_set_dump_start(struct netlink_callback
*cb
)
1494 struct nlmsghdr
*nlh
= nlmsg_hdr(cb
->skb
);
1495 int min_len
= nlmsg_total_size(sizeof(struct nfgenmsg
));
1496 struct nlattr
*cda
[IPSET_ATTR_CMD_MAX
+ 1];
1497 struct nlattr
*attr
= (void *)nlh
+ min_len
;
1498 struct sk_buff
*skb
= cb
->skb
;
1499 struct ip_set_net
*inst
= ip_set_pernet(sock_net(skb
->sk
));
1503 ret
= nla_parse(cda
, IPSET_ATTR_CMD_MAX
, attr
,
1504 nlh
->nlmsg_len
- min_len
,
1505 ip_set_dump_policy
, NULL
);
1509 cb
->args
[IPSET_CB_PROTO
] = nla_get_u8(cda
[IPSET_ATTR_PROTOCOL
]);
1510 if (cda
[IPSET_ATTR_SETNAME
]) {
1514 set
= find_set_and_id(inst
, nla_data(cda
[IPSET_ATTR_SETNAME
]),
1520 dump_type
= DUMP_ONE
;
1521 cb
->args
[IPSET_CB_INDEX
] = index
;
1523 dump_type
= DUMP_ALL
;
1526 if (cda
[IPSET_ATTR_FLAGS
]) {
1527 u32 f
= ip_set_get_h32(cda
[IPSET_ATTR_FLAGS
]);
/* Pack userspace flags into the high 16 bits (see DUMP_FLAGS). */
1529 dump_type
|= (f
<< 16);
1531 cb
->args
[IPSET_CB_NET
] = (unsigned long)inst
;
1532 cb
->args
[IPSET_CB_DUMP
] = dump_type
;
1537 /* We have to create and send the error message manually :-( */
1538 if (nlh
->nlmsg_flags
& NLM_F_ACK
) {
1539 netlink_ack(cb
->skb
, nlh
, ret
, NULL
);
/* ip_set_dump_do - netlink dump .dump callback; the resumable dump state
 * machine. State lives entirely in cb->args: INDEX is the current slot,
 * DUMP packs type+flags, ARG0 is the per-set progress marker (0 = not
 * started). Non-DUMP_LAST sets are emitted first, IPSET_DUMP_LAST types
 * (list:set) in a second DUMP_LAST pass, so unions are dumped after their
 * members. Per set: take a netlink reference under ip_set_ref_lock (so
 * destroy cannot race, and inst->is_destroyed aborts the dump), emit the
 * header via set->variant->head, then the members via set->variant->list,
 * releasing the reference when the set is finished or on error.
 * Returns the skb length so netlink keeps calling back, or <0 on error.
 * NOTE(review): extraction dropped many lines (ret/is_destroyed decls,
 * gotos, labels, case labels, braces) — this body is not compilable as-is;
 * restore from pristine source.
 */
1545 ip_set_dump_do(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1547 ip_set_id_t index
= IPSET_INVALID_ID
, max
;
1548 struct ip_set
*set
= NULL
;
1549 struct nlmsghdr
*nlh
= NULL
;
1550 unsigned int flags
= NETLINK_CB(cb
->skb
).portid
? NLM_F_MULTI
: 0;
1551 struct ip_set_net
*inst
= ip_set_pernet(sock_net(skb
->sk
));
1552 u32 dump_type
, dump_flags
;
1556 if (!cb
->args
[IPSET_CB_DUMP
])
1559 if (cb
->args
[IPSET_CB_INDEX
] >= inst
->ip_set_max
)
1562 dump_type
= DUMP_TYPE(cb
->args
[IPSET_CB_DUMP
]);
1563 dump_flags
= DUMP_FLAGS(cb
->args
[IPSET_CB_DUMP
]);
1564 max
= dump_type
== DUMP_ONE
? cb
->args
[IPSET_CB_INDEX
] + 1
1567 pr_debug("dump type, flag: %u %u index: %ld\n",
1568 dump_type
, dump_flags
, cb
->args
[IPSET_CB_INDEX
]);
1569 for (; cb
->args
[IPSET_CB_INDEX
] < max
; cb
->args
[IPSET_CB_INDEX
]++) {
1570 index
= (ip_set_id_t
)cb
->args
[IPSET_CB_INDEX
];
1571 write_lock_bh(&ip_set_ref_lock
);
1572 set
= ip_set(inst
, index
);
1573 is_destroyed
= inst
->is_destroyed
;
1574 if (!set
|| is_destroyed
) {
1575 write_unlock_bh(&ip_set_ref_lock
);
1576 if (dump_type
== DUMP_ONE
) {
1581 /* All sets are just being destroyed */
1587 /* When dumping all sets, we must dump "sorted"
1588 * so that lists (unions of sets) are dumped last.
/* Skip sets that belong to the other pass (DUMP_ALL vs DUMP_LAST). */
1590 if (dump_type
!= DUMP_ONE
&&
1591 ((dump_type
== DUMP_ALL
) ==
1592 !!(set
->type
->features
& IPSET_DUMP_LAST
))) {
1593 write_unlock_bh(&ip_set_ref_lock
);
1596 pr_debug("List set: %s\n", set
->name
);
1597 if (!cb
->args
[IPSET_CB_ARG0
]) {
1598 /* Start listing: make sure set won't be destroyed */
1599 pr_debug("reference set\n");
1602 write_unlock_bh(&ip_set_ref_lock
);
1603 nlh
= start_msg(skb
, NETLINK_CB(cb
->skb
).portid
,
1604 cb
->nlh
->nlmsg_seq
, flags
,
1608 goto release_refcount
;
1610 if (nla_put_u8(skb
, IPSET_ATTR_PROTOCOL
,
1611 cb
->args
[IPSET_CB_PROTO
]) ||
1612 nla_put_string(skb
, IPSET_ATTR_SETNAME
, set
->name
))
1613 goto nla_put_failure
;
1614 if (dump_flags
& IPSET_FLAG_LIST_SETNAME
)
/* Dispatch on per-set progress: 0 = header not yet sent, then members. */
1616 switch (cb
->args
[IPSET_CB_ARG0
]) {
1618 /* Core header data */
1619 if (nla_put_string(skb
, IPSET_ATTR_TYPENAME
,
1621 nla_put_u8(skb
, IPSET_ATTR_FAMILY
,
1623 nla_put_u8(skb
, IPSET_ATTR_REVISION
,
1625 goto nla_put_failure
;
1626 if (cb
->args
[IPSET_CB_PROTO
] > IPSET_PROTOCOL_MIN
&&
1627 nla_put_net16(skb
, IPSET_ATTR_INDEX
, htons(index
)))
1628 goto nla_put_failure
;
1629 ret
= set
->variant
->head(set
, skb
);
1631 goto release_refcount
;
1632 if (dump_flags
& IPSET_FLAG_LIST_HEADER
)
1634 if (set
->variant
->uref
)
1635 set
->variant
->uref(set
, cb
, true);
1638 ret
= set
->variant
->list(set
, skb
, cb
);
1639 if (!cb
->args
[IPSET_CB_ARG0
])
1640 /* Set is done, proceed with next one */
1642 goto release_refcount
;
1645 /* If we dump all sets, continue with dumping last ones */
1646 if (dump_type
== DUMP_ALL
) {
1647 dump_type
= DUMP_LAST
;
1648 cb
->args
[IPSET_CB_DUMP
] = dump_type
| (dump_flags
<< 16);
1649 cb
->args
[IPSET_CB_INDEX
] = 0;
1650 if (set
&& set
->variant
->uref
)
1651 set
->variant
->uref(set
, cb
, false);
1659 if (dump_type
== DUMP_ONE
)
1660 cb
->args
[IPSET_CB_INDEX
] = IPSET_INVALID_ID
;
1662 cb
->args
[IPSET_CB_INDEX
]++;
1664 /* If there was an error or set is done, release set */
1665 if (ret
|| !cb
->args
[IPSET_CB_ARG0
]) {
1666 set
= ip_set_ref_netlink(inst
, index
);
1667 if (set
->variant
->uref
)
1668 set
->variant
->uref(set
, cb
, false);
1669 pr_debug("release set %s\n", set
->name
);
1670 __ip_set_put_netlink(set
);
1671 cb
->args
[IPSET_CB_ARG0
] = 0;
1675 nlmsg_end(skb
, nlh
);
1676 pr_debug("nlmsg_len: %u\n", nlh
->nlmsg_len
);
1680 return ret
< 0 ? ret
: skb
->len
;
/* IPSET_CMD_LIST/SAVE entry point: protocol check, then hand control to
 * the netlink dump machinery with the start/dump/done callbacks above. */
1683 static int ip_set_dump(struct net
*net
, struct sock
*ctnl
, struct sk_buff
*skb
,
1684 const struct nlmsghdr
*nlh
,
1685 const struct nlattr
* const attr
[],
1686 struct netlink_ext_ack
*extack
)
1688 if (unlikely(protocol_min_failed(attr
)))
1689 return -IPSET_ERR_PROTOCOL
;
1692 struct netlink_dump_control c
= {
1693 .start
= ip_set_dump_start
,
1694 .dump
= ip_set_dump_do
,
1695 .done
= ip_set_dump_done
,
1697 return netlink_dump_start(ctnl
, skb
, nlh
, &c
);
1701 /* Add, del and test */
/* Policy for ADD/DEL/TEST: a single nested DATA blob, or (batch/restore
 * mode) a nested ADT list of DATA blobs plus a LINENO for error reports. */
1703 static const struct nla_policy ip_set_adt_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
1704 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
1705 [IPSET_ATTR_SETNAME
] = { .type
= NLA_NUL_STRING
,
1706 .len
= IPSET_MAXNAMELEN
- 1 },
1707 [IPSET_ATTR_LINENO
] = { .type
= NLA_U32
},
1708 [IPSET_ATTR_DATA
] = { .type
= NLA_NESTED
},
1709 [IPSET_ATTR_ADT
] = { .type
= NLA_NESTED
},
/* call_ad - run one add/del operation on a set.
 * Retries the type's uadt op after a successful resize while it returns
 * -EAGAIN. -IPSET_ERR_EXIST is suppressed when NLM_F_EXCL was absent
 * (eexist). In restore/batch mode (use_lineno) a failing line is reported
 * by hand-building an NLMSG_ERROR reply that embeds the original request
 * with its LINENO attribute rewritten, then signalling netlink to skip
 * its own ACK. The min(SIZE_MAX, ...) clamps the size_t payload sum.
 * NOTE(review): extraction dropped locals (ret, lineno, errline), the
 * per-set locking around uadt, and several error branches — restore from
 * pristine source.
 */
1713 call_ad(struct sock
*ctnl
, struct sk_buff
*skb
, struct ip_set
*set
,
1714 struct nlattr
*tb
[], enum ipset_adt adt
,
1715 u32 flags
, bool use_lineno
)
1719 bool eexist
= flags
& IPSET_FLAG_EXIST
, retried
= false;
1723 ret
= set
->variant
->uadt(set
, tb
, adt
, &lineno
, flags
, retried
);
1726 } while (ret
== -EAGAIN
&&
1727 set
->variant
->resize
&&
1728 (ret
= set
->variant
->resize(set
, retried
)) == 0);
1730 if (!ret
|| (ret
== -IPSET_ERR_EXIST
&& eexist
))
1732 if (lineno
&& use_lineno
) {
1733 /* Error in restore/batch mode: send back lineno */
1734 struct nlmsghdr
*rep
, *nlh
= nlmsg_hdr(skb
);
1735 struct sk_buff
*skb2
;
1736 struct nlmsgerr
*errmsg
;
1737 size_t payload
= min(SIZE_MAX
,
1738 sizeof(*errmsg
) + nlmsg_len(nlh
));
1739 int min_len
= nlmsg_total_size(sizeof(struct nfgenmsg
));
1740 struct nlattr
*cda
[IPSET_ATTR_CMD_MAX
+ 1];
1741 struct nlattr
*cmdattr
;
1744 skb2
= nlmsg_new(payload
, GFP_KERNEL
);
1747 rep
= __nlmsg_put(skb2
, NETLINK_CB(skb
).portid
,
1748 nlh
->nlmsg_seq
, NLMSG_ERROR
, payload
, 0);
1749 errmsg
= nlmsg_data(rep
);
1750 errmsg
->error
= ret
;
/* Copy the failing request into the error reply, then patch its
 * LINENO attribute in place so userspace learns which line failed. */
1751 memcpy(&errmsg
->msg
, nlh
, nlh
->nlmsg_len
);
1752 cmdattr
= (void *)&errmsg
->msg
+ min_len
;
1754 ret
= nla_parse(cda
, IPSET_ATTR_CMD_MAX
, cmdattr
,
1755 nlh
->nlmsg_len
- min_len
, ip_set_adt_policy
,
1762 errline
= nla_data(cda
[IPSET_ATTR_LINENO
]);
1766 netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
,
1768 /* Signal netlink not to send its ACK/errmsg. */
/* ip_set_ad - shared backend for IPSET_CMD_ADD and IPSET_CMD_DEL.
 * Protocol check enforces exactly one of DATA xor ADT (the ^ below), that
 * each is properly nested, and that batch mode (ADT) carries a LINENO.
 * Single DATA: parse against the set type's adt policy and call call_ad
 * once. ADT: iterate the nested DATA blobs, calling call_ad per entry.
 * NOTE(review): extraction dropped locals (set, ret, use_lineno, nla_rem,
 * adt parameter position in the signature region, not-found return and
 * the error-break in the loop) — restore from pristine source.
 */
1775 static int ip_set_ad(struct net
*net
, struct sock
*ctnl
,
1776 struct sk_buff
*skb
,
1778 const struct nlmsghdr
*nlh
,
1779 const struct nlattr
* const attr
[],
1780 struct netlink_ext_ack
*extack
)
1782 struct ip_set_net
*inst
= ip_set_pernet(net
);
1784 struct nlattr
*tb
[IPSET_ATTR_ADT_MAX
+ 1] = {};
1785 const struct nlattr
*nla
;
1786 u32 flags
= flag_exist(nlh
);
1790 if (unlikely(protocol_min_failed(attr
) ||
1791 !attr
[IPSET_ATTR_SETNAME
] ||
1792 !((attr
[IPSET_ATTR_DATA
] != NULL
) ^
1793 (attr
[IPSET_ATTR_ADT
] != NULL
)) ||
1794 (attr
[IPSET_ATTR_DATA
] &&
1795 !flag_nested(attr
[IPSET_ATTR_DATA
])) ||
1796 (attr
[IPSET_ATTR_ADT
] &&
1797 (!flag_nested(attr
[IPSET_ATTR_ADT
]) ||
1798 !attr
[IPSET_ATTR_LINENO
]))))
1799 return -IPSET_ERR_PROTOCOL
;
1801 set
= find_set(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]));
1805 use_lineno
= !!attr
[IPSET_ATTR_LINENO
];
1806 if (attr
[IPSET_ATTR_DATA
]) {
1807 if (nla_parse_nested(tb
, IPSET_ATTR_ADT_MAX
,
1808 attr
[IPSET_ATTR_DATA
],
1809 set
->type
->adt_policy
, NULL
))
1810 return -IPSET_ERR_PROTOCOL
;
1811 ret
= call_ad(ctnl
, skb
, set
, tb
, adt
, flags
,
1816 nla_for_each_nested(nla
, attr
[IPSET_ATTR_ADT
], nla_rem
) {
1817 if (nla_type(nla
) != IPSET_ATTR_DATA
||
1818 !flag_nested(nla
) ||
1819 nla_parse_nested(tb
, IPSET_ATTR_ADT_MAX
, nla
,
1820 set
->type
->adt_policy
, NULL
))
1821 return -IPSET_ERR_PROTOCOL
;
1822 ret
= call_ad(ctnl
, skb
, set
, tb
, adt
,
/* Thin nfnetlink command wrappers: forward ADD and DEL to the shared
 * ip_set_ad() backend with the matching ipset_adt opcode. */
1831 static int ip_set_uadd(struct net
*net
, struct sock
*ctnl
,
1832 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
1833 const struct nlattr
* const attr
[],
1834 struct netlink_ext_ack
*extack
)
1836 return ip_set_ad(net
, ctnl
, skb
,
1837 IPSET_ADD
, nlh
, attr
, extack
);
1840 static int ip_set_udel(struct net
*net
, struct sock
*ctnl
,
1841 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
1842 const struct nlattr
* const attr
[],
1843 struct netlink_ext_ack
*extack
)
1845 return ip_set_ad(net
, ctnl
, skb
,
1846 IPSET_DEL
, nlh
, attr
, extack
);
/* IPSET_CMD_TEST handler: parse the single nested DATA blob against the
 * set type's policy and run the uadt op with IPSET_TEST under
 * rcu_read_lock_bh. uadt returning > 0 means "element present" (mapped to
 * 0); otherwise -IPSET_ERR_EXIST is returned.
 * NOTE(review): extraction dropped locals (set, ret, lineno), the
 * not-found return and the rcu_read_lock_bh() — restore from source.
 */
1849 static int ip_set_utest(struct net
*net
, struct sock
*ctnl
, struct sk_buff
*skb
,
1850 const struct nlmsghdr
*nlh
,
1851 const struct nlattr
* const attr
[],
1852 struct netlink_ext_ack
*extack
)
1854 struct ip_set_net
*inst
= ip_set_pernet(net
);
1856 struct nlattr
*tb
[IPSET_ATTR_ADT_MAX
+ 1] = {};
1860 if (unlikely(protocol_min_failed(attr
) ||
1861 !attr
[IPSET_ATTR_SETNAME
] ||
1862 !attr
[IPSET_ATTR_DATA
] ||
1863 !flag_nested(attr
[IPSET_ATTR_DATA
])))
1864 return -IPSET_ERR_PROTOCOL
;
1866 set
= find_set(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]));
1870 if (nla_parse_nested(tb
, IPSET_ATTR_ADT_MAX
, attr
[IPSET_ATTR_DATA
],
1871 set
->type
->adt_policy
, NULL
))
1872 return -IPSET_ERR_PROTOCOL
;
1875 ret
= set
->variant
->uadt(set
, tb
, IPSET_TEST
, &lineno
, 0, 0);
1876 rcu_read_unlock_bh();
1877 /* Userspace can't trigger element to be re-added */
1881 return ret
> 0 ? 0 : -IPSET_ERR_EXIST
;
1884 /* Get headed data of a set */
/* IPSET_CMD_HEADER handler: look up the named set and unicast a reply
 * carrying its name, type name, family and revision.
 * NOTE(review): extraction dropped the ret declaration, the not-found and
 * allocation-failure returns, and the nlmsg_failure/nla_put_failure
 * cleanup labels' surroundings — restore from pristine source.
 */
1886 static int ip_set_header(struct net
*net
, struct sock
*ctnl
,
1887 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
1888 const struct nlattr
* const attr
[],
1889 struct netlink_ext_ack
*extack
)
1891 struct ip_set_net
*inst
= ip_set_pernet(net
);
1892 const struct ip_set
*set
;
1893 struct sk_buff
*skb2
;
1894 struct nlmsghdr
*nlh2
;
1897 if (unlikely(protocol_min_failed(attr
) ||
1898 !attr
[IPSET_ATTR_SETNAME
]))
1899 return -IPSET_ERR_PROTOCOL
;
1901 set
= find_set(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]));
1905 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
1909 nlh2
= start_msg(skb2
, NETLINK_CB(skb
).portid
, nlh
->nlmsg_seq
, 0,
1913 if (nla_put_u8(skb2
, IPSET_ATTR_PROTOCOL
, protocol(attr
)) ||
1914 nla_put_string(skb2
, IPSET_ATTR_SETNAME
, set
->name
) ||
1915 nla_put_string(skb2
, IPSET_ATTR_TYPENAME
, set
->type
->name
) ||
1916 nla_put_u8(skb2
, IPSET_ATTR_FAMILY
, set
->family
) ||
1917 nla_put_u8(skb2
, IPSET_ATTR_REVISION
, set
->revision
))
1918 goto nla_put_failure
;
1919 nlmsg_end(skb2
, nlh2
);
1921 ret
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
1928 nlmsg_cancel(skb2
, nlh2
);
/* Policy for IPSET_CMD_TYPE: protocol byte, NUL-terminated type name and
 * an address-family byte. */
1936 static const struct nla_policy ip_set_type_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
1937 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
1938 [IPSET_ATTR_TYPENAME
] = { .type
= NLA_NUL_STRING
,
1939 .len
= IPSET_MAXNAMELEN
- 1 },
1940 [IPSET_ATTR_FAMILY
] = { .type
= NLA_U8
},
/* IPSET_CMD_TYPE handler: report the supported revision range (min/max)
 * of a set type for the given family, via find_set_type_minmax(), and
 * unicast the answer back to the requester.
 * NOTE(review): extraction dropped the ret declaration, the lookup- and
 * allocation-failure returns and the failure labels' surroundings.
 */
1943 static int ip_set_type(struct net
*net
, struct sock
*ctnl
, struct sk_buff
*skb
,
1944 const struct nlmsghdr
*nlh
,
1945 const struct nlattr
* const attr
[],
1946 struct netlink_ext_ack
*extack
)
1948 struct sk_buff
*skb2
;
1949 struct nlmsghdr
*nlh2
;
1950 u8 family
, min
, max
;
1951 const char *typename
;
1954 if (unlikely(protocol_min_failed(attr
) ||
1955 !attr
[IPSET_ATTR_TYPENAME
] ||
1956 !attr
[IPSET_ATTR_FAMILY
]))
1957 return -IPSET_ERR_PROTOCOL
;
1959 family
= nla_get_u8(attr
[IPSET_ATTR_FAMILY
]);
1960 typename
= nla_data(attr
[IPSET_ATTR_TYPENAME
]);
1961 ret
= find_set_type_minmax(typename
, family
, &min
, &max
);
1965 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
1969 nlh2
= start_msg(skb2
, NETLINK_CB(skb
).portid
, nlh
->nlmsg_seq
, 0,
1973 if (nla_put_u8(skb2
, IPSET_ATTR_PROTOCOL
, protocol(attr
)) ||
1974 nla_put_string(skb2
, IPSET_ATTR_TYPENAME
, typename
) ||
1975 nla_put_u8(skb2
, IPSET_ATTR_FAMILY
, family
) ||
1976 nla_put_u8(skb2
, IPSET_ATTR_REVISION
, max
) ||
1977 nla_put_u8(skb2
, IPSET_ATTR_REVISION_MIN
, min
))
1978 goto nla_put_failure
;
1979 nlmsg_end(skb2
, nlh2
);
1981 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2
->nlmsg_len
);
1982 ret
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
1989 nlmsg_cancel(skb2
, nlh2
);
1995 /* Get protocol version */
1997 static const struct nla_policy
1998 ip_set_protocol_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
1999 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
/* IPSET_CMD_PROTOCOL handler: reply with the supported protocol range
 * (IPSET_PROTOCOL and IPSET_PROTOCOL_MIN). Note: no protocol version
 * check here — this is how userspace discovers the version to speak.
 * NOTE(review): extraction dropped the ret declaration, allocation-
 * failure return and failure-label surroundings.
 */
2002 static int ip_set_protocol(struct net
*net
, struct sock
*ctnl
,
2003 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
2004 const struct nlattr
* const attr
[],
2005 struct netlink_ext_ack
*extack
)
2007 struct sk_buff
*skb2
;
2008 struct nlmsghdr
*nlh2
;
2011 if (unlikely(!attr
[IPSET_ATTR_PROTOCOL
]))
2012 return -IPSET_ERR_PROTOCOL
;
2014 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
2018 nlh2
= start_msg(skb2
, NETLINK_CB(skb
).portid
, nlh
->nlmsg_seq
, 0,
2019 IPSET_CMD_PROTOCOL
);
2022 if (nla_put_u8(skb2
, IPSET_ATTR_PROTOCOL
, IPSET_PROTOCOL
))
2023 goto nla_put_failure
;
2024 if (nla_put_u8(skb2
, IPSET_ATTR_PROTOCOL_MIN
, IPSET_PROTOCOL_MIN
))
2025 goto nla_put_failure
;
2026 nlmsg_end(skb2
, nlh2
);
2028 ret
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
2035 nlmsg_cancel(skb2
, nlh2
);
2041 /* Get set by name or index, from userspace */
/* IPSET_CMD_GET_BYNAME handler: resolve a set name to its index and
 * family and unicast the result. Uses protocol_failed() (exact protocol
 * match) rather than the _min variant used by older commands.
 * NOTE(review): extraction dropped the ret declaration, the not-found
 * and allocation-failure returns, and failure-label surroundings.
 */
2043 static int ip_set_byname(struct net
*net
, struct sock
*ctnl
,
2044 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
2045 const struct nlattr
* const attr
[],
2046 struct netlink_ext_ack
*extack
)
2048 struct ip_set_net
*inst
= ip_set_pernet(net
);
2049 struct sk_buff
*skb2
;
2050 struct nlmsghdr
*nlh2
;
2051 ip_set_id_t id
= IPSET_INVALID_ID
;
2052 const struct ip_set
*set
;
2055 if (unlikely(protocol_failed(attr
) ||
2056 !attr
[IPSET_ATTR_SETNAME
]))
2057 return -IPSET_ERR_PROTOCOL
;
2059 set
= find_set_and_id(inst
, nla_data(attr
[IPSET_ATTR_SETNAME
]), &id
);
2060 if (id
== IPSET_INVALID_ID
)
2063 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
2067 nlh2
= start_msg(skb2
, NETLINK_CB(skb
).portid
, nlh
->nlmsg_seq
, 0,
2068 IPSET_CMD_GET_BYNAME
);
2071 if (nla_put_u8(skb2
, IPSET_ATTR_PROTOCOL
, protocol(attr
)) ||
2072 nla_put_u8(skb2
, IPSET_ATTR_FAMILY
, set
->family
) ||
2073 nla_put_net16(skb2
, IPSET_ATTR_INDEX
, htons(id
)))
2074 goto nla_put_failure
;
2075 nlmsg_end(skb2
, nlh2
);
2077 ret
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
2084 nlmsg_cancel(skb2
, nlh2
);
/* Policy for GET_BYINDEX: protocol byte plus a u16 set index. */
2090 static const struct nla_policy ip_set_index_policy
[IPSET_ATTR_CMD_MAX
+ 1] = {
2091 [IPSET_ATTR_PROTOCOL
] = { .type
= NLA_U8
},
2092 [IPSET_ATTR_INDEX
] = { .type
= NLA_U16
},
/* IPSET_CMD_GET_BYINDEX handler: bounds-check the requested index against
 * inst->ip_set_max, look up the slot and unicast the set's name back.
 * NOTE(review): extraction dropped the ret declaration, the out-of-range
 * and empty-slot returns, allocation-failure return and failure labels.
 */
2095 static int ip_set_byindex(struct net
*net
, struct sock
*ctnl
,
2096 struct sk_buff
*skb
, const struct nlmsghdr
*nlh
,
2097 const struct nlattr
* const attr
[],
2098 struct netlink_ext_ack
*extack
)
2100 struct ip_set_net
*inst
= ip_set_pernet(net
);
2101 struct sk_buff
*skb2
;
2102 struct nlmsghdr
*nlh2
;
2103 ip_set_id_t id
= IPSET_INVALID_ID
;
2104 const struct ip_set
*set
;
2107 if (unlikely(protocol_failed(attr
) ||
2108 !attr
[IPSET_ATTR_INDEX
]))
2109 return -IPSET_ERR_PROTOCOL
;
2111 id
= ip_set_get_h16(attr
[IPSET_ATTR_INDEX
]);
2112 if (id
>= inst
->ip_set_max
)
2114 set
= ip_set(inst
, id
);
2118 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
2122 nlh2
= start_msg(skb2
, NETLINK_CB(skb
).portid
, nlh
->nlmsg_seq
, 0,
2123 IPSET_CMD_GET_BYINDEX
);
2126 if (nla_put_u8(skb2
, IPSET_ATTR_PROTOCOL
, protocol(attr
)) ||
2127 nla_put_string(skb2
, IPSET_ATTR_SETNAME
, set
->name
))
2128 goto nla_put_failure
;
2129 nlmsg_end(skb2
, nlh2
);
2131 ret
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
2138 nlmsg_cancel(skb2
, nlh2
);
/* nfnetlink dispatch table: maps each IPSET_CMD_* message to its handler,
 * attribute count and parsing policy. Note LIST uses the richer dump
 * policy (adds FLAGS) while SAVE keeps the plain setname policy.
 * NOTE(review): extraction dropped the [IPSET_CMD_ADD]/[IPSET_CMD_DEL]
 * designators before the ip_set_uadd/ip_set_udel entries and the closing
 * braces of each entry — restore from pristine source.
 */
2144 static const struct nfnl_callback ip_set_netlink_subsys_cb
[IPSET_MSG_MAX
] = {
2145 [IPSET_CMD_NONE
] = {
2146 .call
= ip_set_none
,
2147 .attr_count
= IPSET_ATTR_CMD_MAX
,
2149 [IPSET_CMD_CREATE
] = {
2150 .call
= ip_set_create
,
2151 .attr_count
= IPSET_ATTR_CMD_MAX
,
2152 .policy
= ip_set_create_policy
,
2154 [IPSET_CMD_DESTROY
] = {
2155 .call
= ip_set_destroy
,
2156 .attr_count
= IPSET_ATTR_CMD_MAX
,
2157 .policy
= ip_set_setname_policy
,
2159 [IPSET_CMD_FLUSH
] = {
2160 .call
= ip_set_flush
,
2161 .attr_count
= IPSET_ATTR_CMD_MAX
,
2162 .policy
= ip_set_setname_policy
,
2164 [IPSET_CMD_RENAME
] = {
2165 .call
= ip_set_rename
,
2166 .attr_count
= IPSET_ATTR_CMD_MAX
,
2167 .policy
= ip_set_setname2_policy
,
2169 [IPSET_CMD_SWAP
] = {
2170 .call
= ip_set_swap
,
2171 .attr_count
= IPSET_ATTR_CMD_MAX
,
2172 .policy
= ip_set_setname2_policy
,
2174 [IPSET_CMD_LIST
] = {
2175 .call
= ip_set_dump
,
2176 .attr_count
= IPSET_ATTR_CMD_MAX
,
2177 .policy
= ip_set_dump_policy
,
2179 [IPSET_CMD_SAVE
] = {
2180 .call
= ip_set_dump
,
2181 .attr_count
= IPSET_ATTR_CMD_MAX
,
2182 .policy
= ip_set_setname_policy
,
2185 .call
= ip_set_uadd
,
2186 .attr_count
= IPSET_ATTR_CMD_MAX
,
2187 .policy
= ip_set_adt_policy
,
2190 .call
= ip_set_udel
,
2191 .attr_count
= IPSET_ATTR_CMD_MAX
,
2192 .policy
= ip_set_adt_policy
,
2194 [IPSET_CMD_TEST
] = {
2195 .call
= ip_set_utest
,
2196 .attr_count
= IPSET_ATTR_CMD_MAX
,
2197 .policy
= ip_set_adt_policy
,
2199 [IPSET_CMD_HEADER
] = {
2200 .call
= ip_set_header
,
2201 .attr_count
= IPSET_ATTR_CMD_MAX
,
2202 .policy
= ip_set_setname_policy
,
2204 [IPSET_CMD_TYPE
] = {
2205 .call
= ip_set_type
,
2206 .attr_count
= IPSET_ATTR_CMD_MAX
,
2207 .policy
= ip_set_type_policy
,
2209 [IPSET_CMD_PROTOCOL
] = {
2210 .call
= ip_set_protocol
,
2211 .attr_count
= IPSET_ATTR_CMD_MAX
,
2212 .policy
= ip_set_protocol_policy
,
2214 [IPSET_CMD_GET_BYNAME
] = {
2215 .call
= ip_set_byname
,
2216 .attr_count
= IPSET_ATTR_CMD_MAX
,
2217 .policy
= ip_set_setname_policy
,
2219 [IPSET_CMD_GET_BYINDEX
] = {
2220 .call
= ip_set_byindex
,
2221 .attr_count
= IPSET_ATTR_CMD_MAX
,
2222 .policy
= ip_set_index_policy
,
/* nfnetlink subsystem descriptor registered in ip_set_init(): binds the
 * NFNL_SUBSYS_IPSET id to the callback table above.
 * NOTE(review): extraction dropped the .name initializer line. */
2226 static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly
= {
2228 .subsys_id
= NFNL_SUBSYS_IPSET
,
2229 .cb_count
= IPSET_MSG_MAX
,
2230 .cb
= ip_set_netlink_subsys_cb
,
2233 /* Interface to iptables/ip6tables */
/* ip_set_sockfn_get - getsockopt(SO_IP_SET) backend used by the legacy
 * iptables/ip6tables interface. Requires CAP_NET_ADMIN in the socket's
 * user namespace. The user buffer is copied into a vmalloc'ed scratch
 * area; the leading unsigned int selects the operation:
 *   IP_SET_OP_VERSION      - report IPSET_PROTOCOL
 *   IP_SET_OP_GET_BYNAME   - name -> index
 *   IP_SET_OP_GET_FNAME    - name -> index + family
 *   IP_SET_OP_GET_BYINDEX  - index -> name (strscpy, may truncate-check)
 * Lookups take the nfnl mutex so they cannot race with set commands; the
 * result is copied back to userspace at the end.
 * NOTE(review): extraction dropped many lines (op/data/id/set locals,
 * -EPERM/-EBADF/-EINVAL/-EFAULT returns, the switch header, the copylen
 * assignments and the done label) — restore from pristine source.
 */
2236 ip_set_sockfn_get(struct sock
*sk
, int optval
, void __user
*user
, int *len
)
2240 int copylen
= *len
, ret
= 0;
2241 struct net
*net
= sock_net(sk
);
2242 struct ip_set_net
*inst
= ip_set_pernet(net
);
2244 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
2246 if (optval
!= SO_IP_SET
)
2248 if (*len
< sizeof(unsigned int))
2251 data
= vmalloc(*len
);
2254 if (copy_from_user(data
, user
, *len
) != 0) {
/* Ops below IP_SET_OP_VERSION carry a version field that must be
 * at least IPSET_PROTOCOL_MIN. */
2260 if (*op
< IP_SET_OP_VERSION
) {
2261 /* Check the version at the beginning of operations */
2262 struct ip_set_req_version
*req_version
= data
;
2264 if (*len
< sizeof(struct ip_set_req_version
)) {
2269 if (req_version
->version
< IPSET_PROTOCOL_MIN
) {
2276 case IP_SET_OP_VERSION
: {
2277 struct ip_set_req_version
*req_version
= data
;
2279 if (*len
!= sizeof(struct ip_set_req_version
)) {
2284 req_version
->version
= IPSET_PROTOCOL
;
2285 if (copy_to_user(user
, req_version
,
2286 sizeof(struct ip_set_req_version
)))
2290 case IP_SET_OP_GET_BYNAME
: {
2291 struct ip_set_req_get_set
*req_get
= data
;
2294 if (*len
!= sizeof(struct ip_set_req_get_set
)) {
/* Force NUL-termination of the user-supplied name before lookup. */
2298 req_get
->set
.name
[IPSET_MAXNAMELEN
- 1] = '\0';
2299 nfnl_lock(NFNL_SUBSYS_IPSET
);
2300 find_set_and_id(inst
, req_get
->set
.name
, &id
);
2301 req_get
->set
.index
= id
;
2302 nfnl_unlock(NFNL_SUBSYS_IPSET
);
2305 case IP_SET_OP_GET_FNAME
: {
2306 struct ip_set_req_get_set_family
*req_get
= data
;
2309 if (*len
!= sizeof(struct ip_set_req_get_set_family
)) {
2313 req_get
->set
.name
[IPSET_MAXNAMELEN
- 1] = '\0';
2314 nfnl_lock(NFNL_SUBSYS_IPSET
);
2315 find_set_and_id(inst
, req_get
->set
.name
, &id
);
2316 req_get
->set
.index
= id
;
2317 if (id
!= IPSET_INVALID_ID
)
2318 req_get
->family
= ip_set(inst
, id
)->family
;
2319 nfnl_unlock(NFNL_SUBSYS_IPSET
);
2322 case IP_SET_OP_GET_BYINDEX
: {
2323 struct ip_set_req_get_set
*req_get
= data
;
2326 if (*len
!= sizeof(struct ip_set_req_get_set
) ||
2327 req_get
->set
.index
>= inst
->ip_set_max
) {
2331 nfnl_lock(NFNL_SUBSYS_IPSET
);
2332 set
= ip_set(inst
, req_get
->set
.index
);
2333 ret
= strscpy(req_get
->set
.name
, set
? set
->name
: "",
2335 nfnl_unlock(NFNL_SUBSYS_IPSET
);
2343 } /* end of switch(op) */
2346 if (copy_to_user(user
, data
, copylen
))
/* Sockopt registration: exposes exactly one get-only optname (SO_IP_SET)
 * handled by ip_set_sockfn_get.
 * NOTE(review): extraction dropped the .pf initializer line. */
2356 static struct nf_sockopt_ops so_set __read_mostly
= {
2358 .get_optmin
= SO_IP_SET
,
2359 .get_optmax
= SO_IP_SET
+ 1,
2360 .get
= ip_set_sockfn_get
,
2361 .owner
= THIS_MODULE
,
/* Per-netns constructor: size the set array from the max_sets module
 * parameter (default CONFIG_IP_SET_MAX), clamp below IPSET_INVALID_ID,
 * allocate the zeroed pointer array with kvcalloc and publish it via
 * rcu_assign_pointer.
 * NOTE(review): extraction dropped the kvcalloc NULL check / -ENOMEM
 * return and the final return 0 — restore from pristine source. */
2364 static int __net_init
2365 ip_set_net_init(struct net
*net
)
2367 struct ip_set_net
*inst
= ip_set_pernet(net
);
2368 struct ip_set
**list
;
2370 inst
->ip_set_max
= max_sets
? max_sets
: CONFIG_IP_SET_MAX
;
2371 if (inst
->ip_set_max
>= IPSET_INVALID_ID
)
2372 inst
->ip_set_max
= IPSET_INVALID_ID
- 1;
2374 list
= kvcalloc(inst
->ip_set_max
, sizeof(struct ip_set
*), GFP_KERNEL
);
2377 inst
->is_deleted
= false;
2378 inst
->is_destroyed
= false;
2379 rcu_assign_pointer(inst
->ip_set_list
, list
);
/* Per-netns destructor: mark the instance deleted (so late
 * ip_set_nfnl_put callers bail out), then under the nfnl mutex clear and
 * destroy every remaining set before freeing the pointer array.
 * NOTE(review): extraction dropped the loop-local declaration of i and
 * the NULL check inside the loop — restore from pristine source. */
2383 static void __net_exit
2384 ip_set_net_exit(struct net
*net
)
2386 struct ip_set_net
*inst
= ip_set_pernet(net
);
2388 struct ip_set
*set
= NULL
;
2391 inst
->is_deleted
= true; /* flag for ip_set_nfnl_put */
2393 nfnl_lock(NFNL_SUBSYS_IPSET
);
2394 for (i
= 0; i
< inst
->ip_set_max
; i
++) {
2395 set
= ip_set(inst
, i
);
2397 ip_set(inst
, i
) = NULL
;
2398 ip_set_destroy_set(set
);
2401 nfnl_unlock(NFNL_SUBSYS_IPSET
);
2402 kvfree(rcu_dereference_protected(inst
->ip_set_list
, 1));
/* Pernet registration: per-namespace ip_set_net state of .size bytes,
 * keyed by ip_set_net_id, with the init/exit hooks above. */
2405 static struct pernet_operations ip_set_net_ops
= {
2406 .init
= ip_set_net_init
,
2407 .exit
= ip_set_net_exit
,
2408 .id
= &ip_set_net_id
,
2409 .size
= sizeof(struct ip_set_net
),
2415 int ret
= register_pernet_subsys(&ip_set_net_ops
);
2418 pr_err("ip_set: cannot register pernet_subsys.\n");
2422 ret
= nfnetlink_subsys_register(&ip_set_netlink_subsys
);
2424 pr_err("ip_set: cannot register with nfnetlink.\n");
2425 unregister_pernet_subsys(&ip_set_net_ops
);
2429 ret
= nf_register_sockopt(&so_set
);
2431 pr_err("SO_SET registry failed: %d\n", ret
);
2432 nfnetlink_subsys_unregister(&ip_set_netlink_subsys
);
2433 unregister_pernet_subsys(&ip_set_net_ops
);
2443 nf_unregister_sockopt(&so_set
);
2444 nfnetlink_subsys_unregister(&ip_set_netlink_subsys
);
2446 unregister_pernet_subsys(&ip_set_net_ops
);
2447 pr_debug("these are the famous last words\n");
2450 module_init(ip_set_init
);
2451 module_exit(ip_set_fini
);
2453 MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL
));