// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We dropped the RTNL semaphore in order to perform
         * the module load. So, even if we succeeded in loading
         * the module we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

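/*
 * Illustrative sketch (not part of this file): a classifier module
 * normally defines a static ops table and registers it from its module
 * init hook. The "foo" names below are hypothetical:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * request_module("cls_%s", kind) in tcf_proto_lookup_ops() relies on
 * the module being named cls_<kind>.
 */
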
static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

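/*
 * Note (illustrative): queue_rcu_work() runs the work item only after
 * an RCU grace period has elapsed, so classifiers can use
 * tcf_queue_work() to free per-filter state without racing against
 * concurrent RCU readers still traversing it.
 */
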
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}

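/*
 * Worked example (illustrative): on an empty chain the first
 * kernel-allocated prio is TC_H_MAJ(0xC0000000) == 0xC0000000; if the
 * current head already has prio 0xC0000000, the next allocation is
 * TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000, so each auto-allocated
 * filter sorts ahead of the previous one.
 */
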
static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error return false to take rtnl lock. Proto lookup/create
         * functions will perform lookup again and properly handle errors.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
                              struct tcf_walker *arg)
{
        if (fh) {
                arg->nonempty = true;
                return -1;
        }
        return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
        struct tcf_walker walker = { .fn = walker_check_empty, };

        if (tp->ops->walk) {
                tp->ops->walk(tp, &walker, rtnl_held);
                return !walker.nonempty;
        }
        return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
        spin_lock(&tp->lock);
        if (tcf_proto_is_empty(tp, rtnl_held))
                tp->deleting = true;
        spin_unlock(&tp->lock);
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
        lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* In case all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send notification only in case we got the first
         * non-action reference. Until then, the chain acts only as
         * a placeholder for actions pointing to it and user ought
         * not know about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

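/*
 * Note (illustrative): an action such as act_gact with "goto chain X"
 * takes only an action reference via tcf_chain_get_by_act(). While
 * chain->refcnt == chain->action_refcnt the chain is treated as a
 * placeholder (see tcf_chain_held_by_acts_only()) and is neither
 * notified to user-space nor shown in dumps.
 */
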
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding block lock.
         * However, when block is unlocked chain can be changed concurrently, so
         * save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct net_device *dev,
                                  struct tcf_block *block,
                                  flow_indr_block_bind_cb_t *cb,
                                  void *cb_priv,
                                  enum flow_block_command command)
{
        struct flow_block_offload bo = {
                .command	= command,
                .binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
                .net		= dev_net(dev),
                .block_shared	= tcf_block_non_null_shared(block),
        };
        INIT_LIST_HEAD(&bo.cb_list);

        if (!block)
                return;

        bo.block = &block->flow_block;

        down_write(&block->cb_lock);
        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

        tcf_block_setup(block, &bo);
        up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
        const struct Qdisc_class_ops *cops;
        struct Qdisc *qdisc;

        if (!dev_ingress_queue(dev))
                return NULL;

        qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
        if (!qdisc)
                return NULL;

        cops = qdisc->ops->cl_ops;
        if (!cops)
                return NULL;

        if (!cops->tcf_block)
                return NULL;

        return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
                                          flow_indr_block_bind_cb_t *cb,
                                          void *cb_priv,
                                          enum flow_block_command command)
{
        struct tcf_block *block = tc_dev_ingress_block(dev);

        tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}

static void tc_indr_block_call(struct tcf_block *block,
                               struct net_device *dev,
                               struct tcf_block_ext_info *ei,
                               enum flow_block_command command,
                               struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {
                .command	= command,
                .binder_type	= ei->binder_type,
                .net		= dev_net(dev),
                .block		= &block->flow_block,
                .block_shared	= tcf_block_shared(block),
                .extack		= extack,
        };
        INIT_LIST_HEAD(&bo.cb_list);

        flow_indr_block_call(dev, &bo, command);
        tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {};
        int err;

        bo.net = dev_net(dev);
        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = &block->flow_block;
        bo.block_shared = tcf_block_shared(block);
        bo.extack = extack;
        INIT_LIST_HEAD(&bo.cb_list);

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
        if (err < 0)
                return err;

        return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_inc;

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, refuse to bind.
         */
        if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                err = -EOPNOTSUPP;
                goto err_unlock;
        }

        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                goto err_unlock;

        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
        up_write(&block->cb_lock);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block)) {
                err = -EOPNOTSUPP;
                goto err_unlock;
        }
        err = 0;
        block->nooffloaddevcnt++;
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
        up_write(&block->cb_lock);
        return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_dec;
        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        up_write(&block->cb_lock);
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
        up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
                            struct netlink_ext_ack *extack)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);
        int err;

        idr_preload(GFP_KERNEL);
        spin_lock(&tn->idr_lock);
        err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
                            GFP_NOWAIT);
        spin_unlock(&tn->idr_lock);
        idr_preload_end();

        return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        spin_lock(&tn->idr_lock);
        idr_remove(&tn->idr, block->index);
        spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block) {
                NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
        init_rwsem(&block->cb_lock);
        flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);

        refcount_set(&block->refcnt, 1);
        block->net = net;
        block->index = block_index;

        /* Don't store q pointer for blocks which are shared */
        if (!tcf_block_shared(block))
                block->q = q;
        return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
        struct tcf_block *block;

        rcu_read_lock();
        block = tcf_block_lookup(net, block_index);
        if (block && !refcount_inc_not_zero(&block->refcnt))
                block = NULL;
        rcu_read_unlock();

        return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        mutex_lock(&block->lock);
        if (chain)
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);
        else
                chain = list_first_entry_or_null(&block->chain_list,
                                                 struct tcf_chain, list);

        /* skip all action-only chains */
        while (chain && tcf_chain_held_by_acts_only(chain))
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);

        if (chain)
                tcf_chain_hold(chain);
        mutex_unlock(&block->lock);

        return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee a
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

        if (chain)
                tcf_chain_put(chain);

        return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

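/*
 * Illustrative usage, mirroring tcf_block_flush_all_chains() below:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * each call releases the reference on the chain passed in and returns
 * the next chain with a reference already held.
 */
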
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
        u32 prio = 0;

        ASSERT_RTNL();
        mutex_lock(&chain->filter_chain_lock);

        if (!tp) {
                tp = tcf_chain_dereference(chain->filter_chain, chain);
        } else if (tcf_proto_is_deleting(tp)) {
                /* 'deleting' flag is set and chain->filter_chain_lock was
                 * unlocked, which means next pointer could be invalid. Restart
                 * search.
                 */
                prio = tp->prio + 1;
                tp = tcf_chain_dereference(chain->filter_chain, chain);

                for (; tp; tp = tcf_chain_dereference(tp->next, chain))
                        if (!tp->deleting && tp->prio >= prio)
                                break;
        } else {
                tp = tcf_chain_dereference(tp->next, chain);
        }

        if (tp)
                tcf_proto_get(tp);

        mutex_unlock(&chain->filter_chain_lock);

        return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee a
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
                   bool rtnl_held)
{
        struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

        if (tp)
                tcf_proto_put(tp, rtnl_held, NULL);

        return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
        struct tcf_chain *chain;

        /* Last reference to block. At this point chains cannot be added or
         * removed concurrently.
         */
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                tcf_chain_put_explicitly_created(chain);
                tcf_chain_flush(chain, rtnl_held);
        }
}

/* Lookup Qdisc and increments its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
                            u32 *parent, int ifindex, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        int err = 0;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        rcu_read_lock();

        /* Find link */
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        /* Find qdisc */
        if (!*parent) {
                *q = dev->qdisc;
                *parent = (*q)->handle;
        } else {
                *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
                if (!*q) {
                        NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                        err = -EINVAL;
                        goto errout_rcu;
                }
        }

        *q = qdisc_refcount_inc_nz(*q);
        if (!*q) {
                NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                err = -EINVAL;
                goto errout_rcu;
        }

        /* Is it classful? */
        cops = (*q)->ops->cl_ops;
        if (!cops) {
                NL_SET_ERR_MSG(extack, "Qdisc not classful");
                err = -EINVAL;
                goto errout_qdisc;
        }

        if (!cops->tcf_block) {
                NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
                err = -EOPNOTSUPP;
                goto errout_qdisc;
        }

errout_rcu:
        /* At this point we know that qdisc is not noop_qdisc,
         * which means that qdisc holds a reference to net_device
         * and we hold a reference to qdisc, so it is safe to release
         * rcu read lock.
         */
        rcu_read_unlock();
        return err;

errout_qdisc:
        rcu_read_unlock();

        if (rtnl_held)
                qdisc_put(*q);
        else
                qdisc_put_unlocked(*q);
        *q = NULL;

        return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
                               int ifindex, struct netlink_ext_ack *extack)
{
        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        /* Do we search for filter, attached to class? */
        if (TC_H_MIN(parent)) {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                *cl = cops->find(q, parent);
                if (*cl == 0) {
                        NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
                        return -ENOENT;
                }
        }

        return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
                                          unsigned long cl, int ifindex,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
                block = tcf_block_refcnt_get(net, block_index);
                if (!block) {
                        NL_SET_ERR_MSG(extack, "Block of given index was not found");
                        return ERR_PTR(-EINVAL);
                }
        } else {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                block = cops->tcf_block(q, cl, extack);
                if (!block)
                        return ERR_PTR(-EINVAL);

                if (tcf_block_shared(block)) {
                        NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
                        return ERR_PTR(-EOPNOTSUPP);
                }

                /* Always take reference to block in order to support execution
                 * of rules update path of cls API without rtnl lock. Caller
                 * must release block when it is finished using it. The 'if'
                 * branch of this conditional obtains its reference to block by
                 * calling tcf_block_refcnt_get().
                 */
                refcount_inc(&block->refcnt);
        }

        return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
                            struct tcf_block_ext_info *ei, bool rtnl_held)
{
        if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
                /* Flushing/putting all chains will cause the block to be
                 * deallocated when last chain is freed. However, if chain_list
                 * is empty, block has to be manually deallocated. After block
                 * reference counter reached 0, it is no longer possible to
                 * increment it or add new chains to block.
                 */
                bool free_block = list_empty(&block->chain_list);

                mutex_unlock(&block->lock);
                if (tcf_block_shared(block))
                        tcf_block_remove(block, block->net);

                if (q)
                        tcf_block_offload_unbind(block, q, ei);

                if (free_block)
                        tcf_block_destroy(block);
                else
                        tcf_block_flush_all_chains(block, rtnl_held);
        } else if (q) {
                tcf_block_offload_unbind(block, q, ei);
        }
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
        __tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
                                        u32 *parent, unsigned long *cl,
                                        int ifindex, u32 block_index,
                                        struct netlink_ext_ack *extack)
{
        struct tcf_block *block;
        int err = 0;

        ASSERT_RTNL();

        err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
        if (err)
                goto errout;

        err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
        if (err)
                goto errout_qdisc;

        block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
        if (IS_ERR(block)) {
                err = PTR_ERR(block);
                goto errout_qdisc;
        }

        return block;

errout_qdisc:
        if (*q)
                qdisc_put(*q);
errout:
        *q = NULL;
        return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
                              bool rtnl_held)
{
        if (!IS_ERR_OR_NULL(block))
                tcf_block_refcnt_put(block, rtnl_held);

        if (q) {
                if (rtnl_held)
                        qdisc_put(q);
                else
                        qdisc_put_unlocked(q);
        }
}

struct tcf_block_owner_item {
        struct list_head list;
        struct Qdisc *q;
        enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        if (block->keep_dst &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
        struct tcf_block_owner_item *item;

        block->keep_dst = true;
        list_for_each_entry(item, &block->owner_list, list)
                tcf_block_owner_netif_keep_dst(block, item->q,
                                               item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->q = q;
        item->binder_type = binder_type;
        list_add(&item->list, &block->owner_list);
        return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
                                struct Qdisc *q,
                                enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        list_for_each_entry(item, &block->owner_list, list) {
                if (item->q == q && item->binder_type == binder_type) {
                        list_del(&item->list);
                        kfree(item);
                        return;
                }
        }
        WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
{
        struct net *net = qdisc_net(q);
        struct tcf_block *block = NULL;
        int err;

        if (ei->block_index)
                /* block_index not 0 means the shared block is requested */
                block = tcf_block_refcnt_get(net, ei->block_index);

        if (!block) {
                block = tcf_block_create(net, q, ei->block_index, extack);
                if (IS_ERR(block))
                        return PTR_ERR(block);
                if (tcf_block_shared(block)) {
                        err = tcf_block_insert(block, net, extack);
                        if (err)
                                goto err_block_insert;
                }
        }

        err = tcf_block_owner_add(block, q, ei->binder_type);
        if (err)
                goto err_block_owner_add;

        tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

        err = tcf_chain0_head_change_cb_add(block, ei, extack);
        if (err)
                goto err_chain0_head_change_cb_add;

        err = tcf_block_offload_bind(block, q, ei, extack);
        if (err)
                goto err_block_offload_bind;

        *p_block = block;
        return 0;

err_block_offload_bind:
        tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
        tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
        tcf_block_refcnt_put(block, true);
        return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
        struct tcf_proto __rcu **p_filter_chain = priv;

        rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack)
{
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
                .chain_head_change_priv = p_filter_chain,
        };

        WARN_ON(!p_filter_chain);
        return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

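/*
 * Illustrative usage sketch (hypothetical qdisc, not taken from this
 * file): a simple classful qdisc typically pairs these calls as
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	...
 *	tcf_block_put(q->block);
 *
 * with q->filter_list kept up to date through the default
 * chain_head_change callback installed above.
 */
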
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
{
        if (!block)
                return;
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);

        __tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
        struct tcf_block_ext_info ei = {0, };

        if (!block)
                return;
        tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain, *chain_prev;
        struct tcf_proto *tp, *tp_prev;
        int err;

        lockdep_assert_held(&block->cb_lock);

        for (chain = __tcf_get_next_chain(block, NULL);
             chain;
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
                             tcf_proto_put(tp_prev, true, NULL)) {
                        if (tp->ops->reoffload) {
                                err = tp->ops->reoffload(tp, add, cb, cb_priv,
                                                         extack);
                                if (err && add)
                                        goto err_playback_remove;
                        } else if (add && offload_in_use) {
                                err = -EOPNOTSUPP;
                                NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
                                goto err_playback_remove;
                        }
                }
        }

        return 0;

err_playback_remove:
        tcf_proto_put(tp, true, NULL);
        tcf_chain_put(chain);
        tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
                                    extack);
        return err;
}

static int tcf_block_bind(struct tcf_block *block,
                          struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;
        int err, i = 0;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry(block_cb, &bo->cb_list, list) {
                err = tcf_block_playback_offloads(block, block_cb->cb,
                                                  block_cb->cb_priv, true,
                                                  tcf_block_offload_in_use(block),
                                                  bo->extack);
                if (err)
                        goto err_unroll;
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt++;

                i++;
        }
        list_splice(&bo->cb_list, &block->flow_block.cb_list);

        return 0;

err_unroll:
        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                if (i-- > 0) {
                        list_del(&block_cb->list);
                        tcf_block_playback_offloads(block, block_cb->cb,
                                                    block_cb->cb_priv, false,
                                                    tcf_block_offload_in_use(block),
                                                    NULL);
                        if (!bo->unlocked_driver_cb)
                                block->lockeddevcnt--;
                }
                flow_block_cb_free(block_cb);
        }

        return err;
}

static void tcf_block_unbind(struct tcf_block *block,
                             struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                tcf_block_playback_offloads(block, block_cb->cb,
                                            block_cb->cb_priv, false,
                                            tcf_block_offload_in_use(block),
                                            NULL);
                list_del(&block_cb->list);
                flow_block_cb_free(block_cb);
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt--;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo)
{
        int err;

        switch (bo->command) {
        case FLOW_BLOCK_BIND:
                err = tcf_block_bind(block, bo);
                break;
        case FLOW_BLOCK_UNBIND:
                err = 0;
                tcf_block_unbind(block, bo);
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
        const int max_reclassify_loop = 4;
        const struct tcf_proto *orig_tp = tp;
        const struct tcf_proto *first_tp;
        int limit = 0;

reclassify:
#endif
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
                __be16 protocol = tc_skb_protocol(skb);
                int err;

                if (tp->protocol != protocol &&
                    tp->protocol != htons(ETH_P_ALL))
                        continue;

                err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
                if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
                        first_tp = orig_tp;
                        goto reset;
                } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
                        first_tp = res->goto_tp;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
                        {
                                struct tc_skb_ext *ext;

                                ext = skb_ext_add(skb, TC_SKB_EXT);
                                if (WARN_ON_ONCE(!ext))
                                        return TC_ACT_SHOT;

                                ext->chain = err & TC_ACT_EXT_VAL_MASK;
                        }
#endif
                        goto reset;
                }
#endif
                if (err >= 0)
                        return err;
        }

        return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
        if (unlikely(limit++ >= max_reclassify_loop)) {
                net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
                                       tp->chain->block->index,
                                       tp->prio & 0xffff,
                                       ntohs(tp->protocol));
                return TC_ACT_SHOT;
        }

        tp = first_tp;
        goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);

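/*
 * Note (illustrative): a filter whose action keeps returning
 * TC_ACT_RECLASSIFY would otherwise loop forever; max_reclassify_loop
 * bounds the restarts at 4, after which the packet is dropped with
 * TC_ACT_SHOT and a ratelimited notice is logged.
 */
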
struct tcf_chain_info {
        struct tcf_proto __rcu **pprev;
        struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
                                           struct tcf_chain_info *chain_info)
{
        return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
                               struct tcf_chain_info *chain_info,
                               struct tcf_proto *tp)
{
        if (chain->flushing)
                return -EAGAIN;

        if (*chain_info->pprev == chain->filter_chain)
                tcf_chain0_head_change(chain, tp);
        tcf_proto_get(tp);
        RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
        rcu_assign_pointer(*chain_info->pprev, tp);

        return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
                                struct tcf_chain_info *chain_info,
                                struct tcf_proto *tp)
{
        struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

        tcf_proto_mark_delete(tp);
        if (tp == chain->filter_chain)
                tcf_chain0_head_change(chain, next);
        RCU_INIT_POINTER(*chain_info->pprev, next);
}

1591
8b64678e
VB
1592static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1593 struct tcf_chain_info *chain_info,
1594 u32 protocol, u32 prio,
1595 bool prio_allocate);
1596
1597/* Try to insert new proto.
1598 * If proto with specified priority already exists, free new proto
1599 * and return existing one.
1600 */
1601
1602static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1603 struct tcf_proto *tp_new,
12db03b6
VB
1604 u32 protocol, u32 prio,
1605 bool rtnl_held)
8b64678e
VB
1606{
1607 struct tcf_chain_info chain_info;
1608 struct tcf_proto *tp;
726d0612 1609 int err = 0;
8b64678e
VB
1610
1611 mutex_lock(&chain->filter_chain_lock);
1612
1613 tp = tcf_chain_tp_find(chain, &chain_info,
1614 protocol, prio, false);
1615 if (!tp)
726d0612 1616 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
8b64678e
VB
1617 mutex_unlock(&chain->filter_chain_lock);
1618
1619 if (tp) {
12db03b6 1620 tcf_proto_destroy(tp_new, rtnl_held, NULL);
8b64678e 1621 tp_new = tp;
726d0612 1622 } else if (err) {
12db03b6 1623 tcf_proto_destroy(tp_new, rtnl_held, NULL);
726d0612 1624 tp_new = ERR_PTR(err);
8b64678e
VB
1625 }
1626
1627 return tp_new;
1628}
1629
static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
                                      struct tcf_proto *tp, bool rtnl_held,
                                      struct netlink_ext_ack *extack)
{
        struct tcf_chain_info chain_info;
        struct tcf_proto *tp_iter;
        struct tcf_proto **pprev;
        struct tcf_proto *next;

        mutex_lock(&chain->filter_chain_lock);

        /* Atomically find and remove tp from chain. */
        for (pprev = &chain->filter_chain;
             (tp_iter = tcf_chain_dereference(*pprev, chain));
             pprev = &tp_iter->next) {
                if (tp_iter == tp) {
                        chain_info.pprev = pprev;
                        chain_info.next = tp_iter->next;
                        WARN_ON(tp_iter->deleting);
                        break;
                }
        }
        /* Verify that tp still exists and no new filters were inserted
         * concurrently.
         * Mark tp for deletion if it is empty.
         */
        if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
                mutex_unlock(&chain->filter_chain_lock);
                return;
        }

        next = tcf_chain_dereference(chain_info.next, chain);
        if (tp == chain->filter_chain)
                tcf_chain0_head_change(chain, next);
        RCU_INIT_POINTER(*chain_info.pprev, next);
        mutex_unlock(&chain->filter_chain_lock);

        tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
                                           struct tcf_chain_info *chain_info,
                                           u32 protocol, u32 prio,
                                           bool prio_allocate)
{
        struct tcf_proto **pprev;
        struct tcf_proto *tp;

        /* Check the chain for existence of proto-tcf with this priority */
        for (pprev = &chain->filter_chain;
             (tp = tcf_chain_dereference(*pprev, chain));
             pprev = &tp->next) {
                if (tp->prio >= prio) {
                        if (tp->prio == prio) {
                                if (prio_allocate ||
                                    (tp->protocol != protocol && protocol))
                                        return ERR_PTR(-EINVAL);
                        } else {
                                tp = NULL;
                        }
                        break;
                }
        }
        chain_info->pprev = pprev;
        if (tp) {
                chain_info->next = tp->next;
                tcf_proto_get(tp);
        } else {
                chain_info->next = NULL;
        }
        return tp;
}

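/*
 * Worked example (illustrative): with filters at prios 10 and 20, a
 * lookup for prio 20 returns that tp if the protocol matches (or the
 * requested protocol is 0), and -EINVAL on a protocol mismatch or when
 * prio_allocate is set; a lookup for prio 15 returns NULL with
 * chain_info->pprev pointing at the slot between the two entries.
 */
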
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
                         struct tcf_proto *tp, struct tcf_block *block,
                         struct Qdisc *q, u32 parent, void *fh,
                         u32 portid, u32 seq, u16 flags, int event,
                         bool rtnl_held)
{
        struct tcmsg *tcm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        if (q) {
                tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
                tcm->tcm_parent = parent;
        } else {
                tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
                tcm->tcm_block_index = block->index;
        }
        tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
        if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
                goto nla_put_failure;
        if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
                goto nla_put_failure;
        if (!fh) {
                tcm->tcm_handle = 0;
        } else {
                if (tp->ops->dump &&
                    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
                        goto nla_put_failure;
        }
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

out_nlmsg_trim:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
                          struct nlmsghdr *n, struct tcf_proto *tp,
                          struct tcf_block *block, struct Qdisc *q,
                          u32 parent, void *fh, int event, bool unicast,
                          bool rtnl_held)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
        int err = 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
                          n->nlmsg_seq, n->nlmsg_flags, event,
                          rtnl_held) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        if (unicast)
                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
        else
                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                     n->nlmsg_flags & NLM_F_ECHO);

        if (err > 0)
                err = 0;
        return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
                              struct nlmsghdr *n, struct tcf_proto *tp,
                              struct tcf_block *block, struct Qdisc *q,
                              u32 parent, void *fh, bool unicast, bool *last,
                              bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
        int err;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
                          n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
                          rtnl_held) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to build del event notification");
                kfree_skb(skb);
                return -EINVAL;
        }

        err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
        if (err) {
                kfree_skb(skb);
                return err;
        }

        if (unicast)
                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
        else
                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                     n->nlmsg_flags & NLM_F_ECHO);
        if (err < 0)
                NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

        if (err > 0)
                err = 0;
        return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
                                 struct tcf_block *block, struct Qdisc *q,
                                 u32 parent, struct nlmsghdr *n,
                                 struct tcf_chain *chain, int event,
                                 bool rtnl_held)
{
        struct tcf_proto *tp;

        for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
             tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
                tfilter_notify(net, oskb, n, tp, block,
                               q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

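/* Add or replace a filter (RTM_NEWTFILTER). As an illustrative sketch
 * (interface name and match are hypothetical), the iproute2 command
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * arrives here as a single RTM_NEWTFILTER request with NLM_F_CREATE set.
 */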
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
					  protocol, prio, chain, rtnl_held,
					  extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

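/* Delete one filter instance, or flush a whole chain when prio is zero
 * (RTM_DELTFILTER). E.g. (hypothetical device) "tc filter del dev eth0
 * ingress" requests a flush, while additionally specifying prio and
 * handle deletes a single filter.
 */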
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

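/* Get a single filter (RTM_GETTFILTER doit); the matching filter is
 * unicast back to the requesting socket as an RTM_NEWTFILTER message.
 */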
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}

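/* State threaded through tp->ops->walk() while dumping all filters on a
 * block (RTM_GETTFILTER dump).
 */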
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, true);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     NULL, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and the parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, the compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

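/* Chain templates pin a chain to a single classifier kind: once a
 * template is set, tc_new_tfilter() rejects filters of any other kind on
 * that chain.
 */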
static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear at
		 * the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_seq, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and the parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, the compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

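/* Parse action attributes into exts->actions. A classifier normally calls
 * this from its ->change() callback; minimal sketch (tb, est and f are
 * hypothetical locals of the caller):
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr,
 *				rtnl_held, extack);
 *	if (err < 0)
 *		return err;
 */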
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);
			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

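/* Usage sketch for tc_setup_cb_call() (hypothetical caller state): a
 * classifier broadcasting a request, e.g. a hardware stats query, to
 * every callback bound to the block:
 *
 *	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower,
 *			       false, rtnl_held);
 */
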
/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);

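/* Minimal sketch of offloading a new filter from a classifier
 * (skip_sw, f->flags and f->in_hw_count are hypothetical caller state):
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count,
 *			      rtnl_held);
 *	if (err < 0 && skip_sw)
 *		return err;
 */
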
/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload counter
 * is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

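/* Replay an existing filter to a single callback, typically while a
 * callback is being (un)registered on the block, keeping the in_hw
 * counters consistent.
 */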
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action)
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
}
EXPORT_SYMBOL(tc_cleanup_flow_action);

static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}

static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}

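/* Translate the actions attached to a filter into the flow_action
 * representation consumed by drivers. flow_action->entries must have been
 * sized with tcf_exts_num_actions(), since a pedit action expands into one
 * entry per key.
 */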
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held)
{
	const struct tc_action *act;
	int i, j, k, err = 0;

	if (!exts)
		return 0;

	if (!rtnl_held)
		rtnl_lock();

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				goto err_out;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (!rtnl_held)
		rtnl_unlock();

	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
}
EXPORT_SYMBOL(tc_setup_flow_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static struct flow_indr_block_ing_entry block_ing_entry = {
	.cb = tc_indr_block_get_and_ing_cmd,
	.list = LIST_HEAD_INIT(block_ing_entry.list),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	flow_indr_add_block_ing_cb(&block_ing_entry);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);