/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"
54 struct mlxsw_sp
*mlxsw_sp
;
55 struct mlxsw_afk
*afk
;
56 struct mlxsw_sp_fid
*dummy_fid
;
57 const struct mlxsw_sp_acl_ops
*ops
;
58 struct rhashtable ruleset_ht
;
59 struct list_head rules
;
61 struct delayed_work dw
;
62 unsigned long interval
; /* ms */
63 #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
64 } rule_activity_update
;
65 unsigned long priv
[0];
66 /* priv has to be always the last item */
69 struct mlxsw_afk
*mlxsw_sp_acl_afk(struct mlxsw_sp_acl
*acl
)
74 struct mlxsw_sp_acl_block_binding
{
75 struct list_head list
;
76 struct net_device
*dev
;
77 struct mlxsw_sp_port
*mlxsw_sp_port
;
81 struct mlxsw_sp_acl_block
{
82 struct list_head binding_list
;
83 struct mlxsw_sp_acl_ruleset
*ruleset_zero
;
84 struct mlxsw_sp
*mlxsw_sp
;
85 unsigned int rule_count
;
86 unsigned int disable_count
;
89 struct mlxsw_sp_acl_ruleset_ht_key
{
90 struct mlxsw_sp_acl_block
*block
;
92 const struct mlxsw_sp_acl_profile_ops
*ops
;
95 struct mlxsw_sp_acl_ruleset
{
96 struct rhash_head ht_node
; /* Member of acl HT */
97 struct mlxsw_sp_acl_ruleset_ht_key ht_key
;
98 struct rhashtable rule_ht
;
99 unsigned int ref_count
;
100 unsigned long priv
[0];
101 /* priv has to be always the last item */
104 struct mlxsw_sp_acl_rule
{
105 struct rhash_head ht_node
; /* Member of rule HT */
106 struct list_head list
;
107 unsigned long cookie
; /* HT key */
108 struct mlxsw_sp_acl_ruleset
*ruleset
;
109 struct mlxsw_sp_acl_rule_info
*rulei
;
113 unsigned long priv
[0];
114 /* priv has to be always the last item */
117 static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params
= {
118 .key_len
= sizeof(struct mlxsw_sp_acl_ruleset_ht_key
),
119 .key_offset
= offsetof(struct mlxsw_sp_acl_ruleset
, ht_key
),
120 .head_offset
= offsetof(struct mlxsw_sp_acl_ruleset
, ht_node
),
121 .automatic_shrinking
= true,
124 static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params
= {
125 .key_len
= sizeof(unsigned long),
126 .key_offset
= offsetof(struct mlxsw_sp_acl_rule
, cookie
),
127 .head_offset
= offsetof(struct mlxsw_sp_acl_rule
, ht_node
),
128 .automatic_shrinking
= true,
131 struct mlxsw_sp_fid
*mlxsw_sp_acl_dummy_fid(struct mlxsw_sp
*mlxsw_sp
)
133 return mlxsw_sp
->acl
->dummy_fid
;
136 struct mlxsw_sp
*mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block
*block
)
138 return block
->mlxsw_sp
;
141 unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block
*block
)
143 return block
? block
->rule_count
: 0;
146 void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block
*block
)
149 block
->disable_count
++;
152 void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block
*block
)
155 block
->disable_count
--;
158 bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block
*block
)
160 return block
->disable_count
;
164 mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset
*ruleset
)
166 /* We hold a reference on ruleset ourselves */
167 return ruleset
->ref_count
== 2;
171 mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp
*mlxsw_sp
,
172 struct mlxsw_sp_acl_block
*block
,
173 struct mlxsw_sp_acl_block_binding
*binding
)
175 struct mlxsw_sp_acl_ruleset
*ruleset
= block
->ruleset_zero
;
176 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
178 return ops
->ruleset_bind(mlxsw_sp
, ruleset
->priv
,
179 binding
->mlxsw_sp_port
, binding
->ingress
);
183 mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp
*mlxsw_sp
,
184 struct mlxsw_sp_acl_block
*block
,
185 struct mlxsw_sp_acl_block_binding
*binding
)
187 struct mlxsw_sp_acl_ruleset
*ruleset
= block
->ruleset_zero
;
188 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
190 ops
->ruleset_unbind(mlxsw_sp
, ruleset
->priv
,
191 binding
->mlxsw_sp_port
, binding
->ingress
);
194 static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block
*block
)
196 return block
->ruleset_zero
;
200 mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp
*mlxsw_sp
,
201 struct mlxsw_sp_acl_ruleset
*ruleset
,
202 struct mlxsw_sp_acl_block
*block
)
204 struct mlxsw_sp_acl_block_binding
*binding
;
207 block
->ruleset_zero
= ruleset
;
208 list_for_each_entry(binding
, &block
->binding_list
, list
) {
209 err
= mlxsw_sp_acl_ruleset_bind(mlxsw_sp
, block
, binding
);
216 list_for_each_entry_continue_reverse(binding
, &block
->binding_list
,
218 mlxsw_sp_acl_ruleset_unbind(mlxsw_sp
, block
, binding
);
219 block
->ruleset_zero
= NULL
;
225 mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp
*mlxsw_sp
,
226 struct mlxsw_sp_acl_ruleset
*ruleset
,
227 struct mlxsw_sp_acl_block
*block
)
229 struct mlxsw_sp_acl_block_binding
*binding
;
231 list_for_each_entry(binding
, &block
->binding_list
, list
)
232 mlxsw_sp_acl_ruleset_unbind(mlxsw_sp
, block
, binding
);
233 block
->ruleset_zero
= NULL
;
236 struct mlxsw_sp_acl_block
*mlxsw_sp_acl_block_create(struct mlxsw_sp
*mlxsw_sp
,
239 struct mlxsw_sp_acl_block
*block
;
241 block
= kzalloc(sizeof(*block
), GFP_KERNEL
);
244 INIT_LIST_HEAD(&block
->binding_list
);
245 block
->mlxsw_sp
= mlxsw_sp
;
249 void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block
*block
)
251 WARN_ON(!list_empty(&block
->binding_list
));
255 static struct mlxsw_sp_acl_block_binding
*
256 mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block
*block
,
257 struct mlxsw_sp_port
*mlxsw_sp_port
, bool ingress
)
259 struct mlxsw_sp_acl_block_binding
*binding
;
261 list_for_each_entry(binding
, &block
->binding_list
, list
)
262 if (binding
->mlxsw_sp_port
== mlxsw_sp_port
&&
263 binding
->ingress
== ingress
)
268 int mlxsw_sp_acl_block_bind(struct mlxsw_sp
*mlxsw_sp
,
269 struct mlxsw_sp_acl_block
*block
,
270 struct mlxsw_sp_port
*mlxsw_sp_port
,
273 struct mlxsw_sp_acl_block_binding
*binding
;
276 if (WARN_ON(mlxsw_sp_acl_block_lookup(block
, mlxsw_sp_port
, ingress
)))
279 binding
= kzalloc(sizeof(*binding
), GFP_KERNEL
);
282 binding
->mlxsw_sp_port
= mlxsw_sp_port
;
283 binding
->ingress
= ingress
;
285 if (mlxsw_sp_acl_ruleset_block_bound(block
)) {
286 err
= mlxsw_sp_acl_ruleset_bind(mlxsw_sp
, block
, binding
);
288 goto err_ruleset_bind
;
291 list_add(&binding
->list
, &block
->binding_list
);
299 int mlxsw_sp_acl_block_unbind(struct mlxsw_sp
*mlxsw_sp
,
300 struct mlxsw_sp_acl_block
*block
,
301 struct mlxsw_sp_port
*mlxsw_sp_port
,
304 struct mlxsw_sp_acl_block_binding
*binding
;
306 binding
= mlxsw_sp_acl_block_lookup(block
, mlxsw_sp_port
, ingress
);
310 list_del(&binding
->list
);
312 if (mlxsw_sp_acl_ruleset_block_bound(block
))
313 mlxsw_sp_acl_ruleset_unbind(mlxsw_sp
, block
, binding
);
319 static struct mlxsw_sp_acl_ruleset
*
320 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp
*mlxsw_sp
,
321 struct mlxsw_sp_acl_block
*block
, u32 chain_index
,
322 const struct mlxsw_sp_acl_profile_ops
*ops
)
324 struct mlxsw_sp_acl
*acl
= mlxsw_sp
->acl
;
325 struct mlxsw_sp_acl_ruleset
*ruleset
;
329 alloc_size
= sizeof(*ruleset
) + ops
->ruleset_priv_size
;
330 ruleset
= kzalloc(alloc_size
, GFP_KERNEL
);
332 return ERR_PTR(-ENOMEM
);
333 ruleset
->ref_count
= 1;
334 ruleset
->ht_key
.block
= block
;
335 ruleset
->ht_key
.chain_index
= chain_index
;
336 ruleset
->ht_key
.ops
= ops
;
338 err
= rhashtable_init(&ruleset
->rule_ht
, &mlxsw_sp_acl_rule_ht_params
);
340 goto err_rhashtable_init
;
342 err
= ops
->ruleset_add(mlxsw_sp
, acl
->priv
, ruleset
->priv
);
344 goto err_ops_ruleset_add
;
346 err
= rhashtable_insert_fast(&acl
->ruleset_ht
, &ruleset
->ht_node
,
347 mlxsw_sp_acl_ruleset_ht_params
);
354 ops
->ruleset_del(mlxsw_sp
, ruleset
->priv
);
356 rhashtable_destroy(&ruleset
->rule_ht
);
362 static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp
*mlxsw_sp
,
363 struct mlxsw_sp_acl_ruleset
*ruleset
)
365 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
366 struct mlxsw_sp_acl
*acl
= mlxsw_sp
->acl
;
368 rhashtable_remove_fast(&acl
->ruleset_ht
, &ruleset
->ht_node
,
369 mlxsw_sp_acl_ruleset_ht_params
);
370 ops
->ruleset_del(mlxsw_sp
, ruleset
->priv
);
371 rhashtable_destroy(&ruleset
->rule_ht
);
375 static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset
*ruleset
)
377 ruleset
->ref_count
++;
380 static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp
*mlxsw_sp
,
381 struct mlxsw_sp_acl_ruleset
*ruleset
)
383 if (--ruleset
->ref_count
)
385 mlxsw_sp_acl_ruleset_destroy(mlxsw_sp
, ruleset
);
388 static struct mlxsw_sp_acl_ruleset
*
389 __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl
*acl
,
390 struct mlxsw_sp_acl_block
*block
, u32 chain_index
,
391 const struct mlxsw_sp_acl_profile_ops
*ops
)
393 struct mlxsw_sp_acl_ruleset_ht_key ht_key
;
395 memset(&ht_key
, 0, sizeof(ht_key
));
396 ht_key
.block
= block
;
397 ht_key
.chain_index
= chain_index
;
399 return rhashtable_lookup_fast(&acl
->ruleset_ht
, &ht_key
,
400 mlxsw_sp_acl_ruleset_ht_params
);
403 struct mlxsw_sp_acl_ruleset
*
404 mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp
*mlxsw_sp
,
405 struct mlxsw_sp_acl_block
*block
, u32 chain_index
,
406 enum mlxsw_sp_acl_profile profile
)
408 const struct mlxsw_sp_acl_profile_ops
*ops
;
409 struct mlxsw_sp_acl
*acl
= mlxsw_sp
->acl
;
410 struct mlxsw_sp_acl_ruleset
*ruleset
;
412 ops
= acl
->ops
->profile_ops(mlxsw_sp
, profile
);
414 return ERR_PTR(-EINVAL
);
415 ruleset
= __mlxsw_sp_acl_ruleset_lookup(acl
, block
, chain_index
, ops
);
417 return ERR_PTR(-ENOENT
);
421 struct mlxsw_sp_acl_ruleset
*
422 mlxsw_sp_acl_ruleset_get(struct mlxsw_sp
*mlxsw_sp
,
423 struct mlxsw_sp_acl_block
*block
, u32 chain_index
,
424 enum mlxsw_sp_acl_profile profile
)
426 const struct mlxsw_sp_acl_profile_ops
*ops
;
427 struct mlxsw_sp_acl
*acl
= mlxsw_sp
->acl
;
428 struct mlxsw_sp_acl_ruleset
*ruleset
;
430 ops
= acl
->ops
->profile_ops(mlxsw_sp
, profile
);
432 return ERR_PTR(-EINVAL
);
434 ruleset
= __mlxsw_sp_acl_ruleset_lookup(acl
, block
, chain_index
, ops
);
436 mlxsw_sp_acl_ruleset_ref_inc(ruleset
);
439 return mlxsw_sp_acl_ruleset_create(mlxsw_sp
, block
, chain_index
, ops
);
/* Release a reference obtained via mlxsw_sp_acl_ruleset_get(). */
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
448 u16
mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset
*ruleset
)
450 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
452 return ops
->ruleset_group_id(ruleset
->priv
);
455 struct mlxsw_sp_acl_rule_info
*
456 mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl
*acl
)
458 struct mlxsw_sp_acl_rule_info
*rulei
;
461 rulei
= kzalloc(sizeof(*rulei
), GFP_KERNEL
);
464 rulei
->act_block
= mlxsw_afa_block_create(acl
->mlxsw_sp
->afa
);
465 if (IS_ERR(rulei
->act_block
)) {
466 err
= PTR_ERR(rulei
->act_block
);
467 goto err_afa_block_create
;
471 err_afa_block_create
:
476 void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info
*rulei
)
478 mlxsw_afa_block_destroy(rulei
->act_block
);
482 int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info
*rulei
)
484 return mlxsw_afa_block_commit(rulei
->act_block
);
487 void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info
*rulei
,
488 unsigned int priority
)
490 rulei
->priority
= priority
;
493 void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info
*rulei
,
494 enum mlxsw_afk_element element
,
495 u32 key_value
, u32 mask_value
)
497 mlxsw_afk_values_add_u32(&rulei
->values
, element
,
498 key_value
, mask_value
);
501 void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info
*rulei
,
502 enum mlxsw_afk_element element
,
503 const char *key_value
,
504 const char *mask_value
, unsigned int len
)
506 mlxsw_afk_values_add_buf(&rulei
->values
, element
,
507 key_value
, mask_value
, len
);
510 int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info
*rulei
)
512 return mlxsw_afa_block_continue(rulei
->act_block
);
515 int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info
*rulei
,
518 return mlxsw_afa_block_jump(rulei
->act_block
, group_id
);
521 int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info
*rulei
)
523 return mlxsw_afa_block_terminate(rulei
->act_block
);
526 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info
*rulei
)
528 return mlxsw_afa_block_append_drop(rulei
->act_block
);
531 int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info
*rulei
)
533 return mlxsw_afa_block_append_trap(rulei
->act_block
,
537 int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp
*mlxsw_sp
,
538 struct mlxsw_sp_acl_rule_info
*rulei
,
539 struct net_device
*out_dev
)
541 struct mlxsw_sp_port
*mlxsw_sp_port
;
546 if (!mlxsw_sp_port_dev_check(out_dev
))
548 mlxsw_sp_port
= netdev_priv(out_dev
);
549 if (mlxsw_sp_port
->mlxsw_sp
!= mlxsw_sp
)
551 local_port
= mlxsw_sp_port
->local_port
;
554 /* If out_dev is NULL, the caller wants to
555 * set forward to ingress port.
560 return mlxsw_afa_block_append_fwd(rulei
->act_block
,
561 local_port
, in_port
);
564 int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp
*mlxsw_sp
,
565 struct mlxsw_sp_acl_rule_info
*rulei
,
566 struct mlxsw_sp_acl_block
*block
,
567 struct net_device
*out_dev
)
569 struct mlxsw_sp_acl_block_binding
*binding
;
570 struct mlxsw_sp_port
*in_port
;
572 if (!list_is_singular(&block
->binding_list
))
575 binding
= list_first_entry(&block
->binding_list
,
576 struct mlxsw_sp_acl_block_binding
, list
);
577 in_port
= binding
->mlxsw_sp_port
;
579 return mlxsw_afa_block_append_mirror(rulei
->act_block
,
585 int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp
*mlxsw_sp
,
586 struct mlxsw_sp_acl_rule_info
*rulei
,
587 u32 action
, u16 vid
, u16 proto
, u8 prio
)
591 if (action
== TCA_VLAN_ACT_MODIFY
) {
600 dev_err(mlxsw_sp
->bus_info
->dev
, "Unsupported VLAN protocol %#04x\n",
605 return mlxsw_afa_block_append_vlan_modify(rulei
->act_block
,
606 vid
, prio
, ethertype
);
608 dev_err(mlxsw_sp
->bus_info
->dev
, "Unsupported VLAN action\n");
613 int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp
*mlxsw_sp
,
614 struct mlxsw_sp_acl_rule_info
*rulei
)
616 return mlxsw_afa_block_append_counter(rulei
->act_block
,
617 &rulei
->counter_index
);
620 int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp
*mlxsw_sp
,
621 struct mlxsw_sp_acl_rule_info
*rulei
,
624 return mlxsw_afa_block_append_fid_set(rulei
->act_block
, fid
);
627 struct mlxsw_sp_acl_rule
*
628 mlxsw_sp_acl_rule_create(struct mlxsw_sp
*mlxsw_sp
,
629 struct mlxsw_sp_acl_ruleset
*ruleset
,
630 unsigned long cookie
)
632 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
633 struct mlxsw_sp_acl_rule
*rule
;
636 mlxsw_sp_acl_ruleset_ref_inc(ruleset
);
637 rule
= kzalloc(sizeof(*rule
) + ops
->rule_priv_size
, GFP_KERNEL
);
642 rule
->cookie
= cookie
;
643 rule
->ruleset
= ruleset
;
645 rule
->rulei
= mlxsw_sp_acl_rulei_create(mlxsw_sp
->acl
);
646 if (IS_ERR(rule
->rulei
)) {
647 err
= PTR_ERR(rule
->rulei
);
648 goto err_rulei_create
;
656 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp
, ruleset
);
660 void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp
*mlxsw_sp
,
661 struct mlxsw_sp_acl_rule
*rule
)
663 struct mlxsw_sp_acl_ruleset
*ruleset
= rule
->ruleset
;
665 mlxsw_sp_acl_rulei_destroy(rule
->rulei
);
667 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp
, ruleset
);
670 int mlxsw_sp_acl_rule_add(struct mlxsw_sp
*mlxsw_sp
,
671 struct mlxsw_sp_acl_rule
*rule
)
673 struct mlxsw_sp_acl_ruleset
*ruleset
= rule
->ruleset
;
674 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
677 err
= ops
->rule_add(mlxsw_sp
, ruleset
->priv
, rule
->priv
, rule
->rulei
);
681 err
= rhashtable_insert_fast(&ruleset
->rule_ht
, &rule
->ht_node
,
682 mlxsw_sp_acl_rule_ht_params
);
684 goto err_rhashtable_insert
;
686 if (!ruleset
->ht_key
.chain_index
&&
687 mlxsw_sp_acl_ruleset_is_singular(ruleset
)) {
688 /* We only need ruleset with chain index 0, the implicit
689 * one, to be directly bound to device. The rest of the
690 * rulesets are bound by "Goto action set".
692 err
= mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp
, ruleset
,
693 ruleset
->ht_key
.block
);
695 goto err_ruleset_block_bind
;
698 list_add_tail(&rule
->list
, &mlxsw_sp
->acl
->rules
);
699 ruleset
->ht_key
.block
->rule_count
++;
702 err_ruleset_block_bind
:
703 rhashtable_remove_fast(&ruleset
->rule_ht
, &rule
->ht_node
,
704 mlxsw_sp_acl_rule_ht_params
);
705 err_rhashtable_insert
:
706 ops
->rule_del(mlxsw_sp
, rule
->priv
);
710 void mlxsw_sp_acl_rule_del(struct mlxsw_sp
*mlxsw_sp
,
711 struct mlxsw_sp_acl_rule
*rule
)
713 struct mlxsw_sp_acl_ruleset
*ruleset
= rule
->ruleset
;
714 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
716 ruleset
->ht_key
.block
->rule_count
--;
717 list_del(&rule
->list
);
718 if (!ruleset
->ht_key
.chain_index
&&
719 mlxsw_sp_acl_ruleset_is_singular(ruleset
))
720 mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp
, ruleset
,
721 ruleset
->ht_key
.block
);
722 rhashtable_remove_fast(&ruleset
->rule_ht
, &rule
->ht_node
,
723 mlxsw_sp_acl_rule_ht_params
);
724 ops
->rule_del(mlxsw_sp
, rule
->priv
);
727 struct mlxsw_sp_acl_rule
*
728 mlxsw_sp_acl_rule_lookup(struct mlxsw_sp
*mlxsw_sp
,
729 struct mlxsw_sp_acl_ruleset
*ruleset
,
730 unsigned long cookie
)
732 return rhashtable_lookup_fast(&ruleset
->rule_ht
, &cookie
,
733 mlxsw_sp_acl_rule_ht_params
);
736 struct mlxsw_sp_acl_rule_info
*
737 mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule
*rule
)
742 static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp
*mlxsw_sp
,
743 struct mlxsw_sp_acl_rule
*rule
)
745 struct mlxsw_sp_acl_ruleset
*ruleset
= rule
->ruleset
;
746 const struct mlxsw_sp_acl_profile_ops
*ops
= ruleset
->ht_key
.ops
;
750 err
= ops
->rule_activity_get(mlxsw_sp
, rule
->priv
, &active
);
754 rule
->last_used
= jiffies
;
758 static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl
*acl
)
760 struct mlxsw_sp_acl_rule
*rule
;
763 /* Protect internal structures from changes */
765 list_for_each_entry(rule
, &acl
->rules
, list
) {
766 err
= mlxsw_sp_acl_rule_activity_update(acl
->mlxsw_sp
,
769 goto err_rule_update
;
779 static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl
*acl
)
781 unsigned long interval
= acl
->rule_activity_update
.interval
;
783 mlxsw_core_schedule_dw(&acl
->rule_activity_update
.dw
,
784 msecs_to_jiffies(interval
));
787 static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct
*work
)
789 struct mlxsw_sp_acl
*acl
= container_of(work
, struct mlxsw_sp_acl
,
790 rule_activity_update
.dw
.work
);
793 err
= mlxsw_sp_acl_rules_activity_update(acl
);
795 dev_err(acl
->mlxsw_sp
->bus_info
->dev
, "Could not update acl activity");
797 mlxsw_sp_acl_rule_activity_work_schedule(acl
);
800 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp
*mlxsw_sp
,
801 struct mlxsw_sp_acl_rule
*rule
,
802 u64
*packets
, u64
*bytes
, u64
*last_use
)
805 struct mlxsw_sp_acl_rule_info
*rulei
;
810 rulei
= mlxsw_sp_acl_rule_rulei(rule
);
811 err
= mlxsw_sp_flow_counter_get(mlxsw_sp
, rulei
->counter_index
,
812 ¤t_packets
, ¤t_bytes
);
816 *packets
= current_packets
- rule
->last_packets
;
817 *bytes
= current_bytes
- rule
->last_bytes
;
818 *last_use
= rule
->last_used
;
820 rule
->last_bytes
= current_bytes
;
821 rule
->last_packets
= current_packets
;
826 int mlxsw_sp_acl_init(struct mlxsw_sp
*mlxsw_sp
)
828 const struct mlxsw_sp_acl_ops
*acl_ops
= &mlxsw_sp_acl_tcam_ops
;
829 struct mlxsw_sp_fid
*fid
;
830 struct mlxsw_sp_acl
*acl
;
833 acl
= kzalloc(sizeof(*acl
) + acl_ops
->priv_size
, GFP_KERNEL
);
837 acl
->mlxsw_sp
= mlxsw_sp
;
838 acl
->afk
= mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
841 MLXSW_SP_AFK_BLOCKS_COUNT
);
847 err
= rhashtable_init(&acl
->ruleset_ht
,
848 &mlxsw_sp_acl_ruleset_ht_params
);
850 goto err_rhashtable_init
;
852 fid
= mlxsw_sp_fid_dummy_get(mlxsw_sp
);
857 acl
->dummy_fid
= fid
;
859 INIT_LIST_HEAD(&acl
->rules
);
860 err
= acl_ops
->init(mlxsw_sp
, acl
->priv
);
862 goto err_acl_ops_init
;
866 /* Create the delayed work for the rule activity_update */
867 INIT_DELAYED_WORK(&acl
->rule_activity_update
.dw
,
868 mlxsw_sp_acl_rul_activity_update_work
);
869 acl
->rule_activity_update
.interval
= MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS
;
870 mlxsw_core_schedule_dw(&acl
->rule_activity_update
.dw
, 0);
874 mlxsw_sp_fid_put(fid
);
876 rhashtable_destroy(&acl
->ruleset_ht
);
878 mlxsw_afk_destroy(acl
->afk
);
884 void mlxsw_sp_acl_fini(struct mlxsw_sp
*mlxsw_sp
)
886 struct mlxsw_sp_acl
*acl
= mlxsw_sp
->acl
;
887 const struct mlxsw_sp_acl_ops
*acl_ops
= acl
->ops
;
889 cancel_delayed_work_sync(&mlxsw_sp
->acl
->rule_activity_update
.dw
);
890 acl_ops
->fini(mlxsw_sp
, acl
->priv
);
891 WARN_ON(!list_empty(&acl
->rules
));
892 mlxsw_sp_fid_put(acl
->dummy_fid
);
893 rhashtable_destroy(&acl
->ruleset_ht
);
894 mlxsw_afk_destroy(acl
->afk
);