/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

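/* Translate the filter's TC actions into mlxsw ACL rule actions. When the
 * filter carries actions, a count action is inserted first so the rule can
 * later report statistics. Any action the hardware cannot perform causes
 * the whole filter to be rejected with -EOPNOTSUPP, leaving it to the
 * software datapath.
 */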
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_continue(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   action, vid,
							   proto, prio);
		} else {
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

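/* Copy the IPv4 source/destination address key and mask into the rule.
 * Addresses are converted from network to host byte order before being
 * programmed as 32-bit flex-key elements.
 */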
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
				       ntohl(key->src), ntohl(mask->src));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
				       ntohl(key->dst), ntohl(mask->dst));
}

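/* An IPv6 address does not fit in a single key element, so each 128-bit
 * address is split into its upper and lower 64-bit halves (the _HI/_LO
 * flex-key elements) and programmed as two buffers.
 */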
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);
	size_t addr_half_size = sizeof(key->src) / 2;

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
				       &key->src.s6_addr[0],
				       &mask->src.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
				       &key->src.s6_addr[addr_half_size],
				       &mask->src.s6_addr[addr_half_size],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
				       &key->dst.s6_addr[0],
				       &mask->dst.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
				       &key->dst.s6_addr[addr_half_size],
				       &mask->dst.s6_addr[addr_half_size],
				       addr_half_size);
}

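/* L4 port keys are only meaningful for TCP and UDP; for any other IP
 * protocol the filter is rejected rather than silently mis-matched.
 */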
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}

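/* TCP flag keys (SYN, ACK and friends) are carried in a 16-bit field and
 * are only valid when the filter also matches on ip_proto tcp.
 */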
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}

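/* Match on TTL and on the ToS byte, which is split into its ECN part
 * (the two low-order bits) and its DSCP part (the six high-order bits,
 * hence the shift by two below).
 */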
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	/* DSCP is tos >> 2; shifting by 6 would discard four DSCP bits */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}

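/* Translate a flower classifier into an mlxsw ACL rule: reject filters
 * that use dissector keys the hardware cannot match on, copy each
 * supported key/mask pair into the rule info, then parse the actions.
 * For illustration only (a hypothetical invocation, not taken from this
 * file), a filter such as
 *
 *   tc filter add dev <port> ingress protocol ip flower skip_sw \
 *      dst_ip 192.0.2.1 ip_proto tcp dst_port 80 action drop
 *
 * would exercise the basic, IPv4-address, ports and action paths below.
 */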
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC,
					       key->dst, mask->dst,
					       sizeof(key->dst));
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC,
					       key->src, mask->src,
					       sizeof(key->src));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
}

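/* Offload entry point for flower filter addition/replacement. Looks up
 * (or creates) the ruleset for the filter's chain, creates a rule keyed
 * by the filter's cookie, parses keys and actions into it and commits it
 * to hardware. On any failure the rule is torn down and the ruleset
 * reference is dropped.
 */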
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

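/* Offload entry point for flower filter removal. Lookup failures are
 * deliberately ignored: if the ruleset or rule is already gone there is
 * nothing left to destroy.
 */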
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

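/* Offload entry point for flower statistics retrieval. Reads the packet
 * and byte counters bound to the rule (via the count action inserted in
 * mlxsw_sp_flower_parse_actions()) and reports them back to the TC core
 * through tcf_exts_stats_update().
 */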
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}