drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <net/flow_dissector.h>
34 #include <net/sch_generic.h>
35 #include <net/pkt_cls.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_skbedit.h>
38 #include <linux/mlx5/fs.h>
39 #include <linux/mlx5/device.h>
40 #include <linux/rhashtable.h>
41 #include <net/switchdev.h>
42 #include <net/tc_act/tc_mirred.h>
43 #include <net/tc_act/tc_vlan.h>
44 #include <net/tc_act/tc_tunnel_key.h>
45 #include <net/vxlan.h>
46 #include "en.h"
47 #include "en_tc.h"
48 #include "eswitch.h"
49 #include "vxlan.h"
50
51 struct mlx5e_tc_flow {
52 struct rhash_head node;
53 u64 cookie;
54 struct mlx5_flow_handle *rule;
55 struct list_head encap; /* flows sharing the same encap */
56 struct mlx5_esw_flow_attr *attr;
57 };
58
59 enum {
60 MLX5_HEADER_TYPE_VXLAN = 0x0,
61 MLX5_HEADER_TYPE_NVGRE = 0x1,
62 };
63
64 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
65 #define MLX5E_TC_TABLE_NUM_GROUPS 4
66
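/* Install a TC flow rule into the NIC (non-eswitch) offload table. The tc
 * flow table is created lazily on first use; the destination is either the
 * vlan flow table (forward) or a newly allocated flow counter (drop + count).
 */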
67 static struct mlx5_flow_handle *
68 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
69 struct mlx5_flow_spec *spec,
70 u32 action, u32 flow_tag)
71 {
72 struct mlx5_core_dev *dev = priv->mdev;
73 struct mlx5_flow_destination dest = { 0 };
74 struct mlx5_flow_act flow_act = {
75 .action = action,
76 .flow_tag = flow_tag,
77 .encap_id = 0,
78 };
79 struct mlx5_fc *counter = NULL;
80 struct mlx5_flow_handle *rule;
81 bool table_created = false;
82
83 if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
84 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
85 dest.ft = priv->fs.vlan.ft.t;
86 } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
87 counter = mlx5_fc_create(dev, true);
88 if (IS_ERR(counter))
89 return ERR_CAST(counter);
90
91 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
92 dest.counter = counter;
93 }
94
95 if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
96 priv->fs.tc.t =
97 mlx5_create_auto_grouped_flow_table(priv->fs.ns,
98 MLX5E_TC_PRIO,
99 MLX5E_TC_TABLE_NUM_ENTRIES,
100 MLX5E_TC_TABLE_NUM_GROUPS,
101 0, 0);
102 if (IS_ERR(priv->fs.tc.t)) {
103 netdev_err(priv->netdev,
104 "Failed to create tc offload table\n");
105 rule = ERR_CAST(priv->fs.tc.t);
106 goto err_create_ft;
107 }
108
109 table_created = true;
110 }
111
112 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
113 rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
114
115 if (IS_ERR(rule))
116 goto err_add_rule;
117
118 return rule;
119
120 err_add_rule:
121 if (table_created) {
122 mlx5_destroy_flow_table(priv->fs.tc.t);
123 priv->fs.tc.t = NULL;
124 }
125 err_create_ft:
126 mlx5_fc_destroy(dev, counter);
127
128 return rule;
129 }
130
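/* Offload a flow through the eswitch FDB (SRIOV switchdev mode): apply any
 * VLAN push/pop described in the eswitch flow attributes, then install the
 * offloaded rule.
 */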
131 static struct mlx5_flow_handle *
132 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
133 struct mlx5_flow_spec *spec,
134 struct mlx5_esw_flow_attr *attr)
135 {
136 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
137 int err;
138
139 err = mlx5_eswitch_add_vlan_action(esw, attr);
140 if (err)
141 return ERR_PTR(err);
142
143 return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
144 }
145
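/* Unlink a flow from its encapsulation entry. When the last flow using the
 * entry is removed, release the neighbour reference and HW encap context
 * (if one was resolved) and drop the entry from the eswitch encap table.
 */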
146 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
147 struct mlx5e_tc_flow *flow) {
148 struct list_head *next = flow->encap.next;
149
150 list_del(&flow->encap);
151 if (list_empty(next)) {
152 struct mlx5_encap_entry *e;
153
154 e = list_entry(next, struct mlx5_encap_entry, flows);
155 if (e->n) {
156 mlx5_encap_dealloc(priv->mdev, e->encap_id);
157 neigh_release(e->n);
158 }
159 hlist_del_rcu(&e->encap_hlist);
160 kfree(e);
161 }
162 }
163
164 /* We also get here when setting the rule in the FW failed, etc. In that case
165  * the flow rule itself might not exist, but some offload state related to the
166  * actions still needs to be cleaned up.
167 */
168 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
169 struct mlx5e_tc_flow *flow)
170 {
171 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
172 struct mlx5_fc *counter = NULL;
173
174 if (!IS_ERR(flow->rule)) {
175 counter = mlx5_flow_rule_counter(flow->rule);
176 mlx5_del_flow_rules(flow->rule);
177 mlx5_fc_destroy(priv->mdev, counter);
178 }
179
180 if (esw && esw->mode == SRIOV_OFFLOADS) {
181 mlx5_eswitch_del_vlan_action(esw, flow->attr);
182 if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
183 mlx5e_detach_encap(priv, flow);
184 }
185
186 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
187 mlx5_destroy_flow_table(priv->fs.tc.t);
188 priv->fs.tc.t = NULL;
189 }
190 }
191
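/* Add VXLAN-specific match fields for a decap flow: the outer IP protocol is
 * UDP and, when the filter supplies one, the VXLAN VNI is taken from the
 * tunnel key id.
 */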
192 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
193 struct tc_cls_flower_offload *f)
194 {
195 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
196 outer_headers);
197 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
198 outer_headers);
199 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
200 misc_parameters);
201 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
202 misc_parameters);
203
204 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
205 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
206
207 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
208 struct flow_dissector_key_keyid *key =
209 skb_flow_dissector_target(f->dissector,
210 FLOW_DISSECTOR_KEY_ENC_KEYID,
211 f->key);
212 struct flow_dissector_key_keyid *mask =
213 skb_flow_dissector_target(f->dissector,
214 FLOW_DISSECTOR_KEY_ENC_KEYID,
215 f->mask);
216 MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
217 be32_to_cpu(mask->keyid));
218 MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
219 be32_to_cpu(key->keyid));
220 }
221 }
222
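/* Build the outer-header match for a tunneled (decap) flow. Only VXLAN is
 * supported: an exact, offloaded UDP dst port is required, the outer
 * IPv4/IPv6 addresses are matched, a DMAC match is enforced (needed for flow
 * counters) and IP fragments are left to software.
 */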
223 static int parse_tunnel_attr(struct mlx5e_priv *priv,
224 struct mlx5_flow_spec *spec,
225 struct tc_cls_flower_offload *f)
226 {
227 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
228 outer_headers);
229 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
230 outer_headers);
231
232 struct flow_dissector_key_control *enc_control =
233 skb_flow_dissector_target(f->dissector,
234 FLOW_DISSECTOR_KEY_ENC_CONTROL,
235 f->key);
236
237 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
238 struct flow_dissector_key_ports *key =
239 skb_flow_dissector_target(f->dissector,
240 FLOW_DISSECTOR_KEY_ENC_PORTS,
241 f->key);
242 struct flow_dissector_key_ports *mask =
243 skb_flow_dissector_target(f->dissector,
244 FLOW_DISSECTOR_KEY_ENC_PORTS,
245 f->mask);
246
247                 /* An exact (fully masked) UDP dst port match must be given */
248 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
249 goto vxlan_match_offload_err;
250
251 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
252 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
253 parse_vxlan_attr(spec, f);
254 else {
255 netdev_warn(priv->netdev,
256 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
257 return -EOPNOTSUPP;
258 }
259
260 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
261 udp_dport, ntohs(mask->dst));
262 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
263 udp_dport, ntohs(key->dst));
264
265 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
266 udp_sport, ntohs(mask->src));
267 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
268 udp_sport, ntohs(key->src));
269 } else { /* udp dst port must be given */
270 vxlan_match_offload_err:
271 netdev_warn(priv->netdev,
272 "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
273 return -EOPNOTSUPP;
274 }
275
276 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
277 struct flow_dissector_key_ipv4_addrs *key =
278 skb_flow_dissector_target(f->dissector,
279 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
280 f->key);
281 struct flow_dissector_key_ipv4_addrs *mask =
282 skb_flow_dissector_target(f->dissector,
283 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
284 f->mask);
285 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
286 src_ipv4_src_ipv6.ipv4_layout.ipv4,
287 ntohl(mask->src));
288 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
289 src_ipv4_src_ipv6.ipv4_layout.ipv4,
290 ntohl(key->src));
291
292 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
293 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
294 ntohl(mask->dst));
295 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
296 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
297 ntohl(key->dst));
298
299 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
300 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
301 } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
302 struct flow_dissector_key_ipv6_addrs *key =
303 skb_flow_dissector_target(f->dissector,
304 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
305 f->key);
306 struct flow_dissector_key_ipv6_addrs *mask =
307 skb_flow_dissector_target(f->dissector,
308 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
309 f->mask);
310
311 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
312 src_ipv4_src_ipv6.ipv6_layout.ipv6),
313 &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
314 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
315 src_ipv4_src_ipv6.ipv6_layout.ipv6),
316 &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
317
318 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
319 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
320 &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
321 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
322 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
323 &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
324
325 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
326 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
327 }
328
329 /* Enforce DMAC when offloading incoming tunneled flows.
330 * Flow counters require a match on the DMAC.
331 */
332 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
333 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
334 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
335 dmac_47_16), priv->netdev->dev_addr);
336
337 /* let software handle IP fragments */
338 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
339 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
340
341 return 0;
342 }
343
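/* Translate the flower dissector keys (control, basic, Ethernet, VLAN,
 * IPv4/IPv6, L4 ports and the tunnel/encap keys) into mlx5 match criteria and
 * values, and report the minimal eswitch inline mode the resulting match
 * requires.
 */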
344 static int __parse_cls_flower(struct mlx5e_priv *priv,
345 struct mlx5_flow_spec *spec,
346 struct tc_cls_flower_offload *f,
347 u8 *min_inline)
348 {
349 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
350 outer_headers);
351 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
352 outer_headers);
353 u16 addr_type = 0;
354 u8 ip_proto = 0;
355
356 *min_inline = MLX5_INLINE_MODE_L2;
357
358 if (f->dissector->used_keys &
359 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
360 BIT(FLOW_DISSECTOR_KEY_BASIC) |
361 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
362 BIT(FLOW_DISSECTOR_KEY_VLAN) |
363 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
364 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
365 BIT(FLOW_DISSECTOR_KEY_PORTS) |
366 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
367 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
368 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
369 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
370 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
371 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
372 f->dissector->used_keys);
373 return -EOPNOTSUPP;
374 }
375
376 if ((dissector_uses_key(f->dissector,
377 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
378 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
379 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
380 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
381 struct flow_dissector_key_control *key =
382 skb_flow_dissector_target(f->dissector,
383 FLOW_DISSECTOR_KEY_ENC_CONTROL,
384 f->key);
385 switch (key->addr_type) {
386 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
387 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
388 if (parse_tunnel_attr(priv, spec, f))
389 return -EOPNOTSUPP;
390 break;
391 default:
392 return -EOPNOTSUPP;
393 }
394
395                 /* For a decap flow, the header pointers should point to the inner
396                  * headers; the outer headers were already set by parse_tunnel_attr().
397 */
398 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
399 inner_headers);
400 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
401 inner_headers);
402 }
403
404 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
405 struct flow_dissector_key_control *key =
406 skb_flow_dissector_target(f->dissector,
407 FLOW_DISSECTOR_KEY_CONTROL,
408 f->key);
409
410 struct flow_dissector_key_control *mask =
411 skb_flow_dissector_target(f->dissector,
412 FLOW_DISSECTOR_KEY_CONTROL,
413 f->mask);
414 addr_type = key->addr_type;
415
416 if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
417 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
418 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
419 key->flags & FLOW_DIS_IS_FRAGMENT);
420
421 /* the HW doesn't need L3 inline to match on frag=no */
422 if (key->flags & FLOW_DIS_IS_FRAGMENT)
423 *min_inline = MLX5_INLINE_MODE_IP;
424 }
425 }
426
427 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
428 struct flow_dissector_key_basic *key =
429 skb_flow_dissector_target(f->dissector,
430 FLOW_DISSECTOR_KEY_BASIC,
431 f->key);
432 struct flow_dissector_key_basic *mask =
433 skb_flow_dissector_target(f->dissector,
434 FLOW_DISSECTOR_KEY_BASIC,
435 f->mask);
436 ip_proto = key->ip_proto;
437
438 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
439 ntohs(mask->n_proto));
440 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
441 ntohs(key->n_proto));
442
443 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
444 mask->ip_proto);
445 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
446 key->ip_proto);
447
448 if (mask->ip_proto)
449 *min_inline = MLX5_INLINE_MODE_IP;
450 }
451
452 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
453 struct flow_dissector_key_eth_addrs *key =
454 skb_flow_dissector_target(f->dissector,
455 FLOW_DISSECTOR_KEY_ETH_ADDRS,
456 f->key);
457 struct flow_dissector_key_eth_addrs *mask =
458 skb_flow_dissector_target(f->dissector,
459 FLOW_DISSECTOR_KEY_ETH_ADDRS,
460 f->mask);
461
462 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
463 dmac_47_16),
464 mask->dst);
465 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
466 dmac_47_16),
467 key->dst);
468
469 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
470 smac_47_16),
471 mask->src);
472 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
473 smac_47_16),
474 key->src);
475 }
476
477 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
478 struct flow_dissector_key_vlan *key =
479 skb_flow_dissector_target(f->dissector,
480 FLOW_DISSECTOR_KEY_VLAN,
481 f->key);
482 struct flow_dissector_key_vlan *mask =
483 skb_flow_dissector_target(f->dissector,
484 FLOW_DISSECTOR_KEY_VLAN,
485 f->mask);
486 if (mask->vlan_id || mask->vlan_priority) {
487 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
488 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
489
490 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
491 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
492
493 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
494 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
495 }
496 }
497
498 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
499 struct flow_dissector_key_ipv4_addrs *key =
500 skb_flow_dissector_target(f->dissector,
501 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
502 f->key);
503 struct flow_dissector_key_ipv4_addrs *mask =
504 skb_flow_dissector_target(f->dissector,
505 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
506 f->mask);
507
508 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
509 src_ipv4_src_ipv6.ipv4_layout.ipv4),
510 &mask->src, sizeof(mask->src));
511 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
512 src_ipv4_src_ipv6.ipv4_layout.ipv4),
513 &key->src, sizeof(key->src));
514 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
515 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
516 &mask->dst, sizeof(mask->dst));
517 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
518 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
519 &key->dst, sizeof(key->dst));
520
521 if (mask->src || mask->dst)
522 *min_inline = MLX5_INLINE_MODE_IP;
523 }
524
525 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
526 struct flow_dissector_key_ipv6_addrs *key =
527 skb_flow_dissector_target(f->dissector,
528 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
529 f->key);
530 struct flow_dissector_key_ipv6_addrs *mask =
531 skb_flow_dissector_target(f->dissector,
532 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
533 f->mask);
534
535 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
536 src_ipv4_src_ipv6.ipv6_layout.ipv6),
537 &mask->src, sizeof(mask->src));
538 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
539 src_ipv4_src_ipv6.ipv6_layout.ipv6),
540 &key->src, sizeof(key->src));
541
542 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
543 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
544 &mask->dst, sizeof(mask->dst));
545 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
546 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
547 &key->dst, sizeof(key->dst));
548
549 if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
550 ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
551 *min_inline = MLX5_INLINE_MODE_IP;
552 }
553
554 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
555 struct flow_dissector_key_ports *key =
556 skb_flow_dissector_target(f->dissector,
557 FLOW_DISSECTOR_KEY_PORTS,
558 f->key);
559 struct flow_dissector_key_ports *mask =
560 skb_flow_dissector_target(f->dissector,
561 FLOW_DISSECTOR_KEY_PORTS,
562 f->mask);
563 switch (ip_proto) {
564 case IPPROTO_TCP:
565 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
566 tcp_sport, ntohs(mask->src));
567 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
568 tcp_sport, ntohs(key->src));
569
570 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
571 tcp_dport, ntohs(mask->dst));
572 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
573 tcp_dport, ntohs(key->dst));
574 break;
575
576 case IPPROTO_UDP:
577 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
578 udp_sport, ntohs(mask->src));
579 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
580 udp_sport, ntohs(key->src));
581
582 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
583 udp_dport, ntohs(mask->dst));
584 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
585 udp_dport, ntohs(key->dst));
586 break;
587 default:
588 netdev_err(priv->netdev,
589                                    "Only UDP and TCP transports are supported\n");
590 return -EINVAL;
591 }
592
593 if (mask->src || mask->dst)
594 *min_inline = MLX5_INLINE_MODE_TCP_UDP;
595 }
596
597 return 0;
598 }
599
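/* Parse the classifier match and, for VF representors in switchdev mode,
 * reject flows whose match needs a deeper inline mode than the eswitch is
 * currently configured for.
 */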
600 static int parse_cls_flower(struct mlx5e_priv *priv,
601 struct mlx5_flow_spec *spec,
602 struct tc_cls_flower_offload *f)
603 {
604 struct mlx5_core_dev *dev = priv->mdev;
605 struct mlx5_eswitch *esw = dev->priv.eswitch;
606 struct mlx5_eswitch_rep *rep = priv->ppriv;
607 u8 min_inline;
608 int err;
609
610 err = __parse_cls_flower(priv, spec, f, &min_inline);
611
612 if (!err && esw->mode == SRIOV_OFFLOADS &&
613 rep->vport != FDB_UPLINK_VPORT) {
614 if (min_inline > esw->offloads.inline_mode) {
615 netdev_warn(priv->netdev,
616 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
617 min_inline, esw->offloads.inline_mode);
618 return -EOPNOTSUPP;
619 }
620 }
621
622 return err;
623 }
624
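/* Parse tc actions for a NIC flow. Only a single action is supported per
 * rule: gact drop (with a counter when the device supports flow counters) or
 * skbedit mark with a 16-bit flow tag.
 */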
625 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
626 u32 *action, u32 *flow_tag)
627 {
628 const struct tc_action *a;
629 LIST_HEAD(actions);
630
631 if (tc_no_actions(exts))
632 return -EINVAL;
633
634 *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
635 *action = 0;
636
637 tcf_exts_to_list(exts, &actions);
638 list_for_each_entry(a, &actions, list) {
639 /* Only support a single action per rule */
640 if (*action)
641 return -EINVAL;
642
643 if (is_tcf_gact_shot(a)) {
644 *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
645 if (MLX5_CAP_FLOWTABLE(priv->mdev,
646 flow_table_properties_nic_receive.flow_counter))
647 *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
648 continue;
649 }
650
651 if (is_tcf_skbedit_mark(a)) {
652 u32 mark = tcf_skbedit_mark(a);
653
654 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
655 netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
656 mark);
657 return -EINVAL;
658 }
659
660 *flow_tag = mark;
661 *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
662 continue;
663 }
664
665 return -EINVAL;
666 }
667
668 return 0;
669 }
670
671 static inline int cmp_encap_info(struct ip_tunnel_key *a,
672 struct ip_tunnel_key *b)
673 {
674 return memcmp(a, b, sizeof(*a));
675 }
676
677 static inline int hash_encap_info(struct ip_tunnel_key *key)
678 {
679 return jhash(key, sizeof(*key), 0);
680 }
681
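/* Route lookup for the tunnel destination: find the egress device (falling
 * back to the eswitch uplink when the nexthop device is on a different HW
 * switch), the path TTL and the neighbour entry for the remote address.
 */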
682 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
683 struct net_device *mirred_dev,
684 struct net_device **out_dev,
685 struct flowi4 *fl4,
686 struct neighbour **out_n,
687 int *out_ttl)
688 {
689 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
690 struct rtable *rt;
691 struct neighbour *n = NULL;
692
693 #if IS_ENABLED(CONFIG_INET)
694 int ret;
695
696 rt = ip_route_output_key(dev_net(mirred_dev), fl4);
697 ret = PTR_ERR_OR_ZERO(rt);
698 if (ret)
699 return ret;
700 #else
701 return -EOPNOTSUPP;
702 #endif
703 /* if the egress device isn't on the same HW e-switch, we use the uplink */
704 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
705 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
706 else
707 *out_dev = rt->dst.dev;
708
709 *out_ttl = ip4_dst_hoplimit(&rt->dst);
710 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
711 ip_rt_put(rt);
712 if (!n)
713 return -ENOMEM;
714
715 *out_n = n;
716 return 0;
717 }
718
719 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
720 struct net_device *mirred_dev,
721 struct net_device **out_dev,
722 struct flowi6 *fl6,
723 struct neighbour **out_n,
724 int *out_ttl)
725 {
726 struct neighbour *n = NULL;
727 struct dst_entry *dst;
728
729 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
730 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
731 int ret;
732
733 dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
734 ret = dst->error;
735 if (ret) {
736 dst_release(dst);
737 return ret;
738 }
739
740 *out_ttl = ip6_dst_hoplimit(dst);
741
742 /* if the egress device isn't on the same HW e-switch, we use the uplink */
743 if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
744 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
745 else
746 *out_dev = dst->dev;
747 #else
748 return -EOPNOTSUPP;
749 #endif
750
751 n = dst_neigh_lookup(dst, &fl6->daddr);
752 dst_release(dst);
753 if (!n)
754 return -ENOMEM;
755
756 *out_n = n;
757 return 0;
758 }
759
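/* Build the Ethernet/IPv4/UDP/VXLAN encapsulation header in buf and return
 * its size. Length and checksum fields are not filled in here.
 */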
760 static int gen_vxlan_header_ipv4(struct net_device *out_dev,
761 char buf[],
762 unsigned char h_dest[ETH_ALEN],
763 int ttl,
764 __be32 daddr,
765 __be32 saddr,
766 __be16 udp_dst_port,
767 __be32 vx_vni)
768 {
769 int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
770 struct ethhdr *eth = (struct ethhdr *)buf;
771 struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
772 struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
773 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
774
775 memset(buf, 0, encap_size);
776
777 ether_addr_copy(eth->h_dest, h_dest);
778 ether_addr_copy(eth->h_source, out_dev->dev_addr);
779 eth->h_proto = htons(ETH_P_IP);
780
781 ip->daddr = daddr;
782 ip->saddr = saddr;
783
784 ip->ttl = ttl;
785 ip->protocol = IPPROTO_UDP;
786 ip->version = 0x4;
787 ip->ihl = 0x5;
788
789 udp->dest = udp_dst_port;
790 vxh->vx_flags = VXLAN_HF_VNI;
791 vxh->vx_vni = vxlan_vni_field(vx_vni);
792
793 return encap_size;
794 }
795
796 static int gen_vxlan_header_ipv6(struct net_device *out_dev,
797 char buf[],
798 unsigned char h_dest[ETH_ALEN],
799 int ttl,
800 struct in6_addr *daddr,
801 struct in6_addr *saddr,
802 __be16 udp_dst_port,
803 __be32 vx_vni)
804 {
805 int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
806 struct ethhdr *eth = (struct ethhdr *)buf;
807 struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
808 struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
809 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
810
811 memset(buf, 0, encap_size);
812
813 ether_addr_copy(eth->h_dest, h_dest);
814 ether_addr_copy(eth->h_source, out_dev->dev_addr);
815 eth->h_proto = htons(ETH_P_IPV6);
816
817 ip6_flow_hdr(ip6h, 0, 0);
818         /* the HW fills in the IPv6 payload length */
819 ip6h->nexthdr = IPPROTO_UDP;
820 ip6h->hop_limit = ttl;
821 ip6h->daddr = *daddr;
822 ip6h->saddr = *saddr;
823
824 udp->dest = udp_dst_port;
825 vxh->vx_flags = VXLAN_HF_VNI;
826 vxh->vx_vni = vxlan_vni_field(vx_vni);
827
828 return encap_size;
829 }
830
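/* Resolve the IPv4 route and neighbour for the tunnel destination, snapshot
 * the destination MAC, build the VXLAN encap header and program it into the
 * device with mlx5_encap_alloc(). Fails if the neighbour is not valid yet.
 */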
831 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
832 struct net_device *mirred_dev,
833 struct mlx5_encap_entry *e,
834 struct net_device **out_dev)
835 {
836 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
837 struct ip_tunnel_key *tun_key = &e->tun_info.key;
838 int encap_size, ttl, err;
839 struct neighbour *n = NULL;
840 struct flowi4 fl4 = {};
841 char *encap_header;
842
843 encap_header = kzalloc(max_encap_size, GFP_KERNEL);
844 if (!encap_header)
845 return -ENOMEM;
846
847 switch (e->tunnel_type) {
848 case MLX5_HEADER_TYPE_VXLAN:
849 fl4.flowi4_proto = IPPROTO_UDP;
850 fl4.fl4_dport = tun_key->tp_dst;
851 break;
852 default:
853 err = -EOPNOTSUPP;
854 goto out;
855 }
856 fl4.flowi4_tos = tun_key->tos;
857 fl4.daddr = tun_key->u.ipv4.dst;
858 fl4.saddr = tun_key->u.ipv4.src;
859
860 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
861 &fl4, &n, &ttl);
862 if (err)
863 goto out;
864
865 if (!(n->nud_state & NUD_VALID)) {
866 pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
867 err = -EOPNOTSUPP;
868 goto out;
869 }
870
871 e->n = n;
872 e->out_dev = *out_dev;
873
874 neigh_ha_snapshot(e->h_dest, n, *out_dev);
875
876 switch (e->tunnel_type) {
877 case MLX5_HEADER_TYPE_VXLAN:
878 encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
879 e->h_dest, ttl,
880 fl4.daddr,
881 fl4.saddr, tun_key->tp_dst,
882 tunnel_id_to_key32(tun_key->tun_id));
883 break;
884 default:
885 err = -EOPNOTSUPP;
886 goto out;
887 }
888
889 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
890 encap_size, encap_header, &e->encap_id);
891 out:
892 if (err && n)
893 neigh_release(n);
894 kfree(encap_header);
895 return err;
896 }
897
898 static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
899 struct net_device *mirred_dev,
900 struct mlx5_encap_entry *e,
901 struct net_device **out_dev)
902
903 {
904 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
905 struct ip_tunnel_key *tun_key = &e->tun_info.key;
906 int encap_size, err, ttl = 0;
907 struct neighbour *n = NULL;
908 struct flowi6 fl6 = {};
909 char *encap_header;
910
911 encap_header = kzalloc(max_encap_size, GFP_KERNEL);
912 if (!encap_header)
913 return -ENOMEM;
914
915 switch (e->tunnel_type) {
916 case MLX5_HEADER_TYPE_VXLAN:
917 fl6.flowi6_proto = IPPROTO_UDP;
918 fl6.fl6_dport = tun_key->tp_dst;
919 break;
920 default:
921 err = -EOPNOTSUPP;
922 goto out;
923 }
924
925 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
926 fl6.daddr = tun_key->u.ipv6.dst;
927 fl6.saddr = tun_key->u.ipv6.src;
928
929 err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
930 &fl6, &n, &ttl);
931 if (err)
932 goto out;
933
934 if (!(n->nud_state & NUD_VALID)) {
935 pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
936 err = -EOPNOTSUPP;
937 goto out;
938 }
939
940 e->n = n;
941 e->out_dev = *out_dev;
942
943 neigh_ha_snapshot(e->h_dest, n, *out_dev);
944
945 switch (e->tunnel_type) {
946 case MLX5_HEADER_TYPE_VXLAN:
947 encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
948 e->h_dest, ttl,
949 &fl6.daddr,
950 &fl6.saddr, tun_key->tp_dst,
951 tunnel_id_to_key32(tun_key->tun_id));
952 break;
953 default:
954 err = -EOPNOTSUPP;
955 goto out;
956 }
957
958 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
959 encap_size, encap_header, &e->encap_id);
960 out:
961 if (err && n)
962 neigh_release(n);
963 kfree(encap_header);
964 return err;
965 }
966
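/* Look up an existing encapsulation entry for this tunnel key in the eswitch
 * encap table, or create a new one (resolving the route and building the
 * encap header for IPv4/IPv6) and attach it to the flow attributes.
 */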
967 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
968 struct ip_tunnel_info *tun_info,
969 struct net_device *mirred_dev,
970 struct mlx5_esw_flow_attr *attr)
971 {
972 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
973 unsigned short family = ip_tunnel_info_af(tun_info);
974 struct ip_tunnel_key *key = &tun_info->key;
975 struct mlx5_encap_entry *e;
976 struct net_device *out_dev;
977 int tunnel_type, err = -EOPNOTSUPP;
978 uintptr_t hash_key;
979 bool found = false;
980
981 /* udp dst port must be set */
982 if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
983 goto vxlan_encap_offload_err;
984
985 /* setting udp src port isn't supported */
986 if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
987 vxlan_encap_offload_err:
988 netdev_warn(priv->netdev,
989 "must set udp dst port and not set udp src port\n");
990 return -EOPNOTSUPP;
991 }
992
993 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
994 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
995 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
996 } else {
997 netdev_warn(priv->netdev,
998 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
999 return -EOPNOTSUPP;
1000 }
1001
1002 hash_key = hash_encap_info(key);
1003
1004 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
1005 encap_hlist, hash_key) {
1006 if (!cmp_encap_info(&e->tun_info.key, key)) {
1007 found = true;
1008 break;
1009 }
1010 }
1011
1012 if (found) {
1013 attr->encap = e;
1014 return 0;
1015 }
1016
1017 e = kzalloc(sizeof(*e), GFP_KERNEL);
1018 if (!e)
1019 return -ENOMEM;
1020
1021 e->tun_info = *tun_info;
1022 e->tunnel_type = tunnel_type;
1023 INIT_LIST_HEAD(&e->flows);
1024
1025 if (family == AF_INET)
1026 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
1027 else if (family == AF_INET6)
1028 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);
1029
1030 if (err)
1031 goto out_err;
1032
1033 attr->encap = e;
1034 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
1035
1036 return err;
1037
1038 out_err:
1039 kfree(e);
1040 return err;
1041 }
1042
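/* Parse tc actions for an eswitch (FDB) flow: gact drop, mirred redirect to a
 * representor on the same eswitch or through a tunnel encap, tunnel_key
 * set/release and 802.1Q VLAN push/pop.
 */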
1043 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1044 struct mlx5e_tc_flow *flow)
1045 {
1046 struct mlx5_esw_flow_attr *attr = flow->attr;
1047 struct ip_tunnel_info *info = NULL;
1048 const struct tc_action *a;
1049 LIST_HEAD(actions);
1050 bool encap = false;
1051 int err;
1052
1053 if (tc_no_actions(exts))
1054 return -EINVAL;
1055
1056 memset(attr, 0, sizeof(*attr));
1057 attr->in_rep = priv->ppriv;
1058
1059 tcf_exts_to_list(exts, &actions);
1060 list_for_each_entry(a, &actions, list) {
1061 if (is_tcf_gact_shot(a)) {
1062 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
1063 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1064 continue;
1065 }
1066
1067 if (is_tcf_mirred_egress_redirect(a)) {
1068 int ifindex = tcf_mirred_ifindex(a);
1069 struct net_device *out_dev;
1070 struct mlx5e_priv *out_priv;
1071
1072 out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
1073
1074 if (switchdev_port_same_parent_id(priv->netdev,
1075 out_dev)) {
1076 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1077 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1078 out_priv = netdev_priv(out_dev);
1079 attr->out_rep = out_priv->ppriv;
1080 } else if (encap) {
1081 err = mlx5e_attach_encap(priv, info,
1082 out_dev, attr);
1083 if (err)
1084 return err;
1085 list_add(&flow->encap, &attr->encap->flows);
1086 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1087 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1088 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1089 out_priv = netdev_priv(attr->encap->out_dev);
1090 attr->out_rep = out_priv->ppriv;
1091 } else {
1092 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
1093 priv->netdev->name, out_dev->name);
1094 return -EINVAL;
1095 }
1096 continue;
1097 }
1098
1099 if (is_tcf_tunnel_set(a)) {
1100 info = tcf_tunnel_info(a);
1101 if (info)
1102 encap = true;
1103 else
1104 return -EOPNOTSUPP;
1105 continue;
1106 }
1107
1108 if (is_tcf_vlan(a)) {
1109 if (tcf_vlan_action(a) == VLAN_F_POP) {
1110 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
1111 } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
1112 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
1113 return -EOPNOTSUPP;
1114
1115 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
1116 attr->vlan = tcf_vlan_push_vid(a);
1117 }
1118 continue;
1119 }
1120
1121 if (is_tcf_tunnel_release(a)) {
1122 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
1123 continue;
1124 }
1125
1126 return -EINVAL;
1127 }
1128 return 0;
1129 }
1130
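/* Add a flower classifier offload: parse the match and actions, install the
 * rule either through the eswitch FDB (switchdev mode) or the NIC tc table,
 * and track the flow in the driver hashtable keyed by the tc cookie.
 */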
1131 int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
1132 struct tc_cls_flower_offload *f)
1133 {
1134 struct mlx5e_tc_table *tc = &priv->fs.tc;
1135 int err = 0;
1136 bool fdb_flow = false;
1137 u32 flow_tag, action;
1138 struct mlx5e_tc_flow *flow;
1139 struct mlx5_flow_spec *spec;
1140 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1141
1142 if (esw && esw->mode == SRIOV_OFFLOADS)
1143 fdb_flow = true;
1144
1145 if (fdb_flow)
1146 flow = kzalloc(sizeof(*flow) +
1147 sizeof(struct mlx5_esw_flow_attr),
1148 GFP_KERNEL);
1149 else
1150 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
1151
1152 spec = mlx5_vzalloc(sizeof(*spec));
1153 if (!spec || !flow) {
1154 err = -ENOMEM;
1155 goto err_free;
1156 }
1157
1158 flow->cookie = f->cookie;
1159
1160 err = parse_cls_flower(priv, spec, f);
1161 if (err < 0)
1162 goto err_free;
1163
1164 if (fdb_flow) {
1165 flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
1166 err = parse_tc_fdb_actions(priv, f->exts, flow);
1167 if (err < 0)
1168 goto err_free;
1169 flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
1170 } else {
1171 err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
1172 if (err < 0)
1173 goto err_free;
1174 flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
1175 }
1176
1177 if (IS_ERR(flow->rule)) {
1178 err = PTR_ERR(flow->rule);
1179 goto err_del_rule;
1180 }
1181
1182 err = rhashtable_insert_fast(&tc->ht, &flow->node,
1183 tc->ht_params);
1184 if (err)
1185 goto err_del_rule;
1186
1187 goto out;
1188
1189 err_del_rule:
1190 mlx5e_tc_del_flow(priv, flow);
1191
1192 err_free:
1193 kfree(flow);
1194 out:
1195 kvfree(spec);
1196 return err;
1197 }
1198
1199 int mlx5e_delete_flower(struct mlx5e_priv *priv,
1200 struct tc_cls_flower_offload *f)
1201 {
1202 struct mlx5e_tc_flow *flow;
1203 struct mlx5e_tc_table *tc = &priv->fs.tc;
1204
1205 flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1206 tc->ht_params);
1207 if (!flow)
1208 return -EINVAL;
1209
1210 rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
1211
1212 mlx5e_tc_del_flow(priv, flow);
1213
1214
1215 kfree(flow);
1216
1217 return 0;
1218 }
1219
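/* Report the cached HW counter values (bytes, packets, last use) for the flow
 * back to its tc actions.
 */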
1220 int mlx5e_stats_flower(struct mlx5e_priv *priv,
1221 struct tc_cls_flower_offload *f)
1222 {
1223 struct mlx5e_tc_table *tc = &priv->fs.tc;
1224 struct mlx5e_tc_flow *flow;
1225 struct tc_action *a;
1226 struct mlx5_fc *counter;
1227 LIST_HEAD(actions);
1228 u64 bytes;
1229 u64 packets;
1230 u64 lastuse;
1231
1232 flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1233 tc->ht_params);
1234 if (!flow)
1235 return -EINVAL;
1236
1237 counter = mlx5_flow_rule_counter(flow->rule);
1238 if (!counter)
1239 return 0;
1240
1241 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1242
1243 preempt_disable();
1244
1245 tcf_exts_to_list(f->exts, &actions);
1246 list_for_each_entry(a, &actions, list)
1247 tcf_action_stats_update(a, bytes, packets, lastuse);
1248
1249 preempt_enable();
1250
1251 return 0;
1252 }
1253
1254 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
1255 .head_offset = offsetof(struct mlx5e_tc_flow, node),
1256 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
1257 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
1258 .automatic_shrinking = true,
1259 };
1260
1261 int mlx5e_tc_init(struct mlx5e_priv *priv)
1262 {
1263 struct mlx5e_tc_table *tc = &priv->fs.tc;
1264
1265 tc->ht_params = mlx5e_tc_flow_ht_params;
1266 return rhashtable_init(&tc->ht, &tc->ht_params);
1267 }
1268
1269 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
1270 {
1271 struct mlx5e_tc_flow *flow = ptr;
1272 struct mlx5e_priv *priv = arg;
1273
1274 mlx5e_tc_del_flow(priv, flow);
1275 kfree(flow);
1276 }
1277
1278 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
1279 {
1280 struct mlx5e_tc_table *tc = &priv->fs.tc;
1281
1282 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
1283
1284 if (!IS_ERR_OR_NULL(tc->t)) {
1285 mlx5_destroy_flow_table(tc->t);
1286 tc->t = NULL;
1287 }
1288 }