/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

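/* Add a NIC (non-eswitch) offloaded flow: translate the parsed match
 * spec and actions into an mlx5 flow rule on the per-priv TC flow
 * table, creating the table on first use and allocating a counter
 * and/or modify-header context when the actions call for them.
 */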
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

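/* Tear down a NIC flow: delete the rule and its counter, drop the TC
 * flow table once the last filter is gone, and release any
 * modify-header context the rule held.
 */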
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   flow->nic_attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

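/* Add an eswitch (FDB) offloaded flow. The unwind order on failure
 * mirrors the setup order: modify-header, then vlan action, then any
 * encap entry attached by the action parser.
 */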
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
	}

	mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(flow->esw_attr->parse_attr);
	}

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}

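/* Called when a cached encap entry's neighbour becomes valid: program
 * the encapsulation header into HW and re-offload every flow that
 * shares this encap entry.
 */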
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		flow->esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5e_tc_add_fdb_flow(priv,
						   flow->esw_attr->parse_attr,
						   flow);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

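/* Called when the neighbour is no longer valid: un-offload all flows
 * sharing the encap entry and release the HW encap context.
 */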
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			counter = mlx5_flow_rule_counter(flow->rule);
			mlx5_del_flow_rules(flow->rule);
			mlx5_fc_destroy(priv->mdev, counter);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}

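/* Report recent HW activity on offloaded tunnel flows back to the
 * neighbour subsystem, so that neighbours used only by offloaded
 * traffic are kept alive (the cached flow counters are the only
 * evidence of use the kernel sees).
 */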
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour is already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

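/* Unlink a flow from its encap entry; the last flow to detach also
 * releases the HW encap context and frees the entry itself.
 */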
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

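/* Match parsing: translate flower dissector keys into the mlx5
 * fte_match_param layout. parse_vxlan_attr() and parse_tunnel_attr()
 * handle the outer (tunnel) headers for decap rules.
 */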
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

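/* Core flower-to-mlx5 match translation. Besides filling the match
 * spec, it computes the minimal vport inline mode required for the
 * matched headers, which the caller validates against the eswitch
 * configuration.
 */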
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

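/* Header rewrite (pedit) offload: SW pedit keys are accumulated into
 * the pedit_headers masks/values below and then re-encoded as mlx5
 * modify-header actions, one per contiguous HW field.
 */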
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};

/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
 * max from the SW pedit action. On success, it says how many HW actions were
 * actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, first_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		first_z = find_first_zero_bit(&mask, field_bsize);
		first = find_first_bit(&mask, field_bsize);
		last  = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1) || first_z < last) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}

static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a 32-bit pedit SW key can yield, at the extreme, up to 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

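/* The csum action is offloadable only on top of a header rewrite,
 * since the HW recalculates checksums as part of modify-header, and
 * only for the IPv4/TCP/UDP update flags.
 */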
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

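/* Translate TC actions into mlx5_nic_flow_attr for NIC offload.
 * Supported here: drop (gact shot), header rewrite (pedit), csum
 * recalculation on top of pedit, and skbedit mark via the flow tag.
 */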
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

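/* Encap helpers: resolve the route and neighbour for the tunnel
 * destination and pick the egress device, falling back to the eswitch
 * uplink when the route egresses a device on a different HW switch.
 */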
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

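/* Build the raw encapsulation header (outer Ethernet + IP/IPv6 + UDP +
 * VXLAN) into buf; the HW fills in the remaining variable fields such
 * as the IP/UDP lengths.
 */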
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

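/* Create an encap entry for an IPv4 tunnel: resolve route/neighbour,
 * register the entry for neighbour updates, and, if the neighbour is
 * valid, program the header into HW; otherwise kick the neighbour and
 * return -EAGAIN so the flow is cached until it resolves.
 */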
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
	kfree(encap_header);
	if (n)
		neigh_release(n);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int err, ttl = 0;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
	kfree(encap_header);
	if (n)
		neigh_release(n);
	return err;
}

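/* Attach a flow to a (possibly shared) encap entry keyed by the tunnel
 * info; a new entry is created and its header generated on first use.
 */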
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;

	return err;

out_err:
	kfree(e);
	return err;
}

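/* Translate TC actions into mlx5_esw_flow_attr for eswitch (FDB)
 * offload: drop, pedit, csum, mirred redirect (with or without
 * tunnel encap), vlan push/pop and tunnel decap.
 */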
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

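		/* mirred egress redirect: if the target device shares this
		 * device's switch parent, forward straight to its vport rep;
		 * if a preceding tunnel_key set action was seen, go through
		 * VXLAN encap; anything else cannot be offloaded.
		 */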
		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev, *encap_dev = NULL;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, &encap_dev, flow);
				if (err && err != -EAGAIN)
					return err;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(encap_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
				attr->parse_attr = parse_attr;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

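		/* Only 802.1Q vlan push (and vlan pop) are offloaded;
		 * TCA_VLAN_ACT_MODIFY is rejected.
		 */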
		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return err;
}

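/* Add a flower classifier rule. Depending on whether SRIOV switchdev
 * (offloads) mode is active, the rule is parsed as an eswitch (FDB) flow
 * or as a NIC flow; attr_size sizes the union at the tail of struct
 * mlx5e_tc_flow accordingly.
 */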
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_handle_encap_flow;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);
	return err;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

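	/* -EAGAIN from parse_tc_fdb_actions() means the encap neighbour is
	 * not resolved yet, so the rule is not in HW. Keep the flow cached
	 * in the hashtable and report success; the neigh update event will
	 * complete the offload.
	 */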
err_handle_encap_flow:
	if (err == -EAGAIN) {
		err = rhashtable_insert_fast(&tc->ht, &flow->node,
					     tc->ht_params);
		if (err)
			mlx5e_tc_del_flow(priv, flow);
		else
			return 0;
	}

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

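/* Report flow counters back to tc. Values come from the driver's counter
 * cache (mlx5_fc_query_cached), not from a firmware query on this path.
 */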
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

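	/* tcf_action_stats_update() updates the actions' per-CPU counters;
	 * keep preemption disabled across the update loop.
	 */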
	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

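/* Offloaded flows are hashed by the tc filter cookie, the key the
 * add/del/stats callbacks above use to find a flow again.
 */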
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

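/* rhashtable free callback: tear down any flow still installed when the
 * tc table itself is being cleaned up.
 */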
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}