/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/vxlan.h>
#include <net/gre.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"

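/* Resolve the netdevice the tunnel route goes over (route_dev) and the
 * netdevice the encap rule should forward to (out_dev). Falls back to the
 * eswitch uplink when the egress device is not on the same HW eswitch or
 * is an SR-IOV LAG master.
 */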
static int get_route_and_out_devs(struct mlx5e_priv *priv,
                                  struct net_device *dev,
                                  struct net_device **route_dev,
                                  struct net_device **out_dev)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_dev, *uplink_upper;
        bool dst_is_lag_dev;

        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
        dst_is_lag_dev = (uplink_upper &&
                          netif_is_lag_master(uplink_upper) &&
                          dev == uplink_upper &&
                          mlx5_lag_is_sriov(priv->mdev));

        /* if the egress device isn't on the same HW e-switch or
         * it's a LAG device, use the uplink
         */
        if (!switchdev_port_same_parent_id(priv->netdev, dev) ||
            dst_is_lag_dev) {
                *route_dev = uplink_dev;
                *out_dev = *route_dev;
        } else {
                *route_dev = dev;
                if (is_vlan_dev(*route_dev))
                        *out_dev = uplink_dev;
                else if (mlx5e_eswitch_rep(dev))
                        *out_dev = *route_dev;
                else
                        return -EOPNOTSUPP;
        }

        return 0;
}

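/* Perform an IPv4 FIB lookup for the tunnel destination and resolve the
 * output devices, TTL and next-hop neighbour. On success the caller holds
 * a reference on *out_n and must release it with neigh_release().
 */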
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
        int ret;

        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;
#else
        return -EOPNOTSUPP;
#endif

        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
        if (ret < 0) {
                ip_rt_put(rt); /* don't leak the route on error */
                return ret;
        }

        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

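/* Return the rtnl link kind of a netdevice (e.g. "vxlan"), or an empty
 * string for devices without rtnl_link_ops.
 */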
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
        if (dev->rtnl_link_ops)
                return dev->rtnl_link_ops->kind;
        else
                return "";
}

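/* IPv6 counterpart of mlx5e_route_lookup_ipv4(): resolve the route, output
 * devices, hop limit and next-hop neighbour for the tunnel destination. The
 * caller must release the returned neighbour with neigh_release().
 */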
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi6 *fl6,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct neighbour *n = NULL;
        struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        int ret;

        ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
                                         fl6);
        if (ret < 0)
                return ret;

        if (!(*out_ttl))
                *out_ttl = ip6_dst_hoplimit(dst);

        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
        if (ret < 0) {
                dst_release(dst); /* don't leak the dst entry on error */
                return ret;
        }
#else
        return -EOPNOTSUPP;
#endif

        n = dst_neigh_lookup(dst, &fl6->daddr);
        dst_release(dst);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

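/* Write the UDP and VXLAN headers of the encap template. Only the UDP
 * destination port and the VNI are set; the remaining fields stay zeroed
 * from the kzalloc'ed buffer.
 */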
static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh = (struct vxlanhdr *)
                               ((char *)udp + sizeof(struct udphdr));

        udp->dest = tun_key->tp_dst;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(tun_id);

        return 0;
}

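/* Write the GRE base header (and key, when TUNNEL_KEY is set) of the encap
 * template. Checksum and sequence offloads are rejected since the HW does
 * not compute them.
 */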
static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        int hdr_len;
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);

        /* the HW does not calculate GRE checksums or sequence numbers */
        if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
                return -EOPNOTSUPP;

        greh->protocol = htons(ETH_P_TEB);

        /* GRE key */
        hdr_len = gre_calc_hlen(tun_key->tun_flags);
        greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
        if (tun_key->tun_flags & TUNNEL_KEY) {
                __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

                *ptr = tun_id;
        }

        return 0;
}

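/* Dispatch L4 tunnel header generation by tunnel type and report the outer
 * IP protocol (UDP for VXLAN, GRE for gretap) back to the caller.
 */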
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
                                      struct mlx5e_encap_entry *e)
{
        int err = 0;
        struct ip_tunnel_key *key = &e->tun_info.key;

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *ip_proto = IPPROTO_UDP;
                err = mlx5e_gen_vxlan_header(buf, key);
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *ip_proto = IPPROTO_GRE;
                err = mlx5e_gen_gre_header(buf, key);
        } else {
                pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
                        e->tunnel_type);
                err = -EOPNOTSUPP;
        }

        return err;
}

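/* Write the outer ethernet (and, for vlan devices, 802.1Q) header of the
 * encap template and return a pointer to where the IP header starts.
 */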
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
                             struct mlx5e_encap_entry *e,
                             u16 proto)
{
        struct ethhdr *eth = (struct ethhdr *)buf;
        char *ip;

        ether_addr_copy(eth->h_dest, e->h_dest);
        ether_addr_copy(eth->h_source, dev->dev_addr);
        if (is_vlan_dev(dev)) {
                struct vlan_hdr *vlan = (struct vlan_hdr *)
                                        ((char *)eth + ETH_HLEN);
                ip = (char *)vlan + VLAN_HLEN;
                eth->h_proto = vlan_dev_vlan_proto(dev);
                vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
                vlan->h_vlan_encapsulated_proto = htons(proto);
        } else {
                eth->h_proto = htons(proto);
                ip = (char *)eth + ETH_HLEN;
        }

        return ip;
}

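/* Build the complete IPv4 encap header (eth/[vlan]/ip/tunnel) for an encap
 * entry and register it with the HW via mlx5_packet_reformat_alloc(). If the
 * next hop is not resolved yet, kick the neighbour state machine and return
 * -EAGAIN so the offload can be completed when a neigh update arrives.
 */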
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        int ipv4_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        struct iphdr *ip;
        int err;

        /* add the IP fields */
        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
        fl4.saddr = tun_key->u.ipv4.src;
        ttl = tun_key->ttl;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl4, &n, &ttl);
        if (err)
                return err;

        ipv4_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct iphdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv4_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv4_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out; /* release the neigh reference taken by the lookup */
        }

        encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to look up the neigh entry in the
         * neigh hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state. This way, if we get a notification that the
         * neigh changed its validity state, we will find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                             ETH_P_IP);

        /* add ip header */
        ip->tos = tun_key->tos;
        ip->version = 0x4;
        ip->ihl = 0x5;
        ip->ttl = ttl;
        ip->daddr = fl4.daddr;
        ip->saddr = fl4.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
                                         &ip->protocol, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv4_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                err = -EAGAIN;
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv4_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

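/* IPv6 counterpart of mlx5e_tc_tun_create_header_ipv4(): build the encap
 * header with an outer IPv6 header and allocate the HW reformat context,
 * returning -EAGAIN while the next-hop neighbour is still unresolved.
 */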
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        struct ipv6hdr *ip6h;
        int ipv6_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        int err;

        ttl = tun_key->ttl;

        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
        fl6.daddr = tun_key->u.ipv6.dst;
        fl6.saddr = tun_key->u.ipv6.src;

        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl6, &n, &ttl);
        if (err)
                return err;

        ipv6_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct ipv6hdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv6_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv6_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out; /* release the neigh reference taken by the lookup */
        }

        encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to look up the neigh entry in the
         * neigh hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state. This way, if we get a notification that the
         * neigh changed its validity state, we will find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                                 ETH_P_IPV6);

        /* add ip header */
        ip6_flow_hdr(ip6h, tun_key->tos, 0);
        /* the HW fills up the ipv6 payload len */
        ip6h->hop_limit = ttl;
        ip6h->daddr = fl6.daddr;
        ip6h->saddr = fl6.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
                                         &ip6h->nexthdr, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv6_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                err = -EAGAIN;
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv6_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

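/* Map a tunnel netdevice to the driver's tunnel type enum. */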
int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
{
        if (netif_is_vxlan(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_VXLAN;
        else if (netif_is_gretap(tunnel_dev) ||
                 netif_is_ip6gretap(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_GRETAP;
        else
                return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

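/* Check whether encap/decap offload for the given tunnel device is supported
 * by this device, based on the relevant eswitch capability bits.
 */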
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
                                    struct net_device *netdev)
{
        int tunnel_type = mlx5e_tc_tun_get_type(netdev);

        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                return true;
        else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
                 MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
                return true;
        else
                return false;
}

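/* Initialize the reformat type and tunnel header length of an encap entry
 * according to the tunnel device type. For VXLAN, the UDP dst port must
 * already be registered with the HW.
 */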
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
                                 struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct netlink_ext_ack *extack)
{
        e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);

                if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "vxlan udp dport was not registered with the HW");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    dst_port);
                        return -EOPNOTSUPP;
                }
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
                e->tunnel_hlen = VXLAN_HLEN;
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
                e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
        } else {
                e->reformat_type = -1;
                e->tunnel_hlen = -1;
                return -EOPNOTSUPP;
        }
        return 0;
}

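/* Translate the flower enc_ports/enc_keyid matches of a VXLAN decap filter
 * into the outer header and misc parameter fields of the flow spec.
 */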
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                                    struct mlx5_flow_spec *spec,
                                    struct tc_cls_flower_offload *f,
                                    void *headers_c,
                                    void *headers_v)
{
        struct netlink_ext_ack *extack = f->common.extack;
        struct flow_dissector_key_ports *key =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_PORTS,
                                          f->key);
        struct flow_dissector_key_ports *mask =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_PORTS,
                                          f->mask);
        void *misc_c = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_value,
                                    misc_parameters);

        /* the full udp dst port must be given */
        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
            memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must include enc_dst_port condition");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must include enc_dst_port condition\n");
                return -EOPNOTSUPP;
        }

        /* the udp dst port must be known as a VXLAN port */
        if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matched UDP port is not registered as a VXLAN port");
                netdev_warn(priv->netdev,
                            "UDP port %d is not registered as a VXLAN port\n",
                            be16_to_cpu(key->dst));
                return -EOPNOTSUPP;
        }

        /* dst UDP port is valid here */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));

        /* match on VNI */
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
        return 0;
}

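/* Translate a gretap decap filter into the flow spec: match on the GRE IP
 * protocol, the TEB GRE protocol and, when given, the GRE key.
 */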
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
                                     struct mlx5_flow_spec *spec,
                                     struct tc_cls_flower_offload *f,
                                     void *outer_headers_c,
                                     void *outer_headers_v)
{
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
                NL_SET_ERR_MSG_MOD(f->common.extack,
                                   "GRE HW offloading is not supported");
                netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                 ip_protocol, IPPROTO_GRE);

        /* gre protocol */
        MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);

        /* gre key */
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *mask = NULL;
                struct flow_dissector_key_keyid *key = NULL;

                mask = skb_flow_dissector_target(f->dissector,
                                                 FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                 f->mask);
                MLX5_SET(fte_match_set_misc, misc_c,
                         gre_key.key, be32_to_cpu(mask->keyid));

                key = skb_flow_dissector_target(f->dissector,
                                                FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                f->key);
                MLX5_SET(fte_match_set_misc, misc_v,
                         gre_key.key, be32_to_cpu(key->keyid));
        }

        return 0;
}

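/* Entry point for parsing the tunnel matches of a TC flower filter;
 * dispatches to the per-tunnel-type parser based on the filter device.
 */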
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
                       void *headers_v)
{
        int tunnel_type;
        int err = 0;

        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
                netdev_warn(priv->netdev,
                            "decapsulation offload is not supported for %s net device (%d)\n",
                            mlx5e_netdev_kind(filter_dev), tunnel_type);
                return -EOPNOTSUPP;
        }
        return err;
}