// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */

#include <net/dst_metadata.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "tc.h"
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"

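/* One entry per tunnel/VLAN netdev that has an indirect TC block bound
 * through this uplink representor; entries live on the uplink's
 * tc_indr_block_priv_list and are matched by netdev on lookup.
 */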
struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;

	struct list_head list;
};

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e,
				 struct mlx5e_neigh *m_neigh,
				 struct net_device *neigh_dev)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;

	mutex_lock(&rpriv->neigh_update.encap_lock);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
		if (err) {
			mutex_unlock(&rpriv->neigh_update.encap_lock);
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}

	e->nhe = nhe;
	spin_lock(&nhe->encap_list_lock);
	list_add_rcu(&e->encap_list, &nhe->encap_list);
	spin_unlock(&nhe->encap_list_lock);
	mutex_unlock(&rpriv->neigh_update.encap_lock);

	return 0;
}

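/* Note the ordering above: the tunnel-entropy refcount is taken first so
 * it can be rolled back if neigh entry creation fails, and
 * neigh_update.encap_lock is held across lookup/create so a concurrent
 * neigh update cannot observe a half-attached encap entry. The entry is
 * published to the nhe list with list_add_rcu() under encap_list_lock,
 * matching the RCU readers on the neigh update path.
 */
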
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

	if (!e->nhe)
		return;

	spin_lock(&e->nhe->encap_list_lock);
	list_del_rcu(&e->encap_list);
	spin_unlock(&e->nhe->encap_list_lock);

	mlx5e_rep_neigh_entry_release(e->nhe);
	e->nhe = NULL;
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

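/* Detach mirrors attach in reverse: unlink from the nhe encap list (RCU),
 * drop the neigh entry reference, then release the tunnel-entropy
 * refcount taken in mlx5e_rep_encap_entry_attach().
 */
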
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
			    struct mlx5e_encap_entry *e,
			    bool neigh_connected,
			    unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool encap_connected;
	LIST_HEAD(flow_list);

	ASSERT_RTNL();

	mutex_lock(&esw->offloads.encap_tbl_lock);
	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
	if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
		goto unlock;

	mlx5e_take_all_encap_flows(e, &flow_list);

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e, &flow_list);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		struct net_device *route_dev;

		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac in case the flows were deleted
		 * because the encap source mac changed.
		 */
		route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
		if (route_dev)
			ether_addr_copy(eth->h_source, route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
	}
unlock:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	mlx5e_put_flow_list(priv, &flow_list);
}

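/* mlx5e_rep_update_flows() handles three cases: no state change (nothing
 * to do), a previously valid encap whose neighbour went down or changed
 * its MAC (offloaded flows are torn down), and a newly resolved neighbour
 * (the cached encap header is patched with the new MACs and the flows are
 * re-offloaded). flow_list collects the affected flows so their
 * references are released only after encap_tbl_lock is dropped.
 */
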
static
int mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
				  struct flow_cls_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	switch (ma->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlx5e_tc_configure_matchall(priv, ma);
	case TC_CLSMATCHALL_DESTROY:
		return mlx5e_tc_delete_matchall(priv, ma);
	case TC_CLSMATCHALL_STATS:
		mlx5e_tc_stats_matchall(priv, ma);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
	case TC_SETUP_CLSMATCHALL:
		return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

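/* Callbacks registered through flow_block_cb_setup_simple() below run
 * with INGRESS | ESW_OFFLOAD flags: rules arriving here were attached to
 * the representor's own ingress qdisc and are offloaded via the eswitch.
 */
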
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload tmp, *f = type_data;
	struct mlx5e_priv *priv = cb_priv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	flags = MLX5_TC_FLAG(INGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);
	esw = priv->mdev->priv.eswitch;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		if (!mlx5_chains_prios_supported(esw_chains(esw)))
			return -EOPNOTSUPP;

		/* Re-use tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
		 * FT offload can use prio range [0, INT_MAX], so we normalize
		 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
		if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
			return -EOPNOTSUPP;
		if (tmp.common.chain_index != 0)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

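/* Example of the normalization above: an FT rule submitted at chain 0,
 * prio 0 is replayed on the reserved chain returned by
 * mlx5_chains_get_nf_ft_chain() at prio 1, since tc-style chains do not
 * accept prio 0. Stats are copied back into the caller's request so the
 * flowtable core sees them on the original flow_cls_offload.
 */
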
static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);

int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
		       void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct flow_block_offload *f = type_data;

	f->unlocked_driver_cb = true;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_tc_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	case TC_SETUP_FT:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_ft_cb_list,
						  mlx5e_rep_setup_ft_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}

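/* TC_SETUP_BLOCK binds the classic tc path (cls_flower/matchall) while
 * TC_SETUP_FT binds the netfilter flowtable offload path; each uses its
 * own driver cb list so the two block types are tracked independently.
 */
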
int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	int err;

	mutex_init(&uplink_priv->unready_flows_lock);
	INIT_LIST_HEAD(&uplink_priv->unready_flows);

	/* init shared tc flow table */
	err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
	return err;
}

void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
	/* delete shared tc flow table */
	mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
	mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}

void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);
}

void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
}

int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

	return NOTIFY_OK;
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

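/* A linear scan is sufficient here: the list holds one entry per tunnel
 * or VLAN device with a bound block, which is expected to stay small.
 */
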
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv,
		       unsigned long flags)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int err = 0;

	if (!netif_device_present(indr_priv->rpriv->netdev))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
					      flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;
	struct flow_cls_offload *f = type_data;
	struct flow_cls_offload tmp;
	struct mlx5e_priv *mpriv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	mpriv = netdev_priv(priv->rpriv->netdev);
	esw = mpriv->mdev->priv.eswitch;

	flags = MLX5_TC_FLAG(EGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		/* Re-use tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
		 * FT offload can use prio range [0, INT_MAX], so we normalize
		 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
		if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
		    tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
		    tmp.common.chain_index)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
			   struct mlx5e_rep_priv *rpriv,
			   struct flow_block_offload *f,
			   flow_setup_cb_t *setup_cb,
			   void *data,
			   void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;
	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
						    mlx5e_rep_indr_block_unbind,
						    f, netdev, sch, data, rpriv,
						    cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

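/* BIND allocates the per-netdev priv before allocating the block_cb so
 * the release callback (mlx5e_rep_indr_block_unbind) always has a valid
 * entry to unlink; if block_cb allocation fails the entry is unwound by
 * hand instead. UNBIND looks the pair back up and lets
 * flow_indr_block_cb_remove() drive the shared cleanup path.
 */
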
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_tc_cb,
						  data, cleanup);
	case TC_SETUP_FT:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_ft_cb,
						  data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

	/* init indirect block notifications */
	INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);

	return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
}

void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
	flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
				 mlx5e_rep_indr_block_unbind);
}

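/* flow_indr_dev_register() hooks mlx5e_rep_indr_setup_cb into the core's
 * indirect-block mechanism, so block bind/unbind events on foreign
 * devices (tunnels, or VLANs on top of the uplink) are routed to this
 * driver even though it does not own those netdevs.
 */
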
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				 struct mlx5e_tc_update_priv *tc_priv,
				 u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct tunnel_match_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
					   key.enc_ip.tos, key.enc_ip.ttl,
					   key.enc_tp.dst, TUNNEL_KEY,
					   key32_to_tunnel_id(key.enc_key_id.keyid),
					   enc_opts.key.len);
	} else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
					     key.enc_ip.tos, key.enc_ip.ttl,
					     key.enc_tp.dst, 0, TUNNEL_KEY,
					     key32_to_tunnel_id(key.enc_key_id.keyid),
					     enc_opts.key.len);
	} else {
		netdev_dbg(priv->netdev,
			   "Couldn't restore tunnel, unsupported addr_type: %d\n",
			   key.enc_control.addr_type);
		return false;
	}

	if (!tun_dst) {
		netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
		return false;
	}

	tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;

	if (enc_opts.key.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					enc_opts.key.data,
					enc_opts.key.len,
					enc_opts.key.dst_opt_type);

	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set fwd_dev so we do dev_put() after datapath */
	tc_priv->fwd_dev = dev;

	skb->dev = dev;

	return true;
}

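/* tunnel_id packs two mapping ids, decoded at the top of
 * mlx5e_restore_tunnel(): the low ENC_OPTS_BITS hold the encap-options
 * id and the remaining high bits hold the tunnel-key id. A zero tun_id
 * means the packet did not arrive through an offloaded tunnel and there
 * is nothing to restore.
 */
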
static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
				    struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (chain) {
		struct mlx5_rep_uplink_priv *uplink_priv;
		struct mlx5e_rep_priv *uplink_rpriv;
		struct tc_skb_ext *tc_skb_ext;
		struct mlx5_eswitch *esw;
		u32 zone_restore_id;

		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (!tc_skb_ext)
			return false;

		tc_skb_ext->chain = chain;
		zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
		esw = priv->mdev->priv.eswitch;
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
					      zone_restore_id))
			return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
}

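/* Chain restore depends on CONFIG_NET_TC_SKB_EXT: the matched chain is
 * handed back to tc in a tc_skb_ext so software classification resumes
 * where hardware left off, and conntrack state is restored from the zone
 * id in the low bits of reg_c1. Without that config option only the
 * tunnel metadata can be restored.
 */
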
static void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
{
	if (tc_priv->fwd_dev)
		dev_put(tc_priv->fwd_dev);
}

static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
				     struct mlx5_mapped_obj *mapped_obj,
				     struct mlx5e_tc_update_priv *tc_priv)
{
	if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
		netdev_dbg(priv->netdev,
			   "Failed to restore tunnel info for sampled packet\n");
		return;
	}
	mlx5e_tc_sample_skb(skb, mapped_obj);
	mlx5_rep_tc_post_napi_receive(tc_priv);
}

static bool mlx5e_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
				       struct mlx5_mapped_obj *mapped_obj,
				       struct mlx5e_tc_update_priv *tc_priv,
				       bool *forward_tx,
				       u32 reg_c1)
{
	u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	/* Tunnel restore takes precedence over int port restore */
	if (tunnel_id)
		return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
				      mapped_obj->int_port_metadata, forward_tx)) {
		/* Set fwd_dev for future dev_put */
		tc_priv->fwd_dev = skb->dev;

		return true;
	}

	return false;
}

void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
			  struct sk_buff *skb)
{
	u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_tc_update_priv tc_priv = {};
	struct mlx5_mapped_obj mapped_obj;
	struct mlx5_eswitch *esw;
	bool forward_tx = false;
	struct mlx5e_priv *priv;
	u32 reg_c0;
	int err;

	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
	if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
		goto forward;

	/* If reg_c0 is not equal to the default flow tag then skb->mark
	 * is not supported and must be reset back to 0.
	 */
	skb->mark = 0;

	priv = netdev_priv(skb->dev);
	esw = priv->mdev->priv.eswitch;
	err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find mapped object for reg_c0: %d, err: %d\n",
			   reg_c0, err);
		goto free_skb;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		if (!mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, &tc_priv) &&
		    !mlx5_ipsec_is_rx_flow(cqe))
			goto free_skb;
	} else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
		mlx5e_restore_skb_sample(priv, skb, &mapped_obj, &tc_priv);
		goto free_skb;
	} else if (mapped_obj.type == MLX5_MAPPED_OBJ_INT_PORT_METADATA) {
		if (!mlx5e_restore_skb_int_port(priv, skb, &mapped_obj, &tc_priv,
						&forward_tx, reg_c1))
			goto free_skb;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		goto free_skb;
	}

forward:
	if (forward_tx)
		dev_queue_xmit(skb);
	else
		napi_gro_receive(rq->cq.napi, skb);

	mlx5_rep_tc_post_napi_receive(&tc_priv);

	return;

free_skb:
	dev_kfree_skb_any(skb);
}

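/* Receive-path summary: reg_c0 (from the CQE flow tag) indexes the mapped
 * object describing what hardware matched (tc chain, sample or internal
 * port), while reg_c1 (CQE ft_metadata) carries the packed tunnel id and
 * CT zone. Packets whose state cannot be restored are dropped rather
 * than delivered with misleading metadata.
 */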