1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
17 #include <rte_vxlan.h>
18 #include <rte_cryptodev.h>
19 #include <rte_cryptodev_pmd.h>
21 #include "rte_table_action.h"
23 #define rte_htons rte_cpu_to_be_16
24 #define rte_htonl rte_cpu_to_be_32
26 #define rte_ntohs rte_be_to_cpu_16
27 #define rte_ntohl rte_be_to_cpu_32
30 * RTE_TABLE_ACTION_FWD
32 #define fwd_data rte_pipeline_table_entry
35 fwd_apply(struct fwd_data
*data
,
36 struct rte_table_action_fwd_params
*p
)
38 data
->action
= p
->action
;
40 if (p
->action
== RTE_PIPELINE_ACTION_PORT
)
41 data
->port_id
= p
->id
;
43 if (p
->action
== RTE_PIPELINE_ACTION_TABLE
)
44 data
->table_id
= p
->id
;
53 lb_cfg_check(struct rte_table_action_lb_config
*cfg
)
56 (cfg
->key_size
< RTE_TABLE_ACTION_LB_KEY_SIZE_MIN
) ||
57 (cfg
->key_size
> RTE_TABLE_ACTION_LB_KEY_SIZE_MAX
) ||
58 (!rte_is_power_of_2(cfg
->key_size
)) ||
59 (cfg
->f_hash
== NULL
))
66 uint32_t out
[RTE_TABLE_ACTION_LB_TABLE_SIZE
];
70 lb_apply(struct lb_data
*data
,
71 struct rte_table_action_lb_params
*p
)
73 memcpy(data
->out
, p
->out
, sizeof(data
->out
));
78 static __rte_always_inline
void
79 pkt_work_lb(struct rte_mbuf
*mbuf
,
81 struct rte_table_action_lb_config
*cfg
)
83 uint8_t *pkt_key
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
, cfg
->key_offset
);
84 uint32_t *out
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, cfg
->out_offset
);
88 digest
= cfg
->f_hash(pkt_key
,
92 pos
= digest
& (RTE_TABLE_ACTION_LB_TABLE_SIZE
- 1);
93 out_val
= data
->out
[pos
];
99 * RTE_TABLE_ACTION_MTR
102 mtr_cfg_check(struct rte_table_action_mtr_config
*mtr
)
104 if ((mtr
->alg
== RTE_TABLE_ACTION_METER_SRTCM
) ||
105 ((mtr
->n_tc
!= 1) && (mtr
->n_tc
!= 4)) ||
106 (mtr
->n_bytes_enabled
!= 0))
111 struct mtr_trtcm_data
{
112 struct rte_meter_trtcm trtcm
;
113 uint64_t stats
[RTE_COLORS
];
116 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
117 (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)
120 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data
*data
,
123 data
->stats
[RTE_COLOR_GREEN
] &= ~0xF8LLU
;
124 data
->stats
[RTE_COLOR_GREEN
] |= (profile_id
% 32) << 3;
127 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
128 (((data)->stats[(color)] & 4LLU) >> 2)
130 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
131 ((enum rte_color)((data)->stats[(color)] & 3LLU))
134 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data
*data
,
135 enum rte_color color
,
136 enum rte_table_action_policer action
)
138 if (action
== RTE_TABLE_ACTION_POLICER_DROP
) {
139 data
->stats
[color
] |= 4LLU;
141 data
->stats
[color
] &= ~7LLU;
142 data
->stats
[color
] |= color
& 3LLU;
147 mtr_trtcm_data_stats_get(struct mtr_trtcm_data
*data
,
148 enum rte_color color
)
150 return data
->stats
[color
] >> 8;
154 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data
*data
,
155 enum rte_color color
)
157 data
->stats
[color
] &= 0xFFLU
;
160 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
161 ((data)->stats[(color)] += (1LLU << 8))
164 mtr_data_size(struct rte_table_action_mtr_config
*mtr
)
166 return mtr
->n_tc
* sizeof(struct mtr_trtcm_data
);
169 struct dscp_table_entry_data
{
170 enum rte_color color
;
175 struct dscp_table_data
{
176 struct dscp_table_entry_data entry
[64];
179 struct meter_profile_data
{
180 struct rte_meter_trtcm_profile profile
;
185 static struct meter_profile_data
*
186 meter_profile_data_find(struct meter_profile_data
*mp
,
192 for (i
= 0; i
< mp_size
; i
++) {
193 struct meter_profile_data
*mp_data
= &mp
[i
];
195 if (mp_data
->valid
&& (mp_data
->profile_id
== profile_id
))
202 static struct meter_profile_data
*
203 meter_profile_data_find_unused(struct meter_profile_data
*mp
,
208 for (i
= 0; i
< mp_size
; i
++) {
209 struct meter_profile_data
*mp_data
= &mp
[i
];
219 mtr_apply_check(struct rte_table_action_mtr_params
*p
,
220 struct rte_table_action_mtr_config
*cfg
,
221 struct meter_profile_data
*mp
,
226 if (p
->tc_mask
> RTE_LEN2MASK(cfg
->n_tc
, uint32_t))
229 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
230 struct rte_table_action_mtr_tc_params
*p_tc
= &p
->mtr
[i
];
231 struct meter_profile_data
*mp_data
;
233 if ((p
->tc_mask
& (1LLU << i
)) == 0)
236 mp_data
= meter_profile_data_find(mp
,
238 p_tc
->meter_profile_id
);
247 mtr_apply(struct mtr_trtcm_data
*data
,
248 struct rte_table_action_mtr_params
*p
,
249 struct rte_table_action_mtr_config
*cfg
,
250 struct meter_profile_data
*mp
,
256 /* Check input arguments */
257 status
= mtr_apply_check(p
, cfg
, mp
, mp_size
);
262 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
263 struct rte_table_action_mtr_tc_params
*p_tc
= &p
->mtr
[i
];
264 struct mtr_trtcm_data
*data_tc
= &data
[i
];
265 struct meter_profile_data
*mp_data
;
267 if ((p
->tc_mask
& (1LLU << i
)) == 0)
271 mp_data
= meter_profile_data_find(mp
,
273 p_tc
->meter_profile_id
);
277 memset(data_tc
, 0, sizeof(*data_tc
));
280 status
= rte_meter_trtcm_config(&data_tc
->trtcm
,
286 mtr_trtcm_data_meter_profile_id_set(data_tc
,
289 /* Policer actions */
290 mtr_trtcm_data_policer_action_set(data_tc
,
292 p_tc
->policer
[RTE_COLOR_GREEN
]);
294 mtr_trtcm_data_policer_action_set(data_tc
,
296 p_tc
->policer
[RTE_COLOR_YELLOW
]);
298 mtr_trtcm_data_policer_action_set(data_tc
,
300 p_tc
->policer
[RTE_COLOR_RED
]);
306 static __rte_always_inline
uint64_t
307 pkt_work_mtr(struct rte_mbuf
*mbuf
,
308 struct mtr_trtcm_data
*data
,
309 struct dscp_table_data
*dscp_table
,
310 struct meter_profile_data
*mp
,
313 uint16_t total_length
)
316 struct dscp_table_entry_data
*dscp_entry
= &dscp_table
->entry
[dscp
];
317 enum rte_color color_in
, color_meter
, color_policer
;
321 color_in
= dscp_entry
->color
;
323 mp_id
= MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data
);
326 color_meter
= rte_meter_trtcm_color_aware_check(
334 MTR_TRTCM_DATA_STATS_INC(data
, color_meter
);
337 drop_mask
= MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data
, color_meter
);
339 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data
, color_meter
);
340 rte_mbuf_sched_color_set(mbuf
, (uint8_t)color_policer
);
346 * RTE_TABLE_ACTION_TM
349 tm_cfg_check(struct rte_table_action_tm_config
*tm
)
351 if ((tm
->n_subports_per_port
== 0) ||
352 (rte_is_power_of_2(tm
->n_subports_per_port
) == 0) ||
353 (tm
->n_subports_per_port
> UINT16_MAX
) ||
354 (tm
->n_pipes_per_subport
== 0) ||
355 (rte_is_power_of_2(tm
->n_pipes_per_subport
) == 0))
367 tm_apply_check(struct rte_table_action_tm_params
*p
,
368 struct rte_table_action_tm_config
*cfg
)
370 if ((p
->subport_id
>= cfg
->n_subports_per_port
) ||
371 (p
->pipe_id
>= cfg
->n_pipes_per_subport
))
378 tm_apply(struct tm_data
*data
,
379 struct rte_table_action_tm_params
*p
,
380 struct rte_table_action_tm_config
*cfg
)
384 /* Check input arguments */
385 status
= tm_apply_check(p
, cfg
);
390 data
->queue_id
= p
->subport_id
<<
391 (__builtin_ctz(cfg
->n_pipes_per_subport
) + 4) |
397 static __rte_always_inline
void
398 pkt_work_tm(struct rte_mbuf
*mbuf
,
399 struct tm_data
*data
,
400 struct dscp_table_data
*dscp_table
,
403 struct dscp_table_entry_data
*dscp_entry
= &dscp_table
->entry
[dscp
];
404 uint32_t queue_id
= data
->queue_id
|
405 dscp_entry
->tc_queue
;
406 rte_mbuf_sched_set(mbuf
, queue_id
, dscp_entry
->tc
,
407 (uint8_t)dscp_entry
->color
);
411 * RTE_TABLE_ACTION_ENCAP
414 encap_valid(enum rte_table_action_encap_type encap
)
417 case RTE_TABLE_ACTION_ENCAP_ETHER
:
418 case RTE_TABLE_ACTION_ENCAP_VLAN
:
419 case RTE_TABLE_ACTION_ENCAP_QINQ
:
420 case RTE_TABLE_ACTION_ENCAP_MPLS
:
421 case RTE_TABLE_ACTION_ENCAP_PPPOE
:
422 case RTE_TABLE_ACTION_ENCAP_VXLAN
:
423 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
431 encap_cfg_check(struct rte_table_action_encap_config
*encap
)
433 if ((encap
->encap_mask
== 0) ||
434 (__builtin_popcountll(encap
->encap_mask
) != 1))
440 struct encap_ether_data
{
441 struct rte_ether_hdr ether
;
444 #define VLAN(pcp, dei, vid) \
445 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
446 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
447 (((uint64_t)(vid)) & 0xFFFLLU)) \
449 struct encap_vlan_data {
450 struct rte_ether_hdr ether
;
451 struct rte_vlan_hdr vlan
;
454 struct encap_qinq_data
{
455 struct rte_ether_hdr ether
;
456 struct rte_vlan_hdr svlan
;
457 struct rte_vlan_hdr cvlan
;
460 #define ETHER_TYPE_MPLS_UNICAST 0x8847
462 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
464 #define MPLS(label, tc, s, ttl) \
465 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
466 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
467 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
468 (((uint64_t)(ttl)) & 0xFFLLU)))
470 struct encap_mpls_data
{
471 struct rte_ether_hdr ether
;
472 uint32_t mpls
[RTE_TABLE_ACTION_MPLS_LABELS_MAX
];
474 } __rte_packed
__rte_aligned(2);
476 #define PPP_PROTOCOL_IP 0x0021
478 struct pppoe_ppp_hdr
{
479 uint16_t ver_type_code
;
485 struct encap_pppoe_data
{
486 struct rte_ether_hdr ether
;
487 struct pppoe_ppp_hdr pppoe_ppp
;
490 #define IP_PROTO_UDP 17
492 struct encap_vxlan_ipv4_data
{
493 struct rte_ether_hdr ether
;
494 struct rte_ipv4_hdr ipv4
;
495 struct rte_udp_hdr udp
;
496 struct rte_vxlan_hdr vxlan
;
497 } __rte_packed
__rte_aligned(2);
499 struct encap_vxlan_ipv4_vlan_data
{
500 struct rte_ether_hdr ether
;
501 struct rte_vlan_hdr vlan
;
502 struct rte_ipv4_hdr ipv4
;
503 struct rte_udp_hdr udp
;
504 struct rte_vxlan_hdr vxlan
;
505 } __rte_packed
__rte_aligned(2);
507 struct encap_vxlan_ipv6_data
{
508 struct rte_ether_hdr ether
;
509 struct rte_ipv6_hdr ipv6
;
510 struct rte_udp_hdr udp
;
511 struct rte_vxlan_hdr vxlan
;
512 } __rte_packed
__rte_aligned(2);
514 struct encap_vxlan_ipv6_vlan_data
{
515 struct rte_ether_hdr ether
;
516 struct rte_vlan_hdr vlan
;
517 struct rte_ipv6_hdr ipv6
;
518 struct rte_udp_hdr udp
;
519 struct rte_vxlan_hdr vxlan
;
520 } __rte_packed
__rte_aligned(2);
522 struct encap_qinq_pppoe_data
{
523 struct rte_ether_hdr ether
;
524 struct rte_vlan_hdr svlan
;
525 struct rte_vlan_hdr cvlan
;
526 struct pppoe_ppp_hdr pppoe_ppp
;
527 } __rte_packed
__rte_aligned(2);
530 encap_data_size(struct rte_table_action_encap_config
*encap
)
532 switch (encap
->encap_mask
) {
533 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER
:
534 return sizeof(struct encap_ether_data
);
536 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN
:
537 return sizeof(struct encap_vlan_data
);
539 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ
:
540 return sizeof(struct encap_qinq_data
);
542 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS
:
543 return sizeof(struct encap_mpls_data
);
545 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE
:
546 return sizeof(struct encap_pppoe_data
);
548 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN
:
549 if (encap
->vxlan
.ip_version
)
550 if (encap
->vxlan
.vlan
)
551 return sizeof(struct encap_vxlan_ipv4_vlan_data
);
553 return sizeof(struct encap_vxlan_ipv4_data
);
555 if (encap
->vxlan
.vlan
)
556 return sizeof(struct encap_vxlan_ipv6_vlan_data
);
558 return sizeof(struct encap_vxlan_ipv6_data
);
560 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
561 return sizeof(struct encap_qinq_pppoe_data
);
569 encap_apply_check(struct rte_table_action_encap_params
*p
,
570 struct rte_table_action_encap_config
*cfg
)
572 if ((encap_valid(p
->type
) == 0) ||
573 ((cfg
->encap_mask
& (1LLU << p
->type
)) == 0))
577 case RTE_TABLE_ACTION_ENCAP_ETHER
:
580 case RTE_TABLE_ACTION_ENCAP_VLAN
:
583 case RTE_TABLE_ACTION_ENCAP_QINQ
:
586 case RTE_TABLE_ACTION_ENCAP_MPLS
:
587 if ((p
->mpls
.mpls_count
== 0) ||
588 (p
->mpls
.mpls_count
> RTE_TABLE_ACTION_MPLS_LABELS_MAX
))
593 case RTE_TABLE_ACTION_ENCAP_PPPOE
:
596 case RTE_TABLE_ACTION_ENCAP_VXLAN
:
599 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
608 encap_ether_apply(void *data
,
609 struct rte_table_action_encap_params
*p
,
610 struct rte_table_action_common_config
*common_cfg
)
612 struct encap_ether_data
*d
= data
;
613 uint16_t ethertype
= (common_cfg
->ip_version
) ?
614 RTE_ETHER_TYPE_IPV4
:
618 rte_ether_addr_copy(&p
->ether
.ether
.da
, &d
->ether
.d_addr
);
619 rte_ether_addr_copy(&p
->ether
.ether
.sa
, &d
->ether
.s_addr
);
620 d
->ether
.ether_type
= rte_htons(ethertype
);
626 encap_vlan_apply(void *data
,
627 struct rte_table_action_encap_params
*p
,
628 struct rte_table_action_common_config
*common_cfg
)
630 struct encap_vlan_data
*d
= data
;
631 uint16_t ethertype
= (common_cfg
->ip_version
) ?
632 RTE_ETHER_TYPE_IPV4
:
636 rte_ether_addr_copy(&p
->vlan
.ether
.da
, &d
->ether
.d_addr
);
637 rte_ether_addr_copy(&p
->vlan
.ether
.sa
, &d
->ether
.s_addr
);
638 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_VLAN
);
641 d
->vlan
.vlan_tci
= rte_htons(VLAN(p
->vlan
.vlan
.pcp
,
644 d
->vlan
.eth_proto
= rte_htons(ethertype
);
650 encap_qinq_apply(void *data
,
651 struct rte_table_action_encap_params
*p
,
652 struct rte_table_action_common_config
*common_cfg
)
654 struct encap_qinq_data
*d
= data
;
655 uint16_t ethertype
= (common_cfg
->ip_version
) ?
656 RTE_ETHER_TYPE_IPV4
:
660 rte_ether_addr_copy(&p
->qinq
.ether
.da
, &d
->ether
.d_addr
);
661 rte_ether_addr_copy(&p
->qinq
.ether
.sa
, &d
->ether
.s_addr
);
662 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_QINQ
);
665 d
->svlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.svlan
.pcp
,
668 d
->svlan
.eth_proto
= rte_htons(RTE_ETHER_TYPE_VLAN
);
671 d
->cvlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.cvlan
.pcp
,
674 d
->cvlan
.eth_proto
= rte_htons(ethertype
);
680 encap_qinq_pppoe_apply(void *data
,
681 struct rte_table_action_encap_params
*p
)
683 struct encap_qinq_pppoe_data
*d
= data
;
686 rte_ether_addr_copy(&p
->qinq
.ether
.da
, &d
->ether
.d_addr
);
687 rte_ether_addr_copy(&p
->qinq
.ether
.sa
, &d
->ether
.s_addr
);
688 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_VLAN
);
691 d
->svlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.svlan
.pcp
,
694 d
->svlan
.eth_proto
= rte_htons(RTE_ETHER_TYPE_VLAN
);
697 d
->cvlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.cvlan
.pcp
,
700 d
->cvlan
.eth_proto
= rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION
);
703 d
->pppoe_ppp
.ver_type_code
= rte_htons(0x1100);
704 d
->pppoe_ppp
.session_id
= rte_htons(p
->qinq_pppoe
.pppoe
.session_id
);
705 d
->pppoe_ppp
.length
= 0; /* not pre-computed */
706 d
->pppoe_ppp
.protocol
= rte_htons(PPP_PROTOCOL_IP
);
712 encap_mpls_apply(void *data
,
713 struct rte_table_action_encap_params
*p
)
715 struct encap_mpls_data
*d
= data
;
716 uint16_t ethertype
= (p
->mpls
.unicast
) ?
717 ETHER_TYPE_MPLS_UNICAST
:
718 ETHER_TYPE_MPLS_MULTICAST
;
722 rte_ether_addr_copy(&p
->mpls
.ether
.da
, &d
->ether
.d_addr
);
723 rte_ether_addr_copy(&p
->mpls
.ether
.sa
, &d
->ether
.s_addr
);
724 d
->ether
.ether_type
= rte_htons(ethertype
);
727 for (i
= 0; i
< p
->mpls
.mpls_count
- 1; i
++)
728 d
->mpls
[i
] = rte_htonl(MPLS(p
->mpls
.mpls
[i
].label
,
731 p
->mpls
.mpls
[i
].ttl
));
733 d
->mpls
[i
] = rte_htonl(MPLS(p
->mpls
.mpls
[i
].label
,
736 p
->mpls
.mpls
[i
].ttl
));
738 d
->mpls_count
= p
->mpls
.mpls_count
;
743 encap_pppoe_apply(void *data
,
744 struct rte_table_action_encap_params
*p
)
746 struct encap_pppoe_data
*d
= data
;
749 rte_ether_addr_copy(&p
->pppoe
.ether
.da
, &d
->ether
.d_addr
);
750 rte_ether_addr_copy(&p
->pppoe
.ether
.sa
, &d
->ether
.s_addr
);
751 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION
);
754 d
->pppoe_ppp
.ver_type_code
= rte_htons(0x1100);
755 d
->pppoe_ppp
.session_id
= rte_htons(p
->pppoe
.pppoe
.session_id
);
756 d
->pppoe_ppp
.length
= 0; /* not pre-computed */
757 d
->pppoe_ppp
.protocol
= rte_htons(PPP_PROTOCOL_IP
);
763 encap_vxlan_apply(void *data
,
764 struct rte_table_action_encap_params
*p
,
765 struct rte_table_action_encap_config
*cfg
)
767 if ((p
->vxlan
.vxlan
.vni
> 0xFFFFFF) ||
768 (cfg
->vxlan
.ip_version
&& (p
->vxlan
.ipv4
.dscp
> 0x3F)) ||
769 (!cfg
->vxlan
.ip_version
&& (p
->vxlan
.ipv6
.flow_label
> 0xFFFFF)) ||
770 (!cfg
->vxlan
.ip_version
&& (p
->vxlan
.ipv6
.dscp
> 0x3F)) ||
771 (cfg
->vxlan
.vlan
&& (p
->vxlan
.vlan
.vid
> 0xFFF)))
774 if (cfg
->vxlan
.ip_version
)
775 if (cfg
->vxlan
.vlan
) {
776 struct encap_vxlan_ipv4_vlan_data
*d
= data
;
779 rte_ether_addr_copy(&p
->vxlan
.ether
.da
,
781 rte_ether_addr_copy(&p
->vxlan
.ether
.sa
,
783 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_VLAN
);
786 d
->vlan
.vlan_tci
= rte_htons(VLAN(p
->vxlan
.vlan
.pcp
,
789 d
->vlan
.eth_proto
= rte_htons(RTE_ETHER_TYPE_IPV4
);
792 d
->ipv4
.version_ihl
= 0x45;
793 d
->ipv4
.type_of_service
= p
->vxlan
.ipv4
.dscp
<< 2;
794 d
->ipv4
.total_length
= 0; /* not pre-computed */
795 d
->ipv4
.packet_id
= 0;
796 d
->ipv4
.fragment_offset
= 0;
797 d
->ipv4
.time_to_live
= p
->vxlan
.ipv4
.ttl
;
798 d
->ipv4
.next_proto_id
= IP_PROTO_UDP
;
799 d
->ipv4
.hdr_checksum
= 0;
800 d
->ipv4
.src_addr
= rte_htonl(p
->vxlan
.ipv4
.sa
);
801 d
->ipv4
.dst_addr
= rte_htonl(p
->vxlan
.ipv4
.da
);
803 d
->ipv4
.hdr_checksum
= rte_ipv4_cksum(&d
->ipv4
);
806 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
807 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
808 d
->udp
.dgram_len
= 0; /* not pre-computed */
809 d
->udp
.dgram_cksum
= 0;
812 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
813 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
817 struct encap_vxlan_ipv4_data
*d
= data
;
820 rte_ether_addr_copy(&p
->vxlan
.ether
.da
,
822 rte_ether_addr_copy(&p
->vxlan
.ether
.sa
,
824 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_IPV4
);
827 d
->ipv4
.version_ihl
= 0x45;
828 d
->ipv4
.type_of_service
= p
->vxlan
.ipv4
.dscp
<< 2;
829 d
->ipv4
.total_length
= 0; /* not pre-computed */
830 d
->ipv4
.packet_id
= 0;
831 d
->ipv4
.fragment_offset
= 0;
832 d
->ipv4
.time_to_live
= p
->vxlan
.ipv4
.ttl
;
833 d
->ipv4
.next_proto_id
= IP_PROTO_UDP
;
834 d
->ipv4
.hdr_checksum
= 0;
835 d
->ipv4
.src_addr
= rte_htonl(p
->vxlan
.ipv4
.sa
);
836 d
->ipv4
.dst_addr
= rte_htonl(p
->vxlan
.ipv4
.da
);
838 d
->ipv4
.hdr_checksum
= rte_ipv4_cksum(&d
->ipv4
);
841 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
842 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
843 d
->udp
.dgram_len
= 0; /* not pre-computed */
844 d
->udp
.dgram_cksum
= 0;
847 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
848 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
853 if (cfg
->vxlan
.vlan
) {
854 struct encap_vxlan_ipv6_vlan_data
*d
= data
;
857 rte_ether_addr_copy(&p
->vxlan
.ether
.da
,
859 rte_ether_addr_copy(&p
->vxlan
.ether
.sa
,
861 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_VLAN
);
864 d
->vlan
.vlan_tci
= rte_htons(VLAN(p
->vxlan
.vlan
.pcp
,
867 d
->vlan
.eth_proto
= rte_htons(RTE_ETHER_TYPE_IPV6
);
870 d
->ipv6
.vtc_flow
= rte_htonl((6 << 28) |
871 (p
->vxlan
.ipv6
.dscp
<< 22) |
872 p
->vxlan
.ipv6
.flow_label
);
873 d
->ipv6
.payload_len
= 0; /* not pre-computed */
874 d
->ipv6
.proto
= IP_PROTO_UDP
;
875 d
->ipv6
.hop_limits
= p
->vxlan
.ipv6
.hop_limit
;
876 memcpy(d
->ipv6
.src_addr
,
878 sizeof(p
->vxlan
.ipv6
.sa
));
879 memcpy(d
->ipv6
.dst_addr
,
881 sizeof(p
->vxlan
.ipv6
.da
));
884 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
885 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
886 d
->udp
.dgram_len
= 0; /* not pre-computed */
887 d
->udp
.dgram_cksum
= 0;
890 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
891 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
895 struct encap_vxlan_ipv6_data
*d
= data
;
898 rte_ether_addr_copy(&p
->vxlan
.ether
.da
,
900 rte_ether_addr_copy(&p
->vxlan
.ether
.sa
,
902 d
->ether
.ether_type
= rte_htons(RTE_ETHER_TYPE_IPV6
);
905 d
->ipv6
.vtc_flow
= rte_htonl((6 << 28) |
906 (p
->vxlan
.ipv6
.dscp
<< 22) |
907 p
->vxlan
.ipv6
.flow_label
);
908 d
->ipv6
.payload_len
= 0; /* not pre-computed */
909 d
->ipv6
.proto
= IP_PROTO_UDP
;
910 d
->ipv6
.hop_limits
= p
->vxlan
.ipv6
.hop_limit
;
911 memcpy(d
->ipv6
.src_addr
,
913 sizeof(p
->vxlan
.ipv6
.sa
));
914 memcpy(d
->ipv6
.dst_addr
,
916 sizeof(p
->vxlan
.ipv6
.da
));
919 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
920 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
921 d
->udp
.dgram_len
= 0; /* not pre-computed */
922 d
->udp
.dgram_cksum
= 0;
925 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
926 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
933 encap_apply(void *data
,
934 struct rte_table_action_encap_params
*p
,
935 struct rte_table_action_encap_config
*cfg
,
936 struct rte_table_action_common_config
*common_cfg
)
940 /* Check input arguments */
941 status
= encap_apply_check(p
, cfg
);
946 case RTE_TABLE_ACTION_ENCAP_ETHER
:
947 return encap_ether_apply(data
, p
, common_cfg
);
949 case RTE_TABLE_ACTION_ENCAP_VLAN
:
950 return encap_vlan_apply(data
, p
, common_cfg
);
952 case RTE_TABLE_ACTION_ENCAP_QINQ
:
953 return encap_qinq_apply(data
, p
, common_cfg
);
955 case RTE_TABLE_ACTION_ENCAP_MPLS
:
956 return encap_mpls_apply(data
, p
);
958 case RTE_TABLE_ACTION_ENCAP_PPPOE
:
959 return encap_pppoe_apply(data
, p
);
961 case RTE_TABLE_ACTION_ENCAP_VXLAN
:
962 return encap_vxlan_apply(data
, p
, cfg
);
964 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
965 return encap_qinq_pppoe_apply(data
, p
);
/* Fallback definition so this pure helper also builds stand-alone;
 * rte_common.h provides the real macro in the normal DPDK build.
 */
#ifndef __rte_always_inline
#define __rte_always_inline inline __attribute__((always_inline))
#endif

/* Incrementally update a pre-computed IPv4 header checksum after writing
 * the total-length field (RFC 1624 one's-complement arithmetic). Both
 * cksum0 and total_length are in network byte order.
 */
static __rte_always_inline uint16_t
encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
	uint16_t total_length)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Add total length (one's complement logic) */
	cksum1 += total_length;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}
989 static __rte_always_inline
void *
990 encap(void *dst
, const void *src
, size_t n
)
992 dst
= ((uint8_t *) dst
) - n
;
993 return rte_memcpy(dst
, src
, n
);
996 static __rte_always_inline
void
997 pkt_work_encap_vxlan_ipv4(struct rte_mbuf
*mbuf
,
998 struct encap_vxlan_ipv4_data
*vxlan_tbl
,
999 struct rte_table_action_encap_config
*cfg
)
1001 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
1002 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
1003 struct encap_vxlan_ipv4_data
*vxlan_pkt
;
1004 uint16_t ether_length
, ipv4_total_length
, ipv4_hdr_cksum
, udp_length
;
1006 ether_length
= (uint16_t)mbuf
->pkt_len
;
1007 ipv4_total_length
= ether_length
+
1008 (sizeof(struct rte_vxlan_hdr
) +
1009 sizeof(struct rte_udp_hdr
) +
1010 sizeof(struct rte_ipv4_hdr
));
1011 ipv4_hdr_cksum
= encap_vxlan_ipv4_checksum_update(vxlan_tbl
->ipv4
.hdr_checksum
,
1012 rte_htons(ipv4_total_length
));
1013 udp_length
= ether_length
+
1014 (sizeof(struct rte_vxlan_hdr
) +
1015 sizeof(struct rte_udp_hdr
));
1017 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1018 vxlan_pkt
->ipv4
.total_length
= rte_htons(ipv4_total_length
);
1019 vxlan_pkt
->ipv4
.hdr_checksum
= ipv4_hdr_cksum
;
1020 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1022 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1023 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1026 static __rte_always_inline
void
1027 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf
*mbuf
,
1028 struct encap_vxlan_ipv4_vlan_data
*vxlan_tbl
,
1029 struct rte_table_action_encap_config
*cfg
)
1031 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
1032 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
1033 struct encap_vxlan_ipv4_vlan_data
*vxlan_pkt
;
1034 uint16_t ether_length
, ipv4_total_length
, ipv4_hdr_cksum
, udp_length
;
1036 ether_length
= (uint16_t)mbuf
->pkt_len
;
1037 ipv4_total_length
= ether_length
+
1038 (sizeof(struct rte_vxlan_hdr
) +
1039 sizeof(struct rte_udp_hdr
) +
1040 sizeof(struct rte_ipv4_hdr
));
1041 ipv4_hdr_cksum
= encap_vxlan_ipv4_checksum_update(vxlan_tbl
->ipv4
.hdr_checksum
,
1042 rte_htons(ipv4_total_length
));
1043 udp_length
= ether_length
+
1044 (sizeof(struct rte_vxlan_hdr
) +
1045 sizeof(struct rte_udp_hdr
));
1047 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1048 vxlan_pkt
->ipv4
.total_length
= rte_htons(ipv4_total_length
);
1049 vxlan_pkt
->ipv4
.hdr_checksum
= ipv4_hdr_cksum
;
1050 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1052 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1053 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1056 static __rte_always_inline
void
1057 pkt_work_encap_vxlan_ipv6(struct rte_mbuf
*mbuf
,
1058 struct encap_vxlan_ipv6_data
*vxlan_tbl
,
1059 struct rte_table_action_encap_config
*cfg
)
1061 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
1062 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
1063 struct encap_vxlan_ipv6_data
*vxlan_pkt
;
1064 uint16_t ether_length
, ipv6_payload_length
, udp_length
;
1066 ether_length
= (uint16_t)mbuf
->pkt_len
;
1067 ipv6_payload_length
= ether_length
+
1068 (sizeof(struct rte_vxlan_hdr
) +
1069 sizeof(struct rte_udp_hdr
));
1070 udp_length
= ether_length
+
1071 (sizeof(struct rte_vxlan_hdr
) +
1072 sizeof(struct rte_udp_hdr
));
1074 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1075 vxlan_pkt
->ipv6
.payload_len
= rte_htons(ipv6_payload_length
);
1076 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1078 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1079 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1082 static __rte_always_inline
void
1083 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf
*mbuf
,
1084 struct encap_vxlan_ipv6_vlan_data
*vxlan_tbl
,
1085 struct rte_table_action_encap_config
*cfg
)
1087 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
1088 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
1089 struct encap_vxlan_ipv6_vlan_data
*vxlan_pkt
;
1090 uint16_t ether_length
, ipv6_payload_length
, udp_length
;
1092 ether_length
= (uint16_t)mbuf
->pkt_len
;
1093 ipv6_payload_length
= ether_length
+
1094 (sizeof(struct rte_vxlan_hdr
) +
1095 sizeof(struct rte_udp_hdr
));
1096 udp_length
= ether_length
+
1097 (sizeof(struct rte_vxlan_hdr
) +
1098 sizeof(struct rte_udp_hdr
));
1100 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1101 vxlan_pkt
->ipv6
.payload_len
= rte_htons(ipv6_payload_length
);
1102 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1104 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1105 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1108 static __rte_always_inline
void
1109 pkt_work_encap(struct rte_mbuf
*mbuf
,
1111 struct rte_table_action_encap_config
*cfg
,
1113 uint16_t total_length
,
1116 switch (cfg
->encap_mask
) {
1117 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER
:
1118 encap(ip
, data
, sizeof(struct encap_ether_data
));
1119 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1120 sizeof(struct encap_ether_data
));
1121 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1122 sizeof(struct encap_ether_data
);
1125 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN
:
1126 encap(ip
, data
, sizeof(struct encap_vlan_data
));
1127 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1128 sizeof(struct encap_vlan_data
));
1129 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1130 sizeof(struct encap_vlan_data
);
1133 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ
:
1134 encap(ip
, data
, sizeof(struct encap_qinq_data
));
1135 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1136 sizeof(struct encap_qinq_data
));
1137 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1138 sizeof(struct encap_qinq_data
);
1141 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS
:
1143 struct encap_mpls_data
*mpls
= data
;
1144 size_t size
= sizeof(struct rte_ether_hdr
) +
1145 mpls
->mpls_count
* 4;
1147 encap(ip
, data
, size
);
1148 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) + size
);
1149 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+ size
;
1153 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE
:
1155 struct encap_pppoe_data
*pppoe
=
1156 encap(ip
, data
, sizeof(struct encap_pppoe_data
));
1157 pppoe
->pppoe_ppp
.length
= rte_htons(total_length
+ 2);
1158 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1159 sizeof(struct encap_pppoe_data
));
1160 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1161 sizeof(struct encap_pppoe_data
);
1165 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
1167 struct encap_qinq_pppoe_data
*qinq_pppoe
=
1168 encap(ip
, data
, sizeof(struct encap_qinq_pppoe_data
));
1169 qinq_pppoe
->pppoe_ppp
.length
= rte_htons(total_length
+ 2);
1170 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1171 sizeof(struct encap_qinq_pppoe_data
));
1172 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1173 sizeof(struct encap_qinq_pppoe_data
);
1177 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN
:
1179 if (cfg
->vxlan
.ip_version
)
1180 if (cfg
->vxlan
.vlan
)
1181 pkt_work_encap_vxlan_ipv4_vlan(mbuf
, data
, cfg
);
1183 pkt_work_encap_vxlan_ipv4(mbuf
, data
, cfg
);
1185 if (cfg
->vxlan
.vlan
)
1186 pkt_work_encap_vxlan_ipv6_vlan(mbuf
, data
, cfg
);
1188 pkt_work_encap_vxlan_ipv6(mbuf
, data
, cfg
);
1197 * RTE_TABLE_ACTION_NAT
1200 nat_cfg_check(struct rte_table_action_nat_config
*nat
)
1202 if ((nat
->proto
!= 0x06) &&
1203 (nat
->proto
!= 0x11))
1209 struct nat_ipv4_data
{
1214 struct nat_ipv6_data
{
1220 nat_data_size(struct rte_table_action_nat_config
*nat __rte_unused
,
1221 struct rte_table_action_common_config
*common
)
1223 int ip_version
= common
->ip_version
;
1225 return (ip_version
) ?
1226 sizeof(struct nat_ipv4_data
) :
1227 sizeof(struct nat_ipv6_data
);
1231 nat_apply_check(struct rte_table_action_nat_params
*p
,
1232 struct rte_table_action_common_config
*cfg
)
1234 if ((p
->ip_version
&& (cfg
->ip_version
== 0)) ||
1235 ((p
->ip_version
== 0) && cfg
->ip_version
))
1242 nat_apply(void *data
,
1243 struct rte_table_action_nat_params
*p
,
1244 struct rte_table_action_common_config
*cfg
)
1248 /* Check input arguments */
1249 status
= nat_apply_check(p
, cfg
);
1254 if (p
->ip_version
) {
1255 struct nat_ipv4_data
*d
= data
;
1257 d
->addr
= rte_htonl(p
->addr
.ipv4
);
1258 d
->port
= rte_htons(p
->port
);
1260 struct nat_ipv6_data
*d
= data
;
1262 memcpy(d
->addr
, p
->addr
.ipv6
, sizeof(d
->addr
));
1263 d
->port
= rte_htons(p
->port
);
/* Fallback definition so this pure helper also builds stand-alone;
 * rte_common.h provides the real macro in the normal DPDK build.
 */
#ifndef __rte_always_inline
#define __rte_always_inline inline __attribute__((always_inline))
#endif

/* Incrementally update an IPv4 header checksum when the address changes
 * from ip0 to ip1 (RFC 1624 one's-complement arithmetic). All values are
 * in network byte order.
 */
static __rte_always_inline uint16_t
nat_ipv4_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Subtract ip0 (one's complement logic) */
	cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	/* Add ip1 (one's complement logic) */
	cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}
/* Fallback definition so this pure helper also builds stand-alone;
 * rte_common.h provides the real macro in the normal DPDK build.
 */
#ifndef __rte_always_inline
#define __rte_always_inline inline __attribute__((always_inline))
#endif

/* Incrementally update a TCP/UDP checksum when both the IPv4 address
 * (pseudo-header) and the L4 port change. All values are in network byte
 * order.
 */
static __rte_always_inline uint16_t
nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Subtract ip0 and port 0 (one's complement logic) */
	cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	/* Add ip1 and port1 (one's complement logic) */
	cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}
/* Fallback definition so this pure helper also builds stand-alone;
 * rte_common.h provides the real macro in the normal DPDK build.
 */
#ifndef __rte_always_inline
#define __rte_always_inline inline __attribute__((always_inline))
#endif

/* Incrementally update a TCP/UDP checksum when the IPv6 address
 * (pseudo-header, 8 x 16-bit words) and the L4 port change. All values are
 * in network byte order.
 */
static __rte_always_inline uint16_t
nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
	uint16_t *ip0,
	uint16_t *ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Subtract ip0 and port 0 (one's complement logic) */
	cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
		ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	/* Add ip1 and port1 (one's complement logic) */
	cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
		ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}
1344 static __rte_always_inline
void
1345 pkt_ipv4_work_nat(struct rte_ipv4_hdr
*ip
,
1346 struct nat_ipv4_data
*data
,
1347 struct rte_table_action_nat_config
*cfg
)
1349 if (cfg
->source_nat
) {
1350 if (cfg
->proto
== 0x6) {
1351 struct rte_tcp_hdr
*tcp
= (struct rte_tcp_hdr
*) &ip
[1];
1352 uint16_t ip_cksum
, tcp_cksum
;
1354 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1358 tcp_cksum
= nat_ipv4_tcp_udp_checksum_update(tcp
->cksum
,
1364 ip
->src_addr
= data
->addr
;
1365 ip
->hdr_checksum
= ip_cksum
;
1366 tcp
->src_port
= data
->port
;
1367 tcp
->cksum
= tcp_cksum
;
1369 struct rte_udp_hdr
*udp
= (struct rte_udp_hdr
*) &ip
[1];
1370 uint16_t ip_cksum
, udp_cksum
;
1372 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1376 udp_cksum
= nat_ipv4_tcp_udp_checksum_update(udp
->dgram_cksum
,
1382 ip
->src_addr
= data
->addr
;
1383 ip
->hdr_checksum
= ip_cksum
;
1384 udp
->src_port
= data
->port
;
1385 if (udp
->dgram_cksum
)
1386 udp
->dgram_cksum
= udp_cksum
;
1389 if (cfg
->proto
== 0x6) {
1390 struct rte_tcp_hdr
*tcp
= (struct rte_tcp_hdr
*) &ip
[1];
1391 uint16_t ip_cksum
, tcp_cksum
;
1393 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1397 tcp_cksum
= nat_ipv4_tcp_udp_checksum_update(tcp
->cksum
,
1403 ip
->dst_addr
= data
->addr
;
1404 ip
->hdr_checksum
= ip_cksum
;
1405 tcp
->dst_port
= data
->port
;
1406 tcp
->cksum
= tcp_cksum
;
1408 struct rte_udp_hdr
*udp
= (struct rte_udp_hdr
*) &ip
[1];
1409 uint16_t ip_cksum
, udp_cksum
;
1411 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1415 udp_cksum
= nat_ipv4_tcp_udp_checksum_update(udp
->dgram_cksum
,
1421 ip
->dst_addr
= data
->addr
;
1422 ip
->hdr_checksum
= ip_cksum
;
1423 udp
->dst_port
= data
->port
;
1424 if (udp
->dgram_cksum
)
1425 udp
->dgram_cksum
= udp_cksum
;
1430 static __rte_always_inline
void
1431 pkt_ipv6_work_nat(struct rte_ipv6_hdr
*ip
,
1432 struct nat_ipv6_data
*data
,
1433 struct rte_table_action_nat_config
*cfg
)
1435 if (cfg
->source_nat
) {
1436 if (cfg
->proto
== 0x6) {
1437 struct rte_tcp_hdr
*tcp
= (struct rte_tcp_hdr
*) &ip
[1];
1440 tcp_cksum
= nat_ipv6_tcp_udp_checksum_update(tcp
->cksum
,
1441 (uint16_t *)ip
->src_addr
,
1442 (uint16_t *)data
->addr
,
1446 rte_memcpy(ip
->src_addr
, data
->addr
, 16);
1447 tcp
->src_port
= data
->port
;
1448 tcp
->cksum
= tcp_cksum
;
1450 struct rte_udp_hdr
*udp
= (struct rte_udp_hdr
*) &ip
[1];
1453 udp_cksum
= nat_ipv6_tcp_udp_checksum_update(udp
->dgram_cksum
,
1454 (uint16_t *)ip
->src_addr
,
1455 (uint16_t *)data
->addr
,
1459 rte_memcpy(ip
->src_addr
, data
->addr
, 16);
1460 udp
->src_port
= data
->port
;
1461 udp
->dgram_cksum
= udp_cksum
;
1464 if (cfg
->proto
== 0x6) {
1465 struct rte_tcp_hdr
*tcp
= (struct rte_tcp_hdr
*) &ip
[1];
1468 tcp_cksum
= nat_ipv6_tcp_udp_checksum_update(tcp
->cksum
,
1469 (uint16_t *)ip
->dst_addr
,
1470 (uint16_t *)data
->addr
,
1474 rte_memcpy(ip
->dst_addr
, data
->addr
, 16);
1475 tcp
->dst_port
= data
->port
;
1476 tcp
->cksum
= tcp_cksum
;
1478 struct rte_udp_hdr
*udp
= (struct rte_udp_hdr
*) &ip
[1];
1481 udp_cksum
= nat_ipv6_tcp_udp_checksum_update(udp
->dgram_cksum
,
1482 (uint16_t *)ip
->dst_addr
,
1483 (uint16_t *)data
->addr
,
1487 rte_memcpy(ip
->dst_addr
, data
->addr
, 16);
1488 udp
->dst_port
= data
->port
;
1489 udp
->dgram_cksum
= udp_cksum
;
1495 * RTE_TABLE_ACTION_TTL
1498 ttl_cfg_check(struct rte_table_action_ttl_config
*ttl
)
/* The TTL per-entry state packs two things into the single n_packets
 * field: bit 0 is the "decrement TTL" flag (0 or 1) and bits 63..1 are
 * the dropped-packet counter, stored shifted left by one. */

/* Initialize: counter = 0, bit 0 = decrement flag. */
#define TTL_INIT(data, decrement) \
	((data)->n_packets = (decrement) ? 1 : 0)

/* Amount to subtract from the packet TTL: 1 when decrementing, else 0. */
#define TTL_DEC_GET(data) \
	((uint8_t)((data)->n_packets & 1))

/* Zero the counter while preserving the decrement flag in bit 0. */
#define TTL_STATS_RESET(data) \
	((data)->n_packets = ((data)->n_packets & 1))

/* Read the counter (drop bit 0, the decrement flag). */
#define TTL_STATS_READ(data) \
	((data)->n_packets >> 1)

/* Add value to the counter, keeping the decrement flag intact. */
#define TTL_STATS_ADD(data, value) \
	((data)->n_packets = \
		(((((data)->n_packets >> 1) + (value)) << 1) | \
		((data)->n_packets & 1)))
1528 ttl_apply(void *data
,
1529 struct rte_table_action_ttl_params
*p
)
1531 struct ttl_data
*d
= data
;
1533 TTL_INIT(d
, p
->decrement
);
1538 static __rte_always_inline
uint64_t
1539 pkt_ipv4_work_ttl(struct rte_ipv4_hdr
*ip
,
1540 struct ttl_data
*data
)
1543 uint16_t cksum
= ip
->hdr_checksum
;
1544 uint8_t ttl
= ip
->time_to_live
;
1545 uint8_t ttl_diff
= TTL_DEC_GET(data
);
1550 ip
->hdr_checksum
= cksum
;
1551 ip
->time_to_live
= ttl
;
1553 drop
= (ttl
== 0) ? 1 : 0;
1554 TTL_STATS_ADD(data
, drop
);
1559 static __rte_always_inline
uint64_t
1560 pkt_ipv6_work_ttl(struct rte_ipv6_hdr
*ip
,
1561 struct ttl_data
*data
)
1564 uint8_t ttl
= ip
->hop_limits
;
1565 uint8_t ttl_diff
= TTL_DEC_GET(data
);
1569 ip
->hop_limits
= ttl
;
1571 drop
= (ttl
== 0) ? 1 : 0;
1572 TTL_STATS_ADD(data
, drop
);
1578 * RTE_TABLE_ACTION_STATS
1581 stats_cfg_check(struct rte_table_action_stats_config
*stats
)
1583 if ((stats
->n_packets_enabled
== 0) && (stats
->n_bytes_enabled
== 0))
1595 stats_apply(struct stats_data
*data
,
1596 struct rte_table_action_stats_params
*p
)
1598 data
->n_packets
= p
->n_packets
;
1599 data
->n_bytes
= p
->n_bytes
;
1604 static __rte_always_inline
void
1605 pkt_work_stats(struct stats_data
*data
,
1606 uint16_t total_length
)
1609 data
->n_bytes
+= total_length
;
1613 * RTE_TABLE_ACTION_TIME
1620 time_apply(struct time_data
*data
,
1621 struct rte_table_action_time_params
*p
)
1623 data
->time
= p
->time
;
1627 static __rte_always_inline
void
1628 pkt_work_time(struct time_data
*data
,
1636 * RTE_TABLE_ACTION_CRYPTO
/* Bit flags recorded in sym_crypto_data.op_mask describing which
 * transform types the session carries (may be OR-ed together). */
#define CRYPTO_OP_MASK_CIPHER	0x1
#define CRYPTO_OP_MASK_AUTH	0x2
#define CRYPTO_OP_MASK_AEAD	0x4
1643 struct crypto_op_sym_iv_aad
{
1644 struct rte_crypto_op op
;
1645 struct rte_crypto_sym_op sym_op
;
1649 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
];
1651 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
];
1655 uint8_t iv
[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
];
1656 uint8_t aad
[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX
];
1662 struct sym_crypto_data
{
1667 /** Length of cipher iv. */
1668 uint16_t cipher_iv_len
;
1670 /** Offset from start of IP header to the cipher iv. */
1671 uint16_t cipher_iv_data_offset
;
1673 /** Length of cipher iv to be updated in the mbuf. */
1674 uint16_t cipher_iv_update_len
;
1676 /** Offset from start of IP header to the auth iv. */
1677 uint16_t auth_iv_data_offset
;
1679 /** Length of auth iv in the mbuf. */
1680 uint16_t auth_iv_len
;
1682 /** Length of auth iv to be updated in the mbuf. */
1683 uint16_t auth_iv_update_len
;
1688 /** Length of iv. */
1691 /** Offset from start of IP header to the aead iv. */
1692 uint16_t iv_data_offset
;
1694 /** Length of iv to be updated in the mbuf. */
1695 uint16_t iv_update_len
;
1697 /** Length of aad */
1700 /** Offset from start of IP header to the aad. */
1701 uint16_t aad_data_offset
;
1703 /** Length of aad to updated in the mbuf. */
1704 uint16_t aad_update_len
;
1709 /** Offset from start of IP header to the data. */
1710 uint16_t data_offset
;
1712 /** Digest length. */
1713 uint16_t digest_len
;
1716 uint16_t block_size
;
1718 /** Mask of crypto operation */
1721 /** Session pointer. */
1722 struct rte_cryptodev_sym_session
*session
;
1724 /** Direction of crypto, encrypt or decrypt */
1727 /** Private data size to store cipher iv / aad. */
1728 uint8_t iv_aad_data
[32];
1733 sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config
*cfg
)
1735 if (!rte_cryptodev_pmd_is_valid_dev(cfg
->cryptodev_id
))
1737 if (cfg
->mp_create
== NULL
|| cfg
->mp_init
== NULL
)
1744 get_block_size(const struct rte_crypto_sym_xform
*xform
, uint8_t cdev_id
)
1746 struct rte_cryptodev_info dev_info
;
1747 const struct rte_cryptodev_capabilities
*cap
;
1750 rte_cryptodev_info_get(cdev_id
, &dev_info
);
1752 for (i
= 0; dev_info
.capabilities
[i
].op
!= RTE_CRYPTO_OP_TYPE_UNDEFINED
;
1754 cap
= &dev_info
.capabilities
[i
];
1756 if (cap
->sym
.xform_type
!= xform
->type
)
1759 if ((xform
->type
== RTE_CRYPTO_SYM_XFORM_CIPHER
) &&
1760 (cap
->sym
.cipher
.algo
== xform
->cipher
.algo
))
1761 return cap
->sym
.cipher
.block_size
;
1763 if ((xform
->type
== RTE_CRYPTO_SYM_XFORM_AEAD
) &&
1764 (cap
->sym
.aead
.algo
== xform
->aead
.algo
))
1765 return cap
->sym
.aead
.block_size
;
1767 if (xform
->type
== RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
)
1775 sym_crypto_apply(struct sym_crypto_data
*data
,
1776 struct rte_table_action_sym_crypto_config
*cfg
,
1777 struct rte_table_action_sym_crypto_params
*p
)
1779 const struct rte_crypto_cipher_xform
*cipher_xform
= NULL
;
1780 const struct rte_crypto_auth_xform
*auth_xform
= NULL
;
1781 const struct rte_crypto_aead_xform
*aead_xform
= NULL
;
1782 struct rte_crypto_sym_xform
*xform
= p
->xform
;
1783 struct rte_cryptodev_sym_session
*session
;
1786 memset(data
, 0, sizeof(*data
));
1789 if (xform
->type
== RTE_CRYPTO_SYM_XFORM_CIPHER
) {
1790 cipher_xform
= &xform
->cipher
;
1792 if (cipher_xform
->iv
.length
>
1793 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
)
1795 if (cipher_xform
->iv
.offset
!=
1796 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET
)
1799 ret
= get_block_size(xform
, cfg
->cryptodev_id
);
1802 data
->block_size
= (uint16_t)ret
;
1803 data
->op_mask
|= CRYPTO_OP_MASK_CIPHER
;
1805 data
->cipher_auth
.cipher_iv_len
=
1806 cipher_xform
->iv
.length
;
1807 data
->cipher_auth
.cipher_iv_data_offset
= (uint16_t)
1808 p
->cipher_auth
.cipher_iv_update
.offset
;
1809 data
->cipher_auth
.cipher_iv_update_len
= (uint16_t)
1810 p
->cipher_auth
.cipher_iv_update
.length
;
1812 rte_memcpy(data
->iv_aad_data
,
1813 p
->cipher_auth
.cipher_iv
.val
,
1814 p
->cipher_auth
.cipher_iv
.length
);
1816 data
->direction
= cipher_xform
->op
;
1818 } else if (xform
->type
== RTE_CRYPTO_SYM_XFORM_AUTH
) {
1819 auth_xform
= &xform
->auth
;
1820 if (auth_xform
->iv
.length
>
1821 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
)
1823 data
->op_mask
|= CRYPTO_OP_MASK_AUTH
;
1825 data
->cipher_auth
.auth_iv_len
= auth_xform
->iv
.length
;
1826 data
->cipher_auth
.auth_iv_data_offset
= (uint16_t)
1827 p
->cipher_auth
.auth_iv_update
.offset
;
1828 data
->cipher_auth
.auth_iv_update_len
= (uint16_t)
1829 p
->cipher_auth
.auth_iv_update
.length
;
1830 data
->digest_len
= auth_xform
->digest_length
;
1832 data
->direction
= (auth_xform
->op
==
1833 RTE_CRYPTO_AUTH_OP_GENERATE
) ?
1834 RTE_CRYPTO_CIPHER_OP_ENCRYPT
:
1835 RTE_CRYPTO_CIPHER_OP_DECRYPT
;
1837 } else if (xform
->type
== RTE_CRYPTO_SYM_XFORM_AEAD
) {
1838 aead_xform
= &xform
->aead
;
1840 if ((aead_xform
->iv
.length
>
1841 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
) || (
1842 aead_xform
->aad_length
>
1843 RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX
))
1845 if (aead_xform
->iv
.offset
!=
1846 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET
)
1849 ret
= get_block_size(xform
, cfg
->cryptodev_id
);
1852 data
->block_size
= (uint16_t)ret
;
1853 data
->op_mask
|= CRYPTO_OP_MASK_AEAD
;
1855 data
->digest_len
= aead_xform
->digest_length
;
1856 data
->aead
.iv_len
= aead_xform
->iv
.length
;
1857 data
->aead
.aad_len
= aead_xform
->aad_length
;
1859 data
->aead
.iv_data_offset
= (uint16_t)
1860 p
->aead
.iv_update
.offset
;
1861 data
->aead
.iv_update_len
= (uint16_t)
1862 p
->aead
.iv_update
.length
;
1863 data
->aead
.aad_data_offset
= (uint16_t)
1864 p
->aead
.aad_update
.offset
;
1865 data
->aead
.aad_update_len
= (uint16_t)
1866 p
->aead
.aad_update
.length
;
1868 rte_memcpy(data
->iv_aad_data
,
1872 rte_memcpy(data
->iv_aad_data
+ p
->aead
.iv
.length
,
1874 p
->aead
.aad
.length
);
1876 data
->direction
= (aead_xform
->op
==
1877 RTE_CRYPTO_AEAD_OP_ENCRYPT
) ?
1878 RTE_CRYPTO_CIPHER_OP_ENCRYPT
:
1879 RTE_CRYPTO_CIPHER_OP_DECRYPT
;
1883 xform
= xform
->next
;
1886 if (auth_xform
&& auth_xform
->iv
.length
) {
1888 if (auth_xform
->iv
.offset
!=
1889 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET
+
1890 cipher_xform
->iv
.length
)
1893 rte_memcpy(data
->iv_aad_data
+ cipher_xform
->iv
.length
,
1894 p
->cipher_auth
.auth_iv
.val
,
1895 p
->cipher_auth
.auth_iv
.length
);
1897 rte_memcpy(data
->iv_aad_data
,
1898 p
->cipher_auth
.auth_iv
.val
,
1899 p
->cipher_auth
.auth_iv
.length
);
1903 session
= rte_cryptodev_sym_session_create(cfg
->mp_create
);
1907 ret
= rte_cryptodev_sym_session_init(cfg
->cryptodev_id
, session
,
1908 p
->xform
, cfg
->mp_init
);
1910 rte_cryptodev_sym_session_free(session
);
1914 data
->data_offset
= (uint16_t)p
->data_offset
;
1915 data
->session
= session
;
1920 static __rte_always_inline
uint64_t
1921 pkt_work_sym_crypto(struct rte_mbuf
*mbuf
, struct sym_crypto_data
*data
,
1922 struct rte_table_action_sym_crypto_config
*cfg
,
1925 struct crypto_op_sym_iv_aad
*crypto_op
= (struct crypto_op_sym_iv_aad
*)
1926 RTE_MBUF_METADATA_UINT8_PTR(mbuf
, cfg
->op_offset
);
1927 struct rte_crypto_op
*op
= &crypto_op
->op
;
1928 struct rte_crypto_sym_op
*sym
= op
->sym
;
1929 uint32_t pkt_offset
= sizeof(*mbuf
) + mbuf
->data_off
;
1930 uint32_t payload_len
= pkt_offset
+ mbuf
->data_len
- data
->data_offset
;
1932 op
->type
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
;
1933 op
->sess_type
= RTE_CRYPTO_OP_WITH_SESSION
;
1934 op
->phys_addr
= mbuf
->buf_iova
+ cfg
->op_offset
- sizeof(*mbuf
);
1935 op
->status
= RTE_CRYPTO_OP_STATUS_NOT_PROCESSED
;
1938 sym
->session
= data
->session
;
1940 /** pad the packet */
1941 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
) {
1942 uint32_t append_len
= RTE_ALIGN_CEIL(payload_len
,
1943 data
->block_size
) - payload_len
;
1945 if (unlikely(rte_pktmbuf_append(mbuf
, append_len
+
1946 data
->digest_len
) == NULL
))
1949 payload_len
+= append_len
;
1951 payload_len
-= data
->digest_len
;
1953 if (data
->op_mask
& CRYPTO_OP_MASK_CIPHER
) {
1954 /** prepare cipher op */
1955 uint8_t *iv
= crypto_op
->iv_aad
.cipher_auth
.cipher_iv
;
1957 sym
->cipher
.data
.length
= payload_len
;
1958 sym
->cipher
.data
.offset
= data
->data_offset
- pkt_offset
;
1960 if (data
->cipher_auth
.cipher_iv_update_len
) {
1961 uint8_t *pkt_iv
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
1962 data
->cipher_auth
.cipher_iv_data_offset
1965 /** For encryption, update the pkt iv field, otherwise
1966 * update the iv_aad_field
1968 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
1969 rte_memcpy(pkt_iv
, data
->iv_aad_data
,
1970 data
->cipher_auth
.cipher_iv_update_len
);
1972 rte_memcpy(data
->iv_aad_data
, pkt_iv
,
1973 data
->cipher_auth
.cipher_iv_update_len
);
1977 rte_memcpy(iv
, data
->iv_aad_data
,
1978 data
->cipher_auth
.cipher_iv_len
);
1981 if (data
->op_mask
& CRYPTO_OP_MASK_AUTH
) {
1982 /** authentication always start from IP header. */
1983 sym
->auth
.data
.offset
= ip_offset
- pkt_offset
;
1984 sym
->auth
.data
.length
= mbuf
->data_len
- sym
->auth
.data
.offset
-
1986 sym
->auth
.digest
.data
= rte_pktmbuf_mtod_offset(mbuf
,
1987 uint8_t *, rte_pktmbuf_pkt_len(mbuf
) -
1989 sym
->auth
.digest
.phys_addr
= rte_pktmbuf_iova_offset(mbuf
,
1990 rte_pktmbuf_pkt_len(mbuf
) - data
->digest_len
);
1992 if (data
->cipher_auth
.auth_iv_update_len
) {
1993 uint8_t *pkt_iv
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
1994 data
->cipher_auth
.auth_iv_data_offset
1996 uint8_t *data_iv
= data
->iv_aad_data
+
1997 data
->cipher_auth
.cipher_iv_len
;
1999 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
2000 rte_memcpy(pkt_iv
, data_iv
,
2001 data
->cipher_auth
.auth_iv_update_len
);
2003 rte_memcpy(data_iv
, pkt_iv
,
2004 data
->cipher_auth
.auth_iv_update_len
);
2007 if (data
->cipher_auth
.auth_iv_len
) {
2008 /** prepare cipher op */
2009 uint8_t *iv
= crypto_op
->iv_aad
.cipher_auth
.auth_iv
;
2011 rte_memcpy(iv
, data
->iv_aad_data
+
2012 data
->cipher_auth
.cipher_iv_len
,
2013 data
->cipher_auth
.auth_iv_len
);
2017 if (data
->op_mask
& CRYPTO_OP_MASK_AEAD
) {
2018 uint8_t *iv
= crypto_op
->iv_aad
.aead_iv_aad
.iv
;
2019 uint8_t *aad
= crypto_op
->iv_aad
.aead_iv_aad
.aad
;
2021 sym
->aead
.aad
.data
= aad
;
2022 sym
->aead
.aad
.phys_addr
= rte_pktmbuf_iova_offset(mbuf
,
2023 aad
- rte_pktmbuf_mtod(mbuf
, uint8_t *));
2024 sym
->aead
.digest
.data
= rte_pktmbuf_mtod_offset(mbuf
,
2025 uint8_t *, rte_pktmbuf_pkt_len(mbuf
) -
2027 sym
->aead
.digest
.phys_addr
= rte_pktmbuf_iova_offset(mbuf
,
2028 rte_pktmbuf_pkt_len(mbuf
) - data
->digest_len
);
2029 sym
->aead
.data
.offset
= data
->data_offset
- pkt_offset
;
2030 sym
->aead
.data
.length
= payload_len
;
2032 if (data
->aead
.iv_update_len
) {
2033 uint8_t *pkt_iv
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
2034 data
->aead
.iv_data_offset
+ ip_offset
);
2035 uint8_t *data_iv
= data
->iv_aad_data
;
2037 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
2038 rte_memcpy(pkt_iv
, data_iv
,
2039 data
->aead
.iv_update_len
);
2041 rte_memcpy(data_iv
, pkt_iv
,
2042 data
->aead
.iv_update_len
);
2045 rte_memcpy(iv
, data
->iv_aad_data
, data
->aead
.iv_len
);
2047 if (data
->aead
.aad_update_len
) {
2048 uint8_t *pkt_aad
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
2049 data
->aead
.aad_data_offset
+ ip_offset
);
2050 uint8_t *data_aad
= data
->iv_aad_data
+
2053 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
2054 rte_memcpy(pkt_aad
, data_aad
,
2055 data
->aead
.iv_update_len
);
2057 rte_memcpy(data_aad
, pkt_aad
,
2058 data
->aead
.iv_update_len
);
2061 rte_memcpy(aad
, data
->iv_aad_data
+ data
->aead
.iv_len
,
2062 data
->aead
.aad_len
);
2069 * RTE_TABLE_ACTION_TAG
2076 tag_apply(struct tag_data
*data
,
2077 struct rte_table_action_tag_params
*p
)
2083 static __rte_always_inline
void
2084 pkt_work_tag(struct rte_mbuf
*mbuf
,
2085 struct tag_data
*data
)
2087 mbuf
->hash
.fdir
.hi
= data
->tag
;
2088 mbuf
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2091 static __rte_always_inline
void
2092 pkt4_work_tag(struct rte_mbuf
*mbuf0
,
2093 struct rte_mbuf
*mbuf1
,
2094 struct rte_mbuf
*mbuf2
,
2095 struct rte_mbuf
*mbuf3
,
2096 struct tag_data
*data0
,
2097 struct tag_data
*data1
,
2098 struct tag_data
*data2
,
2099 struct tag_data
*data3
)
2101 mbuf0
->hash
.fdir
.hi
= data0
->tag
;
2102 mbuf1
->hash
.fdir
.hi
= data1
->tag
;
2103 mbuf2
->hash
.fdir
.hi
= data2
->tag
;
2104 mbuf3
->hash
.fdir
.hi
= data3
->tag
;
2106 mbuf0
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2107 mbuf1
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2108 mbuf2
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2109 mbuf3
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2113 * RTE_TABLE_ACTION_DECAP
2120 decap_apply(struct decap_data
*data
,
2121 struct rte_table_action_decap_params
*p
)
2127 static __rte_always_inline
void
2128 pkt_work_decap(struct rte_mbuf
*mbuf
,
2129 struct decap_data
*data
)
2131 uint16_t data_off
= mbuf
->data_off
;
2132 uint16_t data_len
= mbuf
->data_len
;
2133 uint32_t pkt_len
= mbuf
->pkt_len
;
2134 uint16_t n
= data
->n
;
2136 mbuf
->data_off
= data_off
+ n
;
2137 mbuf
->data_len
= data_len
- n
;
2138 mbuf
->pkt_len
= pkt_len
- n
;
2141 static __rte_always_inline
void
2142 pkt4_work_decap(struct rte_mbuf
*mbuf0
,
2143 struct rte_mbuf
*mbuf1
,
2144 struct rte_mbuf
*mbuf2
,
2145 struct rte_mbuf
*mbuf3
,
2146 struct decap_data
*data0
,
2147 struct decap_data
*data1
,
2148 struct decap_data
*data2
,
2149 struct decap_data
*data3
)
2151 uint16_t data_off0
= mbuf0
->data_off
;
2152 uint16_t data_len0
= mbuf0
->data_len
;
2153 uint32_t pkt_len0
= mbuf0
->pkt_len
;
2155 uint16_t data_off1
= mbuf1
->data_off
;
2156 uint16_t data_len1
= mbuf1
->data_len
;
2157 uint32_t pkt_len1
= mbuf1
->pkt_len
;
2159 uint16_t data_off2
= mbuf2
->data_off
;
2160 uint16_t data_len2
= mbuf2
->data_len
;
2161 uint32_t pkt_len2
= mbuf2
->pkt_len
;
2163 uint16_t data_off3
= mbuf3
->data_off
;
2164 uint16_t data_len3
= mbuf3
->data_len
;
2165 uint32_t pkt_len3
= mbuf3
->pkt_len
;
2167 uint16_t n0
= data0
->n
;
2168 uint16_t n1
= data1
->n
;
2169 uint16_t n2
= data2
->n
;
2170 uint16_t n3
= data3
->n
;
2172 mbuf0
->data_off
= data_off0
+ n0
;
2173 mbuf0
->data_len
= data_len0
- n0
;
2174 mbuf0
->pkt_len
= pkt_len0
- n0
;
2176 mbuf1
->data_off
= data_off1
+ n1
;
2177 mbuf1
->data_len
= data_len1
- n1
;
2178 mbuf1
->pkt_len
= pkt_len1
- n1
;
2180 mbuf2
->data_off
= data_off2
+ n2
;
2181 mbuf2
->data_len
= data_len2
- n2
;
2182 mbuf2
->pkt_len
= pkt_len2
- n2
;
2184 mbuf3
->data_off
= data_off3
+ n3
;
2185 mbuf3
->data_len
= data_len3
- n3
;
2186 mbuf3
->pkt_len
= pkt_len3
- n3
;
2193 action_valid(enum rte_table_action_type action
)
2196 case RTE_TABLE_ACTION_FWD
:
2197 case RTE_TABLE_ACTION_LB
:
2198 case RTE_TABLE_ACTION_MTR
:
2199 case RTE_TABLE_ACTION_TM
:
2200 case RTE_TABLE_ACTION_ENCAP
:
2201 case RTE_TABLE_ACTION_NAT
:
2202 case RTE_TABLE_ACTION_TTL
:
2203 case RTE_TABLE_ACTION_STATS
:
2204 case RTE_TABLE_ACTION_TIME
:
2205 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2206 case RTE_TABLE_ACTION_TAG
:
2207 case RTE_TABLE_ACTION_DECAP
:
/* Upper bound on action type values; sizes the per-action offset table
 * and the 64-bit action_mask (one bit per action type). */
#define RTE_TABLE_ACTION_MAX                      64
2218 uint64_t action_mask
;
2219 struct rte_table_action_common_config common
;
2220 struct rte_table_action_lb_config lb
;
2221 struct rte_table_action_mtr_config mtr
;
2222 struct rte_table_action_tm_config tm
;
2223 struct rte_table_action_encap_config encap
;
2224 struct rte_table_action_nat_config nat
;
2225 struct rte_table_action_ttl_config ttl
;
2226 struct rte_table_action_stats_config stats
;
2227 struct rte_table_action_sym_crypto_config sym_crypto
;
2231 action_cfg_size(enum rte_table_action_type action
)
2234 case RTE_TABLE_ACTION_LB
:
2235 return sizeof(struct rte_table_action_lb_config
);
2236 case RTE_TABLE_ACTION_MTR
:
2237 return sizeof(struct rte_table_action_mtr_config
);
2238 case RTE_TABLE_ACTION_TM
:
2239 return sizeof(struct rte_table_action_tm_config
);
2240 case RTE_TABLE_ACTION_ENCAP
:
2241 return sizeof(struct rte_table_action_encap_config
);
2242 case RTE_TABLE_ACTION_NAT
:
2243 return sizeof(struct rte_table_action_nat_config
);
2244 case RTE_TABLE_ACTION_TTL
:
2245 return sizeof(struct rte_table_action_ttl_config
);
2246 case RTE_TABLE_ACTION_STATS
:
2247 return sizeof(struct rte_table_action_stats_config
);
2248 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2249 return sizeof(struct rte_table_action_sym_crypto_config
);
2256 action_cfg_get(struct ap_config
*ap_config
,
2257 enum rte_table_action_type type
)
2260 case RTE_TABLE_ACTION_LB
:
2261 return &ap_config
->lb
;
2263 case RTE_TABLE_ACTION_MTR
:
2264 return &ap_config
->mtr
;
2266 case RTE_TABLE_ACTION_TM
:
2267 return &ap_config
->tm
;
2269 case RTE_TABLE_ACTION_ENCAP
:
2270 return &ap_config
->encap
;
2272 case RTE_TABLE_ACTION_NAT
:
2273 return &ap_config
->nat
;
2275 case RTE_TABLE_ACTION_TTL
:
2276 return &ap_config
->ttl
;
2278 case RTE_TABLE_ACTION_STATS
:
2279 return &ap_config
->stats
;
2281 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2282 return &ap_config
->sym_crypto
;
2289 action_cfg_set(struct ap_config
*ap_config
,
2290 enum rte_table_action_type type
,
2293 void *dst
= action_cfg_get(ap_config
, type
);
2296 memcpy(dst
, action_cfg
, action_cfg_size(type
));
2298 ap_config
->action_mask
|= 1LLU << type
;
2302 size_t offset
[RTE_TABLE_ACTION_MAX
];
2307 action_data_size(enum rte_table_action_type action
,
2308 struct ap_config
*ap_config
)
2311 case RTE_TABLE_ACTION_FWD
:
2312 return sizeof(struct fwd_data
);
2314 case RTE_TABLE_ACTION_LB
:
2315 return sizeof(struct lb_data
);
2317 case RTE_TABLE_ACTION_MTR
:
2318 return mtr_data_size(&ap_config
->mtr
);
2320 case RTE_TABLE_ACTION_TM
:
2321 return sizeof(struct tm_data
);
2323 case RTE_TABLE_ACTION_ENCAP
:
2324 return encap_data_size(&ap_config
->encap
);
2326 case RTE_TABLE_ACTION_NAT
:
2327 return nat_data_size(&ap_config
->nat
,
2328 &ap_config
->common
);
2330 case RTE_TABLE_ACTION_TTL
:
2331 return sizeof(struct ttl_data
);
2333 case RTE_TABLE_ACTION_STATS
:
2334 return sizeof(struct stats_data
);
2336 case RTE_TABLE_ACTION_TIME
:
2337 return sizeof(struct time_data
);
2339 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2340 return (sizeof(struct sym_crypto_data
));
2342 case RTE_TABLE_ACTION_TAG
:
2343 return sizeof(struct tag_data
);
2345 case RTE_TABLE_ACTION_DECAP
:
2346 return sizeof(struct decap_data
);
2355 action_data_offset_set(struct ap_data
*ap_data
,
2356 struct ap_config
*ap_config
)
2358 uint64_t action_mask
= ap_config
->action_mask
;
2362 memset(ap_data
->offset
, 0, sizeof(ap_data
->offset
));
2365 for (action
= 0; action
< RTE_TABLE_ACTION_MAX
; action
++)
2366 if (action_mask
& (1LLU << action
)) {
2367 ap_data
->offset
[action
] = offset
;
2368 offset
+= action_data_size((enum rte_table_action_type
)action
,
2372 ap_data
->total_size
= offset
;
2375 struct rte_table_action_profile
{
2376 struct ap_config cfg
;
2377 struct ap_data data
;
2381 struct rte_table_action_profile
*
2382 rte_table_action_profile_create(struct rte_table_action_common_config
*common
)
2384 struct rte_table_action_profile
*ap
;
2386 /* Check input arguments */
2390 /* Memory allocation */
2391 ap
= calloc(1, sizeof(struct rte_table_action_profile
));
2395 /* Initialization */
2396 memcpy(&ap
->cfg
.common
, common
, sizeof(*common
));
2403 rte_table_action_profile_action_register(struct rte_table_action_profile
*profile
,
2404 enum rte_table_action_type type
,
2405 void *action_config
)
2409 /* Check input arguments */
2410 if ((profile
== NULL
) ||
2412 (action_valid(type
) == 0) ||
2413 (profile
->cfg
.action_mask
& (1LLU << type
)) ||
2414 ((action_cfg_size(type
) == 0) && action_config
) ||
2415 (action_cfg_size(type
) && (action_config
== NULL
)))
2419 case RTE_TABLE_ACTION_LB
:
2420 status
= lb_cfg_check(action_config
);
2423 case RTE_TABLE_ACTION_MTR
:
2424 status
= mtr_cfg_check(action_config
);
2427 case RTE_TABLE_ACTION_TM
:
2428 status
= tm_cfg_check(action_config
);
2431 case RTE_TABLE_ACTION_ENCAP
:
2432 status
= encap_cfg_check(action_config
);
2435 case RTE_TABLE_ACTION_NAT
:
2436 status
= nat_cfg_check(action_config
);
2439 case RTE_TABLE_ACTION_TTL
:
2440 status
= ttl_cfg_check(action_config
);
2443 case RTE_TABLE_ACTION_STATS
:
2444 status
= stats_cfg_check(action_config
);
2447 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2448 status
= sym_crypto_cfg_check(action_config
);
2460 action_cfg_set(&profile
->cfg
, type
, action_config
);
2466 rte_table_action_profile_freeze(struct rte_table_action_profile
*profile
)
2468 if (profile
->frozen
)
2471 profile
->cfg
.action_mask
|= 1LLU << RTE_TABLE_ACTION_FWD
;
2472 action_data_offset_set(&profile
->data
, &profile
->cfg
);
2473 profile
->frozen
= 1;
/* Release a profile created by rte_table_action_profile_create().
 * NULL is accepted and treated as a no-op. */
int
rte_table_action_profile_free(struct rte_table_action_profile *profile)
{
	if (profile == NULL)
		return 0;

	free(profile);
	return 0;
}
2491 #define METER_PROFILES_MAX 32
2493 struct rte_table_action
{
2494 struct ap_config cfg
;
2495 struct ap_data data
;
2496 struct dscp_table_data dscp_table
;
2497 struct meter_profile_data mp
[METER_PROFILES_MAX
];
2500 struct rte_table_action
*
2501 rte_table_action_create(struct rte_table_action_profile
*profile
,
2504 struct rte_table_action
*action
;
2506 /* Check input arguments */
2507 if ((profile
== NULL
) ||
2508 (profile
->frozen
== 0))
2511 /* Memory allocation */
2512 action
= rte_zmalloc_socket(NULL
,
2513 sizeof(struct rte_table_action
),
2514 RTE_CACHE_LINE_SIZE
,
2519 /* Initialization */
2520 memcpy(&action
->cfg
, &profile
->cfg
, sizeof(profile
->cfg
));
2521 memcpy(&action
->data
, &profile
->data
, sizeof(profile
->data
));
2526 static __rte_always_inline
void *
2527 action_data_get(void *data
,
2528 struct rte_table_action
*action
,
2529 enum rte_table_action_type type
)
2531 size_t offset
= action
->data
.offset
[type
];
2532 uint8_t *data_bytes
= data
;
2534 return &data_bytes
[offset
];
2538 rte_table_action_apply(struct rte_table_action
*action
,
2540 enum rte_table_action_type type
,
2541 void *action_params
)
2545 /* Check input arguments */
2546 if ((action
== NULL
) ||
2548 (action_valid(type
) == 0) ||
2549 ((action
->cfg
.action_mask
& (1LLU << type
)) == 0) ||
2550 (action_params
== NULL
))
2554 action_data
= action_data_get(data
, action
, type
);
2557 case RTE_TABLE_ACTION_FWD
:
2558 return fwd_apply(action_data
,
2561 case RTE_TABLE_ACTION_LB
:
2562 return lb_apply(action_data
,
2565 case RTE_TABLE_ACTION_MTR
:
2566 return mtr_apply(action_data
,
2570 RTE_DIM(action
->mp
));
2572 case RTE_TABLE_ACTION_TM
:
2573 return tm_apply(action_data
,
2577 case RTE_TABLE_ACTION_ENCAP
:
2578 return encap_apply(action_data
,
2581 &action
->cfg
.common
);
2583 case RTE_TABLE_ACTION_NAT
:
2584 return nat_apply(action_data
,
2586 &action
->cfg
.common
);
2588 case RTE_TABLE_ACTION_TTL
:
2589 return ttl_apply(action_data
,
2592 case RTE_TABLE_ACTION_STATS
:
2593 return stats_apply(action_data
,
2596 case RTE_TABLE_ACTION_TIME
:
2597 return time_apply(action_data
,
2600 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2601 return sym_crypto_apply(action_data
,
2602 &action
->cfg
.sym_crypto
,
2605 case RTE_TABLE_ACTION_TAG
:
2606 return tag_apply(action_data
,
2609 case RTE_TABLE_ACTION_DECAP
:
2610 return decap_apply(action_data
,
2619 rte_table_action_dscp_table_update(struct rte_table_action
*action
,
2621 struct rte_table_action_dscp_table
*table
)
2625 /* Check input arguments */
2626 if ((action
== NULL
) ||
2627 ((action
->cfg
.action_mask
& ((1LLU << RTE_TABLE_ACTION_MTR
) |
2628 (1LLU << RTE_TABLE_ACTION_TM
))) == 0) ||
2633 for (i
= 0; i
< RTE_DIM(table
->entry
); i
++) {
2634 struct dscp_table_entry_data
*data
=
2635 &action
->dscp_table
.entry
[i
];
2636 struct rte_table_action_dscp_table_entry
*entry
=
2639 if ((dscp_mask
& (1LLU << i
)) == 0)
2642 data
->color
= entry
->color
;
2643 data
->tc
= entry
->tc_id
;
2644 data
->tc_queue
= entry
->tc_queue_id
;
2651 rte_table_action_meter_profile_add(struct rte_table_action
*action
,
2652 uint32_t meter_profile_id
,
2653 struct rte_table_action_meter_profile
*profile
)
2655 struct meter_profile_data
*mp_data
;
2658 /* Check input arguments */
2659 if ((action
== NULL
) ||
2660 ((action
->cfg
.action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) == 0) ||
2664 if (profile
->alg
!= RTE_TABLE_ACTION_METER_TRTCM
)
2667 mp_data
= meter_profile_data_find(action
->mp
,
2668 RTE_DIM(action
->mp
),
2673 mp_data
= meter_profile_data_find_unused(action
->mp
,
2674 RTE_DIM(action
->mp
));
2678 /* Install new profile */
2679 status
= rte_meter_trtcm_profile_config(&mp_data
->profile
,
2684 mp_data
->profile_id
= meter_profile_id
;
/*
 * Delete a meter profile from the action object's profile pool.
 * NOTE(review): lossy extraction -- the error returns, the not-found early
 * exit, and the statement that actually marks the slot unused are not
 * visible here.
 */
2691 rte_table_action_meter_profile_delete(struct rte_table_action
*action
,
2692 uint32_t meter_profile_id
)
2694 struct meter_profile_data
*mp_data
;
2696 /* Check input arguments */
/* The MTR action must be part of this action object's configuration. */
2697 if ((action
== NULL
) ||
2698 ((action
->cfg
.action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) == 0))
/* Look the profile up by id within the fixed-size pool action->mp[]. */
2701 mp_data
= meter_profile_data_find(action
->mp
,
2702 RTE_DIM(action
->mp
),
2707 /* Uninstall profile */
2714 rte_table_action_meter_read(struct rte_table_action
*action
,
2717 struct rte_table_action_mtr_counters
*stats
,
2720 struct mtr_trtcm_data
*mtr_data
;
2723 /* Check input arguments */
2724 if ((action
== NULL
) ||
2725 ((action
->cfg
.action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) == 0) ||
2727 (tc_mask
> RTE_LEN2MASK(action
->cfg
.mtr
.n_tc
, uint32_t)))
2730 mtr_data
= action_data_get(data
, action
, RTE_TABLE_ACTION_MTR
);
2734 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
2735 struct rte_table_action_mtr_counters_tc
*dst
=
2737 struct mtr_trtcm_data
*src
= &mtr_data
[i
];
2739 if ((tc_mask
& (1 << i
)) == 0)
2742 dst
->n_packets
[RTE_COLOR_GREEN
] =
2743 mtr_trtcm_data_stats_get(src
, RTE_COLOR_GREEN
);
2745 dst
->n_packets
[RTE_COLOR_YELLOW
] =
2746 mtr_trtcm_data_stats_get(src
, RTE_COLOR_YELLOW
);
2748 dst
->n_packets
[RTE_COLOR_RED
] =
2749 mtr_trtcm_data_stats_get(src
, RTE_COLOR_RED
);
2751 dst
->n_packets_valid
= 1;
2752 dst
->n_bytes_valid
= 0;
2755 stats
->tc_mask
= tc_mask
;
2760 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
2761 struct mtr_trtcm_data
*src
= &mtr_data
[i
];
2763 if ((tc_mask
& (1 << i
)) == 0)
2766 mtr_trtcm_data_stats_reset(src
, RTE_COLOR_GREEN
);
2767 mtr_trtcm_data_stats_reset(src
, RTE_COLOR_YELLOW
);
2768 mtr_trtcm_data_stats_reset(src
, RTE_COLOR_RED
);
/*
 * Read (and optionally clear) the TTL action counters of one table entry.
 * NOTE(review): lossy extraction -- the "data" parameter declaration, the
 * clear-flag conditionals around the read/reset, and the returns are not
 * visible here.
 */
2776 rte_table_action_ttl_read(struct rte_table_action
*action
,
2778 struct rte_table_action_ttl_counters
*stats
,
2781 struct ttl_data
*ttl_data
;
2783 /* Check input arguments */
/* The TTL action must be enabled in this action object. */
2784 if ((action
== NULL
) ||
2785 ((action
->cfg
.action_mask
&
2786 (1LLU << RTE_TABLE_ACTION_TTL
)) == 0) ||
/* Locate the TTL data area inside the entry. */
2790 ttl_data
= action_data_get(data
, action
, RTE_TABLE_ACTION_TTL
);
/* Export the packet counter via the TTL_STATS_READ accessor macro. */
2794 stats
->n_packets
= TTL_STATS_READ(ttl_data
);
/* Reset path (presumably gated on a clear flag -- gate not visible here). */
2798 TTL_STATS_RESET(ttl_data
);
/*
 * Read (and optionally clear) the STATS action counters of one table entry.
 * NOTE(review): lossy extraction -- the "data"/"clear" parameters, the
 * conditionals guarding the copy-out and the reset, and the returns are not
 * visible here.
 */
2804 rte_table_action_stats_read(struct rte_table_action
*action
,
2806 struct rte_table_action_stats_counters
*stats
,
2809 struct stats_data
*stats_data
;
2811 /* Check input arguments */
/* The STATS action must be enabled in this action object. */
2812 if ((action
== NULL
) ||
2813 ((action
->cfg
.action_mask
&
2814 (1LLU << RTE_TABLE_ACTION_STATS
)) == 0) ||
/* Locate the stats data area inside the entry. */
2818 stats_data
= action_data_get(data
, action
,
2819 RTE_TABLE_ACTION_STATS
);
/* Copy both counters out; this implementation always maintains packet
 * and byte counts, so both validity flags are set. */
2823 stats
->n_packets
= stats_data
->n_packets
;
2824 stats
->n_bytes
= stats_data
->n_bytes
;
2825 stats
->n_packets_valid
= 1;
2826 stats
->n_bytes_valid
= 1;
/* Reset path (presumably gated on a clear flag -- gate not visible here). */
2831 stats_data
->n_packets
= 0;
2832 stats_data
->n_bytes
= 0;
/*
 * Read the last-seen timestamp recorded by the TIME action for one entry.
 * NOTE(review): lossy extraction -- the "data" parameter declaration and the
 * returns are not visible here.
 */
2839 rte_table_action_time_read(struct rte_table_action
*action
,
2841 uint64_t *timestamp
)
2843 struct time_data
*time_data
;
2845 /* Check input arguments */
/* The TIME action must be enabled and the output pointer must be valid. */
2846 if ((action
== NULL
) ||
2847 ((action
->cfg
.action_mask
&
2848 (1LLU << RTE_TABLE_ACTION_TIME
)) == 0) ||
2850 (timestamp
== NULL
))
/* Locate the time data area inside the entry. */
2853 time_data
= action_data_get(data
, action
, RTE_TABLE_ACTION_TIME
);
/* Copy the stored timestamp out to the caller. */
2856 *timestamp
= time_data
->time
;
/*
 * Return the cryptodev symmetric session attached to one table entry by the
 * SYM_CRYPTO action, or the error path's value when arguments are invalid.
 * NOTE(review): lossy extraction -- the "data" parameter declaration and the
 * error-path return are not visible here.
 */
2861 struct rte_cryptodev_sym_session
*
2862 rte_table_action_crypto_sym_session_get(struct rte_table_action
*action
,
2865 struct sym_crypto_data
*sym_crypto_data
;
2867 /* Check input arguments */
/* The SYM_CRYPTO action must be enabled in this action object. */
2868 if ((action
== NULL
) ||
2869 ((action
->cfg
.action_mask
&
2870 (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO
)) == 0) ||
/* Locate the sym-crypto data area inside the entry. */
2874 sym_crypto_data
= action_data_get(data
, action
,
2875 RTE_TABLE_ACTION_SYM_CRYPTO
);
/* Hand back the session pointer stored in the entry's crypto data. */
2877 return sym_crypto_data
->session
;
2880 static __rte_always_inline
uint64_t
2881 pkt_work(struct rte_mbuf
*mbuf
,
2882 struct rte_pipeline_table_entry
*table_entry
,
2884 struct rte_table_action
*action
,
2885 struct ap_config
*cfg
)
2887 uint64_t drop_mask
= 0;
2889 uint32_t ip_offset
= action
->cfg
.common
.ip_offset
;
2890 void *ip
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ip_offset
);
2893 uint16_t total_length
;
2895 if (cfg
->common
.ip_version
) {
2896 struct rte_ipv4_hdr
*hdr
= ip
;
2898 dscp
= hdr
->type_of_service
>> 2;
2899 total_length
= rte_ntohs(hdr
->total_length
);
2901 struct rte_ipv6_hdr
*hdr
= ip
;
2903 dscp
= (rte_ntohl(hdr
->vtc_flow
) & 0x0F600000) >> 18;
2904 total_length
= rte_ntohs(hdr
->payload_len
) +
2905 sizeof(struct rte_ipv6_hdr
);
2908 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_LB
)) {
2910 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_LB
);
2916 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) {
2918 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_MTR
);
2920 drop_mask
|= pkt_work_mtr(mbuf
,
2922 &action
->dscp_table
,
2929 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TM
)) {
2931 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_TM
);
2935 &action
->dscp_table
,
2939 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_DECAP
)) {
2940 void *data
= action_data_get(table_entry
,
2942 RTE_TABLE_ACTION_DECAP
);
2944 pkt_work_decap(mbuf
, data
);
2947 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_ENCAP
)) {
2949 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_ENCAP
);
2951 pkt_work_encap(mbuf
,
2959 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_NAT
)) {
2961 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_NAT
);
2963 if (cfg
->common
.ip_version
)
2964 pkt_ipv4_work_nat(ip
, data
, &cfg
->nat
);
2966 pkt_ipv6_work_nat(ip
, data
, &cfg
->nat
);
2969 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TTL
)) {
2971 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_TTL
);
2973 if (cfg
->common
.ip_version
)
2974 drop_mask
|= pkt_ipv4_work_ttl(ip
, data
);
2976 drop_mask
|= pkt_ipv6_work_ttl(ip
, data
);
2979 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_STATS
)) {
2981 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_STATS
);
2983 pkt_work_stats(data
, total_length
);
2986 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TIME
)) {
2988 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_TIME
);
2990 pkt_work_time(data
, time
);
2993 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO
)) {
2994 void *data
= action_data_get(table_entry
, action
,
2995 RTE_TABLE_ACTION_SYM_CRYPTO
);
2997 drop_mask
|= pkt_work_sym_crypto(mbuf
, data
, &cfg
->sym_crypto
,
3001 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TAG
)) {
3002 void *data
= action_data_get(table_entry
,
3004 RTE_TABLE_ACTION_TAG
);
3006 pkt_work_tag(mbuf
, data
);
3012 static __rte_always_inline
uint64_t
3013 pkt4_work(struct rte_mbuf
**mbufs
,
3014 struct rte_pipeline_table_entry
**table_entries
,
3016 struct rte_table_action
*action
,
3017 struct ap_config
*cfg
)
3019 uint64_t drop_mask0
= 0;
3020 uint64_t drop_mask1
= 0;
3021 uint64_t drop_mask2
= 0;
3022 uint64_t drop_mask3
= 0;
3024 struct rte_mbuf
*mbuf0
= mbufs
[0];
3025 struct rte_mbuf
*mbuf1
= mbufs
[1];
3026 struct rte_mbuf
*mbuf2
= mbufs
[2];
3027 struct rte_mbuf
*mbuf3
= mbufs
[3];
3029 struct rte_pipeline_table_entry
*table_entry0
= table_entries
[0];
3030 struct rte_pipeline_table_entry
*table_entry1
= table_entries
[1];
3031 struct rte_pipeline_table_entry
*table_entry2
= table_entries
[2];
3032 struct rte_pipeline_table_entry
*table_entry3
= table_entries
[3];
3034 uint32_t ip_offset
= action
->cfg
.common
.ip_offset
;
3035 void *ip0
= RTE_MBUF_METADATA_UINT32_PTR(mbuf0
, ip_offset
);
3036 void *ip1
= RTE_MBUF_METADATA_UINT32_PTR(mbuf1
, ip_offset
);
3037 void *ip2
= RTE_MBUF_METADATA_UINT32_PTR(mbuf2
, ip_offset
);
3038 void *ip3
= RTE_MBUF_METADATA_UINT32_PTR(mbuf3
, ip_offset
);
3040 uint32_t dscp0
, dscp1
, dscp2
, dscp3
;
3041 uint16_t total_length0
, total_length1
, total_length2
, total_length3
;
3043 if (cfg
->common
.ip_version
) {
3044 struct rte_ipv4_hdr
*hdr0
= ip0
;
3045 struct rte_ipv4_hdr
*hdr1
= ip1
;
3046 struct rte_ipv4_hdr
*hdr2
= ip2
;
3047 struct rte_ipv4_hdr
*hdr3
= ip3
;
3049 dscp0
= hdr0
->type_of_service
>> 2;
3050 dscp1
= hdr1
->type_of_service
>> 2;
3051 dscp2
= hdr2
->type_of_service
>> 2;
3052 dscp3
= hdr3
->type_of_service
>> 2;
3054 total_length0
= rte_ntohs(hdr0
->total_length
);
3055 total_length1
= rte_ntohs(hdr1
->total_length
);
3056 total_length2
= rte_ntohs(hdr2
->total_length
);
3057 total_length3
= rte_ntohs(hdr3
->total_length
);
3059 struct rte_ipv6_hdr
*hdr0
= ip0
;
3060 struct rte_ipv6_hdr
*hdr1
= ip1
;
3061 struct rte_ipv6_hdr
*hdr2
= ip2
;
3062 struct rte_ipv6_hdr
*hdr3
= ip3
;
3064 dscp0
= (rte_ntohl(hdr0
->vtc_flow
) & 0x0F600000) >> 18;
3065 dscp1
= (rte_ntohl(hdr1
->vtc_flow
) & 0x0F600000) >> 18;
3066 dscp2
= (rte_ntohl(hdr2
->vtc_flow
) & 0x0F600000) >> 18;
3067 dscp3
= (rte_ntohl(hdr3
->vtc_flow
) & 0x0F600000) >> 18;
3069 total_length0
= rte_ntohs(hdr0
->payload_len
) +
3070 sizeof(struct rte_ipv6_hdr
);
3071 total_length1
= rte_ntohs(hdr1
->payload_len
) +
3072 sizeof(struct rte_ipv6_hdr
);
3073 total_length2
= rte_ntohs(hdr2
->payload_len
) +
3074 sizeof(struct rte_ipv6_hdr
);
3075 total_length3
= rte_ntohs(hdr3
->payload_len
) +
3076 sizeof(struct rte_ipv6_hdr
);
3079 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_LB
)) {
3081 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_LB
);
3083 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_LB
);
3085 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_LB
);
3087 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_LB
);
3106 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) {
3108 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_MTR
);
3110 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_MTR
);
3112 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_MTR
);
3114 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_MTR
);
3116 drop_mask0
|= pkt_work_mtr(mbuf0
,
3118 &action
->dscp_table
,
3124 drop_mask1
|= pkt_work_mtr(mbuf1
,
3126 &action
->dscp_table
,
3132 drop_mask2
|= pkt_work_mtr(mbuf2
,
3134 &action
->dscp_table
,
3140 drop_mask3
|= pkt_work_mtr(mbuf3
,
3142 &action
->dscp_table
,
3149 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TM
)) {
3151 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_TM
);
3153 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_TM
);
3155 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_TM
);
3157 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_TM
);
3161 &action
->dscp_table
,
3166 &action
->dscp_table
,
3171 &action
->dscp_table
,
3176 &action
->dscp_table
,
3180 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_DECAP
)) {
3181 void *data0
= action_data_get(table_entry0
,
3183 RTE_TABLE_ACTION_DECAP
);
3184 void *data1
= action_data_get(table_entry1
,
3186 RTE_TABLE_ACTION_DECAP
);
3187 void *data2
= action_data_get(table_entry2
,
3189 RTE_TABLE_ACTION_DECAP
);
3190 void *data3
= action_data_get(table_entry3
,
3192 RTE_TABLE_ACTION_DECAP
);
3194 pkt4_work_decap(mbuf0
, mbuf1
, mbuf2
, mbuf3
,
3195 data0
, data1
, data2
, data3
);
3198 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_ENCAP
)) {
3200 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_ENCAP
);
3202 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_ENCAP
);
3204 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_ENCAP
);
3206 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_ENCAP
);
3208 pkt_work_encap(mbuf0
,
3215 pkt_work_encap(mbuf1
,
3222 pkt_work_encap(mbuf2
,
3229 pkt_work_encap(mbuf3
,
3237 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_NAT
)) {
3239 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_NAT
);
3241 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_NAT
);
3243 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_NAT
);
3245 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_NAT
);
3247 if (cfg
->common
.ip_version
) {
3248 pkt_ipv4_work_nat(ip0
, data0
, &cfg
->nat
);
3249 pkt_ipv4_work_nat(ip1
, data1
, &cfg
->nat
);
3250 pkt_ipv4_work_nat(ip2
, data2
, &cfg
->nat
);
3251 pkt_ipv4_work_nat(ip3
, data3
, &cfg
->nat
);
3253 pkt_ipv6_work_nat(ip0
, data0
, &cfg
->nat
);
3254 pkt_ipv6_work_nat(ip1
, data1
, &cfg
->nat
);
3255 pkt_ipv6_work_nat(ip2
, data2
, &cfg
->nat
);
3256 pkt_ipv6_work_nat(ip3
, data3
, &cfg
->nat
);
3260 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TTL
)) {
3262 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_TTL
);
3264 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_TTL
);
3266 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_TTL
);
3268 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_TTL
);
3270 if (cfg
->common
.ip_version
) {
3271 drop_mask0
|= pkt_ipv4_work_ttl(ip0
, data0
);
3272 drop_mask1
|= pkt_ipv4_work_ttl(ip1
, data1
);
3273 drop_mask2
|= pkt_ipv4_work_ttl(ip2
, data2
);
3274 drop_mask3
|= pkt_ipv4_work_ttl(ip3
, data3
);
3276 drop_mask0
|= pkt_ipv6_work_ttl(ip0
, data0
);
3277 drop_mask1
|= pkt_ipv6_work_ttl(ip1
, data1
);
3278 drop_mask2
|= pkt_ipv6_work_ttl(ip2
, data2
);
3279 drop_mask3
|= pkt_ipv6_work_ttl(ip3
, data3
);
3283 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_STATS
)) {
3285 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_STATS
);
3287 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_STATS
);
3289 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_STATS
);
3291 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_STATS
);
3293 pkt_work_stats(data0
, total_length0
);
3294 pkt_work_stats(data1
, total_length1
);
3295 pkt_work_stats(data2
, total_length2
);
3296 pkt_work_stats(data3
, total_length3
);
3299 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TIME
)) {
3301 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_TIME
);
3303 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_TIME
);
3305 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_TIME
);
3307 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_TIME
);
3309 pkt_work_time(data0
, time
);
3310 pkt_work_time(data1
, time
);
3311 pkt_work_time(data2
, time
);
3312 pkt_work_time(data3
, time
);
3315 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO
)) {
3316 void *data0
= action_data_get(table_entry0
, action
,
3317 RTE_TABLE_ACTION_SYM_CRYPTO
);
3318 void *data1
= action_data_get(table_entry1
, action
,
3319 RTE_TABLE_ACTION_SYM_CRYPTO
);
3320 void *data2
= action_data_get(table_entry2
, action
,
3321 RTE_TABLE_ACTION_SYM_CRYPTO
);
3322 void *data3
= action_data_get(table_entry3
, action
,
3323 RTE_TABLE_ACTION_SYM_CRYPTO
);
3325 drop_mask0
|= pkt_work_sym_crypto(mbuf0
, data0
, &cfg
->sym_crypto
,
3327 drop_mask1
|= pkt_work_sym_crypto(mbuf1
, data1
, &cfg
->sym_crypto
,
3329 drop_mask2
|= pkt_work_sym_crypto(mbuf2
, data2
, &cfg
->sym_crypto
,
3331 drop_mask3
|= pkt_work_sym_crypto(mbuf3
, data3
, &cfg
->sym_crypto
,
3335 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TAG
)) {
3336 void *data0
= action_data_get(table_entry0
,
3338 RTE_TABLE_ACTION_TAG
);
3339 void *data1
= action_data_get(table_entry1
,
3341 RTE_TABLE_ACTION_TAG
);
3342 void *data2
= action_data_get(table_entry2
,
3344 RTE_TABLE_ACTION_TAG
);
3345 void *data3
= action_data_get(table_entry3
,
3347 RTE_TABLE_ACTION_TAG
);
3349 pkt4_work_tag(mbuf0
, mbuf1
, mbuf2
, mbuf3
,
3350 data0
, data1
, data2
, data3
);
3359 static __rte_always_inline
int
3360 ah(struct rte_pipeline
*p
,
3361 struct rte_mbuf
**pkts
,
3363 struct rte_pipeline_table_entry
**entries
,
3364 struct rte_table_action
*action
,
3365 struct ap_config
*cfg
)
3367 uint64_t pkts_drop_mask
= 0;
3370 if (cfg
->action_mask
& ((1LLU << RTE_TABLE_ACTION_MTR
) |
3371 (1LLU << RTE_TABLE_ACTION_TIME
)))
3374 if ((pkts_mask
& (pkts_mask
+ 1)) == 0) {
3375 uint64_t n_pkts
= __builtin_popcountll(pkts_mask
);
3378 for (i
= 0; i
< (n_pkts
& (~0x3LLU
)); i
+= 4) {
3381 drop_mask
= pkt4_work(&pkts
[i
],
3387 pkts_drop_mask
|= drop_mask
<< i
;
3390 for ( ; i
< n_pkts
; i
++) {
3393 drop_mask
= pkt_work(pkts
[i
],
3399 pkts_drop_mask
|= drop_mask
<< i
;
3402 for ( ; pkts_mask
; ) {
3403 uint32_t pos
= __builtin_ctzll(pkts_mask
);
3404 uint64_t pkt_mask
= 1LLU << pos
;
3407 drop_mask
= pkt_work(pkts
[pos
],
3413 pkts_mask
&= ~pkt_mask
;
3414 pkts_drop_mask
|= drop_mask
<< pos
;
3417 rte_pipeline_ah_packet_drop(p
, pkts_drop_mask
);
3423 ah_default(struct rte_pipeline
*p
,
3424 struct rte_mbuf
**pkts
,
3426 struct rte_pipeline_table_entry
**entries
,
3429 struct rte_table_action
*action
= arg
;
3439 static rte_pipeline_table_action_handler_hit
3440 ah_selector(struct rte_table_action
*action
)
3442 if (action
->cfg
.action_mask
== (1LLU << RTE_TABLE_ACTION_FWD
))
/*
 * Fill in the rte_pipeline table parameters needed to plug this action
 * object into a pipeline table: hit handler, handler argument, and the
 * per-entry action data size.
 * NOTE(review): lossy extraction -- part of the argument check (line 3457..)
 * and the final return are not visible here.
 */
3449 rte_table_action_table_params_get(struct rte_table_action
*action
,
3450 struct rte_pipeline_table_params
*params
)
3452 rte_pipeline_table_action_handler_hit f_action_hit
;
3453 uint32_t total_size
;
3455 /* Check input arguments */
3456 if ((action
== NULL
) ||
/* Pick the hit handler; ah_selector() returns NULL when only the FWD
 * action is enabled (no custom handler needed -- see ah_selector above). */
3460 f_action_hit
= ah_selector(action
);
/* Round the per-entry data footprint up to a power of two. */
3461 total_size
= rte_align32pow2(action
->data
.total_size
);
3463 /* Fill in params */
3464 params
->f_action_hit
= f_action_hit
;
/* Miss handler is never installed by this library. */
3465 params
->f_action_miss
= NULL
;
/* Pass the action object to the handler only when one is installed. */
3466 params
->arg_ah
= (f_action_hit
) ? action
: NULL
;
/* The pipeline already accounts for sizeof(struct rte_pipeline_table_entry);
 * report only the extra action data bytes. */
3467 params
->action_data_size
= total_size
-
3468 sizeof(struct rte_pipeline_table_entry
);
3474 rte_table_action_free(struct rte_table_action
*action
)