1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
17 #include <rte_cryptodev.h>
18 #include <rte_cryptodev_pmd.h>
20 #include "rte_table_action.h"
22 #define rte_htons rte_cpu_to_be_16
23 #define rte_htonl rte_cpu_to_be_32
25 #define rte_ntohs rte_be_to_cpu_16
26 #define rte_ntohl rte_be_to_cpu_32
29 * RTE_TABLE_ACTION_FWD
31 #define fwd_data rte_pipeline_table_entry
34 fwd_apply(struct fwd_data
*data
,
35 struct rte_table_action_fwd_params
*p
)
37 data
->action
= p
->action
;
39 if (p
->action
== RTE_PIPELINE_ACTION_PORT
)
40 data
->port_id
= p
->id
;
42 if (p
->action
== RTE_PIPELINE_ACTION_TABLE
)
43 data
->table_id
= p
->id
;
52 lb_cfg_check(struct rte_table_action_lb_config
*cfg
)
55 (cfg
->key_size
< RTE_TABLE_ACTION_LB_KEY_SIZE_MIN
) ||
56 (cfg
->key_size
> RTE_TABLE_ACTION_LB_KEY_SIZE_MAX
) ||
57 (!rte_is_power_of_2(cfg
->key_size
)) ||
58 (cfg
->f_hash
== NULL
))
65 uint32_t out
[RTE_TABLE_ACTION_LB_TABLE_SIZE
];
66 } __attribute__((__packed__
));
69 lb_apply(struct lb_data
*data
,
70 struct rte_table_action_lb_params
*p
)
72 memcpy(data
->out
, p
->out
, sizeof(data
->out
));
77 static __rte_always_inline
void
78 pkt_work_lb(struct rte_mbuf
*mbuf
,
80 struct rte_table_action_lb_config
*cfg
)
82 uint8_t *pkt_key
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
, cfg
->key_offset
);
83 uint32_t *out
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, cfg
->out_offset
);
87 digest
= cfg
->f_hash(pkt_key
,
91 pos
= digest
& (RTE_TABLE_ACTION_LB_TABLE_SIZE
- 1);
92 out_val
= data
->out
[pos
];
98 * RTE_TABLE_ACTION_MTR
101 mtr_cfg_check(struct rte_table_action_mtr_config
*mtr
)
103 if ((mtr
->alg
== RTE_TABLE_ACTION_METER_SRTCM
) ||
104 ((mtr
->n_tc
!= 1) && (mtr
->n_tc
!= 4)) ||
105 (mtr
->n_bytes_enabled
!= 0))
110 struct mtr_trtcm_data
{
111 struct rte_meter_trtcm trtcm
;
112 uint64_t stats
[RTE_COLORS
];
113 } __attribute__((__packed__
));
115 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
116 (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)
119 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data
*data
,
122 data
->stats
[RTE_COLOR_GREEN
] &= ~0xF8LLU
;
123 data
->stats
[RTE_COLOR_GREEN
] |= (profile_id
% 32) << 3;
126 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
127 (((data)->stats[(color)] & 4LLU) >> 2)
129 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
130 ((enum rte_color)((data)->stats[(color)] & 3LLU))
133 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data
*data
,
134 enum rte_color color
,
135 enum rte_table_action_policer action
)
137 if (action
== RTE_TABLE_ACTION_POLICER_DROP
) {
138 data
->stats
[color
] |= 4LLU;
140 data
->stats
[color
] &= ~7LLU;
141 data
->stats
[color
] |= color
& 3LLU;
146 mtr_trtcm_data_stats_get(struct mtr_trtcm_data
*data
,
147 enum rte_color color
)
149 return data
->stats
[color
] >> 8;
153 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data
*data
,
154 enum rte_color color
)
156 data
->stats
[color
] &= 0xFFLU
;
159 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
160 ((data)->stats[(color)] += (1LLU << 8))
163 mtr_data_size(struct rte_table_action_mtr_config
*mtr
)
165 return mtr
->n_tc
* sizeof(struct mtr_trtcm_data
);
168 struct dscp_table_entry_data
{
169 enum rte_color color
;
174 struct dscp_table_data
{
175 struct dscp_table_entry_data entry
[64];
178 struct meter_profile_data
{
179 struct rte_meter_trtcm_profile profile
;
184 static struct meter_profile_data
*
185 meter_profile_data_find(struct meter_profile_data
*mp
,
191 for (i
= 0; i
< mp_size
; i
++) {
192 struct meter_profile_data
*mp_data
= &mp
[i
];
194 if (mp_data
->valid
&& (mp_data
->profile_id
== profile_id
))
201 static struct meter_profile_data
*
202 meter_profile_data_find_unused(struct meter_profile_data
*mp
,
207 for (i
= 0; i
< mp_size
; i
++) {
208 struct meter_profile_data
*mp_data
= &mp
[i
];
218 mtr_apply_check(struct rte_table_action_mtr_params
*p
,
219 struct rte_table_action_mtr_config
*cfg
,
220 struct meter_profile_data
*mp
,
225 if (p
->tc_mask
> RTE_LEN2MASK(cfg
->n_tc
, uint32_t))
228 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
229 struct rte_table_action_mtr_tc_params
*p_tc
= &p
->mtr
[i
];
230 struct meter_profile_data
*mp_data
;
232 if ((p
->tc_mask
& (1LLU << i
)) == 0)
235 mp_data
= meter_profile_data_find(mp
,
237 p_tc
->meter_profile_id
);
246 mtr_apply(struct mtr_trtcm_data
*data
,
247 struct rte_table_action_mtr_params
*p
,
248 struct rte_table_action_mtr_config
*cfg
,
249 struct meter_profile_data
*mp
,
255 /* Check input arguments */
256 status
= mtr_apply_check(p
, cfg
, mp
, mp_size
);
261 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
262 struct rte_table_action_mtr_tc_params
*p_tc
= &p
->mtr
[i
];
263 struct mtr_trtcm_data
*data_tc
= &data
[i
];
264 struct meter_profile_data
*mp_data
;
266 if ((p
->tc_mask
& (1LLU << i
)) == 0)
270 mp_data
= meter_profile_data_find(mp
,
272 p_tc
->meter_profile_id
);
276 memset(data_tc
, 0, sizeof(*data_tc
));
279 status
= rte_meter_trtcm_config(&data_tc
->trtcm
,
285 mtr_trtcm_data_meter_profile_id_set(data_tc
,
288 /* Policer actions */
289 mtr_trtcm_data_policer_action_set(data_tc
,
291 p_tc
->policer
[RTE_COLOR_GREEN
]);
293 mtr_trtcm_data_policer_action_set(data_tc
,
295 p_tc
->policer
[RTE_COLOR_YELLOW
]);
297 mtr_trtcm_data_policer_action_set(data_tc
,
299 p_tc
->policer
[RTE_COLOR_RED
]);
305 static __rte_always_inline
uint64_t
306 pkt_work_mtr(struct rte_mbuf
*mbuf
,
307 struct mtr_trtcm_data
*data
,
308 struct dscp_table_data
*dscp_table
,
309 struct meter_profile_data
*mp
,
312 uint16_t total_length
)
315 struct dscp_table_entry_data
*dscp_entry
= &dscp_table
->entry
[dscp
];
316 enum rte_color color_in
, color_meter
, color_policer
;
320 color_in
= dscp_entry
->color
;
322 mp_id
= MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data
);
325 color_meter
= rte_meter_trtcm_color_aware_check(
333 MTR_TRTCM_DATA_STATS_INC(data
, color_meter
);
336 drop_mask
= MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data
, color_meter
);
338 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data
, color_meter
);
339 rte_mbuf_sched_color_set(mbuf
, (uint8_t)color_policer
);
345 * RTE_TABLE_ACTION_TM
348 tm_cfg_check(struct rte_table_action_tm_config
*tm
)
350 if ((tm
->n_subports_per_port
== 0) ||
351 (rte_is_power_of_2(tm
->n_subports_per_port
) == 0) ||
352 (tm
->n_subports_per_port
> UINT16_MAX
) ||
353 (tm
->n_pipes_per_subport
== 0) ||
354 (rte_is_power_of_2(tm
->n_pipes_per_subport
) == 0))
363 } __attribute__((__packed__
));
366 tm_apply_check(struct rte_table_action_tm_params
*p
,
367 struct rte_table_action_tm_config
*cfg
)
369 if ((p
->subport_id
>= cfg
->n_subports_per_port
) ||
370 (p
->pipe_id
>= cfg
->n_pipes_per_subport
))
377 tm_apply(struct tm_data
*data
,
378 struct rte_table_action_tm_params
*p
,
379 struct rte_table_action_tm_config
*cfg
)
383 /* Check input arguments */
384 status
= tm_apply_check(p
, cfg
);
389 data
->queue_id
= p
->subport_id
<<
390 (__builtin_ctz(cfg
->n_pipes_per_subport
) + 4) |
396 static __rte_always_inline
void
397 pkt_work_tm(struct rte_mbuf
*mbuf
,
398 struct tm_data
*data
,
399 struct dscp_table_data
*dscp_table
,
402 struct dscp_table_entry_data
*dscp_entry
= &dscp_table
->entry
[dscp
];
403 uint32_t queue_id
= data
->queue_id
|
404 (dscp_entry
->tc
<< 2) |
405 dscp_entry
->tc_queue
;
406 rte_mbuf_sched_set(mbuf
, queue_id
, dscp_entry
->tc
,
407 (uint8_t)dscp_entry
->color
);
411 * RTE_TABLE_ACTION_ENCAP
414 encap_valid(enum rte_table_action_encap_type encap
)
417 case RTE_TABLE_ACTION_ENCAP_ETHER
:
418 case RTE_TABLE_ACTION_ENCAP_VLAN
:
419 case RTE_TABLE_ACTION_ENCAP_QINQ
:
420 case RTE_TABLE_ACTION_ENCAP_MPLS
:
421 case RTE_TABLE_ACTION_ENCAP_PPPOE
:
422 case RTE_TABLE_ACTION_ENCAP_VXLAN
:
423 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
431 encap_cfg_check(struct rte_table_action_encap_config
*encap
)
433 if ((encap
->encap_mask
== 0) ||
434 (__builtin_popcountll(encap
->encap_mask
) != 1))
440 struct encap_ether_data
{
441 struct ether_hdr ether
;
442 } __attribute__((__packed__
));
444 #define VLAN(pcp, dei, vid) \
445 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
446 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
447 (((uint64_t)(vid)) & 0xFFFLLU)) \
449 struct encap_vlan_data {
450 struct ether_hdr ether
;
451 struct vlan_hdr vlan
;
452 } __attribute__((__packed__
));
454 struct encap_qinq_data
{
455 struct ether_hdr ether
;
456 struct vlan_hdr svlan
;
457 struct vlan_hdr cvlan
;
458 } __attribute__((__packed__
));
460 #define ETHER_TYPE_MPLS_UNICAST 0x8847
462 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
464 #define MPLS(label, tc, s, ttl) \
465 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
466 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
467 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
468 (((uint64_t)(ttl)) & 0xFFLLU)))
470 struct encap_mpls_data
{
471 struct ether_hdr ether
;
472 uint32_t mpls
[RTE_TABLE_ACTION_MPLS_LABELS_MAX
];
474 } __attribute__((__packed__
));
476 #define PPP_PROTOCOL_IP 0x0021
478 struct pppoe_ppp_hdr
{
479 uint16_t ver_type_code
;
483 } __attribute__((__packed__
));
485 struct encap_pppoe_data
{
486 struct ether_hdr ether
;
487 struct pppoe_ppp_hdr pppoe_ppp
;
488 } __attribute__((__packed__
));
490 #define IP_PROTO_UDP 17
492 struct encap_vxlan_ipv4_data
{
493 struct ether_hdr ether
;
494 struct ipv4_hdr ipv4
;
496 struct vxlan_hdr vxlan
;
497 } __attribute__((__packed__
));
499 struct encap_vxlan_ipv4_vlan_data
{
500 struct ether_hdr ether
;
501 struct vlan_hdr vlan
;
502 struct ipv4_hdr ipv4
;
504 struct vxlan_hdr vxlan
;
505 } __attribute__((__packed__
));
507 struct encap_vxlan_ipv6_data
{
508 struct ether_hdr ether
;
509 struct ipv6_hdr ipv6
;
511 struct vxlan_hdr vxlan
;
512 } __attribute__((__packed__
));
514 struct encap_vxlan_ipv6_vlan_data
{
515 struct ether_hdr ether
;
516 struct vlan_hdr vlan
;
517 struct ipv6_hdr ipv6
;
519 struct vxlan_hdr vxlan
;
520 } __attribute__((__packed__
));
522 struct encap_qinq_pppoe_data
{
523 struct ether_hdr ether
;
524 struct vlan_hdr svlan
;
525 struct vlan_hdr cvlan
;
526 struct pppoe_ppp_hdr pppoe_ppp
;
527 } __attribute__((__packed__
));
530 encap_data_size(struct rte_table_action_encap_config
*encap
)
532 switch (encap
->encap_mask
) {
533 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER
:
534 return sizeof(struct encap_ether_data
);
536 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN
:
537 return sizeof(struct encap_vlan_data
);
539 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ
:
540 return sizeof(struct encap_qinq_data
);
542 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS
:
543 return sizeof(struct encap_mpls_data
);
545 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE
:
546 return sizeof(struct encap_pppoe_data
);
548 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN
:
549 if (encap
->vxlan
.ip_version
)
550 if (encap
->vxlan
.vlan
)
551 return sizeof(struct encap_vxlan_ipv4_vlan_data
);
553 return sizeof(struct encap_vxlan_ipv4_data
);
555 if (encap
->vxlan
.vlan
)
556 return sizeof(struct encap_vxlan_ipv6_vlan_data
);
558 return sizeof(struct encap_vxlan_ipv6_data
);
560 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
561 return sizeof(struct encap_qinq_pppoe_data
);
569 encap_apply_check(struct rte_table_action_encap_params
*p
,
570 struct rte_table_action_encap_config
*cfg
)
572 if ((encap_valid(p
->type
) == 0) ||
573 ((cfg
->encap_mask
& (1LLU << p
->type
)) == 0))
577 case RTE_TABLE_ACTION_ENCAP_ETHER
:
580 case RTE_TABLE_ACTION_ENCAP_VLAN
:
583 case RTE_TABLE_ACTION_ENCAP_QINQ
:
586 case RTE_TABLE_ACTION_ENCAP_MPLS
:
587 if ((p
->mpls
.mpls_count
== 0) ||
588 (p
->mpls
.mpls_count
> RTE_TABLE_ACTION_MPLS_LABELS_MAX
))
593 case RTE_TABLE_ACTION_ENCAP_PPPOE
:
596 case RTE_TABLE_ACTION_ENCAP_VXLAN
:
599 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
608 encap_ether_apply(void *data
,
609 struct rte_table_action_encap_params
*p
,
610 struct rte_table_action_common_config
*common_cfg
)
612 struct encap_ether_data
*d
= data
;
613 uint16_t ethertype
= (common_cfg
->ip_version
) ?
618 ether_addr_copy(&p
->ether
.ether
.da
, &d
->ether
.d_addr
);
619 ether_addr_copy(&p
->ether
.ether
.sa
, &d
->ether
.s_addr
);
620 d
->ether
.ether_type
= rte_htons(ethertype
);
626 encap_vlan_apply(void *data
,
627 struct rte_table_action_encap_params
*p
,
628 struct rte_table_action_common_config
*common_cfg
)
630 struct encap_vlan_data
*d
= data
;
631 uint16_t ethertype
= (common_cfg
->ip_version
) ?
636 ether_addr_copy(&p
->vlan
.ether
.da
, &d
->ether
.d_addr
);
637 ether_addr_copy(&p
->vlan
.ether
.sa
, &d
->ether
.s_addr
);
638 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_VLAN
);
641 d
->vlan
.vlan_tci
= rte_htons(VLAN(p
->vlan
.vlan
.pcp
,
644 d
->vlan
.eth_proto
= rte_htons(ethertype
);
650 encap_qinq_apply(void *data
,
651 struct rte_table_action_encap_params
*p
,
652 struct rte_table_action_common_config
*common_cfg
)
654 struct encap_qinq_data
*d
= data
;
655 uint16_t ethertype
= (common_cfg
->ip_version
) ?
660 ether_addr_copy(&p
->qinq
.ether
.da
, &d
->ether
.d_addr
);
661 ether_addr_copy(&p
->qinq
.ether
.sa
, &d
->ether
.s_addr
);
662 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_QINQ
);
665 d
->svlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.svlan
.pcp
,
668 d
->svlan
.eth_proto
= rte_htons(ETHER_TYPE_VLAN
);
671 d
->cvlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.cvlan
.pcp
,
674 d
->cvlan
.eth_proto
= rte_htons(ethertype
);
680 encap_qinq_pppoe_apply(void *data
,
681 struct rte_table_action_encap_params
*p
)
683 struct encap_qinq_pppoe_data
*d
= data
;
686 ether_addr_copy(&p
->qinq
.ether
.da
, &d
->ether
.d_addr
);
687 ether_addr_copy(&p
->qinq
.ether
.sa
, &d
->ether
.s_addr
);
688 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_VLAN
);
691 d
->svlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.svlan
.pcp
,
694 d
->svlan
.eth_proto
= rte_htons(ETHER_TYPE_VLAN
);
697 d
->cvlan
.vlan_tci
= rte_htons(VLAN(p
->qinq
.cvlan
.pcp
,
700 d
->cvlan
.eth_proto
= rte_htons(ETHER_TYPE_PPPOE_SESSION
);
703 d
->pppoe_ppp
.ver_type_code
= rte_htons(0x1100);
704 d
->pppoe_ppp
.session_id
= rte_htons(p
->qinq_pppoe
.pppoe
.session_id
);
705 d
->pppoe_ppp
.length
= 0; /* not pre-computed */
706 d
->pppoe_ppp
.protocol
= rte_htons(PPP_PROTOCOL_IP
);
712 encap_mpls_apply(void *data
,
713 struct rte_table_action_encap_params
*p
)
715 struct encap_mpls_data
*d
= data
;
716 uint16_t ethertype
= (p
->mpls
.unicast
) ?
717 ETHER_TYPE_MPLS_UNICAST
:
718 ETHER_TYPE_MPLS_MULTICAST
;
722 ether_addr_copy(&p
->mpls
.ether
.da
, &d
->ether
.d_addr
);
723 ether_addr_copy(&p
->mpls
.ether
.sa
, &d
->ether
.s_addr
);
724 d
->ether
.ether_type
= rte_htons(ethertype
);
727 for (i
= 0; i
< p
->mpls
.mpls_count
- 1; i
++)
728 d
->mpls
[i
] = rte_htonl(MPLS(p
->mpls
.mpls
[i
].label
,
731 p
->mpls
.mpls
[i
].ttl
));
733 d
->mpls
[i
] = rte_htonl(MPLS(p
->mpls
.mpls
[i
].label
,
736 p
->mpls
.mpls
[i
].ttl
));
738 d
->mpls_count
= p
->mpls
.mpls_count
;
743 encap_pppoe_apply(void *data
,
744 struct rte_table_action_encap_params
*p
)
746 struct encap_pppoe_data
*d
= data
;
749 ether_addr_copy(&p
->pppoe
.ether
.da
, &d
->ether
.d_addr
);
750 ether_addr_copy(&p
->pppoe
.ether
.sa
, &d
->ether
.s_addr
);
751 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_PPPOE_SESSION
);
754 d
->pppoe_ppp
.ver_type_code
= rte_htons(0x1100);
755 d
->pppoe_ppp
.session_id
= rte_htons(p
->pppoe
.pppoe
.session_id
);
756 d
->pppoe_ppp
.length
= 0; /* not pre-computed */
757 d
->pppoe_ppp
.protocol
= rte_htons(PPP_PROTOCOL_IP
);
763 encap_vxlan_apply(void *data
,
764 struct rte_table_action_encap_params
*p
,
765 struct rte_table_action_encap_config
*cfg
)
767 if ((p
->vxlan
.vxlan
.vni
> 0xFFFFFF) ||
768 (cfg
->vxlan
.ip_version
&& (p
->vxlan
.ipv4
.dscp
> 0x3F)) ||
769 (!cfg
->vxlan
.ip_version
&& (p
->vxlan
.ipv6
.flow_label
> 0xFFFFF)) ||
770 (!cfg
->vxlan
.ip_version
&& (p
->vxlan
.ipv6
.dscp
> 0x3F)) ||
771 (cfg
->vxlan
.vlan
&& (p
->vxlan
.vlan
.vid
> 0xFFF)))
774 if (cfg
->vxlan
.ip_version
)
775 if (cfg
->vxlan
.vlan
) {
776 struct encap_vxlan_ipv4_vlan_data
*d
= data
;
779 ether_addr_copy(&p
->vxlan
.ether
.da
, &d
->ether
.d_addr
);
780 ether_addr_copy(&p
->vxlan
.ether
.sa
, &d
->ether
.s_addr
);
781 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_VLAN
);
784 d
->vlan
.vlan_tci
= rte_htons(VLAN(p
->vxlan
.vlan
.pcp
,
787 d
->vlan
.eth_proto
= rte_htons(ETHER_TYPE_IPv4
);
790 d
->ipv4
.version_ihl
= 0x45;
791 d
->ipv4
.type_of_service
= p
->vxlan
.ipv4
.dscp
<< 2;
792 d
->ipv4
.total_length
= 0; /* not pre-computed */
793 d
->ipv4
.packet_id
= 0;
794 d
->ipv4
.fragment_offset
= 0;
795 d
->ipv4
.time_to_live
= p
->vxlan
.ipv4
.ttl
;
796 d
->ipv4
.next_proto_id
= IP_PROTO_UDP
;
797 d
->ipv4
.hdr_checksum
= 0;
798 d
->ipv4
.src_addr
= rte_htonl(p
->vxlan
.ipv4
.sa
);
799 d
->ipv4
.dst_addr
= rte_htonl(p
->vxlan
.ipv4
.da
);
801 d
->ipv4
.hdr_checksum
= rte_ipv4_cksum(&d
->ipv4
);
804 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
805 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
806 d
->udp
.dgram_len
= 0; /* not pre-computed */
807 d
->udp
.dgram_cksum
= 0;
810 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
811 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
815 struct encap_vxlan_ipv4_data
*d
= data
;
818 ether_addr_copy(&p
->vxlan
.ether
.da
, &d
->ether
.d_addr
);
819 ether_addr_copy(&p
->vxlan
.ether
.sa
, &d
->ether
.s_addr
);
820 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_IPv4
);
823 d
->ipv4
.version_ihl
= 0x45;
824 d
->ipv4
.type_of_service
= p
->vxlan
.ipv4
.dscp
<< 2;
825 d
->ipv4
.total_length
= 0; /* not pre-computed */
826 d
->ipv4
.packet_id
= 0;
827 d
->ipv4
.fragment_offset
= 0;
828 d
->ipv4
.time_to_live
= p
->vxlan
.ipv4
.ttl
;
829 d
->ipv4
.next_proto_id
= IP_PROTO_UDP
;
830 d
->ipv4
.hdr_checksum
= 0;
831 d
->ipv4
.src_addr
= rte_htonl(p
->vxlan
.ipv4
.sa
);
832 d
->ipv4
.dst_addr
= rte_htonl(p
->vxlan
.ipv4
.da
);
834 d
->ipv4
.hdr_checksum
= rte_ipv4_cksum(&d
->ipv4
);
837 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
838 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
839 d
->udp
.dgram_len
= 0; /* not pre-computed */
840 d
->udp
.dgram_cksum
= 0;
843 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
844 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
849 if (cfg
->vxlan
.vlan
) {
850 struct encap_vxlan_ipv6_vlan_data
*d
= data
;
853 ether_addr_copy(&p
->vxlan
.ether
.da
, &d
->ether
.d_addr
);
854 ether_addr_copy(&p
->vxlan
.ether
.sa
, &d
->ether
.s_addr
);
855 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_VLAN
);
858 d
->vlan
.vlan_tci
= rte_htons(VLAN(p
->vxlan
.vlan
.pcp
,
861 d
->vlan
.eth_proto
= rte_htons(ETHER_TYPE_IPv6
);
864 d
->ipv6
.vtc_flow
= rte_htonl((6 << 28) |
865 (p
->vxlan
.ipv6
.dscp
<< 22) |
866 p
->vxlan
.ipv6
.flow_label
);
867 d
->ipv6
.payload_len
= 0; /* not pre-computed */
868 d
->ipv6
.proto
= IP_PROTO_UDP
;
869 d
->ipv6
.hop_limits
= p
->vxlan
.ipv6
.hop_limit
;
870 memcpy(d
->ipv6
.src_addr
,
872 sizeof(p
->vxlan
.ipv6
.sa
));
873 memcpy(d
->ipv6
.dst_addr
,
875 sizeof(p
->vxlan
.ipv6
.da
));
878 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
879 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
880 d
->udp
.dgram_len
= 0; /* not pre-computed */
881 d
->udp
.dgram_cksum
= 0;
884 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
885 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
889 struct encap_vxlan_ipv6_data
*d
= data
;
892 ether_addr_copy(&p
->vxlan
.ether
.da
, &d
->ether
.d_addr
);
893 ether_addr_copy(&p
->vxlan
.ether
.sa
, &d
->ether
.s_addr
);
894 d
->ether
.ether_type
= rte_htons(ETHER_TYPE_IPv6
);
897 d
->ipv6
.vtc_flow
= rte_htonl((6 << 28) |
898 (p
->vxlan
.ipv6
.dscp
<< 22) |
899 p
->vxlan
.ipv6
.flow_label
);
900 d
->ipv6
.payload_len
= 0; /* not pre-computed */
901 d
->ipv6
.proto
= IP_PROTO_UDP
;
902 d
->ipv6
.hop_limits
= p
->vxlan
.ipv6
.hop_limit
;
903 memcpy(d
->ipv6
.src_addr
,
905 sizeof(p
->vxlan
.ipv6
.sa
));
906 memcpy(d
->ipv6
.dst_addr
,
908 sizeof(p
->vxlan
.ipv6
.da
));
911 d
->udp
.src_port
= rte_htons(p
->vxlan
.udp
.sp
);
912 d
->udp
.dst_port
= rte_htons(p
->vxlan
.udp
.dp
);
913 d
->udp
.dgram_len
= 0; /* not pre-computed */
914 d
->udp
.dgram_cksum
= 0;
917 d
->vxlan
.vx_flags
= rte_htonl(0x08000000);
918 d
->vxlan
.vx_vni
= rte_htonl(p
->vxlan
.vxlan
.vni
<< 8);
925 encap_apply(void *data
,
926 struct rte_table_action_encap_params
*p
,
927 struct rte_table_action_encap_config
*cfg
,
928 struct rte_table_action_common_config
*common_cfg
)
932 /* Check input arguments */
933 status
= encap_apply_check(p
, cfg
);
938 case RTE_TABLE_ACTION_ENCAP_ETHER
:
939 return encap_ether_apply(data
, p
, common_cfg
);
941 case RTE_TABLE_ACTION_ENCAP_VLAN
:
942 return encap_vlan_apply(data
, p
, common_cfg
);
944 case RTE_TABLE_ACTION_ENCAP_QINQ
:
945 return encap_qinq_apply(data
, p
, common_cfg
);
947 case RTE_TABLE_ACTION_ENCAP_MPLS
:
948 return encap_mpls_apply(data
, p
);
950 case RTE_TABLE_ACTION_ENCAP_PPPOE
:
951 return encap_pppoe_apply(data
, p
);
953 case RTE_TABLE_ACTION_ENCAP_VXLAN
:
954 return encap_vxlan_apply(data
, p
, cfg
);
956 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
957 return encap_qinq_pppoe_apply(data
, p
);
/*
 * Incrementally patch an IPv4 header checksum after the total_length
 * field changes from 0 (the pre-computed template value) to the given
 * value, using RFC 1624 one's complement arithmetic.
 */
static __rte_always_inline uint16_t
encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
	uint16_t total_length)
{
	int32_t sum = (~(int32_t)cksum0) & 0xFFFF;

	/* Add total length (one's complement logic), then fold carries. */
	sum += total_length;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
981 static __rte_always_inline
void *
982 encap(void *dst
, const void *src
, size_t n
)
984 dst
= ((uint8_t *) dst
) - n
;
985 return rte_memcpy(dst
, src
, n
);
988 static __rte_always_inline
void
989 pkt_work_encap_vxlan_ipv4(struct rte_mbuf
*mbuf
,
990 struct encap_vxlan_ipv4_data
*vxlan_tbl
,
991 struct rte_table_action_encap_config
*cfg
)
993 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
994 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
995 struct encap_vxlan_ipv4_data
*vxlan_pkt
;
996 uint16_t ether_length
, ipv4_total_length
, ipv4_hdr_cksum
, udp_length
;
998 ether_length
= (uint16_t)mbuf
->pkt_len
;
999 ipv4_total_length
= ether_length
+
1000 (sizeof(struct vxlan_hdr
) +
1001 sizeof(struct udp_hdr
) +
1002 sizeof(struct ipv4_hdr
));
1003 ipv4_hdr_cksum
= encap_vxlan_ipv4_checksum_update(vxlan_tbl
->ipv4
.hdr_checksum
,
1004 rte_htons(ipv4_total_length
));
1005 udp_length
= ether_length
+
1006 (sizeof(struct vxlan_hdr
) +
1007 sizeof(struct udp_hdr
));
1009 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1010 vxlan_pkt
->ipv4
.total_length
= rte_htons(ipv4_total_length
);
1011 vxlan_pkt
->ipv4
.hdr_checksum
= ipv4_hdr_cksum
;
1012 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1014 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1015 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1018 static __rte_always_inline
void
1019 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf
*mbuf
,
1020 struct encap_vxlan_ipv4_vlan_data
*vxlan_tbl
,
1021 struct rte_table_action_encap_config
*cfg
)
1023 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
1024 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
1025 struct encap_vxlan_ipv4_vlan_data
*vxlan_pkt
;
1026 uint16_t ether_length
, ipv4_total_length
, ipv4_hdr_cksum
, udp_length
;
1028 ether_length
= (uint16_t)mbuf
->pkt_len
;
1029 ipv4_total_length
= ether_length
+
1030 (sizeof(struct vxlan_hdr
) +
1031 sizeof(struct udp_hdr
) +
1032 sizeof(struct ipv4_hdr
));
1033 ipv4_hdr_cksum
= encap_vxlan_ipv4_checksum_update(vxlan_tbl
->ipv4
.hdr_checksum
,
1034 rte_htons(ipv4_total_length
));
1035 udp_length
= ether_length
+
1036 (sizeof(struct vxlan_hdr
) +
1037 sizeof(struct udp_hdr
));
1039 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1040 vxlan_pkt
->ipv4
.total_length
= rte_htons(ipv4_total_length
);
1041 vxlan_pkt
->ipv4
.hdr_checksum
= ipv4_hdr_cksum
;
1042 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1044 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1045 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1048 static __rte_always_inline
void
1049 pkt_work_encap_vxlan_ipv6(struct rte_mbuf
*mbuf
,
1050 struct encap_vxlan_ipv6_data
*vxlan_tbl
,
1051 struct rte_table_action_encap_config
*cfg
)
1053 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
1054 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
1055 struct encap_vxlan_ipv6_data
*vxlan_pkt
;
1056 uint16_t ether_length
, ipv6_payload_length
, udp_length
;
1058 ether_length
= (uint16_t)mbuf
->pkt_len
;
1059 ipv6_payload_length
= ether_length
+
1060 (sizeof(struct vxlan_hdr
) +
1061 sizeof(struct udp_hdr
));
1062 udp_length
= ether_length
+
1063 (sizeof(struct vxlan_hdr
) +
1064 sizeof(struct udp_hdr
));
1066 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1067 vxlan_pkt
->ipv6
.payload_len
= rte_htons(ipv6_payload_length
);
1068 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1070 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1071 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1074 static __rte_always_inline
void
1075 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf
*mbuf
,
1076 struct encap_vxlan_ipv6_vlan_data
*vxlan_tbl
,
1077 struct rte_table_action_encap_config
*cfg
)
1079 uint32_t ether_offset
= cfg
->vxlan
.data_offset
;
1080 void *ether
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ether_offset
);
1081 struct encap_vxlan_ipv6_vlan_data
*vxlan_pkt
;
1082 uint16_t ether_length
, ipv6_payload_length
, udp_length
;
1084 ether_length
= (uint16_t)mbuf
->pkt_len
;
1085 ipv6_payload_length
= ether_length
+
1086 (sizeof(struct vxlan_hdr
) +
1087 sizeof(struct udp_hdr
));
1088 udp_length
= ether_length
+
1089 (sizeof(struct vxlan_hdr
) +
1090 sizeof(struct udp_hdr
));
1092 vxlan_pkt
= encap(ether
, vxlan_tbl
, sizeof(*vxlan_tbl
));
1093 vxlan_pkt
->ipv6
.payload_len
= rte_htons(ipv6_payload_length
);
1094 vxlan_pkt
->udp
.dgram_len
= rte_htons(udp_length
);
1096 mbuf
->data_off
= ether_offset
- (sizeof(struct rte_mbuf
) + sizeof(*vxlan_pkt
));
1097 mbuf
->pkt_len
= mbuf
->data_len
= ether_length
+ sizeof(*vxlan_pkt
);
1100 static __rte_always_inline
void
1101 pkt_work_encap(struct rte_mbuf
*mbuf
,
1103 struct rte_table_action_encap_config
*cfg
,
1105 uint16_t total_length
,
1108 switch (cfg
->encap_mask
) {
1109 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER
:
1110 encap(ip
, data
, sizeof(struct encap_ether_data
));
1111 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1112 sizeof(struct encap_ether_data
));
1113 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1114 sizeof(struct encap_ether_data
);
1117 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN
:
1118 encap(ip
, data
, sizeof(struct encap_vlan_data
));
1119 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1120 sizeof(struct encap_vlan_data
));
1121 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1122 sizeof(struct encap_vlan_data
);
1125 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ
:
1126 encap(ip
, data
, sizeof(struct encap_qinq_data
));
1127 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1128 sizeof(struct encap_qinq_data
));
1129 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1130 sizeof(struct encap_qinq_data
);
1133 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS
:
1135 struct encap_mpls_data
*mpls
= data
;
1136 size_t size
= sizeof(struct ether_hdr
) +
1137 mpls
->mpls_count
* 4;
1139 encap(ip
, data
, size
);
1140 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) + size
);
1141 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+ size
;
1145 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE
:
1147 struct encap_pppoe_data
*pppoe
=
1148 encap(ip
, data
, sizeof(struct encap_pppoe_data
));
1149 pppoe
->pppoe_ppp
.length
= rte_htons(total_length
+ 2);
1150 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1151 sizeof(struct encap_pppoe_data
));
1152 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1153 sizeof(struct encap_pppoe_data
);
1157 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE
:
1159 struct encap_qinq_pppoe_data
*qinq_pppoe
=
1160 encap(ip
, data
, sizeof(struct encap_qinq_pppoe_data
));
1161 qinq_pppoe
->pppoe_ppp
.length
= rte_htons(total_length
+ 2);
1162 mbuf
->data_off
= ip_offset
- (sizeof(struct rte_mbuf
) +
1163 sizeof(struct encap_qinq_pppoe_data
));
1164 mbuf
->pkt_len
= mbuf
->data_len
= total_length
+
1165 sizeof(struct encap_qinq_pppoe_data
);
1169 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN
:
1171 if (cfg
->vxlan
.ip_version
)
1172 if (cfg
->vxlan
.vlan
)
1173 pkt_work_encap_vxlan_ipv4_vlan(mbuf
, data
, cfg
);
1175 pkt_work_encap_vxlan_ipv4(mbuf
, data
, cfg
);
1177 if (cfg
->vxlan
.vlan
)
1178 pkt_work_encap_vxlan_ipv6_vlan(mbuf
, data
, cfg
);
1180 pkt_work_encap_vxlan_ipv6(mbuf
, data
, cfg
);
1189 * RTE_TABLE_ACTION_NAT
1192 nat_cfg_check(struct rte_table_action_nat_config
*nat
)
1194 if ((nat
->proto
!= 0x06) &&
1195 (nat
->proto
!= 0x11))
1201 struct nat_ipv4_data
{
1204 } __attribute__((__packed__
));
1206 struct nat_ipv6_data
{
1209 } __attribute__((__packed__
));
1212 nat_data_size(struct rte_table_action_nat_config
*nat __rte_unused
,
1213 struct rte_table_action_common_config
*common
)
1215 int ip_version
= common
->ip_version
;
1217 return (ip_version
) ?
1218 sizeof(struct nat_ipv4_data
) :
1219 sizeof(struct nat_ipv6_data
);
1223 nat_apply_check(struct rte_table_action_nat_params
*p
,
1224 struct rte_table_action_common_config
*cfg
)
1226 if ((p
->ip_version
&& (cfg
->ip_version
== 0)) ||
1227 ((p
->ip_version
== 0) && cfg
->ip_version
))
1234 nat_apply(void *data
,
1235 struct rte_table_action_nat_params
*p
,
1236 struct rte_table_action_common_config
*cfg
)
1240 /* Check input arguments */
1241 status
= nat_apply_check(p
, cfg
);
1246 if (p
->ip_version
) {
1247 struct nat_ipv4_data
*d
= data
;
1249 d
->addr
= rte_htonl(p
->addr
.ipv4
);
1250 d
->port
= rte_htons(p
->port
);
1252 struct nat_ipv6_data
*d
= data
;
1254 memcpy(d
->addr
, p
->addr
.ipv6
, sizeof(d
->addr
));
1255 d
->port
= rte_htons(p
->port
);
/*
 * Incrementally patch an IPv4 header checksum after the address field
 * changes from ip0 to ip1 (RFC 1624 one's complement arithmetic):
 * subtract the old address words, add the new ones, folding carries
 * after each step.
 */
static __rte_always_inline uint16_t
nat_ipv4_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1)
{
	int32_t sum = (~(int32_t)cksum0) & 0xFFFF;

	/* Subtract ip0 (one's complement logic). */
	sum -= (ip0 >> 16) + (ip0 & 0xFFFF);
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Add ip1 (one's complement logic). */
	sum += (ip1 >> 16) + (ip1 & 0xFFFF);
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
/*
 * Incrementally patch a TCP/UDP checksum after the IPv4 address changes
 * from ip0 to ip1 and the port from port0 to port1 (RFC 1624 one's
 * complement arithmetic).
 */
static __rte_always_inline uint16_t
nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t sum = (~(int32_t)cksum0) & 0xFFFF;

	/* Subtract ip0 and port0 (one's complement logic). */
	sum -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Add ip1 and port1 (one's complement logic). */
	sum += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
/*
 * Incrementally patch a TCP/UDP checksum after the IPv6 address changes
 * from ip0 to ip1 (each given as eight 16-bit words) and the port from
 * port0 to port1 (RFC 1624 one's complement arithmetic).
 */
static __rte_always_inline uint16_t
nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
	uint16_t *ip0,
	uint16_t *ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t sum = (~(int32_t)cksum0) & 0xFFFF;

	/* Subtract ip0 and port0 (one's complement logic). */
	sum -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
		ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Add ip1 and port1 (one's complement logic). */
	sum += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
		ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
1336 static __rte_always_inline
void
1337 pkt_ipv4_work_nat(struct ipv4_hdr
*ip
,
1338 struct nat_ipv4_data
*data
,
1339 struct rte_table_action_nat_config
*cfg
)
1341 if (cfg
->source_nat
) {
1342 if (cfg
->proto
== 0x6) {
1343 struct tcp_hdr
*tcp
= (struct tcp_hdr
*) &ip
[1];
1344 uint16_t ip_cksum
, tcp_cksum
;
1346 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1350 tcp_cksum
= nat_ipv4_tcp_udp_checksum_update(tcp
->cksum
,
1356 ip
->src_addr
= data
->addr
;
1357 ip
->hdr_checksum
= ip_cksum
;
1358 tcp
->src_port
= data
->port
;
1359 tcp
->cksum
= tcp_cksum
;
1361 struct udp_hdr
*udp
= (struct udp_hdr
*) &ip
[1];
1362 uint16_t ip_cksum
, udp_cksum
;
1364 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1368 udp_cksum
= nat_ipv4_tcp_udp_checksum_update(udp
->dgram_cksum
,
1374 ip
->src_addr
= data
->addr
;
1375 ip
->hdr_checksum
= ip_cksum
;
1376 udp
->src_port
= data
->port
;
1377 if (udp
->dgram_cksum
)
1378 udp
->dgram_cksum
= udp_cksum
;
1381 if (cfg
->proto
== 0x6) {
1382 struct tcp_hdr
*tcp
= (struct tcp_hdr
*) &ip
[1];
1383 uint16_t ip_cksum
, tcp_cksum
;
1385 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1389 tcp_cksum
= nat_ipv4_tcp_udp_checksum_update(tcp
->cksum
,
1395 ip
->dst_addr
= data
->addr
;
1396 ip
->hdr_checksum
= ip_cksum
;
1397 tcp
->dst_port
= data
->port
;
1398 tcp
->cksum
= tcp_cksum
;
1400 struct udp_hdr
*udp
= (struct udp_hdr
*) &ip
[1];
1401 uint16_t ip_cksum
, udp_cksum
;
1403 ip_cksum
= nat_ipv4_checksum_update(ip
->hdr_checksum
,
1407 udp_cksum
= nat_ipv4_tcp_udp_checksum_update(udp
->dgram_cksum
,
1413 ip
->dst_addr
= data
->addr
;
1414 ip
->hdr_checksum
= ip_cksum
;
1415 udp
->dst_port
= data
->port
;
1416 if (udp
->dgram_cksum
)
1417 udp
->dgram_cksum
= udp_cksum
;
1422 static __rte_always_inline
void
1423 pkt_ipv6_work_nat(struct ipv6_hdr
*ip
,
1424 struct nat_ipv6_data
*data
,
1425 struct rte_table_action_nat_config
*cfg
)
1427 if (cfg
->source_nat
) {
1428 if (cfg
->proto
== 0x6) {
1429 struct tcp_hdr
*tcp
= (struct tcp_hdr
*) &ip
[1];
1432 tcp_cksum
= nat_ipv6_tcp_udp_checksum_update(tcp
->cksum
,
1433 (uint16_t *)ip
->src_addr
,
1434 (uint16_t *)data
->addr
,
1438 rte_memcpy(ip
->src_addr
, data
->addr
, 16);
1439 tcp
->src_port
= data
->port
;
1440 tcp
->cksum
= tcp_cksum
;
1442 struct udp_hdr
*udp
= (struct udp_hdr
*) &ip
[1];
1445 udp_cksum
= nat_ipv6_tcp_udp_checksum_update(udp
->dgram_cksum
,
1446 (uint16_t *)ip
->src_addr
,
1447 (uint16_t *)data
->addr
,
1451 rte_memcpy(ip
->src_addr
, data
->addr
, 16);
1452 udp
->src_port
= data
->port
;
1453 udp
->dgram_cksum
= udp_cksum
;
1456 if (cfg
->proto
== 0x6) {
1457 struct tcp_hdr
*tcp
= (struct tcp_hdr
*) &ip
[1];
1460 tcp_cksum
= nat_ipv6_tcp_udp_checksum_update(tcp
->cksum
,
1461 (uint16_t *)ip
->dst_addr
,
1462 (uint16_t *)data
->addr
,
1466 rte_memcpy(ip
->dst_addr
, data
->addr
, 16);
1467 tcp
->dst_port
= data
->port
;
1468 tcp
->cksum
= tcp_cksum
;
1470 struct udp_hdr
*udp
= (struct udp_hdr
*) &ip
[1];
1473 udp_cksum
= nat_ipv6_tcp_udp_checksum_update(udp
->dgram_cksum
,
1474 (uint16_t *)ip
->dst_addr
,
1475 (uint16_t *)data
->addr
,
1479 rte_memcpy(ip
->dst_addr
, data
->addr
, 16);
1480 udp
->dst_port
= data
->port
;
1481 udp
->dgram_cksum
= udp_cksum
;
1487 * RTE_TABLE_ACTION_TTL
1490 ttl_cfg_check(struct rte_table_action_ttl_config
*ttl
)
1500 } __attribute__((__packed__
));
/*
 * The TTL action packs two pieces of state into the single 64-bit
 * n_packets word of struct ttl_data:
 *  - bit 0: the per-packet TTL decrement amount (1 = decrement, 0 = no-op);
 *  - bits 63..1: a packet counter, incremented via TTL_STATS_ADD() with
 *    the per-packet drop flag (see pkt_ipv4_work_ttl/pkt_ipv6_work_ttl).
 */

/* Store the decrement flag in bit 0 and zero the counter. */
#define TTL_INIT(data, decrement) \
	((data)->n_packets = (decrement) ? 1 : 0)

/* Per-packet TTL decrement amount: 0 or 1 (bit 0). */
#define TTL_DEC_GET(data) \
	((uint8_t)((data)->n_packets & 1))

/* Reset the counter while preserving the decrement flag in bit 0. */
#define TTL_STATS_RESET(data) \
	((data)->n_packets = ((data)->n_packets & 1))

/* Read the counter (bit 0 excluded). */
#define TTL_STATS_READ(data) \
	((data)->n_packets >> 1)

/* Add value to the counter, keeping the decrement flag intact. */
#define TTL_STATS_ADD(data, value) \
	((data)->n_packets = \
		(((((data)->n_packets >> 1) + (value)) << 1) | \
		((data)->n_packets & 1)))
1520 ttl_apply(void *data
,
1521 struct rte_table_action_ttl_params
*p
)
1523 struct ttl_data
*d
= data
;
1525 TTL_INIT(d
, p
->decrement
);
1530 static __rte_always_inline
uint64_t
1531 pkt_ipv4_work_ttl(struct ipv4_hdr
*ip
,
1532 struct ttl_data
*data
)
1535 uint16_t cksum
= ip
->hdr_checksum
;
1536 uint8_t ttl
= ip
->time_to_live
;
1537 uint8_t ttl_diff
= TTL_DEC_GET(data
);
1542 ip
->hdr_checksum
= cksum
;
1543 ip
->time_to_live
= ttl
;
1545 drop
= (ttl
== 0) ? 1 : 0;
1546 TTL_STATS_ADD(data
, drop
);
1551 static __rte_always_inline
uint64_t
1552 pkt_ipv6_work_ttl(struct ipv6_hdr
*ip
,
1553 struct ttl_data
*data
)
1556 uint8_t ttl
= ip
->hop_limits
;
1557 uint8_t ttl_diff
= TTL_DEC_GET(data
);
1561 ip
->hop_limits
= ttl
;
1563 drop
= (ttl
== 0) ? 1 : 0;
1564 TTL_STATS_ADD(data
, drop
);
1570 * RTE_TABLE_ACTION_STATS
1573 stats_cfg_check(struct rte_table_action_stats_config
*stats
)
1575 if ((stats
->n_packets_enabled
== 0) && (stats
->n_bytes_enabled
== 0))
1584 } __attribute__((__packed__
));
1587 stats_apply(struct stats_data
*data
,
1588 struct rte_table_action_stats_params
*p
)
1590 data
->n_packets
= p
->n_packets
;
1591 data
->n_bytes
= p
->n_bytes
;
1596 static __rte_always_inline
void
1597 pkt_work_stats(struct stats_data
*data
,
1598 uint16_t total_length
)
1601 data
->n_bytes
+= total_length
;
1605 * RTE_TABLE_ACTION_TIME
1609 } __attribute__((__packed__
));
1612 time_apply(struct time_data
*data
,
1613 struct rte_table_action_time_params
*p
)
1615 data
->time
= p
->time
;
1619 static __rte_always_inline
void
1620 pkt_work_time(struct time_data
*data
,
1628 * RTE_TABLE_ACTION_CRYPTO
1631 #define CRYPTO_OP_MASK_CIPHER 0x1
1632 #define CRYPTO_OP_MASK_AUTH 0x2
1633 #define CRYPTO_OP_MASK_AEAD 0x4
1635 struct crypto_op_sym_iv_aad
{
1636 struct rte_crypto_op op
;
1637 struct rte_crypto_sym_op sym_op
;
1641 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
];
1643 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
];
1647 uint8_t iv
[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
];
1648 uint8_t aad
[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX
];
1654 struct sym_crypto_data
{
1659 /** Length of cipher iv. */
1660 uint16_t cipher_iv_len
;
1662 /** Offset from start of IP header to the cipher iv. */
1663 uint16_t cipher_iv_data_offset
;
1665 /** Length of cipher iv to be updated in the mbuf. */
1666 uint16_t cipher_iv_update_len
;
1668 /** Offset from start of IP header to the auth iv. */
1669 uint16_t auth_iv_data_offset
;
1671 /** Length of auth iv in the mbuf. */
1672 uint16_t auth_iv_len
;
1674 /** Length of auth iv to be updated in the mbuf. */
1675 uint16_t auth_iv_update_len
;
1680 /** Length of iv. */
1683 /** Offset from start of IP header to the aead iv. */
1684 uint16_t iv_data_offset
;
1686 /** Length of iv to be updated in the mbuf. */
1687 uint16_t iv_update_len
;
1689 /** Length of aad */
1692 /** Offset from start of IP header to the aad. */
1693 uint16_t aad_data_offset
;
1695 /** Length of aad to updated in the mbuf. */
1696 uint16_t aad_update_len
;
1701 /** Offset from start of IP header to the data. */
1702 uint16_t data_offset
;
1704 /** Digest length. */
1705 uint16_t digest_len
;
1708 uint16_t block_size
;
1710 /** Mask of crypto operation */
1713 /** Session pointer. */
1714 struct rte_cryptodev_sym_session
*session
;
1716 /** Direction of crypto, encrypt or decrypt */
1719 /** Private data size to store cipher iv / aad. */
1720 uint8_t iv_aad_data
[32];
1722 } __attribute__((__packed__
));
1725 sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config
*cfg
)
1727 if (!rte_cryptodev_pmd_is_valid_dev(cfg
->cryptodev_id
))
1729 if (cfg
->mp_create
== NULL
|| cfg
->mp_init
== NULL
)
1736 get_block_size(const struct rte_crypto_sym_xform
*xform
, uint8_t cdev_id
)
1738 struct rte_cryptodev_info dev_info
;
1739 const struct rte_cryptodev_capabilities
*cap
;
1742 rte_cryptodev_info_get(cdev_id
, &dev_info
);
1744 for (i
= 0; dev_info
.capabilities
[i
].op
!= RTE_CRYPTO_OP_TYPE_UNDEFINED
;
1746 cap
= &dev_info
.capabilities
[i
];
1748 if (cap
->sym
.xform_type
!= xform
->type
)
1751 if ((xform
->type
== RTE_CRYPTO_SYM_XFORM_CIPHER
) &&
1752 (cap
->sym
.cipher
.algo
== xform
->cipher
.algo
))
1753 return cap
->sym
.cipher
.block_size
;
1755 if ((xform
->type
== RTE_CRYPTO_SYM_XFORM_AEAD
) &&
1756 (cap
->sym
.aead
.algo
== xform
->aead
.algo
))
1757 return cap
->sym
.aead
.block_size
;
1759 if (xform
->type
== RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
)
1767 sym_crypto_apply(struct sym_crypto_data
*data
,
1768 struct rte_table_action_sym_crypto_config
*cfg
,
1769 struct rte_table_action_sym_crypto_params
*p
)
1771 const struct rte_crypto_cipher_xform
*cipher_xform
= NULL
;
1772 const struct rte_crypto_auth_xform
*auth_xform
= NULL
;
1773 const struct rte_crypto_aead_xform
*aead_xform
= NULL
;
1774 struct rte_crypto_sym_xform
*xform
= p
->xform
;
1775 struct rte_cryptodev_sym_session
*session
;
1778 memset(data
, 0, sizeof(*data
));
1781 if (xform
->type
== RTE_CRYPTO_SYM_XFORM_CIPHER
) {
1782 cipher_xform
= &xform
->cipher
;
1784 if (cipher_xform
->iv
.length
>
1785 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
)
1787 if (cipher_xform
->iv
.offset
!=
1788 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET
)
1791 ret
= get_block_size(xform
, cfg
->cryptodev_id
);
1794 data
->block_size
= (uint16_t)ret
;
1795 data
->op_mask
|= CRYPTO_OP_MASK_CIPHER
;
1797 data
->cipher_auth
.cipher_iv_len
=
1798 cipher_xform
->iv
.length
;
1799 data
->cipher_auth
.cipher_iv_data_offset
= (uint16_t)
1800 p
->cipher_auth
.cipher_iv_update
.offset
;
1801 data
->cipher_auth
.cipher_iv_update_len
= (uint16_t)
1802 p
->cipher_auth
.cipher_iv_update
.length
;
1804 rte_memcpy(data
->iv_aad_data
,
1805 p
->cipher_auth
.cipher_iv
.val
,
1806 p
->cipher_auth
.cipher_iv
.length
);
1808 data
->direction
= cipher_xform
->op
;
1810 } else if (xform
->type
== RTE_CRYPTO_SYM_XFORM_AUTH
) {
1811 auth_xform
= &xform
->auth
;
1812 if (auth_xform
->iv
.length
>
1813 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
)
1815 data
->op_mask
|= CRYPTO_OP_MASK_AUTH
;
1817 data
->cipher_auth
.auth_iv_len
= auth_xform
->iv
.length
;
1818 data
->cipher_auth
.auth_iv_data_offset
= (uint16_t)
1819 p
->cipher_auth
.auth_iv_update
.offset
;
1820 data
->cipher_auth
.auth_iv_update_len
= (uint16_t)
1821 p
->cipher_auth
.auth_iv_update
.length
;
1822 data
->digest_len
= auth_xform
->digest_length
;
1824 data
->direction
= (auth_xform
->op
==
1825 RTE_CRYPTO_AUTH_OP_GENERATE
) ?
1826 RTE_CRYPTO_CIPHER_OP_ENCRYPT
:
1827 RTE_CRYPTO_CIPHER_OP_DECRYPT
;
1829 } else if (xform
->type
== RTE_CRYPTO_SYM_XFORM_AEAD
) {
1830 aead_xform
= &xform
->aead
;
1832 if ((aead_xform
->iv
.length
>
1833 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
) || (
1834 aead_xform
->aad_length
>
1835 RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX
))
1837 if (aead_xform
->iv
.offset
!=
1838 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET
)
1841 ret
= get_block_size(xform
, cfg
->cryptodev_id
);
1844 data
->block_size
= (uint16_t)ret
;
1845 data
->op_mask
|= CRYPTO_OP_MASK_AEAD
;
1847 data
->digest_len
= aead_xform
->digest_length
;
1848 data
->aead
.iv_len
= aead_xform
->iv
.length
;
1849 data
->aead
.aad_len
= aead_xform
->aad_length
;
1851 data
->aead
.iv_data_offset
= (uint16_t)
1852 p
->aead
.iv_update
.offset
;
1853 data
->aead
.iv_update_len
= (uint16_t)
1854 p
->aead
.iv_update
.length
;
1855 data
->aead
.aad_data_offset
= (uint16_t)
1856 p
->aead
.aad_update
.offset
;
1857 data
->aead
.aad_update_len
= (uint16_t)
1858 p
->aead
.aad_update
.length
;
1860 rte_memcpy(data
->iv_aad_data
,
1864 rte_memcpy(data
->iv_aad_data
+ p
->aead
.iv
.length
,
1866 p
->aead
.aad
.length
);
1868 data
->direction
= (aead_xform
->op
==
1869 RTE_CRYPTO_AEAD_OP_ENCRYPT
) ?
1870 RTE_CRYPTO_CIPHER_OP_ENCRYPT
:
1871 RTE_CRYPTO_CIPHER_OP_DECRYPT
;
1875 xform
= xform
->next
;
1878 if (auth_xform
&& auth_xform
->iv
.length
) {
1880 if (auth_xform
->iv
.offset
!=
1881 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET
+
1882 cipher_xform
->iv
.length
)
1885 rte_memcpy(data
->iv_aad_data
+ cipher_xform
->iv
.length
,
1886 p
->cipher_auth
.auth_iv
.val
,
1887 p
->cipher_auth
.auth_iv
.length
);
1889 rte_memcpy(data
->iv_aad_data
,
1890 p
->cipher_auth
.auth_iv
.val
,
1891 p
->cipher_auth
.auth_iv
.length
);
1895 session
= rte_cryptodev_sym_session_create(cfg
->mp_create
);
1899 ret
= rte_cryptodev_sym_session_init(cfg
->cryptodev_id
, session
,
1900 p
->xform
, cfg
->mp_init
);
1902 rte_cryptodev_sym_session_free(session
);
1906 data
->data_offset
= (uint16_t)p
->data_offset
;
1907 data
->session
= session
;
1912 static __rte_always_inline
uint64_t
1913 pkt_work_sym_crypto(struct rte_mbuf
*mbuf
, struct sym_crypto_data
*data
,
1914 struct rte_table_action_sym_crypto_config
*cfg
,
1917 struct crypto_op_sym_iv_aad
*crypto_op
= (struct crypto_op_sym_iv_aad
*)
1918 RTE_MBUF_METADATA_UINT8_PTR(mbuf
, cfg
->op_offset
);
1919 struct rte_crypto_op
*op
= &crypto_op
->op
;
1920 struct rte_crypto_sym_op
*sym
= op
->sym
;
1921 uint32_t pkt_offset
= sizeof(*mbuf
) + mbuf
->data_off
;
1922 uint32_t payload_len
= pkt_offset
+ mbuf
->data_len
- data
->data_offset
;
1924 op
->type
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
;
1925 op
->sess_type
= RTE_CRYPTO_OP_WITH_SESSION
;
1926 op
->phys_addr
= mbuf
->buf_iova
+ cfg
->op_offset
- sizeof(*mbuf
);
1927 op
->status
= RTE_CRYPTO_OP_STATUS_NOT_PROCESSED
;
1930 sym
->session
= data
->session
;
1932 /** pad the packet */
1933 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
) {
1934 uint32_t append_len
= RTE_ALIGN_CEIL(payload_len
,
1935 data
->block_size
) - payload_len
;
1937 if (unlikely(rte_pktmbuf_append(mbuf
, append_len
+
1938 data
->digest_len
) == NULL
))
1941 payload_len
+= append_len
;
1943 payload_len
-= data
->digest_len
;
1945 if (data
->op_mask
& CRYPTO_OP_MASK_CIPHER
) {
1946 /** prepare cipher op */
1947 uint8_t *iv
= crypto_op
->iv_aad
.cipher_auth
.cipher_iv
;
1949 sym
->cipher
.data
.length
= payload_len
;
1950 sym
->cipher
.data
.offset
= data
->data_offset
- pkt_offset
;
1952 if (data
->cipher_auth
.cipher_iv_update_len
) {
1953 uint8_t *pkt_iv
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
1954 data
->cipher_auth
.cipher_iv_data_offset
1957 /** For encryption, update the pkt iv field, otherwise
1958 * update the iv_aad_field
1960 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
1961 rte_memcpy(pkt_iv
, data
->iv_aad_data
,
1962 data
->cipher_auth
.cipher_iv_update_len
);
1964 rte_memcpy(data
->iv_aad_data
, pkt_iv
,
1965 data
->cipher_auth
.cipher_iv_update_len
);
1969 rte_memcpy(iv
, data
->iv_aad_data
,
1970 data
->cipher_auth
.cipher_iv_len
);
1973 if (data
->op_mask
& CRYPTO_OP_MASK_AUTH
) {
1974 /** authentication always start from IP header. */
1975 sym
->auth
.data
.offset
= ip_offset
- pkt_offset
;
1976 sym
->auth
.data
.length
= mbuf
->data_len
- sym
->auth
.data
.offset
-
1978 sym
->auth
.digest
.data
= rte_pktmbuf_mtod_offset(mbuf
,
1979 uint8_t *, rte_pktmbuf_pkt_len(mbuf
) -
1981 sym
->auth
.digest
.phys_addr
= rte_pktmbuf_iova_offset(mbuf
,
1982 rte_pktmbuf_pkt_len(mbuf
) - data
->digest_len
);
1984 if (data
->cipher_auth
.auth_iv_update_len
) {
1985 uint8_t *pkt_iv
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
1986 data
->cipher_auth
.auth_iv_data_offset
1988 uint8_t *data_iv
= data
->iv_aad_data
+
1989 data
->cipher_auth
.cipher_iv_len
;
1991 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
1992 rte_memcpy(pkt_iv
, data_iv
,
1993 data
->cipher_auth
.auth_iv_update_len
);
1995 rte_memcpy(data_iv
, pkt_iv
,
1996 data
->cipher_auth
.auth_iv_update_len
);
1999 if (data
->cipher_auth
.auth_iv_len
) {
2000 /** prepare cipher op */
2001 uint8_t *iv
= crypto_op
->iv_aad
.cipher_auth
.auth_iv
;
2003 rte_memcpy(iv
, data
->iv_aad_data
+
2004 data
->cipher_auth
.cipher_iv_len
,
2005 data
->cipher_auth
.auth_iv_len
);
2009 if (data
->op_mask
& CRYPTO_OP_MASK_AEAD
) {
2010 uint8_t *iv
= crypto_op
->iv_aad
.aead_iv_aad
.iv
;
2011 uint8_t *aad
= crypto_op
->iv_aad
.aead_iv_aad
.aad
;
2013 sym
->aead
.aad
.data
= aad
;
2014 sym
->aead
.aad
.phys_addr
= rte_pktmbuf_iova_offset(mbuf
,
2015 aad
- rte_pktmbuf_mtod(mbuf
, uint8_t *));
2016 sym
->aead
.digest
.data
= rte_pktmbuf_mtod_offset(mbuf
,
2017 uint8_t *, rte_pktmbuf_pkt_len(mbuf
) -
2019 sym
->aead
.digest
.phys_addr
= rte_pktmbuf_iova_offset(mbuf
,
2020 rte_pktmbuf_pkt_len(mbuf
) - data
->digest_len
);
2021 sym
->aead
.data
.offset
= data
->data_offset
- pkt_offset
;
2022 sym
->aead
.data
.length
= payload_len
;
2024 if (data
->aead
.iv_update_len
) {
2025 uint8_t *pkt_iv
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
2026 data
->aead
.iv_data_offset
+ ip_offset
);
2027 uint8_t *data_iv
= data
->iv_aad_data
;
2029 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
2030 rte_memcpy(pkt_iv
, data_iv
,
2031 data
->aead
.iv_update_len
);
2033 rte_memcpy(data_iv
, pkt_iv
,
2034 data
->aead
.iv_update_len
);
2037 rte_memcpy(iv
, data
->iv_aad_data
, data
->aead
.iv_len
);
2039 if (data
->aead
.aad_update_len
) {
2040 uint8_t *pkt_aad
= RTE_MBUF_METADATA_UINT8_PTR(mbuf
,
2041 data
->aead
.aad_data_offset
+ ip_offset
);
2042 uint8_t *data_aad
= data
->iv_aad_data
+
2045 if (data
->direction
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
2046 rte_memcpy(pkt_aad
, data_aad
,
2047 data
->aead
.iv_update_len
);
2049 rte_memcpy(data_aad
, pkt_aad
,
2050 data
->aead
.iv_update_len
);
2053 rte_memcpy(aad
, data
->iv_aad_data
+ data
->aead
.iv_len
,
2054 data
->aead
.aad_len
);
2061 * RTE_TABLE_ACTION_TAG
2065 } __attribute__((__packed__
));
2068 tag_apply(struct tag_data
*data
,
2069 struct rte_table_action_tag_params
*p
)
2075 static __rte_always_inline
void
2076 pkt_work_tag(struct rte_mbuf
*mbuf
,
2077 struct tag_data
*data
)
2079 mbuf
->hash
.fdir
.hi
= data
->tag
;
2080 mbuf
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2083 static __rte_always_inline
void
2084 pkt4_work_tag(struct rte_mbuf
*mbuf0
,
2085 struct rte_mbuf
*mbuf1
,
2086 struct rte_mbuf
*mbuf2
,
2087 struct rte_mbuf
*mbuf3
,
2088 struct tag_data
*data0
,
2089 struct tag_data
*data1
,
2090 struct tag_data
*data2
,
2091 struct tag_data
*data3
)
2093 mbuf0
->hash
.fdir
.hi
= data0
->tag
;
2094 mbuf1
->hash
.fdir
.hi
= data1
->tag
;
2095 mbuf2
->hash
.fdir
.hi
= data2
->tag
;
2096 mbuf3
->hash
.fdir
.hi
= data3
->tag
;
2098 mbuf0
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2099 mbuf1
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2100 mbuf2
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2101 mbuf3
->ol_flags
|= PKT_RX_FDIR
| PKT_RX_FDIR_ID
;
2105 * RTE_TABLE_ACTION_DECAP
2109 } __attribute__((__packed__
));
2112 decap_apply(struct decap_data
*data
,
2113 struct rte_table_action_decap_params
*p
)
2119 static __rte_always_inline
void
2120 pkt_work_decap(struct rte_mbuf
*mbuf
,
2121 struct decap_data
*data
)
2123 uint16_t data_off
= mbuf
->data_off
;
2124 uint16_t data_len
= mbuf
->data_len
;
2125 uint32_t pkt_len
= mbuf
->pkt_len
;
2126 uint16_t n
= data
->n
;
2128 mbuf
->data_off
= data_off
+ n
;
2129 mbuf
->data_len
= data_len
- n
;
2130 mbuf
->pkt_len
= pkt_len
- n
;
2133 static __rte_always_inline
void
2134 pkt4_work_decap(struct rte_mbuf
*mbuf0
,
2135 struct rte_mbuf
*mbuf1
,
2136 struct rte_mbuf
*mbuf2
,
2137 struct rte_mbuf
*mbuf3
,
2138 struct decap_data
*data0
,
2139 struct decap_data
*data1
,
2140 struct decap_data
*data2
,
2141 struct decap_data
*data3
)
2143 uint16_t data_off0
= mbuf0
->data_off
;
2144 uint16_t data_len0
= mbuf0
->data_len
;
2145 uint32_t pkt_len0
= mbuf0
->pkt_len
;
2147 uint16_t data_off1
= mbuf1
->data_off
;
2148 uint16_t data_len1
= mbuf1
->data_len
;
2149 uint32_t pkt_len1
= mbuf1
->pkt_len
;
2151 uint16_t data_off2
= mbuf2
->data_off
;
2152 uint16_t data_len2
= mbuf2
->data_len
;
2153 uint32_t pkt_len2
= mbuf2
->pkt_len
;
2155 uint16_t data_off3
= mbuf3
->data_off
;
2156 uint16_t data_len3
= mbuf3
->data_len
;
2157 uint32_t pkt_len3
= mbuf3
->pkt_len
;
2159 uint16_t n0
= data0
->n
;
2160 uint16_t n1
= data1
->n
;
2161 uint16_t n2
= data2
->n
;
2162 uint16_t n3
= data3
->n
;
2164 mbuf0
->data_off
= data_off0
+ n0
;
2165 mbuf0
->data_len
= data_len0
- n0
;
2166 mbuf0
->pkt_len
= pkt_len0
- n0
;
2168 mbuf1
->data_off
= data_off1
+ n1
;
2169 mbuf1
->data_len
= data_len1
- n1
;
2170 mbuf1
->pkt_len
= pkt_len1
- n1
;
2172 mbuf2
->data_off
= data_off2
+ n2
;
2173 mbuf2
->data_len
= data_len2
- n2
;
2174 mbuf2
->pkt_len
= pkt_len2
- n2
;
2176 mbuf3
->data_off
= data_off3
+ n3
;
2177 mbuf3
->data_len
= data_len3
- n3
;
2178 mbuf3
->pkt_len
= pkt_len3
- n3
;
2185 action_valid(enum rte_table_action_type action
)
2188 case RTE_TABLE_ACTION_FWD
:
2189 case RTE_TABLE_ACTION_LB
:
2190 case RTE_TABLE_ACTION_MTR
:
2191 case RTE_TABLE_ACTION_TM
:
2192 case RTE_TABLE_ACTION_ENCAP
:
2193 case RTE_TABLE_ACTION_NAT
:
2194 case RTE_TABLE_ACTION_TTL
:
2195 case RTE_TABLE_ACTION_STATS
:
2196 case RTE_TABLE_ACTION_TIME
:
2197 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2198 case RTE_TABLE_ACTION_TAG
:
2199 case RTE_TABLE_ACTION_DECAP
:
2207 #define RTE_TABLE_ACTION_MAX 64
2210 uint64_t action_mask
;
2211 struct rte_table_action_common_config common
;
2212 struct rte_table_action_lb_config lb
;
2213 struct rte_table_action_mtr_config mtr
;
2214 struct rte_table_action_tm_config tm
;
2215 struct rte_table_action_encap_config encap
;
2216 struct rte_table_action_nat_config nat
;
2217 struct rte_table_action_ttl_config ttl
;
2218 struct rte_table_action_stats_config stats
;
2219 struct rte_table_action_sym_crypto_config sym_crypto
;
2223 action_cfg_size(enum rte_table_action_type action
)
2226 case RTE_TABLE_ACTION_LB
:
2227 return sizeof(struct rte_table_action_lb_config
);
2228 case RTE_TABLE_ACTION_MTR
:
2229 return sizeof(struct rte_table_action_mtr_config
);
2230 case RTE_TABLE_ACTION_TM
:
2231 return sizeof(struct rte_table_action_tm_config
);
2232 case RTE_TABLE_ACTION_ENCAP
:
2233 return sizeof(struct rte_table_action_encap_config
);
2234 case RTE_TABLE_ACTION_NAT
:
2235 return sizeof(struct rte_table_action_nat_config
);
2236 case RTE_TABLE_ACTION_TTL
:
2237 return sizeof(struct rte_table_action_ttl_config
);
2238 case RTE_TABLE_ACTION_STATS
:
2239 return sizeof(struct rte_table_action_stats_config
);
2240 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2241 return sizeof(struct rte_table_action_sym_crypto_config
);
2248 action_cfg_get(struct ap_config
*ap_config
,
2249 enum rte_table_action_type type
)
2252 case RTE_TABLE_ACTION_LB
:
2253 return &ap_config
->lb
;
2255 case RTE_TABLE_ACTION_MTR
:
2256 return &ap_config
->mtr
;
2258 case RTE_TABLE_ACTION_TM
:
2259 return &ap_config
->tm
;
2261 case RTE_TABLE_ACTION_ENCAP
:
2262 return &ap_config
->encap
;
2264 case RTE_TABLE_ACTION_NAT
:
2265 return &ap_config
->nat
;
2267 case RTE_TABLE_ACTION_TTL
:
2268 return &ap_config
->ttl
;
2270 case RTE_TABLE_ACTION_STATS
:
2271 return &ap_config
->stats
;
2273 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2274 return &ap_config
->sym_crypto
;
2281 action_cfg_set(struct ap_config
*ap_config
,
2282 enum rte_table_action_type type
,
2285 void *dst
= action_cfg_get(ap_config
, type
);
2288 memcpy(dst
, action_cfg
, action_cfg_size(type
));
2290 ap_config
->action_mask
|= 1LLU << type
;
2294 size_t offset
[RTE_TABLE_ACTION_MAX
];
2299 action_data_size(enum rte_table_action_type action
,
2300 struct ap_config
*ap_config
)
2303 case RTE_TABLE_ACTION_FWD
:
2304 return sizeof(struct fwd_data
);
2306 case RTE_TABLE_ACTION_LB
:
2307 return sizeof(struct lb_data
);
2309 case RTE_TABLE_ACTION_MTR
:
2310 return mtr_data_size(&ap_config
->mtr
);
2312 case RTE_TABLE_ACTION_TM
:
2313 return sizeof(struct tm_data
);
2315 case RTE_TABLE_ACTION_ENCAP
:
2316 return encap_data_size(&ap_config
->encap
);
2318 case RTE_TABLE_ACTION_NAT
:
2319 return nat_data_size(&ap_config
->nat
,
2320 &ap_config
->common
);
2322 case RTE_TABLE_ACTION_TTL
:
2323 return sizeof(struct ttl_data
);
2325 case RTE_TABLE_ACTION_STATS
:
2326 return sizeof(struct stats_data
);
2328 case RTE_TABLE_ACTION_TIME
:
2329 return sizeof(struct time_data
);
2331 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2332 return (sizeof(struct sym_crypto_data
));
2334 case RTE_TABLE_ACTION_TAG
:
2335 return sizeof(struct tag_data
);
2337 case RTE_TABLE_ACTION_DECAP
:
2338 return sizeof(struct decap_data
);
2347 action_data_offset_set(struct ap_data
*ap_data
,
2348 struct ap_config
*ap_config
)
2350 uint64_t action_mask
= ap_config
->action_mask
;
2354 memset(ap_data
->offset
, 0, sizeof(ap_data
->offset
));
2357 for (action
= 0; action
< RTE_TABLE_ACTION_MAX
; action
++)
2358 if (action_mask
& (1LLU << action
)) {
2359 ap_data
->offset
[action
] = offset
;
2360 offset
+= action_data_size((enum rte_table_action_type
)action
,
2364 ap_data
->total_size
= offset
;
2367 struct rte_table_action_profile
{
2368 struct ap_config cfg
;
2369 struct ap_data data
;
2373 struct rte_table_action_profile
*
2374 rte_table_action_profile_create(struct rte_table_action_common_config
*common
)
2376 struct rte_table_action_profile
*ap
;
2378 /* Check input arguments */
2382 /* Memory allocation */
2383 ap
= calloc(1, sizeof(struct rte_table_action_profile
));
2387 /* Initialization */
2388 memcpy(&ap
->cfg
.common
, common
, sizeof(*common
));
2395 rte_table_action_profile_action_register(struct rte_table_action_profile
*profile
,
2396 enum rte_table_action_type type
,
2397 void *action_config
)
2401 /* Check input arguments */
2402 if ((profile
== NULL
) ||
2404 (action_valid(type
) == 0) ||
2405 (profile
->cfg
.action_mask
& (1LLU << type
)) ||
2406 ((action_cfg_size(type
) == 0) && action_config
) ||
2407 (action_cfg_size(type
) && (action_config
== NULL
)))
2411 case RTE_TABLE_ACTION_LB
:
2412 status
= lb_cfg_check(action_config
);
2415 case RTE_TABLE_ACTION_MTR
:
2416 status
= mtr_cfg_check(action_config
);
2419 case RTE_TABLE_ACTION_TM
:
2420 status
= tm_cfg_check(action_config
);
2423 case RTE_TABLE_ACTION_ENCAP
:
2424 status
= encap_cfg_check(action_config
);
2427 case RTE_TABLE_ACTION_NAT
:
2428 status
= nat_cfg_check(action_config
);
2431 case RTE_TABLE_ACTION_TTL
:
2432 status
= ttl_cfg_check(action_config
);
2435 case RTE_TABLE_ACTION_STATS
:
2436 status
= stats_cfg_check(action_config
);
2439 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2440 status
= sym_crypto_cfg_check(action_config
);
2452 action_cfg_set(&profile
->cfg
, type
, action_config
);
2458 rte_table_action_profile_freeze(struct rte_table_action_profile
*profile
)
2460 if (profile
->frozen
)
2463 profile
->cfg
.action_mask
|= 1LLU << RTE_TABLE_ACTION_FWD
;
2464 action_data_offset_set(&profile
->data
, &profile
->cfg
);
2465 profile
->frozen
= 1;
/**
 * Free an action profile previously created with
 * rte_table_action_profile_create(). A NULL profile is a no-op.
 *
 * @return Always 0.
 */
int
rte_table_action_profile_free(struct rte_table_action_profile *profile)
{
	if (profile == NULL)
		return 0;

	free(profile);

	return 0;
}
2483 #define METER_PROFILES_MAX 32
2485 struct rte_table_action
{
2486 struct ap_config cfg
;
2487 struct ap_data data
;
2488 struct dscp_table_data dscp_table
;
2489 struct meter_profile_data mp
[METER_PROFILES_MAX
];
2492 struct rte_table_action
*
2493 rte_table_action_create(struct rte_table_action_profile
*profile
,
2496 struct rte_table_action
*action
;
2498 /* Check input arguments */
2499 if ((profile
== NULL
) ||
2500 (profile
->frozen
== 0))
2503 /* Memory allocation */
2504 action
= rte_zmalloc_socket(NULL
,
2505 sizeof(struct rte_table_action
),
2506 RTE_CACHE_LINE_SIZE
,
2511 /* Initialization */
2512 memcpy(&action
->cfg
, &profile
->cfg
, sizeof(profile
->cfg
));
2513 memcpy(&action
->data
, &profile
->data
, sizeof(profile
->data
));
2518 static __rte_always_inline
void *
2519 action_data_get(void *data
,
2520 struct rte_table_action
*action
,
2521 enum rte_table_action_type type
)
2523 size_t offset
= action
->data
.offset
[type
];
2524 uint8_t *data_bytes
= data
;
2526 return &data_bytes
[offset
];
2530 rte_table_action_apply(struct rte_table_action
*action
,
2532 enum rte_table_action_type type
,
2533 void *action_params
)
2537 /* Check input arguments */
2538 if ((action
== NULL
) ||
2540 (action_valid(type
) == 0) ||
2541 ((action
->cfg
.action_mask
& (1LLU << type
)) == 0) ||
2542 (action_params
== NULL
))
2546 action_data
= action_data_get(data
, action
, type
);
2549 case RTE_TABLE_ACTION_FWD
:
2550 return fwd_apply(action_data
,
2553 case RTE_TABLE_ACTION_LB
:
2554 return lb_apply(action_data
,
2557 case RTE_TABLE_ACTION_MTR
:
2558 return mtr_apply(action_data
,
2562 RTE_DIM(action
->mp
));
2564 case RTE_TABLE_ACTION_TM
:
2565 return tm_apply(action_data
,
2569 case RTE_TABLE_ACTION_ENCAP
:
2570 return encap_apply(action_data
,
2573 &action
->cfg
.common
);
2575 case RTE_TABLE_ACTION_NAT
:
2576 return nat_apply(action_data
,
2578 &action
->cfg
.common
);
2580 case RTE_TABLE_ACTION_TTL
:
2581 return ttl_apply(action_data
,
2584 case RTE_TABLE_ACTION_STATS
:
2585 return stats_apply(action_data
,
2588 case RTE_TABLE_ACTION_TIME
:
2589 return time_apply(action_data
,
2592 case RTE_TABLE_ACTION_SYM_CRYPTO
:
2593 return sym_crypto_apply(action_data
,
2594 &action
->cfg
.sym_crypto
,
2597 case RTE_TABLE_ACTION_TAG
:
2598 return tag_apply(action_data
,
2601 case RTE_TABLE_ACTION_DECAP
:
2602 return decap_apply(action_data
,
2611 rte_table_action_dscp_table_update(struct rte_table_action
*action
,
2613 struct rte_table_action_dscp_table
*table
)
2617 /* Check input arguments */
2618 if ((action
== NULL
) ||
2619 ((action
->cfg
.action_mask
& ((1LLU << RTE_TABLE_ACTION_MTR
) |
2620 (1LLU << RTE_TABLE_ACTION_TM
))) == 0) ||
2625 for (i
= 0; i
< RTE_DIM(table
->entry
); i
++) {
2626 struct dscp_table_entry_data
*data
=
2627 &action
->dscp_table
.entry
[i
];
2628 struct rte_table_action_dscp_table_entry
*entry
=
2631 if ((dscp_mask
& (1LLU << i
)) == 0)
2634 data
->color
= entry
->color
;
2635 data
->tc
= entry
->tc_id
;
2636 data
->tc_queue
= entry
->tc_queue_id
;
2643 rte_table_action_meter_profile_add(struct rte_table_action
*action
,
2644 uint32_t meter_profile_id
,
2645 struct rte_table_action_meter_profile
*profile
)
2647 struct meter_profile_data
*mp_data
;
2650 /* Check input arguments */
2651 if ((action
== NULL
) ||
2652 ((action
->cfg
.action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) == 0) ||
2656 if (profile
->alg
!= RTE_TABLE_ACTION_METER_TRTCM
)
2659 mp_data
= meter_profile_data_find(action
->mp
,
2660 RTE_DIM(action
->mp
),
2665 mp_data
= meter_profile_data_find_unused(action
->mp
,
2666 RTE_DIM(action
->mp
));
2670 /* Install new profile */
2671 status
= rte_meter_trtcm_profile_config(&mp_data
->profile
,
2676 mp_data
->profile_id
= meter_profile_id
;
2683 rte_table_action_meter_profile_delete(struct rte_table_action
*action
,
2684 uint32_t meter_profile_id
)
2686 struct meter_profile_data
*mp_data
;
2688 /* Check input arguments */
2689 if ((action
== NULL
) ||
2690 ((action
->cfg
.action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) == 0))
2693 mp_data
= meter_profile_data_find(action
->mp
,
2694 RTE_DIM(action
->mp
),
2699 /* Uninstall profile */
2706 rte_table_action_meter_read(struct rte_table_action
*action
,
2709 struct rte_table_action_mtr_counters
*stats
,
2712 struct mtr_trtcm_data
*mtr_data
;
2715 /* Check input arguments */
2716 if ((action
== NULL
) ||
2717 ((action
->cfg
.action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) == 0) ||
2719 (tc_mask
> RTE_LEN2MASK(action
->cfg
.mtr
.n_tc
, uint32_t)))
2722 mtr_data
= action_data_get(data
, action
, RTE_TABLE_ACTION_MTR
);
2726 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
2727 struct rte_table_action_mtr_counters_tc
*dst
=
2729 struct mtr_trtcm_data
*src
= &mtr_data
[i
];
2731 if ((tc_mask
& (1 << i
)) == 0)
2734 dst
->n_packets
[RTE_COLOR_GREEN
] =
2735 mtr_trtcm_data_stats_get(src
, RTE_COLOR_GREEN
);
2737 dst
->n_packets
[RTE_COLOR_YELLOW
] =
2738 mtr_trtcm_data_stats_get(src
, RTE_COLOR_YELLOW
);
2740 dst
->n_packets
[RTE_COLOR_RED
] =
2741 mtr_trtcm_data_stats_get(src
, RTE_COLOR_RED
);
2743 dst
->n_packets_valid
= 1;
2744 dst
->n_bytes_valid
= 0;
2747 stats
->tc_mask
= tc_mask
;
2752 for (i
= 0; i
< RTE_TABLE_ACTION_TC_MAX
; i
++) {
2753 struct mtr_trtcm_data
*src
= &mtr_data
[i
];
2755 if ((tc_mask
& (1 << i
)) == 0)
2758 mtr_trtcm_data_stats_reset(src
, RTE_COLOR_GREEN
);
2759 mtr_trtcm_data_stats_reset(src
, RTE_COLOR_YELLOW
);
2760 mtr_trtcm_data_stats_reset(src
, RTE_COLOR_RED
);
2768 rte_table_action_ttl_read(struct rte_table_action
*action
,
2770 struct rte_table_action_ttl_counters
*stats
,
2773 struct ttl_data
*ttl_data
;
2775 /* Check input arguments */
2776 if ((action
== NULL
) ||
2777 ((action
->cfg
.action_mask
&
2778 (1LLU << RTE_TABLE_ACTION_TTL
)) == 0) ||
2782 ttl_data
= action_data_get(data
, action
, RTE_TABLE_ACTION_TTL
);
2786 stats
->n_packets
= TTL_STATS_READ(ttl_data
);
2790 TTL_STATS_RESET(ttl_data
);
2796 rte_table_action_stats_read(struct rte_table_action
*action
,
2798 struct rte_table_action_stats_counters
*stats
,
2801 struct stats_data
*stats_data
;
2803 /* Check input arguments */
2804 if ((action
== NULL
) ||
2805 ((action
->cfg
.action_mask
&
2806 (1LLU << RTE_TABLE_ACTION_STATS
)) == 0) ||
2810 stats_data
= action_data_get(data
, action
,
2811 RTE_TABLE_ACTION_STATS
);
2815 stats
->n_packets
= stats_data
->n_packets
;
2816 stats
->n_bytes
= stats_data
->n_bytes
;
2817 stats
->n_packets_valid
= 1;
2818 stats
->n_bytes_valid
= 1;
2823 stats_data
->n_packets
= 0;
2824 stats_data
->n_bytes
= 0;
2831 rte_table_action_time_read(struct rte_table_action
*action
,
2833 uint64_t *timestamp
)
2835 struct time_data
*time_data
;
2837 /* Check input arguments */
2838 if ((action
== NULL
) ||
2839 ((action
->cfg
.action_mask
&
2840 (1LLU << RTE_TABLE_ACTION_TIME
)) == 0) ||
2842 (timestamp
== NULL
))
2845 time_data
= action_data_get(data
, action
, RTE_TABLE_ACTION_TIME
);
2848 *timestamp
= time_data
->time
;
2853 struct rte_cryptodev_sym_session
*
2854 rte_table_action_crypto_sym_session_get(struct rte_table_action
*action
,
2857 struct sym_crypto_data
*sym_crypto_data
;
2859 /* Check input arguments */
2860 if ((action
== NULL
) ||
2861 ((action
->cfg
.action_mask
&
2862 (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO
)) == 0) ||
2866 sym_crypto_data
= action_data_get(data
, action
,
2867 RTE_TABLE_ACTION_SYM_CRYPTO
);
2869 return sym_crypto_data
->session
;
2872 static __rte_always_inline
uint64_t
2873 pkt_work(struct rte_mbuf
*mbuf
,
2874 struct rte_pipeline_table_entry
*table_entry
,
2876 struct rte_table_action
*action
,
2877 struct ap_config
*cfg
)
2879 uint64_t drop_mask
= 0;
2881 uint32_t ip_offset
= action
->cfg
.common
.ip_offset
;
2882 void *ip
= RTE_MBUF_METADATA_UINT32_PTR(mbuf
, ip_offset
);
2885 uint16_t total_length
;
2887 if (cfg
->common
.ip_version
) {
2888 struct ipv4_hdr
*hdr
= ip
;
2890 dscp
= hdr
->type_of_service
>> 2;
2891 total_length
= rte_ntohs(hdr
->total_length
);
2893 struct ipv6_hdr
*hdr
= ip
;
2895 dscp
= (rte_ntohl(hdr
->vtc_flow
) & 0x0F600000) >> 18;
2897 rte_ntohs(hdr
->payload_len
) + sizeof(struct ipv6_hdr
);
2900 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_LB
)) {
2902 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_LB
);
2908 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) {
2910 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_MTR
);
2912 drop_mask
|= pkt_work_mtr(mbuf
,
2914 &action
->dscp_table
,
2921 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TM
)) {
2923 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_TM
);
2927 &action
->dscp_table
,
2931 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_DECAP
)) {
2932 void *data
= action_data_get(table_entry
,
2934 RTE_TABLE_ACTION_DECAP
);
2936 pkt_work_decap(mbuf
, data
);
2939 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_ENCAP
)) {
2941 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_ENCAP
);
2943 pkt_work_encap(mbuf
,
2951 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_NAT
)) {
2953 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_NAT
);
2955 if (cfg
->common
.ip_version
)
2956 pkt_ipv4_work_nat(ip
, data
, &cfg
->nat
);
2958 pkt_ipv6_work_nat(ip
, data
, &cfg
->nat
);
2961 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TTL
)) {
2963 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_TTL
);
2965 if (cfg
->common
.ip_version
)
2966 drop_mask
|= pkt_ipv4_work_ttl(ip
, data
);
2968 drop_mask
|= pkt_ipv6_work_ttl(ip
, data
);
2971 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_STATS
)) {
2973 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_STATS
);
2975 pkt_work_stats(data
, total_length
);
2978 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TIME
)) {
2980 action_data_get(table_entry
, action
, RTE_TABLE_ACTION_TIME
);
2982 pkt_work_time(data
, time
);
2985 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO
)) {
2986 void *data
= action_data_get(table_entry
, action
,
2987 RTE_TABLE_ACTION_SYM_CRYPTO
);
2989 drop_mask
|= pkt_work_sym_crypto(mbuf
, data
, &cfg
->sym_crypto
,
2993 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TAG
)) {
2994 void *data
= action_data_get(table_entry
,
2996 RTE_TABLE_ACTION_TAG
);
2998 pkt_work_tag(mbuf
, data
);
3004 static __rte_always_inline
uint64_t
3005 pkt4_work(struct rte_mbuf
**mbufs
,
3006 struct rte_pipeline_table_entry
**table_entries
,
3008 struct rte_table_action
*action
,
3009 struct ap_config
*cfg
)
3011 uint64_t drop_mask0
= 0;
3012 uint64_t drop_mask1
= 0;
3013 uint64_t drop_mask2
= 0;
3014 uint64_t drop_mask3
= 0;
3016 struct rte_mbuf
*mbuf0
= mbufs
[0];
3017 struct rte_mbuf
*mbuf1
= mbufs
[1];
3018 struct rte_mbuf
*mbuf2
= mbufs
[2];
3019 struct rte_mbuf
*mbuf3
= mbufs
[3];
3021 struct rte_pipeline_table_entry
*table_entry0
= table_entries
[0];
3022 struct rte_pipeline_table_entry
*table_entry1
= table_entries
[1];
3023 struct rte_pipeline_table_entry
*table_entry2
= table_entries
[2];
3024 struct rte_pipeline_table_entry
*table_entry3
= table_entries
[3];
3026 uint32_t ip_offset
= action
->cfg
.common
.ip_offset
;
3027 void *ip0
= RTE_MBUF_METADATA_UINT32_PTR(mbuf0
, ip_offset
);
3028 void *ip1
= RTE_MBUF_METADATA_UINT32_PTR(mbuf1
, ip_offset
);
3029 void *ip2
= RTE_MBUF_METADATA_UINT32_PTR(mbuf2
, ip_offset
);
3030 void *ip3
= RTE_MBUF_METADATA_UINT32_PTR(mbuf3
, ip_offset
);
3032 uint32_t dscp0
, dscp1
, dscp2
, dscp3
;
3033 uint16_t total_length0
, total_length1
, total_length2
, total_length3
;
3035 if (cfg
->common
.ip_version
) {
3036 struct ipv4_hdr
*hdr0
= ip0
;
3037 struct ipv4_hdr
*hdr1
= ip1
;
3038 struct ipv4_hdr
*hdr2
= ip2
;
3039 struct ipv4_hdr
*hdr3
= ip3
;
3041 dscp0
= hdr0
->type_of_service
>> 2;
3042 dscp1
= hdr1
->type_of_service
>> 2;
3043 dscp2
= hdr2
->type_of_service
>> 2;
3044 dscp3
= hdr3
->type_of_service
>> 2;
3046 total_length0
= rte_ntohs(hdr0
->total_length
);
3047 total_length1
= rte_ntohs(hdr1
->total_length
);
3048 total_length2
= rte_ntohs(hdr2
->total_length
);
3049 total_length3
= rte_ntohs(hdr3
->total_length
);
3051 struct ipv6_hdr
*hdr0
= ip0
;
3052 struct ipv6_hdr
*hdr1
= ip1
;
3053 struct ipv6_hdr
*hdr2
= ip2
;
3054 struct ipv6_hdr
*hdr3
= ip3
;
3056 dscp0
= (rte_ntohl(hdr0
->vtc_flow
) & 0x0F600000) >> 18;
3057 dscp1
= (rte_ntohl(hdr1
->vtc_flow
) & 0x0F600000) >> 18;
3058 dscp2
= (rte_ntohl(hdr2
->vtc_flow
) & 0x0F600000) >> 18;
3059 dscp3
= (rte_ntohl(hdr3
->vtc_flow
) & 0x0F600000) >> 18;
3062 rte_ntohs(hdr0
->payload_len
) + sizeof(struct ipv6_hdr
);
3064 rte_ntohs(hdr1
->payload_len
) + sizeof(struct ipv6_hdr
);
3066 rte_ntohs(hdr2
->payload_len
) + sizeof(struct ipv6_hdr
);
3068 rte_ntohs(hdr3
->payload_len
) + sizeof(struct ipv6_hdr
);
3071 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_LB
)) {
3073 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_LB
);
3075 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_LB
);
3077 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_LB
);
3079 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_LB
);
3098 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_MTR
)) {
3100 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_MTR
);
3102 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_MTR
);
3104 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_MTR
);
3106 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_MTR
);
3108 drop_mask0
|= pkt_work_mtr(mbuf0
,
3110 &action
->dscp_table
,
3116 drop_mask1
|= pkt_work_mtr(mbuf1
,
3118 &action
->dscp_table
,
3124 drop_mask2
|= pkt_work_mtr(mbuf2
,
3126 &action
->dscp_table
,
3132 drop_mask3
|= pkt_work_mtr(mbuf3
,
3134 &action
->dscp_table
,
3141 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TM
)) {
3143 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_TM
);
3145 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_TM
);
3147 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_TM
);
3149 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_TM
);
3153 &action
->dscp_table
,
3158 &action
->dscp_table
,
3163 &action
->dscp_table
,
3168 &action
->dscp_table
,
3172 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_DECAP
)) {
3173 void *data0
= action_data_get(table_entry0
,
3175 RTE_TABLE_ACTION_DECAP
);
3176 void *data1
= action_data_get(table_entry1
,
3178 RTE_TABLE_ACTION_DECAP
);
3179 void *data2
= action_data_get(table_entry2
,
3181 RTE_TABLE_ACTION_DECAP
);
3182 void *data3
= action_data_get(table_entry3
,
3184 RTE_TABLE_ACTION_DECAP
);
3186 pkt4_work_decap(mbuf0
, mbuf1
, mbuf2
, mbuf3
,
3187 data0
, data1
, data2
, data3
);
3190 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_ENCAP
)) {
3192 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_ENCAP
);
3194 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_ENCAP
);
3196 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_ENCAP
);
3198 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_ENCAP
);
3200 pkt_work_encap(mbuf0
,
3207 pkt_work_encap(mbuf1
,
3214 pkt_work_encap(mbuf2
,
3221 pkt_work_encap(mbuf3
,
3229 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_NAT
)) {
3231 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_NAT
);
3233 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_NAT
);
3235 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_NAT
);
3237 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_NAT
);
3239 if (cfg
->common
.ip_version
) {
3240 pkt_ipv4_work_nat(ip0
, data0
, &cfg
->nat
);
3241 pkt_ipv4_work_nat(ip1
, data1
, &cfg
->nat
);
3242 pkt_ipv4_work_nat(ip2
, data2
, &cfg
->nat
);
3243 pkt_ipv4_work_nat(ip3
, data3
, &cfg
->nat
);
3245 pkt_ipv6_work_nat(ip0
, data0
, &cfg
->nat
);
3246 pkt_ipv6_work_nat(ip1
, data1
, &cfg
->nat
);
3247 pkt_ipv6_work_nat(ip2
, data2
, &cfg
->nat
);
3248 pkt_ipv6_work_nat(ip3
, data3
, &cfg
->nat
);
3252 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TTL
)) {
3254 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_TTL
);
3256 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_TTL
);
3258 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_TTL
);
3260 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_TTL
);
3262 if (cfg
->common
.ip_version
) {
3263 drop_mask0
|= pkt_ipv4_work_ttl(ip0
, data0
);
3264 drop_mask1
|= pkt_ipv4_work_ttl(ip1
, data1
);
3265 drop_mask2
|= pkt_ipv4_work_ttl(ip2
, data2
);
3266 drop_mask3
|= pkt_ipv4_work_ttl(ip3
, data3
);
3268 drop_mask0
|= pkt_ipv6_work_ttl(ip0
, data0
);
3269 drop_mask1
|= pkt_ipv6_work_ttl(ip1
, data1
);
3270 drop_mask2
|= pkt_ipv6_work_ttl(ip2
, data2
);
3271 drop_mask3
|= pkt_ipv6_work_ttl(ip3
, data3
);
3275 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_STATS
)) {
3277 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_STATS
);
3279 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_STATS
);
3281 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_STATS
);
3283 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_STATS
);
3285 pkt_work_stats(data0
, total_length0
);
3286 pkt_work_stats(data1
, total_length1
);
3287 pkt_work_stats(data2
, total_length2
);
3288 pkt_work_stats(data3
, total_length3
);
3291 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TIME
)) {
3293 action_data_get(table_entry0
, action
, RTE_TABLE_ACTION_TIME
);
3295 action_data_get(table_entry1
, action
, RTE_TABLE_ACTION_TIME
);
3297 action_data_get(table_entry2
, action
, RTE_TABLE_ACTION_TIME
);
3299 action_data_get(table_entry3
, action
, RTE_TABLE_ACTION_TIME
);
3301 pkt_work_time(data0
, time
);
3302 pkt_work_time(data1
, time
);
3303 pkt_work_time(data2
, time
);
3304 pkt_work_time(data3
, time
);
3307 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO
)) {
3308 void *data0
= action_data_get(table_entry0
, action
,
3309 RTE_TABLE_ACTION_SYM_CRYPTO
);
3310 void *data1
= action_data_get(table_entry1
, action
,
3311 RTE_TABLE_ACTION_SYM_CRYPTO
);
3312 void *data2
= action_data_get(table_entry2
, action
,
3313 RTE_TABLE_ACTION_SYM_CRYPTO
);
3314 void *data3
= action_data_get(table_entry3
, action
,
3315 RTE_TABLE_ACTION_SYM_CRYPTO
);
3317 drop_mask0
|= pkt_work_sym_crypto(mbuf0
, data0
, &cfg
->sym_crypto
,
3319 drop_mask1
|= pkt_work_sym_crypto(mbuf1
, data1
, &cfg
->sym_crypto
,
3321 drop_mask2
|= pkt_work_sym_crypto(mbuf2
, data2
, &cfg
->sym_crypto
,
3323 drop_mask3
|= pkt_work_sym_crypto(mbuf3
, data3
, &cfg
->sym_crypto
,
3327 if (cfg
->action_mask
& (1LLU << RTE_TABLE_ACTION_TAG
)) {
3328 void *data0
= action_data_get(table_entry0
,
3330 RTE_TABLE_ACTION_TAG
);
3331 void *data1
= action_data_get(table_entry1
,
3333 RTE_TABLE_ACTION_TAG
);
3334 void *data2
= action_data_get(table_entry2
,
3336 RTE_TABLE_ACTION_TAG
);
3337 void *data3
= action_data_get(table_entry3
,
3339 RTE_TABLE_ACTION_TAG
);
3341 pkt4_work_tag(mbuf0
, mbuf1
, mbuf2
, mbuf3
,
3342 data0
, data1
, data2
, data3
);
3351 static __rte_always_inline
int
3352 ah(struct rte_pipeline
*p
,
3353 struct rte_mbuf
**pkts
,
3355 struct rte_pipeline_table_entry
**entries
,
3356 struct rte_table_action
*action
,
3357 struct ap_config
*cfg
)
3359 uint64_t pkts_drop_mask
= 0;
3362 if (cfg
->action_mask
& ((1LLU << RTE_TABLE_ACTION_MTR
) |
3363 (1LLU << RTE_TABLE_ACTION_TIME
)))
3366 if ((pkts_mask
& (pkts_mask
+ 1)) == 0) {
3367 uint64_t n_pkts
= __builtin_popcountll(pkts_mask
);
3370 for (i
= 0; i
< (n_pkts
& (~0x3LLU
)); i
+= 4) {
3373 drop_mask
= pkt4_work(&pkts
[i
],
3379 pkts_drop_mask
|= drop_mask
<< i
;
3382 for ( ; i
< n_pkts
; i
++) {
3385 drop_mask
= pkt_work(pkts
[i
],
3391 pkts_drop_mask
|= drop_mask
<< i
;
3394 for ( ; pkts_mask
; ) {
3395 uint32_t pos
= __builtin_ctzll(pkts_mask
);
3396 uint64_t pkt_mask
= 1LLU << pos
;
3399 drop_mask
= pkt_work(pkts
[pos
],
3405 pkts_mask
&= ~pkt_mask
;
3406 pkts_drop_mask
|= drop_mask
<< pos
;
3409 rte_pipeline_ah_packet_drop(p
, pkts_drop_mask
);
3415 ah_default(struct rte_pipeline
*p
,
3416 struct rte_mbuf
**pkts
,
3418 struct rte_pipeline_table_entry
**entries
,
3421 struct rte_table_action
*action
= arg
;
3431 static rte_pipeline_table_action_handler_hit
3432 ah_selector(struct rte_table_action
*action
)
3434 if (action
->cfg
.action_mask
== (1LLU << RTE_TABLE_ACTION_FWD
))
3441 rte_table_action_table_params_get(struct rte_table_action
*action
,
3442 struct rte_pipeline_table_params
*params
)
3444 rte_pipeline_table_action_handler_hit f_action_hit
;
3445 uint32_t total_size
;
3447 /* Check input arguments */
3448 if ((action
== NULL
) ||
3452 f_action_hit
= ah_selector(action
);
3453 total_size
= rte_align32pow2(action
->data
.total_size
);
3455 /* Fill in params */
3456 params
->f_action_hit
= f_action_hit
;
3457 params
->f_action_miss
= NULL
;
3458 params
->arg_ah
= (f_action_hit
) ? action
: NULL
;
3459 params
->action_data_size
= total_size
-
3460 sizeof(struct rte_pipeline_table_entry
);
3466 rte_table_action_free(struct rte_table_action
*action
)