/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"
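
/*
 * Fill the IPsec part of a security session configuration from an SA:
 * tunnel endpoints and TTL/hop limit for tunnel mode, plus the ESN soft
 * limit, replay window size and ESN option taken from app_sa_prm.
 */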
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;

		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
}
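
/*
 * Create a lookaside (or plain cryptodev) session for the SA: look up the
 * crypto device queue pair mapped to this lcore and the SA's algorithms in
 * the cdev_map hash, then create either an rte_security session (lookaside
 * protocol) or an rte_cryptodev symmetric session (none / CPU crypto).
 */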
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	struct rte_cryptodev_info cdev_info;
	unsigned long cdev_id_qp = 0;
	int32_t ret = 0;
	struct cdev_key key = { 0 };

	key.lcore_id = (uint8_t)rte_lcore_id();

	key.cipher_algo = (uint8_t)sa->cipher_algo;
	key.auth_algo = (uint8_t)sa->auth_algo;
	key.aead_algo = (uint8_t)sa->aead_algo;

	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
			(void **)&cdev_id_qp);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC,
				"No cryptodev: core %u, cipher_algo %u, "
				"auth_algo %u, aead_algo %u\n",
				key.lcore_id,
				key.cipher_algo,
				key.auth_algo,
				key.aead_algo);
		return -1;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u qp %u\n", sa->spi,
			ipsec_ctx->tbl[cdev_id_qp].id,
			ipsec_ctx->tbl[cdev_id_qp].qp);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
						rte_cryptodev_get_sec_ctx(
						ipsec_ctx->tbl[cdev_id_qp].id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, ipsec_ctx->session_priv_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed: err: %d\n", ret);
				return -1;
			}
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			struct rte_cryptodev_info info;
			uint16_t cdev_id;

			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
			rte_cryptodev_info_get(cdev_id, &info);
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;

			ips->crypto.dev_id = cdev_id;
		}

		ips->crypto.ses = rte_cryptodev_sym_session_create(
				ipsec_ctx->session_pool);
		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
				ips->crypto.ses, sa->xforms,
				ipsec_ctx->session_priv_pool);

		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
				&cdev_info);
	}

	sa->cdev_id_qp = cdev_id_qp;

	return 0;
}
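
/*
 * Create an inline session on the ethernet port's security context.
 * For INLINE_CRYPTO the function also validates and creates an rte_flow
 * rule (ETH / IPv4|IPv6 / ESP with the SA's SPI) carrying a SECURITY
 * action; for ingress it tries RSS, then QUEUE, then a plain END as the
 * fate action. For INLINE_PROTOCOL it only creates the session and checks
 * the device's security capabilities.
 */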
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = (sa->flags == IP4_TUNNEL ||
					sa->flags == IP6_TUNNEL) ?
				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(
				sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				" rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);

		/* iterate until ESP tunnel */
		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
				RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
				sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[40];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create flow if default flow is created */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return ret;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save SA as userdata for the security session. When
		 * the packet is received, this userdata will be
		 * retrieved using the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to
		 * security session. In such cases, the userdata could
		 * be obtained to uniquely identify the security
		 * parameters denoted.
		 */
		sess_conf.userdata = (void *) sa;
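
		/*
		 * On the RX path this userdata would typically be fetched
		 * back with rte_security_get_userdata() using the per-packet
		 * metadata supplied by the PMD; that lookup is not shown here.
		 */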

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);
		if (sec_cap == NULL) {
			RTE_LOG(ERR, IPSEC,
				"No capabilities registered\n");
			return -1;
		}

		/* iterate until ESP tunnel */
		while (sec_cap->action !=
				RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
				RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
				sess_conf.ipsec.mode &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
	}

	return 0;
}
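
/*
 * Install a flow-director style rte_flow rule that steers inbound ESP
 * packets of this SA (matched on ETH / IPv4|IPv6 src+dst / ESP SPI) to the
 * dedicated RX queue sa->fdir_qid. Egress and transport-mode SAs are
 * rejected.
 */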
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err;

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}

	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
				.index = sa->fdir_qid,
	};

	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
				&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
					sa->action, &err);
	if (sa->flow == NULL) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * queue crypto-ops into PMD queue.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}
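
/*
 * Prepare and submit one burst of packets: for each packet with a valid SA,
 * dispatch on the session action type (lookaside protocol, none, inline
 * crypto/protocol, CPU crypto) and either queue a crypto op to the mapped
 * cryptodev queue pair or hand the packet to the inline TX path.
 */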
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->security.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
					" legacy mode.");
			rte_pktmbuf_free(pkts[i]);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->crypto.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}
			break;

		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;

		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
	}
}

static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			rte_pktmbuf_free(pkt);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}
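
/*
 * Poll the crypto device queue pairs of this context in round-robin order,
 * dequeue completed crypto ops, run the post-processing transform and
 * collect the resulting packets, dropping any that failed.
 */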
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					rte_pktmbuf_free(pkt);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					rte_pktmbuf_free(pkt);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	return nb_pkts;
}

uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}