/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <string.h>
#include <errno.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

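/*
 * Fill the IPsec part of a security session configuration from an SA:
 * tunnel endpoints and per-family header fields (IPv4 TTL, or IPv6 hop
 * limit/DSCP/flow label), plus the ESN soft limit, anti-replay window
 * size and ESN option taken from the application SA parameters.
 */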
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
			&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
}

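/*
 * Create a cryptodev or security session for a lookaside SA. The cryptodev
 * queue pair is looked up in the per-application cdev map, keyed by the
 * current lcore id and the SA's cipher/auth/AEAD algorithms.
 */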
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	struct rte_cryptodev_info cdev_info;
	unsigned long cdev_id_qp = 0;
	int32_t ret = 0;
	struct cdev_key key = { 0 };

	key.lcore_id = (uint8_t)rte_lcore_id();

	key.cipher_algo = (uint8_t)sa->cipher_algo;
	key.auth_algo = (uint8_t)sa->auth_algo;
	key.aead_algo = (uint8_t)sa->aead_algo;

	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
			(void **)&cdev_id_qp);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC,
			"No cryptodev: core %u, cipher_algo %u, "
			"auth_algo %u, aead_algo %u\n",
			key.lcore_id,
			key.cipher_algo,
			key.auth_algo,
			key.aead_algo);
		return -1;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u qp %u\n", sa->spi,
			ipsec_ctx->tbl[cdev_id_qp].id,
			ipsec_ctx->tbl[cdev_id_qp].qp);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(
					ipsec_ctx->tbl[cdev_id_qp].id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, ipsec_ctx->session_priv_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
				return -1;
			}
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			struct rte_cryptodev_info info;
			uint16_t cdev_id;

			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
			rte_cryptodev_info_get(cdev_id, &info);
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;

			ips->crypto.dev_id = cdev_id;
		}
		ips->crypto.ses = rte_cryptodev_sym_session_create(
				ipsec_ctx->session_pool);
		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
				ips->crypto.ses, sa->xforms,
				ipsec_ctx->session_priv_pool);

		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
				&cdev_info);
	}

	sa->cdev_id_qp = cdev_id_qp;

	return 0;
}

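/*
 * Create a security session for an inline SA on its ethdev port. For
 * inline crypto, also install the rte_flow rule (ETH / IPv4|IPv6 / ESP)
 * that steers matching traffic into the security session.
 */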
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = (sa->flags == IP4_TUNNEL ||
					sa->flags == IP6_TUNNEL) ?
				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		const struct rte_security_capability *sec_cap;
		int ret = 0;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);

		/* iterate until the ESP tunnel capability is found */
		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
					sec_cap->protocol ==
						RTE_SECURITY_PROTOCOL_IPSEC &&
					sec_cap->ipsec.mode ==
						RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
					sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
				sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[40];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = 40,
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create flow if default flow is created */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get: ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save the SA as userdata for the security session. When
		 * a packet is received, this userdata is retrieved again
		 * (e.g. via rte_security_get_userdata()) using the metadata
		 * that the PMD attaches to the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to the
		 * security session. In such cases, the userdata can be
		 * used to uniquely identify the security parameters
		 * involved.
		 */
		sess_conf.userdata = (void *) sa;

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);
		if (sec_cap == NULL) {
			RTE_LOG(ERR, IPSEC,
				"No capabilities registered\n");
			return -1;
		}

		/* iterate until a matching ESP capability is found */
		while (sec_cap->action !=
				RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
					sec_cap->protocol ==
						RTE_SECURITY_PROTOCOL_IPSEC &&
					sec_cap->ipsec.mode ==
						sess_conf.ipsec.mode &&
					sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
	}

	return 0;
}

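/*
 * Install an rte_flow (Flow Director) rule that steers ingress ESP traffic
 * of a tunnel-mode SA to its dedicated Rx queue (sa->fdir_qid).
 */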
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err;

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
			.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
			&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
			sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * Queue buffered crypto-ops into the cryptodev PMD queue pair; packets
 * whose ops cannot be enqueued are dropped.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

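/*
 * Buffer one crypto-op; flush the queue pair once a full burst
 * (MAX_PKT_BURST) has accumulated.
 */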
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

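/*
 * Prepare and enqueue crypto operations for a burst of packets according
 * to the action type of each packet's SA; packets using inline protocol
 * or inline crypto bypass the cryptodev and are staged in ol_pkts instead.
 */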
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->security.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
					" legacy mode\n");
			rte_pktmbuf_free(pkts[i]);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:

			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->crypto.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
	}
}

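/* Drain staged inline packets, applying the post-processing transform. */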
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			rte_pktmbuf_free(pkt);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

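/*
 * Poll the context's cryptodev queue pairs round-robin and collect up to
 * max_pkts completed packets, dropping those whose crypto op failed.
 */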
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					rte_pktmbuf_free(pkt);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					rte_pktmbuf_free(pkt);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

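/*
 * Inbound datapath entry: look up SAs for the received packets, start
 * ESP inbound processing and return any packets already completed by
 * inline processing.
 */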
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

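/* Collect inbound packets completed by the cryptodev queue pairs. */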
uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

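/*
 * Outbound counterpart of ipsec_inbound(); SAs are selected by the
 * sa_idx[] array produced by SP lookup rather than by packet contents.
 */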
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

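/* Collect outbound packets completed by the cryptodev queue pairs. */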
uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}