/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

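/*
 * Per-packet "prepare" callback: both outb_tun_pkt_prepare() and
 * outb_trs_pkt_prepare() below match this signature, which lets
 * cpu_outb_pkt_prepare() share one loop for tunnel and transport mode.
 */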
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
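/*
 * Resulting packet layout (a sketch; SQN.hi is present only when
 * sqh_len != 0, i.e. for lookaside processing of ESN-enabled SAs):
 *
 *	[ tunnel hdr ][ ESP hdr ][ IV ][ payload ][ padding ][ ESP tail ]
 *	[ SQN.hi (temporary) ][ room for ICV ]
 */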
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}

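/*
 * A minimal lookaside (RTE_SECURITY_ACTION_TYPE_NONE) usage sketch for the
 * prepare/process functions in this file; dev_id, qid and the grouping of
 * dequeued ops back to their session are application-level assumptions,
 * not something defined here:
 *
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, num);
 *	rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, k);
 *	... group dequeued ops/mbufs per session ...
 *	k = rte_ipsec_pkt_process(&ss, mb, n);
 */
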
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

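	/*
	 * reserve a block of sequence numbers for this burst; if the
	 * remaining SQN space is too small, n is trimmed and the shortfall
	 * is reported via rte_errno = EOVERFLOW below.
	 */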
	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}

/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
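/*
 * Resulting packet layout (a sketch; the original L2/L3 headers are moved
 * forward by insert_esph() to make room for the ESP header, and SQN.hi is
 * present only when sqh_len != 0):
 *
 *	[ L2 ][ L3 ][ ESP hdr ][ IV ][ payload ][ padding ][ ESP tail ]
 *	[ SQN.hi (temporary) ][ room for ICV ]
 */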
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
		l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}


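/*
 * fill the per-packet IV/counter block for the CPU crypto path and
 * convert the ESP payload length into the number of bytes the crypto
 * stage has to cover (the offset is adjusted via the SA's precomputed
 * ctp.auth values).
 */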
static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}

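/*
 * common part of the CPU_CRYPTO (synchronous) outbound path.
 * cofs_mask selects how the ESP header offset is derived: tunnel mode
 * passes 0 (the offset is fully determined by the SA's precomputed
 * header), transport mode passes UINT32_MAX (the packet's own L2+L3
 * headers precede the ESP header).
 */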
static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num,
	esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	int32_t rc;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k, n;
	uint32_t l2, l3;
	union sym_op_data icv;
	void *iv[num];
	void *aad[num];
	void *dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
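	/*
	 * the per-packet arrays above are filled only for successfully
	 * prepared packets (index k) and handed to cpu_crypto_bulk() in a
	 * single synchronous call at the end.
	 */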

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k] = ivbuf[k];
			aad[k] = icv.va + sa->icv_len;
			dgst[k++] = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			k++;
		} else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
}

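/*
 * With inline crypto the ESP encryption/authentication itself is done by
 * the NIC on transmit, so the prepare helpers below are invoked with
 * sqh_len = 0 (no temporary SQN.hi insertion) and the prepared packets are
 * expected to go straight to the device TX path (e.g. rte_eth_tx_burst()).
 */
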
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}