X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=ceph%2Fsrc%2Fspdk%2Fdpdk%2Flib%2Flibrte_ipsec%2Fesp_outb.c;fp=ceph%2Fsrc%2Fspdk%2Fdpdk%2Flib%2Flibrte_ipsec%2Fesp_outb.c;h=fb9d5864c8619e552f287dc33983e5cffaeb610e;hb=f67539c23b11f3b8a2ecaeeddf7a403ae1c442a8;hp=c798bc4c499d4b3255ec0d7280c00e93b63c36af;hpb=64a4c04e6850c6d9086e4c37f57c4eada541b05e;p=ceph.git

diff --git a/ceph/src/spdk/dpdk/lib/librte_ipsec/esp_outb.c b/ceph/src/spdk/dpdk/lib/librte_ipsec/esp_outb.c
index c798bc4c4..fb9d5864c 100644
--- a/ceph/src/spdk/dpdk/lib/librte_ipsec/esp_outb.c
+++ b/ceph/src/spdk/dpdk/lib/librte_ipsec/esp_outb.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #include <rte_ipsec.h>
@@ -15,6 +15,9 @@
 #include "misc.h"
 #include "pad.h"
 
+typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+	union sym_op_data *icv, uint8_t sqh_len);
 /*
  * helper function to fill crypto_sym op for cipher+auth algorithms.
  */
@@ -104,12 +107,12 @@ outb_cop_prepare(struct rte_crypto_op *cop,
 static inline int32_t
 outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-	union sym_op_data *icv)
+	union sym_op_data *icv, uint8_t sqh_len)
 {
 	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
 	struct rte_mbuf *ml;
-	struct esp_hdr *esph;
-	struct esp_tail *espt;
+	struct rte_esp_hdr *esph;
+	struct rte_esp_tail *espt;
 	char *ph, *pt;
 	uint64_t *iv;
 
@@ -126,11 +129,11 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 
 	/* pad length + esp tail */
 	pdlen = clen - plen;
-	tlen = pdlen + sa->icv_len;
+	tlen = pdlen + sa->icv_len + sqh_len;
 
 	/* do append and prepend */
 	ml = rte_pktmbuf_lastseg(mb);
-	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
 		return -ENOSPC;
 
 	/* prepend header */
@@ -152,11 +155,11 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	rte_memcpy(ph, sa->hdr, sa->hdr_len);
 
 	/* update original and new ip header fields */
-	update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
-		sqn_low16(sqc));
+	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
+		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));
 
 	/* update spi, seqn and iv */
-	esph = (struct esp_hdr *)(ph + sa->hdr_len);
+	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
 	iv = (uint64_t *)(esph + 1);
 	copy_iv(iv, ivp, sa->iv_len);
 
@@ -173,10 +176,11 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	rte_memcpy(pt, esp_pad_bytes, pdlen);
 
 	/* update esp trailer */
-	espt = (struct esp_tail *)(pt + pdlen);
+	espt = (struct rte_esp_tail *)(pt + pdlen);
 	espt->pad_len = pdlen;
 	espt->next_proto = sa->proto;
 
+	/* set icv va/pa value(s) */
 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
 
@@ -242,8 +246,8 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
-
+		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
+			sa->sqh_len);
 		/* success, setup crypto op */
 		if (rc >= 0) {
 			outb_pkt_xprepare(sa, sqc, &icv);
@@ -270,15 +274,19 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 static inline int32_t
 outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-	uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
+	union sym_op_data *icv, uint8_t sqh_len)
 {
 	uint8_t np;
 	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
 	struct rte_mbuf *ml;
-	struct esp_hdr *esph;
-	struct esp_tail *espt;
+	struct rte_esp_hdr *esph;
+	struct rte_esp_tail *espt;
 	char *ph, *pt;
 	uint64_t *iv;
+	uint32_t l2len, l3len;
+
+	l2len = mb->l2_len;
+	l3len = mb->l3_len;
 
 	uhlen = l2len + l3len;
 	plen = mb->pkt_len - uhlen;
@@ -292,11 +300,11 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 
 	/* pad length + esp tail */
 	pdlen = clen - plen;
-	tlen = pdlen + sa->icv_len;
+	tlen = pdlen + sa->icv_len + sqh_len;
 
 	/* do append and insert */
 	ml = rte_pktmbuf_lastseg(mb);
-	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
 		return -ENOSPC;
 
 	/* prepend space for ESP header */
@@ -314,11 +322,11 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	insert_esph(ph, ph + hlen, uhlen);
 
 	/* update ip header fields */
-	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
-			IPPROTO_ESP);
+	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
+			l3len, IPPROTO_ESP);
 
 	/* update spi, seqn and iv */
-	esph = (struct esp_hdr *)(ph + uhlen);
+	esph = (struct rte_esp_hdr *)(ph + uhlen);
 	iv = (uint64_t *)(esph + 1);
 	copy_iv(iv, ivp, sa->iv_len);
 
@@ -335,10 +343,11 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	rte_memcpy(pt, esp_pad_bytes, pdlen);
 
 	/* update esp trailer */
-	espt = (struct esp_tail *)(pt + pdlen);
+	espt = (struct rte_esp_tail *)(pt + pdlen);
 	espt->pad_len = pdlen;
 	espt->next_proto = np;
 
+	/* set icv va/pa value(s) */
 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
 
@@ -380,8 +389,8 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv);
-
+		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
+			sa->sqh_len);
 		/* success, setup crypto op */
 		if (rc >= 0) {
 			outb_pkt_xprepare(sa, sqc, &icv);
@@ -402,6 +411,118 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	return k;
 }
 
+
+static inline uint32_t
+outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
+	uint32_t plen, void *iv)
+{
+	uint64_t *ivp = iv;
+	struct aead_gcm_iv *gcm;
+	struct aesctr_cnt_blk *ctr;
+	uint32_t clen;
+
+	switch (sa->algo_type) {
+	case ALGO_TYPE_AES_GCM:
+		gcm = iv;
+		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+		break;
+	case ALGO_TYPE_AES_CTR:
+		ctr = iv;
+		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+		break;
+	}
+
+	*pofs += sa->ctp.auth.offset;
+	clen = plen + sa->ctp.auth.length;
+	return clen;
+}
+
+static uint16_t
+cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num,
+		esp_outb_prepare_t prepare, uint32_t cofs_mask)
+{
+	int32_t rc;
+	uint64_t sqn;
+	rte_be64_t sqc;
+	struct rte_ipsec_sa *sa;
+	uint32_t i, k, n;
+	uint32_t l2, l3;
+	union sym_op_data icv;
+	void *iv[num];
+	void *aad[num];
+	void *dgst[num];
+	uint32_t dr[num];
+	uint32_t l4ofs[num];
+	uint32_t clen[num];
+	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
+
+	sa = ss->sa;
+
+	n = num;
+	sqn = esn_outb_update_sqn(sa, &n);
+	if (n != num)
+		rte_errno = EOVERFLOW;
+
+	for (i = 0, k = 0; i != n; i++) {
+
+		l2 = mb[i]->l2_len;
+		l3 = mb[i]->l3_len;
+
+		/* calculate ESP header offset */
+		l4ofs[k] = (l2 + l3) & cofs_mask;
+
+		sqc = rte_cpu_to_be_64(sqn + i);
+		gen_iv(ivbuf[k], sqc);
+
+		/* try to update the packet itself */
+		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
+
+		/* success, proceed with preparations */
+		if (rc >= 0) {
+
+			outb_pkt_xprepare(sa, sqc, &icv);
+
+			/* get encrypted data offset and length */
+			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
+				ivbuf[k]);
+
+			/* fill iv, digest and aad */
+			iv[k] = ivbuf[k];
+			aad[k] = icv.va + sa->icv_len;
+			dgst[k++] = icv.va;
+		} else {
+			dr[i - k] = i;
+			rte_errno = -rc;
+		}
+	}
+
+	/* copy not prepared mbufs beyond good ones */
+	if (k != n && k != 0)
+		move_bad_mbufs(mb, dr, n, n - k);
+
+	/* convert mbufs to iovecs and do actual crypto/auth processing */
+	if (k != 0)
+		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
+			l4ofs, clen, k);
+	return k;
+}
+
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
+}
+
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
+		UINT32_MAX);
+}
+
 /*
  * process outbound packets for SA with ESN support,
  * for algorithms that require SQN.hibits to be implictly included
@@ -425,6 +546,9 @@ esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	for (i = 0; i != num; i++) {
 		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
 			ml = rte_pktmbuf_lastseg(mb[i]);
+			/* remove high-order 32 bits of esn from packet len */
+			mb[i]->pkt_len -= sa->sqh_len;
+			ml->data_len -= sa->sqh_len;
 			icv = rte_pktmbuf_mtod_offset(ml, void *,
 				ml->data_len - icv_len);
 			remove_sqh(icv, icv_len);
@@ -494,7 +618,7 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
+		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
 
 		k += (rc >= 0);
 
@@ -522,7 +646,7 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num)
 {
 	int32_t rc;
-	uint32_t i, k, n, l2, l3;
+	uint32_t i, k, n;
 	uint64_t sqn;
 	rte_be64_t sqc;
 	struct rte_ipsec_sa *sa;
@@ -540,15 +664,11 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 
 	k = 0;
 	for (i = 0; i != n; i++) {
-		l2 = mb[i]->l2_len;
-		l3 = mb[i]->l3_len;
-
 		sqc = rte_cpu_to_be_64(sqn + i);
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
-			l2, l3, &icv);
+		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
 
 		k += (rc >= 0);
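
Usage note (an illustrative sketch, not part of the patch): the new cpu_outb_tun_pkt_prepare()/cpu_outb_trs_pkt_prepare() entry points back the synchronous "CPU crypto" prepare path that librte_ipsec installs for sessions of type RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO, and an application normally reaches them through the public rte_ipsec_pkt_cpu_prepare() wrapper, followed by rte_ipsec_pkt_process() to finalize the packets. The helper below is hypothetical (the name outb_cpu_crypto_burst and the assumption that "ss" was already set up via rte_ipsec_session_prepare() are ours); error handling is elided.

#include <rte_ipsec.h>
#include <rte_mbuf.h>

/* hypothetical outbound burst handler; "ss" is assumed to be an
 * rte_ipsec_session already initialized for CPU crypto
 * (RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) */
static uint16_t
outb_cpu_crypto_burst(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint16_t k;

	/* add ESP header/trailer and run cipher+auth synchronously on
	 * the calling lcore; failed packets are moved past the first
	 * "k" good ones and rte_errno is set */
	k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);

	/* post-process; for ESN-enabled SAs this strips the high-order
	 * sequence-number bits (sqh) that were authenticated but must
	 * not be transmitted, as done by esp_outb_sqh_process() above */
	k = rte_ipsec_pkt_process(ss, mb, k);

	/* mb[0 .. k-1] are now ready for rte_eth_tx_burst() */
	return k;
}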