/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

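/*
 * Signature of the per-mode (tunnel/transport) *process* routine used by
 * esp_inb_pkt_process() below: it strips ESP data from the packets, fills
 * sqn[] with the sequence numbers of accepted packets and dr[] with the
 * indexes of packets that have to be dropped.
 */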
typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
        struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
        const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
        uint32_t pofs, uint32_t plen)
{
        sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
        sop->cipher.data.length = plen - sa->ctp.cipher.length;
        sop->auth.data.offset = pofs + sa->ctp.auth.offset;
        sop->auth.data.length = plen - sa->ctp.auth.length;
        sop->auth.digest.data = icv->va;
        sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
        const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
        uint32_t pofs, uint32_t plen)
{
        sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
        sop->aead.data.length = plen - sa->ctp.cipher.length;
        sop->aead.digest.data = icv->va;
        sop->aead.digest.phys_addr = icv->pa;
        sop->aead.aad.data = icv->va + sa->icv_len;
        sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

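/*
 * Inbound ESP packet layout assumed below (RFC 4303), starting at pofs
 * (the end of the L2/L3 headers):
 *
 *   | esp_hdr (SPI, SEQ) | IV | payload | padding | esp_tail (pad_len, next) | ICV |
 *
 * The IV is read straight from the packet, right after the 8-byte ESP header.
 */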
/*
 * setup crypto op and crypto sym op for ESP inbound packet.
 */
static inline void
inb_cop_prepare(struct rte_crypto_op *cop,
        const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
        const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
        struct rte_crypto_sym_op *sop;
        struct aead_gcm_iv *gcm;
        struct aesctr_cnt_blk *ctr;
        uint64_t *ivc, *ivp;
        uint32_t algo;

        algo = sa->algo_type;
        ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
                pofs + sizeof(struct esp_hdr));

        /* fill sym op fields */
        sop = cop->sym;

        switch (algo) {
        case ALGO_TYPE_AES_GCM:
                sop_aead_prepare(sop, sa, icv, pofs, plen);

                /* fill AAD IV (located inside crypto op) */
                gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
                        sa->iv_ofs);
                aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
                break;
        case ALGO_TYPE_AES_CBC:
        case ALGO_TYPE_3DES_CBC:
                sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

                /* copy iv from the input packet to the cop */
                ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
                copy_iv(ivc, ivp, sa->iv_len);
                break;
        case ALGO_TYPE_AES_CTR:
                sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

                /* fill CTR block (located inside crypto op) */
                ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
                        sa->iv_ofs);
                aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
                break;
        case ALGO_TYPE_NULL:
                sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
                break;
        }
}

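/*
 * Layout at the end of the last mbuf segment after inb_pkt_xprepare():
 *
 *   ... payload | padding | esp_tail | [SQN.hi (ESN only)] | ICV | [AAD (AES-GCM only)]
 *
 * The extra sqh_len + aad_len bytes are placed in the segment's free
 * tailroom (checked in inb_pkt_prepare()) and are consumed by the crypto
 * device only; data_len/pkt_len are not changed here.
 */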
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
        const union sym_op_data *icv)
{
        struct aead_gcm_aad *aad;

        /* insert SQN.hi between ESP trailer and ICV */
        if (sa->sqh_len != 0)
                insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

        /*
         * fill AAD fields, if any (aad fields are placed after icv),
         * right now we support only one AEAD algorithm: AES-GCM.
         */
        if (sa->aad_len != 0) {
                aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
                aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
        }
}

/*
 * setup/update packet data and metadata for ESP inbound case.
 */
static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
        struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
{
        int32_t rc;
        uint64_t sqn;
        uint32_t clen, icv_ofs, plen;
        struct rte_mbuf *ml;
        struct esp_hdr *esph;

        esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);

        /*
         * retrieve and reconstruct SQN, then check it, then
         * convert it back into network byte order.
         */
        sqn = rte_be_to_cpu_32(esph->seq);
        if (IS_ESN(sa))
                sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);

        rc = esn_inb_check_sqn(rsn, sa, sqn);
        if (rc != 0)
                return rc;

        sqn = rte_cpu_to_be_64(sqn);

        /* start packet manipulation */
        plen = mb->pkt_len;
        plen = plen - hlen;

        ml = rte_pktmbuf_lastseg(mb);
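        /*
         * icv_ofs is where the ICV will start once the 4-byte SQN.hi word
         * has been inserted in front of it by inb_pkt_xprepare();
         * sa->sqh_len is 0 when ESN is not used, so the offset then points
         * at the ICV as received.
         */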
        icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;

        /* check that packet has a valid length */
        clen = plen - sa->ctp.cipher.length;
        if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
                return -EBADMSG;

        /*
         * We have to allocate space for AAD somewhere; for now just use the
         * free trailing space of the last segment. It would probably be more
         * convenient to reserve space for the AAD inside rte_crypto_op itself
         * (as is already done for the IV).
         */
        if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
                return -ENOSPC;

        icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
        icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

        inb_pkt_xprepare(sa, sqn, icv);
        return plen;
}

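/*
 * Typical lookaside-crypto call flow, sketched from rte_ipsec.h (the prepare
 * routine below is reached through rte_ipsec_pkt_crypto_prepare() for a
 * lookaside-none inbound session):
 *
 *   n = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
 *   (enqueue cop[0..n-1] to a cryptodev, dequeue them when completed)
 *   n = rte_ipsec_pkt_process(ss, mb, n);
 */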
/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
uint16_t
esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
        struct rte_crypto_op *cop[], uint16_t num)
{
        int32_t rc;
        uint32_t i, k, hl;
        struct rte_ipsec_sa *sa;
        struct rte_cryptodev_sym_session *cs;
        struct replay_sqn *rsn;
        union sym_op_data icv;
        uint32_t dr[num];

        sa = ss->sa;
        cs = ss->crypto.ses;
        rsn = rsn_acquire(sa);

        k = 0;
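        /*
         * k counts successfully prepared packets; the i-th failed packet's
         * index goes to dr[i - k], so move_bad_mbufs() below can compact
         * mb[] with all good packets first and the failed ones after them.
         */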
        for (i = 0; i != num; i++) {

                hl = mb[i]->l2_len + mb[i]->l3_len;
                rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
                if (rc >= 0) {
                        lksd_none_cop_prepare(cop[k], cs, mb[i]);
                        inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
                        k++;
                } else
                        dr[i - k] = i;
        }

        rsn_release(sa, rsn);

        /* copy not prepared mbufs beyond good ones */
        if (k != num && k != 0) {
                move_bad_mbufs(mb, dr, num, num - k);
                rte_errno = EBADMSG;
        }

        return k;
}

/*
 * Start of inbound packet processing.
 * This is the common part for both tunnel and transport modes.
 * Extract information that will be needed later from mbuf metadata and
 * actual packet data:
 * - mbuf for packet's last segment
 * - length of the L2/L3 headers
 * - esp tail structure
 */
static inline void
process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
        struct esp_tail *espt, uint32_t *hlen)
{
        const struct esp_tail *pt;

        ml[0] = rte_pktmbuf_lastseg(mb);
        hlen[0] = mb->l2_len + mb->l3_len;
        pt = rte_pktmbuf_mtod_offset(ml[0], const struct esp_tail *,
                ml[0]->data_len - tlen);
        espt[0] = pt[0];
}

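/*
 * Note: esp_pad_bytes (see pad.h) holds the default ESP padding pattern
 * 1, 2, 3, ... mandated by RFC 4303, so a plain memcmp() below is enough
 * to validate the padding contents.
 */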
/*
 * packet checks for transport mode:
 * - no reported IPsec related failures in ol_flags
 * - tail length is valid
 * - padding bytes are valid
 */
static inline int32_t
trs_process_check(const struct rte_mbuf *mb, const struct rte_mbuf *ml,
        struct esp_tail espt, uint32_t hlen, uint32_t tlen)
{
        const uint8_t *pd;
        int32_t ofs;

        ofs = ml->data_len - tlen;
        pd = rte_pktmbuf_mtod_offset(ml, const uint8_t *, ofs);

        return ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
                ofs < 0 || tlen + hlen > mb->pkt_len ||
                (espt.pad_len != 0 && memcmp(pd, esp_pad_bytes, espt.pad_len)));
}

/*
 * packet checks for tunnel mode:
 * - same as for transport mode
 * - esp tail next proto contains the value expected for that SA
 */
static inline int32_t
tun_process_check(const struct rte_mbuf *mb, struct rte_mbuf *ml,
        struct esp_tail espt, uint32_t hlen, const uint32_t tlen, uint8_t proto)
{
        return (trs_process_check(mb, ml, espt, hlen, tlen) ||
                espt.next_proto != proto);
}

/*
 * step two for tunnel mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV and, if needed, the L2/L3 headers
 *   (controlled by the *adj* value)
 */
static inline void *
tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
        uint32_t adj, uint32_t tlen, uint32_t *sqn)
{
        const struct esp_hdr *ph;

        /* read SQN value */
        ph = rte_pktmbuf_mtod_offset(mb, const struct esp_hdr *, hlen);
        sqn[0] = ph->seq;

        /* cut off ICV, ESP tail and padding bytes */
        ml->data_len -= tlen;
        mb->pkt_len -= tlen;

        /* cut off L2/L3 headers, ESP header and IV */
        return rte_pktmbuf_adj(mb, adj);
}

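/*
 * In transport mode the original L2/L3 headers are preserved: step two
 * below shifts them forward over the removed ESP header and IV
 * (remove_esph()), and trs_process() then patches the L3 header's
 * next-proto and length fields via update_trs_l3hdr().
 */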
/*
 * step two for transport mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV
 * - move L2/L3 header to fill the gap after ESP header removal
 */
static inline void *
trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
        uint32_t adj, uint32_t tlen, uint32_t *sqn)
{
        char *np, *op;

        /* get start of the packet before modifications */
        op = rte_pktmbuf_mtod(mb, char *);

        /* cut off ESP header and IV */
        np = tun_process_step2(mb, ml, hlen, adj, tlen, sqn);

        /* move header bytes to fill the gap after ESP header removal */
        remove_esph(np, op, hlen);
        return np;
}

/*
 * step three for transport mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 */
static inline void
trs_process_step3(struct rte_mbuf *mb)
{
        /* reset mbuf packet type */
        mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

        /* clear the PKT_RX_SEC_OFFLOAD flag if set */
        mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
}

/*
 * step three for tunnel mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 * - tx_offload
 */
static inline void
tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
{
        /* reset mbuf metadata: L2/L3 len, packet type */
        mb->packet_type = RTE_PTYPE_UNKNOWN;
        mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;

        /* clear the PKT_RX_SEC_OFFLOAD flag if set */
        mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
}


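/*
 * Note: for every accepted packet its (still big-endian) SQN is written
 * into sqn[]; esp_inb_rsn_update() consumes that array afterwards to
 * update the replay window.
 */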
/*
 * *process* function for tunnel packets
 */
static inline uint16_t
tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
        uint32_t sqn[], uint32_t dr[], uint16_t num)
{
        uint32_t adj, i, k, tl;
        uint32_t hl[num];
        struct esp_tail espt[num];
        struct rte_mbuf *ml[num];

        const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
        const uint32_t cofs = sa->ctp.cipher.offset;

        /*
         * to minimize stalls due to load latency,
         * read mbufs metadata and esp tail first.
         */
        for (i = 0; i != num; i++)
                process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);

        k = 0;
        for (i = 0; i != num; i++) {

                adj = hl[i] + cofs;
                tl = tlen + espt[i].pad_len;

                /* check that packet is valid */
                if (tun_process_check(mb[i], ml[i], espt[i], adj, tl,
                                sa->proto) == 0) {

                        /* modify packet's layout */
                        tun_process_step2(mb[i], ml[i], hl[i], adj,
                                tl, sqn + k);
                        /* update mbuf's metadata */
                        tun_process_step3(mb[i], sa->tx_offload.msk,
                                sa->tx_offload.val);
                        k++;
                } else
                        dr[i - k] = i;
        }

        return k;
}


/*
 * *process* function for transport packets
 */
static inline uint16_t
trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
        uint32_t sqn[], uint32_t dr[], uint16_t num)
{
        char *np;
        uint32_t i, k, l2, tl;
        uint32_t hl[num];
        struct esp_tail espt[num];
        struct rte_mbuf *ml[num];

        const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
        const uint32_t cofs = sa->ctp.cipher.offset;

        /*
         * to minimize stalls due to load latency,
         * read mbufs metadata and esp tail first.
         */
        for (i = 0; i != num; i++)
                process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);

        k = 0;
        for (i = 0; i != num; i++) {

                tl = tlen + espt[i].pad_len;
                l2 = mb[i]->l2_len;

                /* check that packet is valid */
                if (trs_process_check(mb[i], ml[i], espt[i], hl[i] + cofs,
                                tl) == 0) {

                        /* modify packet's layout */
                        np = trs_process_step2(mb[i], ml[i], hl[i], cofs, tl,
                                sqn + k);
                        update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
                                l2, hl[i] - l2, espt[i].next_proto);

                        /* update mbuf's metadata */
                        trs_process_step3(mb[i]);
                        k++;
                } else
                        dr[i - k] = i;
        }

        return k;
}

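/*
 * Note: the replay window is updated only here, after the packets have
 * already been decrypted, authenticated and stripped by the *process*
 * step above; esn_inb_update_sqn() (ipsec_sqn.h) rejects duplicates and
 * sequence numbers that fall outside the window.
 */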
/*
 * for group of ESP inbound packets perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
        uint32_t dr[], uint16_t num)
{
        uint32_t i, k;
        struct replay_sqn *rsn;

        /* replay not enabled */
        if (sa->replay.win_sz == 0)
                return num;

        rsn = rsn_update_start(sa);

        k = 0;
        for (i = 0; i != num; i++) {
                if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
                        k++;
                else
                        dr[i - k] = i;
        }

        rsn_update_finish(sa, rsn);
        return k;
}

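/*
 * Processing pipeline (shared by tunnel and transport modes):
 *   1. per-packet checks plus ESP header/trailer removal (process callback);
 *   2. packets that failed the checks are moved past the good ones;
 *   3. the replay window is checked/updated with the extracted SQNs;
 *   4. packets with a bad SQN are moved past the good ones as well.
 * rte_errno is set to EBADMSG if any packet was dropped.
 */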
/*
 * process group of ESP inbound packets.
 */
static inline uint16_t
esp_inb_pkt_process(const struct rte_ipsec_session *ss,
        struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
{
        uint32_t k, n;
        struct rte_ipsec_sa *sa;
        uint32_t sqn[num];
        uint32_t dr[num];

        sa = ss->sa;

        /* process packets, extract seq numbers */
        k = process(sa, mb, sqn, dr, num);

        /* handle unprocessed mbufs */
        if (k != num && k != 0)
                move_bad_mbufs(mb, dr, num, num - k);

        /* update SQN and replay window */
        n = esp_inb_rsn_update(sa, sqn, dr, k);

        /* handle mbufs with wrong SQN */
        if (n != k && n != 0)
                move_bad_mbufs(mb, dr, k, k - n);

        if (n != num)
                rte_errno = EBADMSG;

        return n;
}

/*
 * process group of ESP inbound tunnel packets.
 */
uint16_t
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
        struct rte_mbuf *mb[], uint16_t num)
{
        return esp_inb_pkt_process(ss, mb, num, tun_process);
}

/*
 * process group of ESP inbound transport packets.
 */
uint16_t
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
        struct rte_mbuf *mb[], uint16_t num)
{
        return esp_inb_pkt_process(ss, mb, num, trs_process);
}