/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

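/*
 * Prepare an inbound ESP packet for the crypto device. The expected
 * RFC 4303 layout is:
 *
 *   [ IP hdr | ESP hdr (SPI, seq) | IV | payload | pad | pad len | next hdr | ICV ]
 *
 * The payload length is validated against the cipher block size, then the
 * symmetric crypto op is filled in (AEAD, or cipher plus auth) for the
 * lookaside path. Inline-crypto SAs are decrypted in hardware and need no
 * crypto op, so they return immediately.
 */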
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

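	/*
	 * AEAD (AES-GCM) provides encryption and integrity in one transform;
	 * all other supported suites use a separate cipher and auth transform.
	 */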
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));

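		/*
		 * Per RFC 4106, the GCM counter block is the 4-byte salt from
		 * the SA, the 8-byte explicit IV from the packet, and a 32-bit
		 * block counter initialised to 1.
		 */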
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

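		/* The AAD for ESP is the 8-byte SPI + sequence number header. */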
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
				uint8_t *, IV_OFFSET);

		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
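			/*
			 * RFC 3686 counter block: nonce (salt), explicit IV,
			 * 32-bit counter starting at 1.
			 */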
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}

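/*
 * Post-process an inbound packet once the crypto op has completed. For
 * inline SAs the op status is first derived from the mbuf Rx security
 * offload flags. The ESP trailer padding is then verified and trimmed
 * together with the pad length, next header and ICV, and the inner packet
 * is restored: the tunnel header is stripped, or in transport mode the IP
 * header is moved over the ESP header and patched.
 */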
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

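		/* RFC 4303 default padding is the monotonic sequence 1, 2, 3, ... */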
		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}

	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}

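/*
 * Build an outbound ESP packet: determine the next-layer protocol, compute
 * the RFC 4303 trailer padding, prepend the tunnel or transport headers
 * plus the ESP header and IV, and, unless the packet is handled by inline
 * crypto hardware, fill in the symmetric crypto op.
 */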
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Padded payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			!(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
				sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}

	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (unlikely(new_ip == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf head room\n");
			return -ENOSPC;
		}
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/* set iv */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
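			/*
			 * Note: a zeroed IV keeps this example simple, but CBC
			 * requires an unpredictable per-packet IV (RFC 3602)
			 * for real deployments.
			 */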
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

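		/* AAD = the 8-byte SPI + sequence number ESP header */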
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}

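/*
 * Post-process an outbound packet: mark packets on inline SAs for Tx
 * security offload, otherwise check that the lookaside crypto op succeeded.
 */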
int
esp_outbound_post(struct rte_mbuf *m,
		struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
			return -1;
		}
	}

	return 0;
}