/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <crypto/aead.h>

#include <net/tls.h>

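/* Release any pages beyond target_size from the scatterlist and shrink the
 * tail entry so the list describes exactly target_size bytes, uncharging
 * the freed memory from the socket's send-buffer accounting.
 */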
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

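/* Trim both the plaintext and the encrypted scatterlists back to
 * target_size. The encrypted list is kept tls_ctx->overhead_size bytes
 * longer to leave room for the record header and authentication tag.
 */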
static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

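/* Grow a scatterlist until it describes len bytes, refilling the socket's
 * page-frag allocator as needed and coalescing with the previous entry when
 * the new chunk continues the same page. Returns -ENOMEM if memory or
 * send-buffer charging fails, -ENOSPC once MAX_SKB_FRAGS entries are used.
 */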
static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size,
		    int first_coalesce)
{
	struct page_frag *pfrag;
	unsigned int size = *sg_size;
	int num_elem = *sg_num_elem, use = 0, rc = 0;
	struct scatterlist *sge;
	unsigned int orig_offset;

	len -= size;
	pfrag = sk_page_frag(sk);

	while (len > 0) {
		if (!sk_page_frag_refill(sk, pfrag)) {
			rc = -ENOMEM;
			goto out;
		}

		use = min_t(int, len, pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, use)) {
			rc = -ENOMEM;
			goto out;
		}

		sk_mem_charge(sk, use);
		size += use;
		orig_offset = pfrag->offset;
		pfrag->offset += use;

		sge = sg + num_elem - 1;

		if (num_elem > first_coalesce && sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			sge++;
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			++num_elem;
			if (num_elem == MAX_SKB_FRAGS) {
				rc = -ENOSPC;
				break;
			}
		}

		len -= use;
	}
	goto out;

out:
	*sg_size = size;
	*sg_num_elem = num_elem;
	return rc;
}

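/* Wrappers around alloc_sg() for the encrypted and plaintext buffers. On
 * -ENOSPC the element count is clamped to the array size so it stays within
 * the bounds of the fixed-size scatterlist arrays.
 */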
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = alloc_sg(sk, len, ctx->sg_encrypted_data,
		      &ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0);

	if (rc == -ENOSPC)
		ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = alloc_sg(sk, len, ctx->sg_plaintext_data,
		      &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
		      tls_ctx->pending_open_record_frags);

	if (rc == -ENOSPC)
		ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);

	return rc;
}

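/* Drop the page references held by a scatterlist, return the charged bytes
 * to the socket's memory accounting, and reset the list to empty.
 */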
static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

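/* Run the AEAD encryption for one record. The first encrypted-data entry
 * is temporarily advanced past the record header so the ciphertext lands
 * after it; the offset and length are restored before returning, whether
 * or not encryption succeeded.
 */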
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->iv);
	rc = crypto_aead_encrypt(aead_req);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;

	return rc;
}

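/* Assemble and transmit one TLS record: build the AAD and record header,
 * encrypt the pending plaintext, then hand the encrypted scatterlist to
 * tls_push_sg(). Transport errors other than -EAGAIN abort the connection;
 * the record sequence number is advanced either way.
 */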
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = kzalloc(sizeof(struct aead_request) +
		      crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->rec_seq, tls_ctx->rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk);

	tls_advance_record_sn(sk, tls_ctx);
out_req:
	kfree(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

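/* Pin the user pages backing the iov and map them directly into the
 * plaintext scatterlist, avoiding a copy. Fails with -EFAULT if the pages
 * cannot be pinned or the scatterlist is already full.
 */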
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = ctx->sg_plaintext_size;
	int num_elem = ctx->sg_plaintext_num_elem;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&ctx->sg_plaintext_data[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	ctx->sg_plaintext_size = size;
	ctx->sg_plaintext_num_elem = num_elem;
	return rc;
}

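/* Copy user data into the already-allocated plaintext scatterlist pages,
 * advancing pending_open_record_frags for each fragment filled. This is
 * the non-zerocopy path used when the record stays open across calls.
 */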
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

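/* sendmsg() for a software-TLS TX socket. Data is staged into records of
 * at most TLS_MAX_PAYLOAD_SIZE bytes; a full record, or the end of the
 * message when MSG_MORE is not set, is encrypted and pushed immediately,
 * while a partial record is left open for the next call. Returns the
 * number of bytes accepted, or a negative error if nothing was copied.
 */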
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	ret = tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo);
	if (ret)
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
						 try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret < 0)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

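/* sendpage() for a software-TLS TX socket. The caller's page is referenced
 * and linked directly into the plaintext scatterlist, so only the encrypted
 * copy is ever made. Record framing and the wait/retry logic mirror
 * tls_sw_sendmsg().
 */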
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	ret = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
	if (ret)
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

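/* Free everything the software TX path allocated: the AEAD transform, any
 * staged scatterlist pages, and the contexts themselves.
 */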
void tls_sw_free_tx_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);

	kfree(ctx);
	kfree(tls_ctx);
}

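/* Set up software TLS transmit offload for a socket: validate the cipher
 * parameters (only AES-GCM-128 is supported), copy the salt/IV and record
 * sequence number, wire up the AAD + data scatterlist chains used as AEAD
 * input and output, and allocate and key the "gcm(aes)" transform.
 */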
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (ctx->priv_ctx) {
		rc = -EEXIST;
		goto out;
	}

	sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
	if (!sw_ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	crypto_info = &ctx->crypto_send.info;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tag_size = tag_size;
	ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
	ctx->iv_size = iv_size;
	ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
	if (!ctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	ctx->rec_seq_size = rec_seq_size;
	ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->rec_seq, rec_seq, rec_seq_size);

	sg_init_table(sw_ctx->sg_encrypted_data,
		      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
	sg_init_table(sw_ctx->sg_plaintext_data,
		      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

	sg_init_table(sw_ctx->sg_aead_in, 2);
	sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_in[1]);
	sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
	sg_init_table(sw_ctx->sg_aead_out, 2);
	sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_out[1]);
	sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);

	if (!sw_ctx->aead_send) {
		sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(sw_ctx->aead_send)) {
			rc = PTR_ERR(sw_ctx->aead_send);
			sw_ctx->aead_send = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(sw_ctx->aead_send, gcm_128_info->key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
	if (!rc)
		return 0;

free_aead:
	crypto_free_aead(sw_ctx->aead_send);
	sw_ctx->aead_send = NULL;
free_rec_seq:
	kfree(ctx->rec_seq);
	ctx->rec_seq = NULL;
free_iv:
	kfree(ctx->iv);
	ctx->iv = NULL;
free_priv:
	kfree(ctx->priv_ctx);
	ctx->priv_ctx = NULL;
out:
	return rc;
}