net/tls/tls_sw.c
1 /*
2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
8 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 */
37
38 #include <linux/sched/signal.h>
39 #include <linux/module.h>
40 #include <crypto/aead.h>
41
42 #include <net/strparser.h>
43 #include <net/tls.h>
44
45 #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
46
47 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
48 unsigned int recursion_level)
49 {
50 int start = skb_headlen(skb);
51 int i, chunk = start - offset;
52 struct sk_buff *frag_iter;
53 int elt = 0;
54
55 if (unlikely(recursion_level >= 24))
56 return -EMSGSIZE;
57
58 if (chunk > 0) {
59 if (chunk > len)
60 chunk = len;
61 elt++;
62 len -= chunk;
63 if (len == 0)
64 return elt;
65 offset += chunk;
66 }
67
68 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
69 int end;
70
71 WARN_ON(start > offset + len);
72
73 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
74 chunk = end - offset;
75 if (chunk > 0) {
76 if (chunk > len)
77 chunk = len;
78 elt++;
79 len -= chunk;
80 if (len == 0)
81 return elt;
82 offset += chunk;
83 }
84 start = end;
85 }
86
87 if (unlikely(skb_has_frag_list(skb))) {
88 skb_walk_frags(skb, frag_iter) {
89 int end, ret;
90
91 WARN_ON(start > offset + len);
92
93 end = start + frag_iter->len;
94 chunk = end - offset;
95 if (chunk > 0) {
96 if (chunk > len)
97 chunk = len;
98 ret = __skb_nsg(frag_iter, offset - start, chunk,
99 recursion_level + 1);
100 if (unlikely(ret < 0))
101 return ret;
102 elt += ret;
103 len -= chunk;
104 if (len == 0)
105 return elt;
106 offset += chunk;
107 }
108 start = end;
109 }
110 }
111 BUG_ON(len);
112 return elt;
113 }
114
115 /* Return the number of scatterlist elements required to completely map the
116 * skb, or -EMSGSIZE if the recursion depth is exceeded.
117 */
118 static int skb_nsg(struct sk_buff *skb, int offset, int len)
119 {
120 return __skb_nsg(skb, offset, len, 0);
121 }
122
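/* Completion callback for an asynchronous AEAD decryption. Propagates any
 * error to the socket, clears skb->sk (set by tls_do_decryption() to carry
 * the socket), frees the skb, releases the pages pinned for the zero-copy
 * output scatterlist (the first entry is the AAD buffer and is skipped),
 * frees the AEAD request and completes the async wait once the last
 * pending decryption has finished.
 */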
123 static void tls_decrypt_done(struct crypto_async_request *req, int err)
124 {
125 struct aead_request *aead_req = (struct aead_request *)req;
126 struct scatterlist *sgout = aead_req->dst;
127 struct tls_sw_context_rx *ctx;
128 struct tls_context *tls_ctx;
129 struct scatterlist *sg;
130 struct sk_buff *skb;
131 unsigned int pages;
132 int pending;
133
134 skb = (struct sk_buff *)req->data;
135 tls_ctx = tls_get_ctx(skb->sk);
136 ctx = tls_sw_ctx_rx(tls_ctx);
137 pending = atomic_dec_return(&ctx->decrypt_pending);
138
139 	/* Propagate the error if there was one */
140 if (err) {
141 ctx->async_wait.err = err;
142 tls_err_abort(skb->sk, err);
143 }
144
145 	/* After using skb->sk to propagate the socket through the crypto async
146 	 * callback, we need to NULL it again.
147 	 */
148 skb->sk = NULL;
149
150 /* Release the skb, pages and memory allocated for crypto req */
151 kfree_skb(skb);
152
153 /* Skip the first S/G entry as it points to AAD */
154 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
155 if (!sg)
156 break;
157 put_page(sg_page(sg));
158 }
159
160 kfree(aead_req);
161
162 if (!pending && READ_ONCE(ctx->async_notify))
163 complete(&ctx->async_wait.completion);
164 }
165
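/* Set up and submit one AEAD decryption request. In async mode the socket
 * is passed to tls_decrypt_done() via skb->sk and the request is counted
 * in decrypt_pending; -EINPROGRESS is then returned to the caller. In sync
 * mode the function blocks until the crypto layer completes the request.
 */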
166 static int tls_do_decryption(struct sock *sk,
167 struct sk_buff *skb,
168 struct scatterlist *sgin,
169 struct scatterlist *sgout,
170 char *iv_recv,
171 size_t data_len,
172 struct aead_request *aead_req,
173 bool async)
174 {
175 struct tls_context *tls_ctx = tls_get_ctx(sk);
176 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
177 int ret;
178
179 aead_request_set_tfm(aead_req, ctx->aead_recv);
180 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
181 aead_request_set_crypt(aead_req, sgin, sgout,
182 data_len + tls_ctx->rx.tag_size,
183 (u8 *)iv_recv);
184
185 if (async) {
186 /* Using skb->sk to push sk through to crypto async callback
187 * handler. This allows propagating errors up to the socket
188 * if needed. It _must_ be cleared in the async handler
189 * before kfree_skb is called. We _know_ skb->sk is NULL
190 * because it is a clone from strparser.
191 */
192 skb->sk = sk;
193 aead_request_set_callback(aead_req,
194 CRYPTO_TFM_REQ_MAY_BACKLOG,
195 tls_decrypt_done, skb);
196 atomic_inc(&ctx->decrypt_pending);
197 } else {
198 aead_request_set_callback(aead_req,
199 CRYPTO_TFM_REQ_MAY_BACKLOG,
200 crypto_req_done, &ctx->async_wait);
201 }
202
203 ret = crypto_aead_decrypt(aead_req);
204 if (ret == -EINPROGRESS) {
205 if (async)
206 return ret;
207
208 ret = crypto_wait_req(ret, &ctx->async_wait);
209 }
210
211 if (async)
212 atomic_dec(&ctx->decrypt_pending);
213
214 return ret;
215 }
216
217 static void tls_trim_both_msgs(struct sock *sk, int target_size)
218 {
219 struct tls_context *tls_ctx = tls_get_ctx(sk);
220 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
221 struct tls_rec *rec = ctx->open_rec;
222
223 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
224 if (target_size > 0)
225 target_size += tls_ctx->tx.overhead_size;
226 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
227 }
228
229 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
230 {
231 struct tls_context *tls_ctx = tls_get_ctx(sk);
232 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
233 struct tls_rec *rec = ctx->open_rec;
234 struct sk_msg *msg_en = &rec->msg_encrypted;
235
236 return sk_msg_alloc(sk, msg_en, len, 0);
237 }
238
239 static int tls_clone_plaintext_msg(struct sock *sk, int required)
240 {
241 struct tls_context *tls_ctx = tls_get_ctx(sk);
242 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
243 struct tls_rec *rec = ctx->open_rec;
244 struct sk_msg *msg_pl = &rec->msg_plaintext;
245 struct sk_msg *msg_en = &rec->msg_encrypted;
246 int skip, len;
247
248 	/* We add page references worth len bytes from the encrypted sg
249 	 * at the end of the plaintext sg. It is guaranteed that msg_en
250 	 * has enough room (ensured by the caller).
251 	 */
252 len = required - msg_pl->sg.size;
253
254 	/* Skip initial bytes in msg_en's data to be able to use the
255 	 * same offset for both plaintext and encrypted data.
256 	 */
257 skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;
258
259 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
260 }
261
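/* Allocate a new tls_rec, including room for the AEAD request, and
 * initialize its plaintext and encrypted sk_msgs. Both AEAD scatterlists
 * start with the AAD buffer; the actual data entries are chained in later
 * by tls_push_record().
 */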
262 static struct tls_rec *tls_get_rec(struct sock *sk)
263 {
264 struct tls_context *tls_ctx = tls_get_ctx(sk);
265 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
266 struct sk_msg *msg_pl, *msg_en;
267 struct tls_rec *rec;
268 int mem_size;
269
270 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
271
272 rec = kzalloc(mem_size, sk->sk_allocation);
273 if (!rec)
274 return NULL;
275
276 msg_pl = &rec->msg_plaintext;
277 msg_en = &rec->msg_encrypted;
278
279 sk_msg_init(msg_pl);
280 sk_msg_init(msg_en);
281
282 sg_init_table(rec->sg_aead_in, 2);
283 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
284 sizeof(rec->aad_space));
285 sg_unmark_end(&rec->sg_aead_in[1]);
286
287 sg_init_table(rec->sg_aead_out, 2);
288 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
289 sizeof(rec->aad_space));
290 sg_unmark_end(&rec->sg_aead_out[1]);
291
292 return rec;
293 }
294
295 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
296 {
297 sk_msg_free(sk, &rec->msg_encrypted);
298 sk_msg_free(sk, &rec->msg_plaintext);
299 kfree(rec);
300 }
301
302 static void tls_free_open_rec(struct sock *sk)
303 {
304 struct tls_context *tls_ctx = tls_get_ctx(sk);
305 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
306 struct tls_rec *rec = ctx->open_rec;
307
308 if (rec) {
309 tls_free_rec(sk, rec);
310 ctx->open_rec = NULL;
311 }
312 }
313
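/* Transmit encrypted records. First push out the partially sent record,
 * if any, then walk tx_list and transmit every record already marked
 * tx_ready, stopping at the first record whose encryption has not
 * completed. flags == -1 means "use the flags stored in each record".
 */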
314 int tls_tx_records(struct sock *sk, int flags)
315 {
316 struct tls_context *tls_ctx = tls_get_ctx(sk);
317 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
318 struct tls_rec *rec, *tmp;
319 struct sk_msg *msg_en;
320 int tx_flags, rc = 0;
321
322 if (tls_is_partially_sent_record(tls_ctx)) {
323 rec = list_first_entry(&ctx->tx_list,
324 struct tls_rec, list);
325
326 if (flags == -1)
327 tx_flags = rec->tx_flags;
328 else
329 tx_flags = flags;
330
331 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
332 if (rc)
333 goto tx_err;
334
335 /* Full record has been transmitted.
336 * Remove the head of tx_list
337 */
338 list_del(&rec->list);
339 sk_msg_free(sk, &rec->msg_plaintext);
340 kfree(rec);
341 }
342
343 /* Tx all ready records */
344 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
345 if (READ_ONCE(rec->tx_ready)) {
346 if (flags == -1)
347 tx_flags = rec->tx_flags;
348 else
349 tx_flags = flags;
350
351 msg_en = &rec->msg_encrypted;
352 rc = tls_push_sg(sk, tls_ctx,
353 &msg_en->sg.data[msg_en->sg.curr],
354 0, tx_flags);
355 if (rc)
356 goto tx_err;
357
358 list_del(&rec->list);
359 sk_msg_free(sk, &rec->msg_plaintext);
360 kfree(rec);
361 } else {
362 break;
363 }
364 }
365
366 tx_err:
367 if (rc < 0 && rc != -EAGAIN)
368 tls_err_abort(sk, EBADMSG);
369
370 return rc;
371 }
372
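/* Completion callback for an asynchronous AEAD encryption. Restores the
 * first data scatterlist element so it covers the TLS record header again,
 * records any error on the socket, marks the record as ready for
 * transmission and, if it sits at the head of tx_list, schedules the tx
 * worker to push it out.
 */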
373 static void tls_encrypt_done(struct crypto_async_request *req, int err)
374 {
375 struct aead_request *aead_req = (struct aead_request *)req;
376 struct sock *sk = req->data;
377 struct tls_context *tls_ctx = tls_get_ctx(sk);
378 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
379 struct scatterlist *sge;
380 struct sk_msg *msg_en;
381 struct tls_rec *rec;
382 bool ready = false;
383 int pending;
384
385 rec = container_of(aead_req, struct tls_rec, aead_req);
386 msg_en = &rec->msg_encrypted;
387
388 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
389 sge->offset -= tls_ctx->tx.prepend_size;
390 sge->length += tls_ctx->tx.prepend_size;
391
392 	/* Check if an error was previously set on the socket */
393 if (err || sk->sk_err) {
394 rec = NULL;
395
396 /* If err is already set on socket, return the same code */
397 if (sk->sk_err) {
398 ctx->async_wait.err = sk->sk_err;
399 } else {
400 ctx->async_wait.err = err;
401 tls_err_abort(sk, err);
402 }
403 }
404
405 if (rec) {
406 struct tls_rec *first_rec;
407
408 /* Mark the record as ready for transmission */
409 smp_store_mb(rec->tx_ready, true);
410
411 		/* If the just-encrypted record is at the head of tx_list, schedule tx */
412 first_rec = list_first_entry(&ctx->tx_list,
413 struct tls_rec, list);
414 if (rec == first_rec)
415 ready = true;
416 }
417
418 pending = atomic_dec_return(&ctx->encrypt_pending);
419
420 if (!pending && READ_ONCE(ctx->async_notify))
421 complete(&ctx->async_wait.completion);
422
423 if (!ready)
424 return;
425
426 /* Schedule the transmission */
427 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
428 schedule_delayed_work(&ctx->tx_work.work, 1);
429 }
430
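/* Build and submit the AEAD encryption request for the open record. The
 * first encrypted scatterlist element is temporarily advanced past the TLS
 * record header, which tls_push_record() has already filled in and which
 * must not be overwritten by ciphertext. The record is queued on tx_list
 * before submission; on synchronous success it is marked tx_ready, on
 * -EINPROGRESS the callback does so, and on any other error it is unlinked
 * again and the error returned.
 */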
431 static int tls_do_encryption(struct sock *sk,
432 struct tls_context *tls_ctx,
433 struct tls_sw_context_tx *ctx,
434 struct aead_request *aead_req,
435 size_t data_len, u32 start)
436 {
437 struct tls_rec *rec = ctx->open_rec;
438 struct sk_msg *msg_en = &rec->msg_encrypted;
439 struct scatterlist *sge = sk_msg_elem(msg_en, start);
440 int rc;
441
442 sge->offset += tls_ctx->tx.prepend_size;
443 sge->length -= tls_ctx->tx.prepend_size;
444
445 msg_en->sg.curr = start;
446
447 aead_request_set_tfm(aead_req, ctx->aead_send);
448 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
449 aead_request_set_crypt(aead_req, rec->sg_aead_in,
450 rec->sg_aead_out,
451 data_len, tls_ctx->tx.iv);
452
453 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
454 tls_encrypt_done, sk);
455
456 	/* Add the record to tx_list */
457 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
458 atomic_inc(&ctx->encrypt_pending);
459
460 rc = crypto_aead_encrypt(aead_req);
461 if (!rc || rc != -EINPROGRESS) {
462 atomic_dec(&ctx->encrypt_pending);
463 sge->offset -= tls_ctx->tx.prepend_size;
464 sge->length += tls_ctx->tx.prepend_size;
465 }
466
467 if (!rc) {
468 WRITE_ONCE(rec->tx_ready, true);
469 } else if (rc != -EINPROGRESS) {
470 list_del(&rec->list);
471 return rc;
472 }
473
474 	/* Unhook the record from the context if encryption did not fail */
475 ctx->open_rec = NULL;
476 tls_advance_record_sn(sk, &tls_ctx->tx);
477 return rc;
478 }
479
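/* Split the open record at the apply_bytes boundary (split_point). The
 * first part stays in 'from'; the remaining plaintext entries are moved to
 * a freshly allocated record returned through 'to', splitting a page in
 * the middle if needed. *orig_end saves the original sg end so the split
 * can be undone by tls_merge_open_record().
 */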
480 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
481 struct tls_rec **to, struct sk_msg *msg_opl,
482 struct sk_msg *msg_oen, u32 split_point,
483 u32 tx_overhead_size, u32 *orig_end)
484 {
485 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
486 struct scatterlist *sge, *osge, *nsge;
487 u32 orig_size = msg_opl->sg.size;
488 struct scatterlist tmp = { };
489 struct sk_msg *msg_npl;
490 struct tls_rec *new;
491 int ret;
492
493 new = tls_get_rec(sk);
494 if (!new)
495 return -ENOMEM;
496 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
497 tx_overhead_size, 0);
498 if (ret < 0) {
499 tls_free_rec(sk, new);
500 return ret;
501 }
502
503 *orig_end = msg_opl->sg.end;
504 i = msg_opl->sg.start;
505 sge = sk_msg_elem(msg_opl, i);
506 while (apply && sge->length) {
507 if (sge->length > apply) {
508 u32 len = sge->length - apply;
509
510 get_page(sg_page(sge));
511 sg_set_page(&tmp, sg_page(sge), len,
512 sge->offset + apply);
513 sge->length = apply;
514 bytes += apply;
515 apply = 0;
516 } else {
517 apply -= sge->length;
518 bytes += sge->length;
519 }
520
521 sk_msg_iter_var_next(i);
522 if (i == msg_opl->sg.end)
523 break;
524 sge = sk_msg_elem(msg_opl, i);
525 }
526
527 msg_opl->sg.end = i;
528 msg_opl->sg.curr = i;
529 msg_opl->sg.copybreak = 0;
530 msg_opl->apply_bytes = 0;
531 msg_opl->sg.size = bytes;
532
533 msg_npl = &new->msg_plaintext;
534 msg_npl->apply_bytes = apply;
535 msg_npl->sg.size = orig_size - bytes;
536
537 j = msg_npl->sg.start;
538 nsge = sk_msg_elem(msg_npl, j);
539 if (tmp.length) {
540 memcpy(nsge, &tmp, sizeof(*nsge));
541 sk_msg_iter_var_next(j);
542 nsge = sk_msg_elem(msg_npl, j);
543 }
544
545 osge = sk_msg_elem(msg_opl, i);
546 while (osge->length) {
547 memcpy(nsge, osge, sizeof(*nsge));
548 sg_unmark_end(nsge);
549 sk_msg_iter_var_next(i);
550 sk_msg_iter_var_next(j);
551 if (i == *orig_end)
552 break;
553 osge = sk_msg_elem(msg_opl, i);
554 nsge = sk_msg_elem(msg_npl, j);
555 }
556
557 msg_npl->sg.end = j;
558 msg_npl->sg.curr = j;
559 msg_npl->sg.copybreak = 0;
560
561 *to = new;
562 return 0;
563 }
564
565 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
566 struct tls_rec *from, u32 orig_end)
567 {
568 struct sk_msg *msg_npl = &from->msg_plaintext;
569 struct sk_msg *msg_opl = &to->msg_plaintext;
570 struct scatterlist *osge, *nsge;
571 u32 i, j;
572
573 i = msg_opl->sg.end;
574 sk_msg_iter_var_prev(i);
575 j = msg_npl->sg.start;
576
577 osge = sk_msg_elem(msg_opl, i);
578 nsge = sk_msg_elem(msg_npl, j);
579
580 if (sg_page(osge) == sg_page(nsge) &&
581 osge->offset + osge->length == nsge->offset) {
582 osge->length += nsge->length;
583 put_page(sg_page(nsge));
584 }
585
586 msg_opl->sg.end = orig_end;
587 msg_opl->sg.curr = orig_end;
588 msg_opl->sg.copybreak = 0;
589 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
590 msg_opl->sg.size += msg_npl->sg.size;
591
592 sk_msg_free(sk, &to->msg_encrypted);
593 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
594
595 kfree(from);
596 }
597
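/* Encrypt the currently open record and queue it for transmission. If only
 * part of the record is covered by apply_bytes it is split first and the
 * remainder becomes the new open record. Builds the AAD and the TLS record
 * header, chains the data scatterlists into the AEAD request, hands it to
 * tls_do_encryption() and finally pushes completed records out through
 * tls_tx_records().
 */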
598 static int tls_push_record(struct sock *sk, int flags,
599 unsigned char record_type)
600 {
601 struct tls_context *tls_ctx = tls_get_ctx(sk);
602 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
603 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
604 u32 i, split_point, uninitialized_var(orig_end);
605 struct sk_msg *msg_pl, *msg_en;
606 struct aead_request *req;
607 bool split;
608 int rc;
609
610 if (!rec)
611 return 0;
612
613 msg_pl = &rec->msg_plaintext;
614 msg_en = &rec->msg_encrypted;
615
616 split_point = msg_pl->apply_bytes;
617 split = split_point && split_point < msg_pl->sg.size;
618 if (split) {
619 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
620 split_point, tls_ctx->tx.overhead_size,
621 &orig_end);
622 if (rc < 0)
623 return rc;
624 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
625 tls_ctx->tx.overhead_size);
626 }
627
628 rec->tx_flags = flags;
629 req = &rec->aead_req;
630
631 i = msg_pl->sg.end;
632 sk_msg_iter_var_prev(i);
633 sg_mark_end(sk_msg_elem(msg_pl, i));
634
635 i = msg_pl->sg.start;
636 sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
637 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
638
639 i = msg_en->sg.end;
640 sk_msg_iter_var_prev(i);
641 sg_mark_end(sk_msg_elem(msg_en, i));
642
643 i = msg_en->sg.start;
644 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
645
646 tls_make_aad(rec->aad_space, msg_pl->sg.size,
647 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
648 record_type);
649
650 tls_fill_prepend(tls_ctx,
651 page_address(sg_page(&msg_en->sg.data[i])) +
652 msg_en->sg.data[i].offset, msg_pl->sg.size,
653 record_type);
654
655 tls_ctx->pending_open_record_frags = false;
656
657 rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i);
658 if (rc < 0) {
659 if (rc != -EINPROGRESS) {
660 tls_err_abort(sk, EBADMSG);
661 if (split) {
662 tls_ctx->pending_open_record_frags = true;
663 tls_merge_open_record(sk, rec, tmp, orig_end);
664 }
665 }
666 return rc;
667 } else if (split) {
668 msg_pl = &tmp->msg_plaintext;
669 msg_en = &tmp->msg_encrypted;
670 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
671 tls_ctx->tx.overhead_size);
672 tls_ctx->pending_open_record_frags = true;
673 ctx->open_rec = tmp;
674 }
675
676 return tls_tx_records(sk, flags);
677 }
678
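/* Apply the BPF sockmap verdict, if a psock with a policy is attached, to
 * the data queued in the open record: __SK_PASS encrypts and sends it on
 * this socket, __SK_REDIRECT hands the plaintext to another socket via
 * tcp_bpf_sendmsg_redir(), and __SK_DROP frees it and returns -EACCES.
 * Without a psock (or with MSG_SENDPAGE_NOPOLICY) the record is simply
 * pushed out.
 */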
679 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
680 bool full_record, u8 record_type,
681 size_t *copied, int flags)
682 {
683 struct tls_context *tls_ctx = tls_get_ctx(sk);
684 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
685 struct sk_msg msg_redir = { };
686 struct sk_psock *psock;
687 struct sock *sk_redir;
688 struct tls_rec *rec;
689 bool enospc, policy;
690 int err = 0, send;
691 u32 delta = 0;
692
693 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
694 psock = sk_psock_get(sk);
695 if (!psock || !policy)
696 return tls_push_record(sk, flags, record_type);
697 more_data:
698 enospc = sk_msg_full(msg);
699 if (psock->eval == __SK_NONE) {
700 delta = msg->sg.size;
701 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
702 if (delta < msg->sg.size)
703 delta -= msg->sg.size;
704 else
705 delta = 0;
706 }
707 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
708 !enospc && !full_record) {
709 err = -ENOSPC;
710 goto out_err;
711 }
712 msg->cork_bytes = 0;
713 send = msg->sg.size;
714 if (msg->apply_bytes && msg->apply_bytes < send)
715 send = msg->apply_bytes;
716
717 switch (psock->eval) {
718 case __SK_PASS:
719 err = tls_push_record(sk, flags, record_type);
720 if (err < 0) {
721 *copied -= sk_msg_free(sk, msg);
722 tls_free_open_rec(sk);
723 goto out_err;
724 }
725 break;
726 case __SK_REDIRECT:
727 sk_redir = psock->sk_redir;
728 memcpy(&msg_redir, msg, sizeof(*msg));
729 if (msg->apply_bytes < send)
730 msg->apply_bytes = 0;
731 else
732 msg->apply_bytes -= send;
733 sk_msg_return_zero(sk, msg, send);
734 msg->sg.size -= send;
735 release_sock(sk);
736 err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
737 lock_sock(sk);
738 if (err < 0) {
739 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
740 msg->sg.size = 0;
741 }
742 if (msg->sg.size == 0)
743 tls_free_open_rec(sk);
744 break;
745 case __SK_DROP:
746 default:
747 sk_msg_free_partial(sk, msg, send);
748 if (msg->apply_bytes < send)
749 msg->apply_bytes = 0;
750 else
751 msg->apply_bytes -= send;
752 if (msg->sg.size == 0)
753 tls_free_open_rec(sk);
754 *copied -= (send + delta);
755 err = -EACCES;
756 }
757
758 if (likely(!err)) {
759 bool reset_eval = !ctx->open_rec;
760
761 rec = ctx->open_rec;
762 if (rec) {
763 msg = &rec->msg_plaintext;
764 if (!msg->apply_bytes)
765 reset_eval = true;
766 }
767 if (reset_eval) {
768 psock->eval = __SK_NONE;
769 if (psock->sk_redir) {
770 sock_put(psock->sk_redir);
771 psock->sk_redir = NULL;
772 }
773 }
774 if (rec)
775 goto more_data;
776 }
777 out_err:
778 sk_psock_put(sk, psock);
779 return err;
780 }
781
782 static int tls_sw_push_pending_record(struct sock *sk, int flags)
783 {
784 struct tls_context *tls_ctx = tls_get_ctx(sk);
785 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
786 struct tls_rec *rec = ctx->open_rec;
787 struct sk_msg *msg_pl;
788 size_t copied;
789
790 if (!rec)
791 return 0;
792
793 msg_pl = &rec->msg_plaintext;
794 copied = msg_pl->sg.size;
795 if (!copied)
796 return 0;
797
798 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
799 &copied, flags);
800 }
801
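/* sendmsg() for a TLS_SW transmit socket. User data is copied, or mapped
 * zero-copy when possible, into the open record up to TLS_MAX_PAYLOAD_SIZE,
 * and full records (or the final record when MSG_MORE is not set) are
 * encrypted and transmitted through bpf_exec_tx_verdict(). When the AEAD
 * completes asynchronously, pending zero-copy encryptions are waited for
 * and any records that finished in the meantime are transmitted before
 * returning.
 */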
802 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
803 {
804 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
805 struct tls_context *tls_ctx = tls_get_ctx(sk);
806 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
807 struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
808 bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
809 unsigned char record_type = TLS_RECORD_TYPE_DATA;
810 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
811 bool eor = !(msg->msg_flags & MSG_MORE);
812 size_t try_to_copy, copied = 0;
813 struct sk_msg *msg_pl, *msg_en;
814 struct tls_rec *rec;
815 int required_size;
816 int num_async = 0;
817 bool full_record;
818 int record_room;
819 int num_zc = 0;
820 int orig_size;
821 int ret = 0;
822
823 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
824 return -ENOTSUPP;
825
826 lock_sock(sk);
827
828 	/* Wait for any pending writes on the socket to complete */
829 if (unlikely(sk->sk_write_pending)) {
830 ret = wait_on_pending_writer(sk, &timeo);
831 if (unlikely(ret))
832 goto send_end;
833 }
834
835 if (unlikely(msg->msg_controllen)) {
836 ret = tls_proccess_cmsg(sk, msg, &record_type);
837 if (ret) {
838 if (ret == -EINPROGRESS)
839 num_async++;
840 else if (ret != -EAGAIN)
841 goto send_end;
842 }
843 }
844
845 while (msg_data_left(msg)) {
846 if (sk->sk_err) {
847 ret = -sk->sk_err;
848 goto send_end;
849 }
850
851 if (ctx->open_rec)
852 rec = ctx->open_rec;
853 else
854 rec = ctx->open_rec = tls_get_rec(sk);
855 if (!rec) {
856 ret = -ENOMEM;
857 goto send_end;
858 }
859
860 msg_pl = &rec->msg_plaintext;
861 msg_en = &rec->msg_encrypted;
862
863 orig_size = msg_pl->sg.size;
864 full_record = false;
865 try_to_copy = msg_data_left(msg);
866 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
867 if (try_to_copy >= record_room) {
868 try_to_copy = record_room;
869 full_record = true;
870 }
871
872 required_size = msg_pl->sg.size + try_to_copy +
873 tls_ctx->tx.overhead_size;
874
875 if (!sk_stream_memory_free(sk))
876 goto wait_for_sndbuf;
877
878 alloc_encrypted:
879 ret = tls_alloc_encrypted_msg(sk, required_size);
880 if (ret) {
881 if (ret != -ENOSPC)
882 goto wait_for_memory;
883
884 			/* Adjust try_to_copy according to the amount that was
885 			 * actually allocated. The difference is due to the
886 			 * max sg elements limit.
887 			 */
888 try_to_copy -= required_size - msg_en->sg.size;
889 full_record = true;
890 }
891
892 if (!is_kvec && (full_record || eor) && !async_capable) {
893 u32 first = msg_pl->sg.end;
894
895 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
896 msg_pl, try_to_copy);
897 if (ret)
898 goto fallback_to_reg_send;
899
900 rec->inplace_crypto = 0;
901
902 num_zc++;
903 copied += try_to_copy;
904
905 sk_msg_sg_copy_set(msg_pl, first);
906 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
907 record_type, &copied,
908 msg->msg_flags);
909 if (ret) {
910 if (ret == -EINPROGRESS)
911 num_async++;
912 else if (ret == -ENOMEM)
913 goto wait_for_memory;
914 else if (ret == -ENOSPC)
915 goto rollback_iter;
916 else if (ret != -EAGAIN)
917 goto send_end;
918 }
919 continue;
920 rollback_iter:
921 copied -= try_to_copy;
922 sk_msg_sg_copy_clear(msg_pl, first);
923 iov_iter_revert(&msg->msg_iter,
924 msg_pl->sg.size - orig_size);
925 fallback_to_reg_send:
926 sk_msg_trim(sk, msg_pl, orig_size);
927 }
928
929 required_size = msg_pl->sg.size + try_to_copy;
930
931 ret = tls_clone_plaintext_msg(sk, required_size);
932 if (ret) {
933 if (ret != -ENOSPC)
934 goto send_end;
935
936 			/* Adjust try_to_copy according to the amount that was
937 			 * actually allocated. The difference is due to the
938 			 * max sg elements limit.
939 			 */
940 try_to_copy -= required_size - msg_pl->sg.size;
941 full_record = true;
942 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
943 tls_ctx->tx.overhead_size);
944 }
945
946 if (try_to_copy) {
947 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
948 msg_pl, try_to_copy);
949 if (ret < 0)
950 goto trim_sgl;
951 }
952
953 		/* Open records are defined only if the copy succeeded; otherwise
954 		 * we would trim the sg but not reset the open record frags.
955 		 */
956 tls_ctx->pending_open_record_frags = true;
957 copied += try_to_copy;
958 if (full_record || eor) {
959 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
960 record_type, &copied,
961 msg->msg_flags);
962 if (ret) {
963 if (ret == -EINPROGRESS)
964 num_async++;
965 else if (ret == -ENOMEM)
966 goto wait_for_memory;
967 else if (ret != -EAGAIN) {
968 if (ret == -ENOSPC)
969 ret = 0;
970 goto send_end;
971 }
972 }
973 }
974
975 continue;
976
977 wait_for_sndbuf:
978 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
979 wait_for_memory:
980 ret = sk_stream_wait_memory(sk, &timeo);
981 if (ret) {
982 trim_sgl:
983 tls_trim_both_msgs(sk, orig_size);
984 goto send_end;
985 }
986
987 if (msg_en->sg.size < required_size)
988 goto alloc_encrypted;
989 }
990
991 if (!num_async) {
992 goto send_end;
993 } else if (num_zc) {
994 /* Wait for pending encryptions to get completed */
995 smp_store_mb(ctx->async_notify, true);
996
997 if (atomic_read(&ctx->encrypt_pending))
998 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
999 else
1000 reinit_completion(&ctx->async_wait.completion);
1001
1002 WRITE_ONCE(ctx->async_notify, false);
1003
1004 if (ctx->async_wait.err) {
1005 ret = ctx->async_wait.err;
1006 copied = 0;
1007 }
1008 }
1009
1010 /* Transmit if any encryptions have completed */
1011 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1012 cancel_delayed_work(&ctx->tx_work.work);
1013 tls_tx_records(sk, msg->msg_flags);
1014 }
1015
1016 send_end:
1017 ret = sk_stream_error(sk, msg->msg_flags, ret);
1018
1019 release_sock(sk);
1020 return copied ? copied : ret;
1021 }
1022
1023 int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1024 int offset, size_t size, int flags)
1025 {
1026 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1027 struct tls_context *tls_ctx = tls_get_ctx(sk);
1028 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1029 unsigned char record_type = TLS_RECORD_TYPE_DATA;
1030 struct sk_msg *msg_pl;
1031 struct tls_rec *rec;
1032 int num_async = 0;
1033 size_t copied = 0;
1034 bool full_record;
1035 int record_room;
1036 int ret = 0;
1037 bool eor;
1038
1039 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
1040 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1041
1042 	/* Wait for any pending writes on the socket to complete */
1043 if (unlikely(sk->sk_write_pending)) {
1044 ret = wait_on_pending_writer(sk, &timeo);
1045 if (unlikely(ret))
1046 goto sendpage_end;
1047 }
1048
1049 /* Call the sk_stream functions to manage the sndbuf mem. */
1050 while (size > 0) {
1051 size_t copy, required_size;
1052
1053 if (sk->sk_err) {
1054 ret = -sk->sk_err;
1055 goto sendpage_end;
1056 }
1057
1058 if (ctx->open_rec)
1059 rec = ctx->open_rec;
1060 else
1061 rec = ctx->open_rec = tls_get_rec(sk);
1062 if (!rec) {
1063 ret = -ENOMEM;
1064 goto sendpage_end;
1065 }
1066
1067 msg_pl = &rec->msg_plaintext;
1068
1069 full_record = false;
1070 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1071 copied = 0;
1072 copy = size;
1073 if (copy >= record_room) {
1074 copy = record_room;
1075 full_record = true;
1076 }
1077
1078 required_size = msg_pl->sg.size + copy +
1079 tls_ctx->tx.overhead_size;
1080
1081 if (!sk_stream_memory_free(sk))
1082 goto wait_for_sndbuf;
1083 alloc_payload:
1084 ret = tls_alloc_encrypted_msg(sk, required_size);
1085 if (ret) {
1086 if (ret != -ENOSPC)
1087 goto wait_for_memory;
1088
1089 			/* Adjust copy according to the amount that was
1090 			 * actually allocated. The difference is due to the
1091 			 * max sg elements limit.
1092 			 */
1093 copy -= required_size - msg_pl->sg.size;
1094 full_record = true;
1095 }
1096
1097 sk_msg_page_add(msg_pl, page, copy, offset);
1098 sk_mem_charge(sk, copy);
1099
1100 offset += copy;
1101 size -= copy;
1102 copied += copy;
1103
1104 tls_ctx->pending_open_record_frags = true;
1105 if (full_record || eor || sk_msg_full(msg_pl)) {
1106 rec->inplace_crypto = 0;
1107 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1108 record_type, &copied, flags);
1109 if (ret) {
1110 if (ret == -EINPROGRESS)
1111 num_async++;
1112 else if (ret == -ENOMEM)
1113 goto wait_for_memory;
1114 else if (ret != -EAGAIN) {
1115 if (ret == -ENOSPC)
1116 ret = 0;
1117 goto sendpage_end;
1118 }
1119 }
1120 }
1121 continue;
1122 wait_for_sndbuf:
1123 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1124 wait_for_memory:
1125 ret = sk_stream_wait_memory(sk, &timeo);
1126 if (ret) {
1127 tls_trim_both_msgs(sk, msg_pl->sg.size);
1128 goto sendpage_end;
1129 }
1130
1131 goto alloc_payload;
1132 }
1133
1134 if (num_async) {
1135 /* Transmit if any encryptions have completed */
1136 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1137 cancel_delayed_work(&ctx->tx_work.work);
1138 tls_tx_records(sk, flags);
1139 }
1140 }
1141 sendpage_end:
1142 ret = sk_stream_error(sk, flags, ret);
1143 return copied ? copied : ret;
1144 }
1145
1146 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1147 int offset, size_t size, int flags)
1148 {
1149 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1150 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1151 return -ENOTSUPP;
1152
1153 return tls_sw_do_sendpage(sk, page, offset, size, flags);
1154 }
1155
1156 int tls_sw_sendpage(struct sock *sk, struct page *page,
1157 int offset, size_t size, int flags)
1158 {
1159 int ret;
1160
1161 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1162 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1163 return -ENOTSUPP;
1164
1165 lock_sock(sk);
1166 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1167 release_sock(sk);
1168 return ret;
1169 }
1170
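/* Wait for the strparser to deliver a complete TLS record, or for data to
 * appear on the psock ingress queue, honouring the receive timeout,
 * MSG_DONTWAIT, socket errors, shutdown and pending signals.
 */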
1171 static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1172 int flags, long timeo, int *err)
1173 {
1174 struct tls_context *tls_ctx = tls_get_ctx(sk);
1175 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1176 struct sk_buff *skb;
1177 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1178
1179 while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
1180 if (sk->sk_err) {
1181 *err = sock_error(sk);
1182 return NULL;
1183 }
1184
1185 if (sk->sk_shutdown & RCV_SHUTDOWN)
1186 return NULL;
1187
1188 if (sock_flag(sk, SOCK_DONE))
1189 return NULL;
1190
1191 if ((flags & MSG_DONTWAIT) || !timeo) {
1192 *err = -EAGAIN;
1193 return NULL;
1194 }
1195
1196 add_wait_queue(sk_sleep(sk), &wait);
1197 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1198 sk_wait_event(sk, &timeo,
1199 ctx->recv_pkt != skb ||
1200 !sk_psock_queue_empty(psock),
1201 &wait);
1202 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1203 remove_wait_queue(sk_sleep(sk), &wait);
1204
1205 /* Handle signals */
1206 if (signal_pending(current)) {
1207 *err = sock_intr_errno(timeo);
1208 return NULL;
1209 }
1210 }
1211
1212 return skb;
1213 }
1214
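/* Pin the user pages backing 'from' and map up to 'length' bytes of them
 * into the scatterlist 'to', starting after the first *pages_used entries.
 * *pages_used and *size_used are updated; on failure the iterator is
 * reverted by the amount consumed in this call.
 */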
1215 static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1216 int length, int *pages_used,
1217 unsigned int *size_used,
1218 struct scatterlist *to,
1219 int to_max_pages)
1220 {
1221 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1222 struct page *pages[MAX_SKB_FRAGS];
1223 unsigned int size = *size_used;
1224 ssize_t copied, use;
1225 size_t offset;
1226
1227 while (length > 0) {
1228 i = 0;
1229 maxpages = to_max_pages - num_elem;
1230 if (maxpages == 0) {
1231 rc = -EFAULT;
1232 goto out;
1233 }
1234 copied = iov_iter_get_pages(from, pages,
1235 length,
1236 maxpages, &offset);
1237 if (copied <= 0) {
1238 rc = -EFAULT;
1239 goto out;
1240 }
1241
1242 iov_iter_advance(from, copied);
1243
1244 length -= copied;
1245 size += copied;
1246 while (copied) {
1247 use = min_t(int, copied, PAGE_SIZE - offset);
1248
1249 sg_set_page(&to[num_elem],
1250 pages[i], use, offset);
1251 sg_unmark_end(&to[num_elem]);
1252 /* We do not uncharge memory from this API */
1253
1254 offset = 0;
1255 copied -= use;
1256
1257 i++;
1258 num_elem++;
1259 }
1260 }
1261 /* Mark the end in the last sg entry if newly added */
1262 if (num_elem > *pages_used)
1263 sg_mark_end(&to[num_elem - 1]);
1264 out:
1265 if (rc)
1266 iov_iter_revert(from, size - *size_used);
1267 *size_used = size;
1268 *pages_used = num_elem;
1269
1270 return rc;
1271 }
1272
1273 /* This function decrypts the input skb into either out_iov or out_sg,
1274  * or in place in the skb buffers themselves. The input parameter 'zc'
1275  * indicates whether zero-copy mode should be tried. With zero-copy mode,
1276  * either out_iov or out_sg must be non-NULL. If both out_iov and out_sg
1277  * are NULL, the decryption happens inside the skb buffers themselves,
1278  * i.e. zero-copy gets disabled and 'zc' is updated.
1279  */
1280
1281 static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1282 struct iov_iter *out_iov,
1283 struct scatterlist *out_sg,
1284 int *chunk, bool *zc)
1285 {
1286 struct tls_context *tls_ctx = tls_get_ctx(sk);
1287 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1288 struct strp_msg *rxm = strp_msg(skb);
1289 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1290 struct aead_request *aead_req;
1291 struct sk_buff *unused;
1292 u8 *aad, *iv, *mem = NULL;
1293 struct scatterlist *sgin = NULL;
1294 struct scatterlist *sgout = NULL;
1295 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;
1296
1297 if (*zc && (out_iov || out_sg)) {
1298 if (out_iov)
1299 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1300 else
1301 n_sgout = sg_nents(out_sg);
1302 n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
1303 rxm->full_len - tls_ctx->rx.prepend_size);
1304 } else {
1305 n_sgout = 0;
1306 *zc = false;
1307 n_sgin = skb_cow_data(skb, 0, &unused);
1308 }
1309
1310 if (n_sgin < 1)
1311 return -EBADMSG;
1312
1313 /* Increment to accommodate AAD */
1314 n_sgin = n_sgin + 1;
1315
1316 nsg = n_sgin + n_sgout;
1317
1318 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1319 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
1320 mem_size = mem_size + TLS_AAD_SPACE_SIZE;
1321 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1322
1323 /* Allocate a single block of memory which contains
1324 * aead_req || sgin[] || sgout[] || aad || iv.
1325 * This order achieves correct alignment for aead_req, sgin, sgout.
1326 */
1327 mem = kmalloc(mem_size, sk->sk_allocation);
1328 if (!mem)
1329 return -ENOMEM;
1330
1331 /* Segment the allocated memory */
1332 aead_req = (struct aead_request *)mem;
1333 sgin = (struct scatterlist *)(mem + aead_size);
1334 sgout = sgin + n_sgin;
1335 aad = (u8 *)(sgout + n_sgout);
1336 iv = aad + TLS_AAD_SPACE_SIZE;
1337
1338 /* Prepare IV */
1339 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1340 iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1341 tls_ctx->rx.iv_size);
1342 if (err < 0) {
1343 kfree(mem);
1344 return err;
1345 }
1346 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1347
1348 /* Prepare AAD */
1349 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
1350 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
1351 ctx->control);
1352
1353 /* Prepare sgin */
1354 sg_init_table(sgin, n_sgin);
1355 sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
1356 err = skb_to_sgvec(skb, &sgin[1],
1357 rxm->offset + tls_ctx->rx.prepend_size,
1358 rxm->full_len - tls_ctx->rx.prepend_size);
1359 if (err < 0) {
1360 kfree(mem);
1361 return err;
1362 }
1363
1364 if (n_sgout) {
1365 if (out_iov) {
1366 sg_init_table(sgout, n_sgout);
1367 sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
1368
1369 *chunk = 0;
1370 err = tls_setup_from_iter(sk, out_iov, data_len,
1371 &pages, chunk, &sgout[1],
1372 (n_sgout - 1));
1373 if (err < 0)
1374 goto fallback_to_reg_recv;
1375 } else if (out_sg) {
1376 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1377 } else {
1378 goto fallback_to_reg_recv;
1379 }
1380 } else {
1381 fallback_to_reg_recv:
1382 sgout = sgin;
1383 pages = 0;
1384 *chunk = 0;
1385 *zc = false;
1386 }
1387
1388 /* Prepare and submit AEAD request */
1389 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1390 data_len, aead_req, *zc);
1391 if (err == -EINPROGRESS)
1392 return err;
1393
1394 /* Release the pages in case iov was mapped to pages */
1395 for (; pages > 0; pages--)
1396 put_page(sg_page(&sgout[pages]));
1397
1398 kfree(mem);
1399 return err;
1400 }
1401
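/* Decrypt the record carried by 'skb' unless it has already been decrypted
 * (by device offload or an earlier call), then strip the TLS header and
 * authentication tag from the strparser message and advance the receive
 * record sequence number.
 */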
1402 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1403 struct iov_iter *dest, int *chunk, bool *zc)
1404 {
1405 struct tls_context *tls_ctx = tls_get_ctx(sk);
1406 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1407 struct strp_msg *rxm = strp_msg(skb);
1408 int err = 0;
1409
1410 #ifdef CONFIG_TLS_DEVICE
1411 err = tls_device_decrypted(sk, skb);
1412 if (err < 0)
1413 return err;
1414 #endif
1415 if (!ctx->decrypted) {
1416 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
1417 if (err < 0) {
1418 if (err == -EINPROGRESS)
1419 tls_advance_record_sn(sk, &tls_ctx->rx);
1420
1421 return err;
1422 }
1423 } else {
1424 *zc = false;
1425 }
1426
1427 rxm->offset += tls_ctx->rx.prepend_size;
1428 rxm->full_len -= tls_ctx->rx.overhead_size;
1429 tls_advance_record_sn(sk, &tls_ctx->rx);
1430 ctx->decrypted = true;
1431 ctx->saved_data_ready(sk);
1432
1433 return err;
1434 }
1435
1436 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1437 struct scatterlist *sgout)
1438 {
1439 bool zc = true;
1440 int chunk;
1441
1442 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
1443 }
1444
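/* Consume 'len' bytes of the current record. Returns false if part of the
 * record remains for a later read; otherwise frees the skb, clears
 * recv_pkt and unpauses the strparser so the next record can be parsed.
 */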
1445 static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1446 unsigned int len)
1447 {
1448 struct tls_context *tls_ctx = tls_get_ctx(sk);
1449 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1450
1451 if (skb) {
1452 struct strp_msg *rxm = strp_msg(skb);
1453
1454 if (len < rxm->full_len) {
1455 rxm->offset += len;
1456 rxm->full_len -= len;
1457 return false;
1458 }
1459 kfree_skb(skb);
1460 }
1461
1462 /* Finished with message */
1463 ctx->recv_pkt = NULL;
1464 __strp_unpause(&ctx->strp);
1465
1466 return true;
1467 }
1468
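/* recvmsg() for a TLS_SW receive socket: deliver decrypted record payload,
 * zero-copying directly into the caller's iovec when possible, return the
 * record type as a TLS_GET_RECORD_TYPE control message, fall back to the
 * BPF psock ingress queue when no TLS record is queued, and wait for any
 * outstanding asynchronous decryptions before returning.
 */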
1469 int tls_sw_recvmsg(struct sock *sk,
1470 struct msghdr *msg,
1471 size_t len,
1472 int nonblock,
1473 int flags,
1474 int *addr_len)
1475 {
1476 struct tls_context *tls_ctx = tls_get_ctx(sk);
1477 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1478 struct sk_psock *psock;
1479 unsigned char control;
1480 struct strp_msg *rxm;
1481 struct sk_buff *skb;
1482 ssize_t copied = 0;
1483 bool cmsg = false;
1484 int target, err = 0;
1485 long timeo;
1486 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1487 int num_async = 0;
1488
1489 flags |= nonblock;
1490
1491 if (unlikely(flags & MSG_ERRQUEUE))
1492 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1493
1494 psock = sk_psock_get(sk);
1495 lock_sock(sk);
1496
1497 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1498 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1499 do {
1500 bool zc = false;
1501 bool async = false;
1502 int chunk = 0;
1503
1504 skb = tls_wait_data(sk, psock, flags, timeo, &err);
1505 if (!skb) {
1506 if (psock) {
1507 int ret = __tcp_bpf_recvmsg(sk, psock,
1508 msg, len, flags);
1509
1510 if (ret > 0) {
1511 copied += ret;
1512 len -= ret;
1513 continue;
1514 }
1515 }
1516 goto recv_end;
1517 }
1518
1519 rxm = strp_msg(skb);
1520
1521 if (!cmsg) {
1522 int cerr;
1523
1524 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1525 sizeof(ctx->control), &ctx->control);
1526 cmsg = true;
1527 control = ctx->control;
1528 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1529 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1530 err = -EIO;
1531 goto recv_end;
1532 }
1533 }
1534 } else if (control != ctx->control) {
1535 goto recv_end;
1536 }
1537
1538 if (!ctx->decrypted) {
1539 int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
1540
1541 if (!is_kvec && to_copy <= len &&
1542 likely(!(flags & MSG_PEEK)))
1543 zc = true;
1544
1545 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1546 &chunk, &zc);
1547 if (err < 0 && err != -EINPROGRESS) {
1548 tls_err_abort(sk, EBADMSG);
1549 goto recv_end;
1550 }
1551
1552 if (err == -EINPROGRESS) {
1553 async = true;
1554 num_async++;
1555 goto pick_next_record;
1556 }
1557
1558 ctx->decrypted = true;
1559 }
1560
1561 if (!zc) {
1562 chunk = min_t(unsigned int, rxm->full_len, len);
1563
1564 err = skb_copy_datagram_msg(skb, rxm->offset, msg,
1565 chunk);
1566 if (err < 0)
1567 goto recv_end;
1568 }
1569
1570 pick_next_record:
1571 copied += chunk;
1572 len -= chunk;
1573 if (likely(!(flags & MSG_PEEK))) {
1574 u8 control = ctx->control;
1575
1576 /* For async, drop current skb reference */
1577 if (async)
1578 skb = NULL;
1579
1580 if (tls_sw_advance_skb(sk, skb, chunk)) {
1581 /* Return full control message to
1582 * userspace before trying to parse
1583 * another message type
1584 */
1585 msg->msg_flags |= MSG_EOR;
1586 if (control != TLS_RECORD_TYPE_DATA)
1587 goto recv_end;
1588 } else {
1589 break;
1590 }
1591 } else {
1592 			/* MSG_PEEK right now cannot look beyond the current skb
1593 			 * from strparser, meaning we cannot advance the skb here
1594 			 * and thus unpause strparser, since we'd lose the
1595 			 * original one.
1596 			 */
1597 break;
1598 }
1599
1600 /* If we have a new message from strparser, continue now. */
1601 if (copied >= target && !ctx->recv_pkt)
1602 break;
1603 } while (len);
1604
1605 recv_end:
1606 if (num_async) {
1607 /* Wait for all previously submitted records to be decrypted */
1608 smp_store_mb(ctx->async_notify, true);
1609 if (atomic_read(&ctx->decrypt_pending)) {
1610 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1611 if (err) {
1612 				/* one of the async decrypts failed */
1613 tls_err_abort(sk, err);
1614 copied = 0;
1615 }
1616 } else {
1617 reinit_completion(&ctx->async_wait.completion);
1618 }
1619 WRITE_ONCE(ctx->async_notify, false);
1620 }
1621
1622 release_sock(sk);
1623 if (psock)
1624 sk_psock_put(sk, psock);
1625 return copied ? : err;
1626 }
1627
1628 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1629 struct pipe_inode_info *pipe,
1630 size_t len, unsigned int flags)
1631 {
1632 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1633 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1634 struct strp_msg *rxm = NULL;
1635 struct sock *sk = sock->sk;
1636 struct sk_buff *skb;
1637 ssize_t copied = 0;
1638 int err = 0;
1639 long timeo;
1640 int chunk;
1641 bool zc = false;
1642
1643 lock_sock(sk);
1644
1645 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1646
1647 skb = tls_wait_data(sk, NULL, flags, timeo, &err);
1648 if (!skb)
1649 goto splice_read_end;
1650
1651 /* splice does not support reading control messages */
1652 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1653 err = -ENOTSUPP;
1654 goto splice_read_end;
1655 }
1656
1657 if (!ctx->decrypted) {
1658 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);
1659
1660 if (err < 0) {
1661 tls_err_abort(sk, EBADMSG);
1662 goto splice_read_end;
1663 }
1664 ctx->decrypted = true;
1665 }
1666 rxm = strp_msg(skb);
1667
1668 chunk = min_t(unsigned int, rxm->full_len, len);
1669 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1670 if (copied < 0)
1671 goto splice_read_end;
1672
1673 if (likely(!(flags & MSG_PEEK)))
1674 tls_sw_advance_skb(sk, skb, copied);
1675
1676 splice_read_end:
1677 release_sock(sk);
1678 return copied ? : err;
1679 }
1680
1681 bool tls_sw_stream_read(const struct sock *sk)
1682 {
1683 struct tls_context *tls_ctx = tls_get_ctx(sk);
1684 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1685 bool ingress_empty = true;
1686 struct sk_psock *psock;
1687
1688 rcu_read_lock();
1689 psock = sk_psock(sk);
1690 if (psock)
1691 ingress_empty = list_empty(&psock->ingress_msg);
1692 rcu_read_unlock();
1693
1694 return !ingress_empty || ctx->recv_pkt;
1695 }
1696
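/* strparser parse_msg callback: read the 5 byte TLS record header, check
 * the protocol version and record length and return the full record size
 * (header plus payload), 0 if more data is needed, or a negative error,
 * which also aborts the connection.
 */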
1697 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1698 {
1699 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
1700 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1701 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
1702 struct strp_msg *rxm = strp_msg(skb);
1703 size_t cipher_overhead;
1704 size_t data_len = 0;
1705 int ret;
1706
1707 /* Verify that we have a full TLS header, or wait for more data */
1708 if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
1709 return 0;
1710
1711 /* Sanity-check size of on-stack buffer. */
1712 if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
1713 ret = -EINVAL;
1714 goto read_failure;
1715 }
1716
1717 /* Linearize header to local buffer */
1718 ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
1719
1720 if (ret < 0)
1721 goto read_failure;
1722
1723 ctx->control = header[0];
1724
1725 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1726
1727 cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;
1728
1729 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
1730 ret = -EMSGSIZE;
1731 goto read_failure;
1732 }
1733 if (data_len < cipher_overhead) {
1734 ret = -EBADMSG;
1735 goto read_failure;
1736 }
1737
1738 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
1739 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
1740 ret = -EINVAL;
1741 goto read_failure;
1742 }
1743
1744 #ifdef CONFIG_TLS_DEVICE
1745 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
1746 *(u64*)tls_ctx->rx.rec_seq);
1747 #endif
1748 return data_len + TLS_HEADER_SIZE;
1749
1750 read_failure:
1751 tls_err_abort(strp->sk, ret);
1752
1753 return ret;
1754 }
1755
1756 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
1757 {
1758 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
1759 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1760
1761 ctx->decrypted = false;
1762
1763 ctx->recv_pkt = skb;
1764 strp_pause(strp);
1765
1766 ctx->saved_data_ready(strp->sk);
1767 }
1768
1769 static void tls_data_ready(struct sock *sk)
1770 {
1771 struct tls_context *tls_ctx = tls_get_ctx(sk);
1772 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1773 struct sk_psock *psock;
1774
1775 strp_data_ready(&ctx->strp);
1776
1777 psock = sk_psock_get(sk);
1778 if (psock && !list_empty(&psock->ingress_msg)) {
1779 ctx->saved_data_ready(sk);
1780 sk_psock_put(sk, psock);
1781 }
1782 }
1783
1784 void tls_sw_free_resources_tx(struct sock *sk)
1785 {
1786 struct tls_context *tls_ctx = tls_get_ctx(sk);
1787 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1788 struct tls_rec *rec, *tmp;
1789
1790 /* Wait for any pending async encryptions to complete */
1791 smp_store_mb(ctx->async_notify, true);
1792 if (atomic_read(&ctx->encrypt_pending))
1793 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1794
1795 cancel_delayed_work_sync(&ctx->tx_work.work);
1796
1797 /* Tx whatever records we can transmit and abandon the rest */
1798 tls_tx_records(sk, -1);
1799
1800 	/* Free up unsent records in tx_list. First, free the
1801 	 * partially sent record, if any, at the head of tx_list.
1802 	 */
1803 if (tls_ctx->partially_sent_record) {
1804 struct scatterlist *sg = tls_ctx->partially_sent_record;
1805
1806 while (1) {
1807 put_page(sg_page(sg));
1808 sk_mem_uncharge(sk, sg->length);
1809
1810 if (sg_is_last(sg))
1811 break;
1812 sg++;
1813 }
1814
1815 tls_ctx->partially_sent_record = NULL;
1816
1817 rec = list_first_entry(&ctx->tx_list,
1818 struct tls_rec, list);
1819 list_del(&rec->list);
1820 sk_msg_free(sk, &rec->msg_plaintext);
1821 kfree(rec);
1822 }
1823
1824 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
1825 list_del(&rec->list);
1826 sk_msg_free(sk, &rec->msg_encrypted);
1827 sk_msg_free(sk, &rec->msg_plaintext);
1828 kfree(rec);
1829 }
1830
1831 crypto_free_aead(ctx->aead_send);
1832 tls_free_open_rec(sk);
1833
1834 kfree(ctx);
1835 }
1836
1837 void tls_sw_release_resources_rx(struct sock *sk)
1838 {
1839 struct tls_context *tls_ctx = tls_get_ctx(sk);
1840 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1841
1842 if (ctx->aead_recv) {
1843 kfree_skb(ctx->recv_pkt);
1844 ctx->recv_pkt = NULL;
1845 crypto_free_aead(ctx->aead_recv);
1846 strp_stop(&ctx->strp);
1847 write_lock_bh(&sk->sk_callback_lock);
1848 sk->sk_data_ready = ctx->saved_data_ready;
1849 write_unlock_bh(&sk->sk_callback_lock);
1850 release_sock(sk);
1851 strp_done(&ctx->strp);
1852 lock_sock(sk);
1853 }
1854 }
1855
1856 void tls_sw_free_resources_rx(struct sock *sk)
1857 {
1858 struct tls_context *tls_ctx = tls_get_ctx(sk);
1859 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1860
1861 tls_sw_release_resources_rx(sk);
1862
1863 kfree(ctx);
1864 }
1865
1866 /* The work handler to transmit the encrypted records in tx_list */
1867 static void tx_work_handler(struct work_struct *work)
1868 {
1869 struct delayed_work *delayed_work = to_delayed_work(work);
1870 struct tx_work *tx_work = container_of(delayed_work,
1871 struct tx_work, work);
1872 struct sock *sk = tx_work->sk;
1873 struct tls_context *tls_ctx = tls_get_ctx(sk);
1874 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1875
1876 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
1877 return;
1878
1879 lock_sock(sk);
1880 tls_tx_records(sk, -1);
1881 release_sock(sk);
1882 }
1883
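/* Set up software TLS state for one direction of the socket: allocate the
 * tx or rx context, derive the IV, record sequence number and overhead
 * sizes from the AES-GCM-128 crypto_info, allocate and key the AEAD
 * transform and, for rx, attach the strparser and hook sk_data_ready.
 */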
1884 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1885 {
1886 struct tls_crypto_info *crypto_info;
1887 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
1888 struct tls_sw_context_tx *sw_ctx_tx = NULL;
1889 struct tls_sw_context_rx *sw_ctx_rx = NULL;
1890 struct cipher_context *cctx;
1891 struct crypto_aead **aead;
1892 struct strp_callbacks cb;
1893 u16 nonce_size, tag_size, iv_size, rec_seq_size;
1894 char *iv, *rec_seq;
1895 int rc = 0;
1896
1897 if (!ctx) {
1898 rc = -EINVAL;
1899 goto out;
1900 }
1901
1902 if (tx) {
1903 if (!ctx->priv_ctx_tx) {
1904 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
1905 if (!sw_ctx_tx) {
1906 rc = -ENOMEM;
1907 goto out;
1908 }
1909 ctx->priv_ctx_tx = sw_ctx_tx;
1910 } else {
1911 sw_ctx_tx =
1912 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
1913 }
1914 } else {
1915 if (!ctx->priv_ctx_rx) {
1916 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
1917 if (!sw_ctx_rx) {
1918 rc = -ENOMEM;
1919 goto out;
1920 }
1921 ctx->priv_ctx_rx = sw_ctx_rx;
1922 } else {
1923 sw_ctx_rx =
1924 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
1925 }
1926 }
1927
1928 if (tx) {
1929 crypto_init_wait(&sw_ctx_tx->async_wait);
1930 crypto_info = &ctx->crypto_send.info;
1931 cctx = &ctx->tx;
1932 aead = &sw_ctx_tx->aead_send;
1933 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
1934 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
1935 sw_ctx_tx->tx_work.sk = sk;
1936 } else {
1937 crypto_init_wait(&sw_ctx_rx->async_wait);
1938 crypto_info = &ctx->crypto_recv.info;
1939 cctx = &ctx->rx;
1940 aead = &sw_ctx_rx->aead_recv;
1941 }
1942
1943 switch (crypto_info->cipher_type) {
1944 case TLS_CIPHER_AES_GCM_128: {
1945 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1946 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
1947 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1948 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
1949 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
1950 rec_seq =
1951 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
1952 gcm_128_info =
1953 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
1954 break;
1955 }
1956 default:
1957 rc = -EINVAL;
1958 goto free_priv;
1959 }
1960
1961 /* Sanity-check the IV size for stack allocations. */
1962 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
1963 rc = -EINVAL;
1964 goto free_priv;
1965 }
1966
1967 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
1968 cctx->tag_size = tag_size;
1969 cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
1970 cctx->iv_size = iv_size;
1971 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1972 GFP_KERNEL);
1973 if (!cctx->iv) {
1974 rc = -ENOMEM;
1975 goto free_priv;
1976 }
1977 memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1978 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
1979 cctx->rec_seq_size = rec_seq_size;
1980 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
1981 if (!cctx->rec_seq) {
1982 rc = -ENOMEM;
1983 goto free_iv;
1984 }
1985
1986 if (!*aead) {
1987 *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
1988 if (IS_ERR(*aead)) {
1989 rc = PTR_ERR(*aead);
1990 *aead = NULL;
1991 goto free_rec_seq;
1992 }
1993 }
1994
1995 ctx->push_pending_record = tls_sw_push_pending_record;
1996
1997 rc = crypto_aead_setkey(*aead, gcm_128_info->key,
1998 TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1999 if (rc)
2000 goto free_aead;
2001
2002 rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
2003 if (rc)
2004 goto free_aead;
2005
2006 if (sw_ctx_rx) {
2007 /* Set up strparser */
2008 memset(&cb, 0, sizeof(cb));
2009 cb.rcv_msg = tls_queue;
2010 cb.parse_msg = tls_read_size;
2011
2012 strp_init(&sw_ctx_rx->strp, sk, &cb);
2013
2014 write_lock_bh(&sk->sk_callback_lock);
2015 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
2016 sk->sk_data_ready = tls_data_ready;
2017 write_unlock_bh(&sk->sk_callback_lock);
2018
2019 strp_check_rcv(&sw_ctx_rx->strp);
2020 }
2021
2022 goto out;
2023
2024 free_aead:
2025 crypto_free_aead(*aead);
2026 *aead = NULL;
2027 free_rec_seq:
2028 kfree(cctx->rec_seq);
2029 cctx->rec_seq = NULL;
2030 free_iv:
2031 kfree(cctx->iv);
2032 cctx->iv = NULL;
2033 free_priv:
2034 if (tx) {
2035 kfree(ctx->priv_ctx_tx);
2036 ctx->priv_ctx_tx = NULL;
2037 } else {
2038 kfree(ctx->priv_ctx_rx);
2039 ctx->priv_ctx_rx = NULL;
2040 }
2041 out:
2042 return rc;
2043 }