/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/splice.h>
#include <crypto/aead.h>
#include <net/strparser.h>
#include <net/tls.h>

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start,
						chunk, recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;	/* tag (16 bytes) + content-type (1 byte) */

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

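/* In TLS 1.3 the decrypted record body is TLSInnerPlaintext (RFC 8446,
 * section 5.2): the application data, followed by a single byte carrying
 * the real content type, followed by optional zero padding:
 *
 *     +------------------- ... ------+--------------+-----------+
 *     |           content            | content-type | 0 0 ... 0 |
 *     +------------------- ... ------+--------------+-----------+
 *
 * which is why padding_length() above scans the record backwards for the
 * first non-zero byte: everything after it is padding, and the byte
 * itself is the real record type.
 */
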
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(skb->sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted inplace */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}

static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

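/* Note on the async accounting above: every request submitted with
 * tls_decrypt_done as its callback bumps ctx->decrypt_pending first;
 * the callback drops it again and completes ctx->async_wait only once
 * the counter reaches zero while a waiter has set ctx->async_notify.
 * tls_sw_recvmsg() relies on this to drain all in-flight decryptions
 * before returning to userspace.
 */
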
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

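/* A sketch of the buffer sharing done by tls_clone_plaintext_msg():
 * msg_encrypted is laid out as
 *
 *     [ TLS header/prepend | payload area | AEAD tag ]
 *
 * and the clone adds references to the payload pages of msg_encrypted
 * at the tail of msg_plaintext, skipping prepend_size plus the bytes
 * already present. Both sk_msgs then address the payload at identical
 * page offsets, so plaintext copied in from userspace lands directly
 * in the encryption destination and the AEAD can run in place.
 */
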
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

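/* Layout note: sg_aead_in/sg_aead_out are deliberately two-entry
 * tables. Entry 0 covers rec->aad_space (the additional authenticated
 * data), and entry 1 is left unterminated so tls_push_record() can
 * sg_chain() it to the head of the plaintext or ciphertext sk_msg.
 */
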
static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

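/* tx_list keeps records in submission order. Encryptions may complete
 * out of order, but tls_tx_records() only ever transmits a prefix of
 * the list (it stops at the first record that is not tx_ready), so the
 * record sequence on the wire always matches the sequence numbers that
 * were used for the AEAD nonces.
 */
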
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

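/* Nonce construction used above, as a sketch: rec->iv_data holds
 * salt || IV (plus a leading B0-flags byte for CCM). For TLS 1.3 and
 * ChaCha20-Poly1305, xor_iv_with_seq() XORs the 8-byte record sequence
 * number into the IV portion, per RFC 8446 section 5.3; TLS 1.2 AES-GCM
 * instead sends an explicit per-record nonce in the record itself.
 */
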
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
 out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	ssize_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}

static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & MSG_SENDPAGE_NOTLAST);
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
retry:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		goto retry;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     bool nonblock, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			__strp_unpause(&ctx->strp);
			if (ctx->recv_pkt)
				return ctx->recv_pkt;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if (nonblock || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc, bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size +
			     prot->tail_size;
	int iv_offset = 0;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
				 rxm->full_len - prot->prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + prot->aad_size;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + prot->aad_size;

	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + iv_offset + prot->salt_size,
			    prot->iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
		memcpy(iv + iv_offset, tls_ctx->rx.iv,
		       crypto_aead_ivsize(ctx->aead_recv));
	else
		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

	xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, ctx->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, prot->aad_size);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = data_len;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, async);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

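/* decrypt_internal() in one picture: a single kmalloc() carries
 *
 *     aead_req || sgin[n_sgin] || sgout[n_sgout] || aad || iv
 *
 * so one allocation (and one kfree()) serves the whole request.
 * Zero-copy decryption directly into the reader's buffers is only
 * attempted when the caller supplied out_iov or out_sg; otherwise
 * sgout aliases sgin and the record is decrypted in place inside the
 * skb.
 */
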
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc,
			      bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int pad, err = 0;

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
			if (err < 0)
				return err;
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted) {
			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
					       async);
			if (err < 0) {
				if (err == -EINPROGRESS)
					tls_advance_record_sn(sk, prot,
							      &tls_ctx->rx);
				else if (err == -EBADMSG)
					TLS_INC_STATS(sock_net(sk),
						      LINUX_MIB_TLSDECRYPTERROR);
				return err;
			}
		} else {
			*zc = false;
		}

		pad = padding_length(ctx, prot, skb);
		if (pad < 0)
			return pad;

		rxm->full_len -= pad;
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
		ctx->decrypted = 1;
		ctx->saved_data_ready(sk);
	} else {
		*zc = false;
	}

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		consume_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}

/* This function traverses the rx_list in the tls receive context to copy
 * the decrypted records into the buffer provided by the caller when zero
 * copy is not true. Further, the records are removed from the rx_list if
 * it is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   bool *cmsg,
			   size_t skip,
			   size_t len,
			   bool zc,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	u8 ctrl = *control;
	u8 msgc = *cmsg;
	struct tls_msg *tlm;
	ssize_t copied = 0;

	/* Set the record type in 'control' if caller didn't pass it */
	if (!ctrl && skb) {
		tlm = tls_msg(skb);
		ctrl = tlm->control;
	}

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		/* Set record type if not already done. For a non-data record,
		 * do not proceed if record type could not be copied.
		 */
		if (!msgc) {
			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					    sizeof(ctrl), &ctrl);
			msgc = true;
			if (ctrl != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC)
					return -EIO;

				*cmsg = msgc;
			}
		}

		if (!zc || (rxm->full_len - skip) > len) {
			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
							msg, chunk);
			if (err < 0)
				return err;
		}

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}

	*control = ctrl;
	return copied;
}

int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct sk_psock *psock;
	unsigned char control = 0;
	ssize_t decrypted = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool bpf_strp_enabled;
	int num_async = 0;
	int pending;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	lock_sock(sk);
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
			      is_peek);
	if (err < 0) {
		tls_err_abort(sk, err);
		goto end;
	} else {
		copied = err;
	}

	if (len <= copied)
		goto recv_end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
		bool retain_skb = false;
		bool async_capable;
		bool async = false;
		bool zc = false;
		int to_decrypt;
		int chunk = 0;

		skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
		if (!skb) {
			if (psock) {
				int ret = sk_msg_recvmsg(sk, psock, msg, len,
							 flags);

				if (ret > 0) {
					decrypted += ret;
					len -= ret;
					continue;
				}
			}
			goto recv_end;
		} else {
			tlm = tls_msg(skb);
			if (prot->version == TLS_1_3_VERSION)
				tlm->control = 0;
			else
				tlm->control = ctx->control;
		}

		rxm = strp_msg(skb);

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (to_decrypt <= len && !is_kvec && !is_peek &&
		    ctx->control == TLS_RECORD_TYPE_DATA &&
		    prot->version != TLS_1_3_VERSION &&
		    !bpf_strp_enabled)
			zc = true;

		/* Do not use async mode if record is non-data */
		if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			async_capable = ctx->async_capable;
		else
			async_capable = false;

		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
					 &chunk, &zc, async_capable);
		if (err < 0 && err != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			goto recv_end;
		}

		if (err == -EINPROGRESS) {
			async = true;
			num_async++;
		} else if (prot->version == TLS_1_3_VERSION) {
			tlm->control = ctx->control;
		}

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */

		if (!control)
			control = tlm->control;
		else if (control != tlm->control)
			goto recv_end;

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(control), &control);
			cmsg = true;
			if (control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		}

		if (async)
			goto pick_next_record;

		if (!zc) {
			if (bpf_strp_enabled) {
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					ctx->recv_pkt = NULL;
					__strp_unpause(&ctx->strp);
					continue;
				}
			}

			if (rxm->full_len > len) {
				retain_skb = true;
				chunk = len;
			} else {
				chunk = rxm->full_len;
			}

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto recv_end;

			if (!is_peek) {
				rxm->offset = rxm->offset + chunk;
				rxm->full_len = rxm->full_len - chunk;
			}
		}

pick_next_record:
		if (chunk > len)
			chunk = len;

		decrypted += chunk;
		len -= chunk;

		/* For async or peek case, queue the current skb */
		if (async || is_peek || retain_skb) {
			skb_queue_tail(&ctx->rx_list, skb);
			skb = NULL;
		}

		if (tls_sw_advance_skb(sk, skb, chunk)) {
			/* Return full control message to
			 * userspace before trying to parse
			 * another message type
			 */
			msg->msg_flags |= MSG_EOR;
			if (control != TLS_RECORD_TYPE_DATA)
				goto recv_end;
		} else {
			break;
		}
	}

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		spin_lock_bh(&ctx->decrypt_compl_lock);
		ctx->async_notify = true;
		pending = atomic_read(&ctx->decrypt_pending);
		spin_unlock_bh(&ctx->decrypt_compl_lock);
		if (pending) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
				decrypted = 0;
				goto end;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}

		/* There can be no concurrent accesses, since we have no
		 * pending decrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
					      decrypted, false, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
					      decrypted, true, is_peek);
		if (err < 0) {
			tls_err_abort(sk, err);
			copied = 0;
			goto end;
		}
	}

	copied += decrypted;

end:
	release_sock(sk);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool zc = false;
	int err = 0;
	long timeo;
	int chunk;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);

	skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
	if (!skb)
		goto splice_read_end;

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);

		/* splice does not support reading control messages */
		if (ctx->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto splice_read_end;
		}

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = 1;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

bool tls_sw_stream_read(const struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || ctx->recv_pkt ||
		!skb_queue_empty(&ctx->rx_list);
}

static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + rxm->offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

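/* TLS record header parsed above, byte by byte:
 *
 *	header[0]      record type (stored in ctx->control)
 *	header[1..2]   legacy protocol version (3.3 on the wire for both
 *	               TLS 1.2 and TLS 1.3)
 *	header[3..4]   payload length, big endian - hence
 *	               data_len = (header[4] & 0xFF) | (header[3] << 8)
 */
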
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = 0;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;

	strp_data_ready(&ctx->strp);

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}

void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}

void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int pending;

	/* Wait for any pending async encryptions to complete */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;
	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}

void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_done(&ctx->strp);
}

void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}

/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;
	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}

void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);

	strp_check_rcv(&rx_ctx->strp);
}

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
	struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		gcm_256_info =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
		ccm_128_info =
			(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	case TLS_CIPHER_CHACHA20_POLY1305: {
		chacha20_poly1305_info = (void *)crypto_info;
		nonce_size = 0;
		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
		iv = chacha20_poly1305_info->iv;
		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
		rec_seq = chacha20_poly1305_info->rec_seq;
		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
		key = chacha20_poly1305_info->key;
		salt = chacha20_poly1305_info->salt;
		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
		cipher_name = "rfc7539(chacha20,poly1305)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		if (crypto_info->version == TLS_1_3_VERSION)
			sw_ctx_rx->async_capable = 0;
		else
			sw_ctx_rx->async_capable =
				!!(tfm->__crt_alg->cra_flags &
				   CRYPTO_ALG_ASYNC);

		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}