/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

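/* tls_device_lock protects the three lists above.  Freeing a context is
 * deferred to tls_device_gc_work because the last reference may be
 * dropped from the socket's sk_destruct, which can run in atomic
 * context, while the driver's tls_dev_del() callback may sleep.
 * (Explanatory note, not from the original sources.)
 */
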
static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

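/* Runs from the system workqueue: detach each queued context from its
 * netdev (releasing the device reference taken at attach time) and
 * free it.
 */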
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

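/* clean_acked_data hook, invoked by TCP when the peer ACKs new data:
 * records whose end_seq is now at or below acked_seq can never be
 * retransmitted, so drop them and advance unacked_record_sn.
 */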
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

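/* Handle a scheduled TX resync: mark the tail skb as end-of-record and
 * hand the current TCP sequence number and record sequence number to the
 * driver, under device_offload_lock so the netdev cannot be detached
 * underneath us.
 */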
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

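/* Close the open record: stamp its end TCP sequence number, publish it
 * on records_list for tls_get_record(), load its frags into the shared
 * scatterlist and push them to TCP via tls_push_sg().
 */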
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

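/* Copy payload while bypassing the CPU cache for whole cache lines,
 * since the plaintext will be read by the NIC and not by the CPU again.
 * pre_copy is the number of bytes up to the next SMP_CACHE_BYTES
 * boundary; e.g. (illustration, not from the original sources) with
 * SMP_CACHE_BYTES == 64 and an addr ending in 0x30, pre_copy == 0x10,
 * so 16 bytes are copied normally, full cache lines then go through
 * copy_from_iter_nocache(), and a tail shorter than a cache line is
 * copied normally again.
 */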
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

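/* Main TX loop: keep an "open" record, copy payload into it from
 * msg_iter, and close/push it to TCP whenever it reaches
 * max_open_record_len, runs out of fragment slots, or the caller has no
 * more data to send without MSG_MORE.
 */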
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy) {
			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy, msg_iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter msg_iter;
	char *kaddr;
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

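/* Look up the offload record containing TCP sequence number 'seq' for
 * (re)transmission.  Exported for drivers to call from the xmit path;
 * retransmit_hint caches the last match so the common case does not
 * rescan the whole list.
 */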
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 *  And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

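/* The driver posts RX resync requests through a single 64-bit word.
 * Judging by the unpacking below, bits 63:32 carry the requested TCP
 * sequence number, bits 31:16 a 16-bit length used to derive req_end,
 * and the low bits the RESYNC_REQ_ASYNC flag, i.e. a driver would build
 * it roughly as ((u64)seq << 32) | (len << 16) | RESYNC_REQ_ASYNC
 * (illustration, not from the original sources).  The
 * atomic64_try_cmpxchg() against the snapshot we read is what consumes
 * a request exactly once.
 */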
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

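/* Entry point from the RX parser for every new record: depending on the
 * resync strategy the driver chose, either answer a pending driver
 * request (DRIVER_REQ / DRIVER_REQ_ASYNC) or honour a core-scheduled
 * next-header hint (CORE_NEXT_HINT), then tell the device where the
 * next record starts.
 */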
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

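/* CORE_NEXT_HINT flow control: count fully-failed records and only ask
 * for a resync after an exponentially growing number of failures, so a
 * stream that never resynchronizes does not flood the device with
 * requests.
 */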
static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

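/* Re-encrypt a partially-decrypted record so the software path can
 * decrypt it as a whole.  AES-GCM is a counter-mode cipher, so running
 * "decrypt" over bytes that are already plaintext XORs them with the
 * keystream, i.e. turns them back into ciphertext: decrypt_skb() fills
 * buf accordingly, and skb_store_bits() writes buf back only over the
 * fragments the device had already decrypted.  (Explanatory note, not
 * from the original sources.)
 */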
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

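/* Called for every record on the device-offloaded RX path: decide
 * whether the record arrived fully decrypted, fully encrypted (fall
 * back to software decryption), or mixed (re-encrypt, then
 * software-decrypt).
 */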
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	ctx->sw.decrypted |= is_decrypted;

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return 0;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, skb);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

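/* Install TX offload on a connected socket: validate the cipher
 * (TLS 1.2 AES-GCM-128 only), build the offload context and the start
 * marker record, initialize the software fallback, and finally hand the
 * crypto state to the driver via tls_dev_add() under
 * device_offload_lock.
 */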
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record)
		return -ENOMEM;

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto disable_cad;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_devices's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
disable_cad:
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_devices's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

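/* NETDEV_DOWN handler: detach every context bound to this netdev and
 * switch the affected sockets to the software fallback, carefully
 * ordered against in-flight packets and concurrent tls_dev_add() calls.
 */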
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_sk_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		WRITE_ONCE(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 */
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

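/* Netdevice notifier: sanity-check tlsdev_ops whenever TLS features are
 * advertised (returning NOTIFY_BAD vetoes the register/feature change),
 * and tear down offloads on NETDEV_DOWN.
 */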
static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (netif_is_bond_master(dev))
			return NOTIFY_DONE;
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call	= tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}