/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
static void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}
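
/* A minimal sketch (not part of this file) of how the TX resync above is
 * triggered: a driver that falls out of sync sets TLS_TX_SYNC_SCHED through
 * the tls_offload_tx_resync_request() helper in net/tls.h, and the next
 * tls_push_record() delivers the fresh state via ->tls_dev_resync().
 * "hypothetical_drv_handle_ooo" is a made-up name.
 *
 *	static void hypothetical_drv_handle_ooo(struct sock *sk)
 *	{
 *		// Ask the stack to call ->tls_dev_resync() for this socket
 *		// at the next record boundary (tcp_sk(sk)->write_seq).
 *		tls_offload_tx_resync_request(sk);
 *	}
 */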

static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - prot->prepend_size,
			 record_type,
			 prot->version);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}
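
/* Worked example (TLS 1.2 with AES-GCM-128, the only cipher this file
 * offloads): each record pushed above is laid out as
 *
 *	+---------------+----------------+------------+---------+
 *	| 5B TLS header | 8B explicit IV | ciphertext | 16B tag |
 *	+---------------+----------------+------------+---------+
 *
 * so prot->prepend_size = TLS_HEADER_SIZE (5) + nonce_size (8) = 13 and
 * prot->tag_size = 16. tls_fill_prepend() writes the header and IV; the
 * tag frag is a dummy because the device computes the real tag.
 */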

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       prot->prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					   pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						!!record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}
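
/* Usage sketch from user space (illustrative only; error handling and key
 * material omitted): once kTLS is enabled on a connected TCP socket and the
 * route resolves to a netdev with NETIF_F_HW_TLS_TX, plain send() lands in
 * tls_device_sendmsg() above.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *		// .iv, .key, .salt, .rec_seq come from the TLS handshake
 *	};
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	send(fd, buf, len, 0);
 */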

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);
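
/* Illustrative driver-side lookup (a sketch; "hypothetical_drv_find_record"
 * is a made-up name): on retransmission a driver maps the TCP sequence
 * number back to the TLS record covering it, taking the offload context
 * lock the same way tls_icsk_clean_acked() does.
 *
 *	static struct tls_record_info *
 *	hypothetical_drv_find_record(struct tls_offload_context_tx *tx_ctx,
 *				     u32 tcp_seq, u64 *rcd_sn)
 *	{
 *		struct tls_record_info *record;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&tx_ctx->lock, flags);
 *		record = tls_get_record(tx_ctx, tcp_seq, rcd_sn);
 *		spin_unlock_irqrestore(&tx_ctx->lock, flags);
 *		return record;
 *	}
 */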

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct net_device *netdev;

	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
		return;
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	struct tls_prot_info *prot;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		if (tcp_inq(sk) > rcd_len)
			return;

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}
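
/* A minimal sketch (not part of this file) of the DRIVER_REQ flow consumed
 * above: a device that sees a record header it cannot decrypt reports the
 * header's TCP sequence via the tls_offload_rx_resync_request() helper in
 * net/tls.h, which stores ((u64)ntohl(seq) << 32) | 1 in rx_ctx->resync_req.
 * "hypothetical_drv_rx_resync" is a made-up name.
 *
 *	static void hypothetical_drv_rx_resync(struct sock *sk,
 *					       __be32 hdr_tcp_seq)
 *	{
 *		tls_offload_rx_resync_request(sk, hdr_tcp_seq);
 *	}
 */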

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data, not the auth tag */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}

int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	tls_device_attach(ctx, sk, netdev);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}
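
/* Illustrative driver side (a sketch against the tlsdev_ops layout in
 * net/tls.h; all "hypothetical_drv_*" names are made up): a device
 * advertises NETIF_F_HW_TLS_TX and supplies the callbacks invoked above,
 * and its xmit path checks tls_is_sk_tx_device_offloaded() before touching
 * the TLS state, matching the smp_store_release() pairing noted above.
 *
 *	static const struct tlsdev_ops hypothetical_drv_tlsdev_ops = {
 *		.tls_dev_add	= hypothetical_drv_tls_add,
 *		.tls_dev_del	= hypothetical_drv_tls_del,
 *		.tls_dev_resync	= hypothetical_drv_tls_resync,
 *	};
 *
 *	// probe:	netdev->tlsdev_ops = &hypothetical_drv_tlsdev_ops;
 *	//		netdev->features |= NETIF_F_HW_TLS_TX;
 *	// xmit:	if (sk && tls_is_sk_tx_device_offloaded(sk))
 *	//			priv = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 */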

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_netdev;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		WRITE_ONCE(ctx->netdev, NULL);
		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
			usleep_range(10, 200);
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}