/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");
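
/* Illustrative userspace usage (a sketch, not part of the kernel sources):
 * the "tls" ULP is attached to a connected TCP socket and each direction is
 * then configured through SOL_TLS, roughly:
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version     = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(key, iv, salt and rec_seq are filled in from the TLS handshake)
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */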

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base);
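
/* update_sk_prot() installs the proto and proto_ops variants that match the
 * socket's address family and its current TX/RX configuration; WRITE_ONCE()
 * pairs with lockless readers of sk->sk_prot and sk->sk_socket->ops.
 */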

void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	WRITE_ONCE(sk->sk_prot,
		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
	WRITE_ONCE(sk->sk_socket->ops,
		   &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
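
/* Push a scatterlist of already-protected record data into the TCP layer.
 * If TCP accepts only part of it, the remaining scatterlist and offset are
 * parked in the context as a partially sent record so a later call can
 * resume where this one stopped.
 */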

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
	ctx->partially_sent_record = NULL;
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages call lower protocol write space handler
	 * to ensure we wake up any waiting operations there. For example
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk:  socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}
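
/* ->close() replacement installed on TLS sockets: tear down the active
 * TX/RX configuration, restore the original proto callbacks, then hand
 * off to the underlying transport's close.
 */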

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}
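
/* Report one direction's (TX or RX) crypto parameters back to userspace:
 * just the base tls_crypto_info when the buffer is small, or the full
 * cipher-specific structure (including IV and record sequence) when the
 * caller provides enough room.
 */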

static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
				  int __user *optlen, int tx)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	struct cipher_context *cctx;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
	}

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		rc = do_tls_getsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}
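
/* Configure one direction (TX or RX) from a userspace tls_crypto_info:
 * validate the TLS version and cipher against the other direction, copy
 * in the cipher-specific parameters, then try device offload first and
 * fall back to the software implementation.
 */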

static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than one time */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	case TLS_CIPHER_CHACHA20_POLY1305:
		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_sockptr_offset(crypto_info + 1, optval,
				      sizeof(*crypto_info),
				      optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 1);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 0);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			conf = TLS_SW;
		}
		tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			     unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	ctx->sk_proto = READ_ONCE(sk->sk_prot);

	return ctx;
}
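
/* The per-family tables are built by copying the base TCP proto/proto_ops
 * and overriding only the callbacks that differ for a given TX/RX
 * configuration; untouched entries keep the base behaviour.
 */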

static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			    const struct proto_ops *base)
{
	ops[TLS_BASE][TLS_BASE] = *base;

	ops[TLS_SW][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_SW][TLS_BASE].sendpage_locked	= tls_sw_sendpage_locked;

	ops[TLS_BASE][TLS_SW] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_BASE][TLS_SW].splice_read	= tls_sw_splice_read;

	ops[TLS_SW][TLS_SW] = ops[TLS_SW][TLS_BASE];
	ops[TLS_SW][TLS_SW].splice_read		= tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	ops[TLS_HW][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_HW][TLS_BASE].sendpage_locked	= NULL;

	ops[TLS_HW][TLS_SW] = ops[TLS_BASE][TLS_SW];
	ops[TLS_HW][TLS_SW].sendpage_locked	= NULL;

	ops[TLS_BASE][TLS_HW] = ops[TLS_BASE][TLS_SW];

	ops[TLS_SW][TLS_HW] = ops[TLS_SW][TLS_SW];

	ops[TLS_HW][TLS_HW] = ops[TLS_HW][TLS_SW];
	ops[TLS_HW][TLS_HW].sendpage_locked	= NULL;
#endif
#ifdef CONFIG_TLS_TOE
	ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
#endif
}
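
/* Build the tables for this socket's address family lazily, and rebuild
 * them if the underlying tcp proto pointer changes (e.g. for IPv6, whose
 * tcpv6_prot can live in a module), guarded by the per-family mutex.
 */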

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct proto *prot = READ_ONCE(sk->sk_prot);

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], prot);
			build_proto_ops(tls_proto_ops[TLSV6],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv6_prot, prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], prot);
			build_proto_ops(tls_proto_ops[TLSV4],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv4_prot, prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage		= tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].sock_is_readable	= tls_sw_sock_is_readable;
	prot[TLS_BASE][TLS_SW].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].sock_is_readable	= tls_sw_sock_is_readable;
	prot[TLS_SW][TLS_SW].close		= tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage		= tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage		= tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash		= tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash	= tls_toe_unhash;
#endif
}
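
/* ULP ->init() hook, reached via setsockopt(SOL_TCP, TCP_ULP, "tls"). */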

static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

static void tls_update(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk))
{
	struct tls_context *ctx;

	ctx = tls_get_ctx(sk);
	if (likely(ctx)) {
		ctx->sk_write_space = write_space;
		ctx->sk_proto = p;
	} else {
		/* Pairs with lockless read in sk_clone_lock(). */
		WRITE_ONCE(sk->sk_prot, p);
		sk->sk_write_space = write_space;
	}
}
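
/* Fill the INET_ULP_INFO_TLS attribute nest for inet_diag socket dumps. */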

static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

static size_t tls_get_info_size(const struct sock *sk)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		0;

	return size;
}

static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}

static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
	.update			= tls_update,
	.get_info		= tls_get_info,
	.get_info_size		= tls_get_info_size,
};

static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	tls_device_init();
	tcp_register_ulp(&tcp_tls_ulp_ops);
	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_device_cleanup();
	unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);