/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/netdevice.h>

#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13
#define TLS_DEVICE_NAME_MAX		32

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8
/* For AES-CCM, the full 16 bytes of IV are made of four fields of the
 * given sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The width of the 'length' field is encoded in 'b0' as '(length width - 1)',
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
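
/* A minimal sketch (illustrative, not part of this header) of how the
 * 16-byte AES-CCM IV described above could be laid out; 'salt' and
 * 'rec_seq' are hypothetical names for the implicit/explicit nonce parts:
 *
 *	u8 iv[MAX_IV_SIZE];
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;		(length width 3, minus 1)
 *	memcpy(iv + 1, salt, 4);		(implicit nonce)
 *	memcpy(iv + 5, rec_seq, 8);		(explicit nonce)
 *	(the trailing 3 length bytes are filled in by the CCM code)
 */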
/*
 * This structure defines the routines for the Inline TLS driver.
 * The following routines are optional and filled with a
 * null pointer if not defined.
 *
 * @name: The name of the registered Inline TLS device
 * @dev_list: Inline TLS device list
 * int (*feature)(struct tls_device *device);
 *     Called to return Inline TLS driver capability
 *
 * int (*hash)(struct tls_device *device, struct sock *sk);
 *     This function sets up the Inline driver for a listening socket and
 *     programs device specific functionality as required
 *
 * void (*unhash)(struct tls_device *device, struct sock *sk);
 *     This function cleans up the listen state set by the Inline TLS driver
 *
 * void (*release)(struct kref *kref);
 *     Release the registered device and allocated resources
 * @kref: Number of references to tls_device
 */
struct tls_device {
	char name[TLS_DEVICE_NAME_MAX];
	struct list_head dev_list;
	int  (*feature)(struct tls_device *device);
	int  (*hash)(struct tls_device *device, struct sock *sk);
	void (*unhash)(struct tls_device *device, struct sock *sk);
	void (*release)(struct kref *kref);
	struct kref kref;
};
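
/* A minimal sketch (illustrative, not part of this header) of an Inline
 * TLS driver registering itself; 'foo_tls_feature' and 'foo_tls_dev' are
 * hypothetical names, tls_register_device() is declared below:
 *
 *	static int foo_tls_feature(struct tls_device *device)
 *	{
 *		return NETIF_F_HW_TLS_RECORD;
 *	}
 *
 *	static struct tls_device foo_tls_dev = {
 *		.name	 = "foo-inline-tls",
 *		.feature = foo_tls_feature,
 *	};
 *
 *	tls_register_device(&foo_tls_dev);
 */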
/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records
 * are stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;
	int inplace_crypto;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};
struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};
struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	bool async_notify;
};
struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};
struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
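
/* Illustrative note: the core allocates the TX offload context with the
 * driver area included, e.g.
 *
 *	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
 *
 * so driver_state[] gives the driver TLS_DRIVER_STATE_SIZE_TX usable bytes.
 */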
enum tls_context_flags {
	TLS_RX_SYNC_RUNNING = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomically set.
	 */
	TLS_TX_SYNC_SCHED = 1,
};
struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};
struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;

	void (*sk_destruct)(struct sock *sk);
	void (*sk_proto_close)(struct sock *sk, long timeout);

	int  (*setsockopt)(struct sock *sk, int level,
			   int optname, char __user *optval,
			   unsigned int optlen);
	int  (*getsockopt)(struct sock *sk, int level,
			   int optname, char __user *optval,
			   int __user *optlen);
	int  (*hash)(struct sock *sk);
	void (*unhash)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
};
enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};
struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
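
/* A minimal sketch (illustrative, not part of this header) of the shape of
 * a driver's tlsdev_ops; the 'foo_ktls_*' callbacks are hypothetical:
 *
 *	static const struct tlsdev_ops foo_tlsdev_ops = {
 *		.tls_dev_add	= foo_ktls_add,
 *		.tls_dev_del	= foo_ktls_del,
 *		.tls_dev_resync	= foo_ktls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &foo_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX;
 */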
enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128
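
/* Illustrative note: with CORE_NEXT_HINT the core tolerates a growing run
 * of decryption failures before asking the driver for the next record
 * header, starting at TLS_DEVICE_RESYNC_NH_START_IVAL failures and
 * doubling the interval up to TLS_DEVICE_RESYNC_NH_MAX_IVAL.
 */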
struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
void tls_ctx_free(struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
void tls_device_free_resources_tx(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
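
/* A minimal sketch (illustrative, not part of this header) of a driver TX
 * path looking up the record that covers a retransmitted TCP sequence
 * number; 'tx_ctx' and 'tcp_seq' are hypothetical locals:
 *
 *	u64 record_sn;
 *	struct tls_record_info *record;
 *
 *	record = tls_get_record(tx_ctx, tcp_seq, &record_sn);
 *	if (record && !tls_record_is_start_marker(record))
 *		(re-encrypt starting at tls_record_start_seq(record))
 */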
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
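
/* A minimal sketch (illustrative, not part of this header): a driver's
 * ndo_start_xmit can use the helper above to steer TLS skbs to its offload
 * queue; 'foo_xmit_tls' is a hypothetical driver function:
 *
 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *		return foo_xmit_tls(dev, skb);
 */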
static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}
static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}
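
/* Worked example (illustrative): incrementing the 8-byte sequence
 * 00 00 00 00 00 00 00 ff yields 00 00 00 00 00 00 01 00; the function
 * returns true only if every byte wrapped, i.e. the counter overflowed.
 */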
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ulp_data;
}
static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}
static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* we cover nonce explicit here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MINOR;
	buf[2] = TLS_1_2_VERSION_MAJOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
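
/* Worked example (illustrative): a 100-byte TLS 1.2 application-data
 * record with an 8-byte explicit nonce and a 16-byte tag gives
 * pkt_len = 100 + 16 + 8 = 124, so the prepend is:
 *
 *	buf[0] = 0x17			(application data)
 *	buf[1] = buf[2] = 0x3		(TLS 1.2 version bytes)
 *	buf[3] = 0x00, buf[4] = 0x7c	(length 124)
 */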
static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}
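
/* Worked example (illustrative): for TLS 1.2 the AAD laid out above is
 *
 *	rec_seq(8) || type(1) || major(1) || minor(1) || length(2)
 *
 * i.e. 13 bytes, which is exactly TLS_AAD_SPACE_SIZE.
 */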
static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}
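
/* Illustrative note: TLS 1.3 carries no explicit nonce on the wire; the
 * per-record nonce is derived by XORing the 8-byte record sequence into
 * the tail of the static IV, which is what the loop above implements.
 */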
static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif
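
/* A minimal sketch (illustrative, not part of this header) of a driver
 * stashing per-connection state in the reserved driver_state area;
 * 'struct foo_tls_state' is hypothetical and must fit within
 * TLS_DRIVER_STATE_SIZE_TX:
 *
 *	struct foo_tls_state {
 *		u32 hw_flow_id;
 *	};
 *
 *	struct foo_tls_state *st =
 *		tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *
 *	st->hw_flow_id = flow_id;
 */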
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}
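
/* Illustrative note: resync_req packs the record's TCP sequence number in
 * its high 32 bits and uses bit 0 as the "resync requested" flag, so the
 * core can consume both with a single atomic64 read.
 */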
static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}
static inline void tls_offload_tx_resync_request(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);

void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);

#endif /* _TLS_OFFLOAD_H */