/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>

#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13
#define TLS_DEVICE_NAME_MAX		32
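
/*
 * Worked example (illustrative only, assuming the AES-GCM-128 cipher from
 * uapi/linux/tls.h): a full record on the wire occupies
 *
 *	TLS_HEADER_SIZE (5)
 *	+ explicit nonce (TLS_CIPHER_AES_GCM_128_IV_SIZE, 8)
 *	+ payload (up to TLS_MAX_PAYLOAD_SIZE)
 *	+ tag (TLS_CIPHER_AES_GCM_128_TAG_SIZE, 16)
 *
 * so a maximum-sized record adds 29 bytes of overhead to 16384 bytes of data.
 */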
/*
 * This structure defines the routines for Inline TLS driver.
 * The following routines are optional and filled with a
 * null pointer if not defined.
 *
 * @name: The name of the registered Inline TLS device
 * @dev_list: Inline TLS device list
 * int (*feature)(struct tls_device *device);
 *     Called to return Inline TLS driver capability
 *
 * int (*hash)(struct tls_device *device, struct sock *sk);
 *     Sets up the Inline TLS driver for a listening socket and programs
 *     device specific functionality as required
 *
 * void (*unhash)(struct tls_device *device, struct sock *sk);
 *     Cleans up the listen state set by the Inline TLS driver
 *
 * void (*release)(struct kref *kref);
 *     Releases the registered device and allocated resources
 * @kref: Reference count on this tls_device
 */
struct tls_device {
	char name[TLS_DEVICE_NAME_MAX];
	struct list_head dev_list;
	int  (*feature)(struct tls_device *device);
	int  (*hash)(struct tls_device *device, struct sock *sk);
	void (*unhash)(struct tls_device *device, struct sock *sk);
	void (*release)(struct kref *kref);
	struct kref kref;
};
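
/*
 * Minimal registration sketch (hypothetical driver, not part of this header):
 * a driver fills in the optional callbacks and hands the structure to the TLS
 * core with tls_register_device(); the "mydrv_*" names are placeholders.
 *
 *	static struct tls_device mydrv_tls_dev = {
 *		.name	 = "mydrv-inline-tls",
 *		.feature = mydrv_tls_feature,
 *		.hash	 = mydrv_tls_hash,
 *		.unhash	 = mydrv_tls_unhash,
 *		.release = mydrv_tls_release,
 *	};
 *
 *	tls_register_device(&mydrv_tls_dev);
 *	...
 *	tls_unregister_device(&mydrv_tls_dev);
 */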
#ifdef CONFIG_TLS_DEVICE
/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
		   TLS_CIPHER_AES_GCM_128_SALT_SIZE];
	struct aead_request aead_req;
};
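
/*
 * Sketch of how the two-entry AEAD scatterlists above are typically built
 * (an assumption about usage, not a copy of the tls_sw.c code): entry 0
 * carries the AAD, entry 1 chains to the sk_msg data.
 *
 *	sg_init_table(rec->sg_aead_in, 2);
 *	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, sizeof(rec->aad_space));
 *	sg_unmark_end(&rec->sg_aead_in[1]);
 *	sg_chain(rec->sg_aead_in, 2, rec->msg_plaintext.sg.data);
 */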
struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};
struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;

#define BIT_TX_SCHEDULED	0
	unsigned long tx_bitmask;
};
struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;

	struct strparser strp;
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	atomic_t decrypt_pending;
};
struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	skb_frag_t frags[MAX_SKB_FRAGS];
};
struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);

	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
	 TLS_DRIVER_STATE_SIZE)
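
/*
 * The extra TLS_DRIVER_STATE_SIZE bytes live directly after the aligned
 * structure, so a driver can reach its private area roughly like this
 * (illustrative sketch; the offset math mirrors the macro above, and
 * mydrv_tx_state is a hypothetical helper):
 *
 *	static inline void *mydrv_tx_state(struct tls_offload_context_tx *ctx)
 *	{
 *		return (char *)ctx +
 *		       ALIGN(sizeof(struct tls_offload_context_tx),
 *			     sizeof(void *));
 *	}
 */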
enum {
	TLS_PENDING_CLOSED_RECORD
};
struct cipher_context {
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	char *iv;
	u16 rec_seq_size;
	char *rec_seq;
};
union tls_crypto_context {
	struct tls_crypto_info info;
	struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
};
struct tls_context {
	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	struct net_device *netdev;

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	unsigned long flags;
	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	int (*push_pending_record)(struct sock *sk, int flags);

	void (*sk_write_space)(struct sock *sk);
	void (*sk_destruct)(struct sock *sk);
	void (*sk_proto_close)(struct sock *sk, long timeout);

	int  (*setsockopt)(struct sock *sk, int level,
			   int optname, char __user *optval,
			   unsigned int optlen);
	int  (*getsockopt)(struct sock *sk, int level,
			   int optname, char __user *optval,
			   int __user *optlen);
	int  (*hash)(struct sock *sk);
	void (*unhash)(struct sock *sk);
};
struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	atomic64_t resync_req;

	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
	 TLS_DRIVER_STATE_SIZE)
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
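
/*
 * Typical use of the record lookup helpers in a driver transmit path
 * (a sketch under the assumption that "ctx" is the socket's
 * tls_offload_context_tx and "tcp_seq" comes from the skb being resent):
 *
 *	u64 record_sn;
 *	u32 offset;
 *	unsigned long flags;
 *	struct tls_record_info *record;
 *
 *	spin_lock_irqsave(&ctx->lock, flags);
 *	record = tls_get_record(ctx, tcp_seq, &record_sn);
 *	if (record && !tls_record_is_start_marker(record))
 *		offset = tcp_seq - tls_record_start_seq(record);
 *	spin_unlock_irqrestore(&ctx->lock, flags);
 */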
void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
				   int flags, long *timeo);
static inline bool tls_is_pending_closed_record(struct tls_context *ctx)
{
	return test_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
}
static inline int tls_complete_pending_work(struct sock *sk,
					    struct tls_context *ctx,
					    int flags, long *timeo)
{
	int rc = 0;

	if (unlikely(sk->sk_write_pending))
		rc = wait_on_pending_writer(sk, timeo);

	if (!rc && tls_is_pending_closed_record(ctx))
		rc = tls_push_pending_closed_record(sk, ctx, flags, timeo);

	return rc;
}
static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}
static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
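
/*
 * Example check in a driver's transmit handler (sketch only; the mydrv_*
 * names are placeholders): packets from offloaded sockets keep their TLS
 * record structure, so the driver diverts them to its crypto engine.
 *
 *	static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb,
 *					    struct net_device *dev)
 *	{
 *		if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *			return mydrv_tls_xmit(skb, dev);
 *		return mydrv_plain_xmit(skb, dev);
 *	}
 */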
static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}
static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}
static inline void tls_advance_record_sn(struct sock *sk,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, ctx->rec_seq_size))
		tls_err_abort(sk, EBADMSG);
	tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     ctx->iv_size);
}
static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type)
{
	size_t pkt_len, iv_size = ctx->tx.iv_size;

	pkt_len = plaintext_len + iv_size + ctx->tx.tag_size;

	/* we cover nonce explicit here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = record_type;
	buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
	buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
	memcpy(buf + TLS_NONCE_OFFSET,
	       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
}
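
/*
 * Resulting layout for an application-data record (illustrative): buf[0] is
 * 0x17 (TLS_RECORD_TYPE_DATA), buf[1] and buf[2] carry the protocol version
 * (0x03 0x03 for TLS 1.2), buf[3..4] hold the big-endian length covering the
 * explicit nonce, payload and tag, and the explicit nonce copied from the IV
 * follows at TLS_NONCE_OFFSET.
 */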
static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type)
{
	memcpy(buf, record_sequence, record_sequence_size);

	buf[8] = record_type;
	buf[9] = TLS_1_2_VERSION_MAJOR;
	buf[10] = TLS_1_2_VERSION_MINOR;
	buf[11] = size >> 8;
	buf[12] = size & 0xFF;
}
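
/*
 * The 13 bytes of AAD (TLS_AAD_SPACE_SIZE) built above are, in order: the
 * 8-byte record sequence number, 1-byte record type, 2-byte protocol version
 * (TLS 1.2) and the 2-byte plaintext length. For example, the first
 * application-data record carrying 256 bytes produces:
 *
 *	00 00 00 00 00 00 00 00  17  03 03  01 00
 */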
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ulp_data;
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
}
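
/*
 * resync_req packs the TCP sequence number of the record that needs
 * resynchronization into the upper 32 bits and uses bit 0 as a "request
 * pending" flag; the software RX path compares incoming record boundaries
 * against this value (see handle_device_resync() below) and notifies the
 * device once a matching record header is seen. A NIC driver would typically
 * call this when its hardware loses record framing, roughly (an assumed
 * call site, for illustration only):
 *
 *	tls_offload_rx_resync_request(sk, cpu_to_be32(tcp_seq_of_next_record));
 */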
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);

struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);

void tls_device_offload_cleanup_rx(struct sock *sk);
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
#endif /* _TLS_OFFLOAD_H */