/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)

#define TLS_HEADER_SIZE 5
#define TLS_NONCE_OFFSET TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA 0x17

#define TLS_AAD_SPACE_SIZE 13

#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8

/* For AES-CCM, the full 16-byte IV is made up of four fields of the
 * following sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The 'length' field is encoded in b0 as '(length width - 1)',
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE 2
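
/* Illustrative sketch (not part of this header): the SW encryption path
 * seeds a CCM IV roughly like this, using the iv_data[] buffer of
 * struct tls_rec below. The names salt_and_nonce/nonce_len are
 * hypothetical here:
 *
 *	rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
 *	memcpy(rec->iv_data + 1, salt_and_nonce, nonce_len);
 *
 * The trailing three 'length' bytes are filled in by the CCM cipher
 * implementation itself.
 */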

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field)				\
	__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)
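
/* Usage sketch (illustrative, not part of this header): callers bump the
 * per-netns TLS MIB counters from uapi/linux/snmp.h, e.g.
 *
 *	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
 *
 * The __-prefixed variants follow the usual SNMP_INC_STATS convention for
 * contexts where softirqs are already disabled.
 */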

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};
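
/* Illustrative sketch (not part of this header): the TX bits above are meant
 * for the usual atomic bitops, e.g. to schedule the transmit worker only
 * once:
 *
 *	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
 *		schedule_delayed_work(&ctx->tx_work.work, 0);
 */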

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)

enum tls_context_flags {
	TLS_RX_SYNC_RUNNING = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};
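
/* Usage sketch (illustrative, not part of this header): these flags live in
 * tls_ctx->flags and are manipulated with atomic bitops, e.g.
 *
 *	set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 *	...
 *	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 */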

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
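
/* Illustrative sketch (not part of this header): a device driver advertises
 * kTLS offload by setting the NETIF_F_HW_TLS_TX/RX features and pointing
 * netdev->tlsdev_ops at its hooks. The mydrv_* names are hypothetical:
 *
 *	static const struct tlsdev_ops mydrv_tls_ops = {
 *		.tls_dev_add	= mydrv_tls_add,
 *		.tls_dev_del	= mydrv_tls_del,
 *		.tls_dev_resync	= mydrv_tls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tls_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX;
 */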

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL	2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL	128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX	13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	/* list_first_entry() is undefined on an empty list; use the
	 * _or_null variant so an empty tx_list reads as "not ready".
	 */
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
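
/* Illustrative sketch (not part of this header): a driver's xmit path can
 * use this test to decide whether an skb belongs to a TX-offloaded TLS
 * socket before looking up its device crypto state, e.g.
 *
 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *		state = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
 */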

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* We cover the explicit nonce here as well, so buf must be at
	 * least TLS_HEADER_SIZE + iv_size bytes.
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
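
/* Worked example (illustrative): for a 100-byte TLS 1.2 application-data
 * record with AES-GCM-128 (8-byte explicit nonce, 16-byte tag), the 5-byte
 * header is
 *
 *	buf[] = { 0x17, 0x03, 0x03, 0x00, 0x7c }
 *
 * i.e. type 23, legacy version 0x0303, length 100 + 8 + 16 = 124 (0x7c).
 */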

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}
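
/* Layout sketch (illustrative): for TLS 1.2 the 13-byte AAD built above is
 *
 *	seq_num[8] || type[1] || version[2] || length[2]
 *
 * which matches TLS_AAD_SPACE_SIZE. For TLS 1.3 only the 5-byte record
 * header serves as AAD, so the sequence-number copy is skipped.
 */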

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif
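
/* Illustrative sketch (not part of this header): a driver keeps its
 * per-connection state inside driver_state and asserts at build time that
 * it fits. struct mydrv_tls_tx_state is hypothetical:
 *
 *	struct mydrv_tls_tx_state *state =
 *		tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *
 *	BUILD_BUG_ON(sizeof(struct mydrv_tls_tx_state) >
 *		     TLS_DRIVER_STATE_SIZE_TX);
 */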

#define RESYNC_REQ	BIT(0)
#define RESYNC_REQ_ASYNC	BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
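
/* Usage sketch (illustrative, not part of this header): with
 * TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ, the driver calls this from its RX path
 * when it believes a record header starts at a given TCP sequence number
 * (hdr_tcp_seq is a hypothetical name):
 *
 *	tls_offload_rx_resync_request(sk, hdr_tcp_seq);
 *
 * The core verifies the guess and, if it holds, hands the record sequence
 * number back to the device via ->tls_dev_resync.
 */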

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */