/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

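/* For TLS 1.3, the real content type is hidden at the end of the plaintext,
 * followed by optional zero padding (RFC 8446):
 *
 *	plaintext := content || content_type (1 byte) || zeros[pad]
 *
 * Scan backwards from the end of the decrypted record for the first non-zero
 * byte; that byte is the content type and everything after it is padding.
 * Returns the number of padding bytes to trim, or a negative error if no
 * content-type byte is found within the record.
 */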
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

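/* Completion callback for asynchronous AEAD decryption. It runs in the
 * crypto layer's context: it strips the TLS 1.3 padding, advances the
 * strparser message past the record header, releases any zero-copy
 * destination pages, and wakes a reader waiting in tls_sw_recvmsg() once the
 * last pending decryption completes.
 */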
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate if there was an error */
	if (err) {
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}

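/* Build and submit the AEAD decrypt request for one record. In async mode
 * the skb carries the socket pointer to tls_decrypt_done(); in sync mode we
 * block on ctx->async_wait until the crypto layer finishes.
 */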
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough room (ensured by the caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * the same offset for both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

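/* Allocate a fresh TX record. The AEAD request is co-allocated at the end of
 * struct tls_rec; sg_aead_in/out each start with the AAD buffer and their
 * second entry is later chained to the plaintext/ciphertext scatterlists in
 * tls_push_record().
 */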
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

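/* Push encrypted records from ctx->tx_list out to TCP, in order. A partially
 * sent record at the head is finished first; after that, records are
 * transmitted only while their tx_ready flag is set, so a record still being
 * encrypted asynchronously stops the loop. flags == -1 means "reuse the
 * flags saved when the record was queued".
 */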
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

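/* Completion callback for asynchronous AEAD encryption. It restores the
 * first ciphertext S/G entry to cover the record header again, marks the
 * record tx_ready, and, if that record sits at the head of tx_list,
 * schedules the TX work to actually transmit it.
 */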
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

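/* Encrypt the currently open record. The first ciphertext S/G entry is
 * shifted past the record header so the AEAD output does not overwrite it,
 * the per-record IV is derived by XORing the base IV with the sequence
 * number, and the record is queued on tx_list before the request is
 * submitted, since completion may be asynchronous.
 */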
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

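/* Split the open record at split_point so that the first part covers exactly
 * the remaining apply_bytes budget (used when a BPF verdict applies to only
 * part of the record). The boundary page is shared between both records via
 * an extra page reference; the tail scatterlist entries are copied into the
 * new record, which is returned through 'to'.
 */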
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

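/* Close the currently open record and hand it to the AEAD layer. If
 * apply_bytes ends mid-record, the record is first split at that point. For
 * TLS 1.3 the inner content-type byte is chained onto the plaintext; the
 * record header and AAD are built, the record is encrypted, and any records
 * that are already ready are transmitted via tls_tx_records().
 */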
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
		 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

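/* Run the sockmap BPF verdict program (if one is attached) on the record
 * about to be pushed and act on its decision: __SK_PASS encrypts and sends
 * on this socket, __SK_REDIRECT forwards the plaintext to another socket via
 * tcp_bpf_sendmsg_redir(), and __SK_DROP discards the data. The loop repeats
 * while an open record remains so cork/apply budgets are honoured.
 */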
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       size_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy)
		return tls_push_record(sk, flags, record_type);
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		if (delta < msg->sg.size)
			delta -= msg->sg.size;
		else
			delta = 0;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err < 0) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

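/* sendmsg() entry point for SW kTLS. User data is accumulated into the open
 * record: zero-copied from the iov when the record can be closed immediately
 * and encryption is synchronous, otherwise copied. A record is pushed
 * whenever it reaches TLS_MAX_PAYLOAD_SIZE or the message ends without
 * MSG_MORE. With async crypto, completed encryptions are flushed at the end.
 */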
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			rec->inplace_crypto = 0;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied ? copied : ret;
}

static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	size_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			rec->inplace_crypto = 0;
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -ENOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -ENOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

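/* Block until the strparser has queued a full record (ctx->recv_pkt) or the
 * psock ingress queue has data, honouring MSG_DONTWAIT, the receive timeout,
 * shutdown and pending signals. Returns the record skb, or NULL with *err
 * set; on shutdown or SOCK_DONE it returns NULL without setting an error.
 */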
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

/* This function decrypts the input skb into either out_iov or out_sg, or
 * into the skb buffers themselves. The input parameter 'zc' indicates
 * whether zero-copy mode should be tried. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are NULL,
 * the decryption happens inside the skb buffers themselves, i.e. zero-copy
 * gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc, bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size +
			     prot->tail_size;
	int iv_offset = 0;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
				 rxm->full_len - prot->prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + prot->aad_size;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + prot->aad_size;

	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		iv[0] = 2;
		iv_offset = 1;
	}

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + iv_offset + prot->salt_size,
			    prot->iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	if (prot->version == TLS_1_3_VERSION)
		memcpy(iv + iv_offset, tls_ctx->rx.iv,
		       crypto_aead_ivsize(ctx->aead_recv));
	else
		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, prot->rec_seq_size,
		     ctx->control, prot->version);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, prot->aad_size);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = data_len;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, async);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

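/* Decrypt the record if it has not been decrypted yet (possibly already
 * handled by the NIC for TLS_HW), then strip the padding and record header
 * and advance the expected record sequence number. A record is only
 * processed once even if the caller retries.
 */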
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc,
			      bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int pad, err = 0;

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, skb);
			if (err < 0)
				return err;
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted) {
			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
					       async);
			if (err < 0) {
				if (err == -EINPROGRESS)
					tls_advance_record_sn(sk, prot,
							      &tls_ctx->rx);

				return err;
			}
		} else {
			*zc = false;
		}

		pad = padding_length(ctx, prot, skb);
		if (pad < 0)
			return pad;

		rxm->full_len -= pad;
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
		ctx->decrypted = true;
		ctx->saved_data_ready(sk);
	} else {
		*zc = false;
	}

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		consume_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}

/* This function traverses the rx_list in the tls receive context and copies
 * the decrypted records into the buffer provided by the caller when zero
 * copy is not in use. Further, a record is removed from rx_list if it has
 * been consumed completely and this is not a peek.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   bool *cmsg,
			   size_t skip,
			   size_t len,
			   bool zc,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	u8 ctrl = *control;
	u8 msgc = *cmsg;
	struct tls_msg *tlm;
	ssize_t copied = 0;

	/* Set the record type in 'control' if caller didn't pass it */
	if (!ctrl && skb) {
		tlm = tls_msg(skb);
		ctrl = tlm->control;
	}

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		/* Set record type if not already done. For a non-data record,
		 * do not proceed if record type could not be copied.
		 */
		if (!msgc) {
			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					    sizeof(ctrl), &ctrl);
			msgc = true;
			if (ctrl != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC)
					return -EIO;

				*cmsg = msgc;
			}
		}

		if (!zc || (rxm->full_len - skip) > len) {
			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
							msg, chunk);
			if (err < 0)
				return err;
		}

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record in the non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}

	*control = ctrl;
	return copied;
}

int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct sk_psock *psock;
	unsigned char control = 0;
	ssize_t decrypted = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	lock_sock(sk);

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
			      is_peek);
	if (err < 0) {
		tls_err_abort(sk, err);
		goto end;
	} else {
		copied = err;
	}

	if (len <= copied)
		goto recv_end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
		bool retain_skb = false;
		bool zc = false;
		int to_decrypt;
		int chunk = 0;
		bool async_capable;
		bool async = false;

		skb = tls_wait_data(sk, psock, flags, timeo, &err);
		if (!skb) {
			if (psock) {
				int ret = __tcp_bpf_recvmsg(sk, psock,
							    msg, len, flags);

				if (ret > 0) {
					decrypted += ret;
					len -= ret;
					continue;
				}
			}
			goto recv_end;
		} else {
			tlm = tls_msg(skb);
			if (prot->version == TLS_1_3_VERSION)
				tlm->control = 0;
			else
				tlm->control = ctx->control;
		}

		rxm = strp_msg(skb);

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (to_decrypt <= len && !is_kvec && !is_peek &&
		    ctx->control == TLS_RECORD_TYPE_DATA &&
		    prot->version != TLS_1_3_VERSION)
			zc = true;

		/* Do not use async mode if record is non-data */
		if (ctx->control == TLS_RECORD_TYPE_DATA)
			async_capable = ctx->async_capable;
		else
			async_capable = false;

		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
					 &chunk, &zc, async_capable);
		if (err < 0 && err != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			goto recv_end;
		}

		if (err == -EINPROGRESS) {
			async = true;
			num_async++;
		} else if (prot->version == TLS_1_3_VERSION) {
			tlm->control = ctx->control;
		}

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */

		if (!control)
			control = tlm->control;
		else if (control != tlm->control)
			goto recv_end;

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(control), &control);
			cmsg = true;
			if (control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		}

		if (async)
			goto pick_next_record;

		if (!zc) {
			if (rxm->full_len > len) {
				retain_skb = true;
				chunk = len;
			} else {
				chunk = rxm->full_len;
			}

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto recv_end;

			if (!is_peek) {
				rxm->offset = rxm->offset + chunk;
				rxm->full_len = rxm->full_len - chunk;
			}
		}

pick_next_record:
		if (chunk > len)
			chunk = len;

		decrypted += chunk;
		len -= chunk;

		/* For async or peek case, queue the current skb */
		if (async || is_peek || retain_skb) {
			skb_queue_tail(&ctx->rx_list, skb);
			skb = NULL;
		}

		if (tls_sw_advance_skb(sk, skb, chunk)) {
			/* Return full control message to
			 * userspace before trying to parse
			 * another message type
			 */
			msg->msg_flags |= MSG_EOR;
			if (ctx->control != TLS_RECORD_TYPE_DATA)
				goto recv_end;
		} else {
			break;
		}
	}

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		smp_store_mb(ctx->async_notify, true);
		if (atomic_read(&ctx->decrypt_pending)) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
				decrypted = 0;
				goto end;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}
		WRITE_ONCE(ctx->async_notify, false);

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
					      decrypted, false, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
					      decrypted, true, is_peek);
		if (err < 0) {
			tls_err_abort(sk, err);
			copied = 0;
			goto end;
		}
	}

	copied += decrypted;

end:
	release_sock(sk);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);

		/* splice does not support reading control messages */
		if (ctx->control != TLS_RECORD_TYPE_DATA) {
			err = -ENOTSUPP;
			goto splice_read_end;
		}

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

bool tls_sw_stream_read(const struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || ctx->recv_pkt ||
		!skb_queue_empty(&ctx->rx_list);
}

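/* strparser callback: given the start of a record, report how many bytes the
 * full record occupies, or 0 if the header has not fully arrived yet. The
 * 5-byte TLS record header is:
 *
 *	content_type(1) | legacy_version(2) | length(2, big endian)
 *
 * and both TLS 1.2 and TLS 1.3 carry 0x0303 (TLS 1.2) in the version field.
 */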
tls_read_size(struct strparser * strp,struct sk_buff * skb)1967 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1968 {
1969 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
1970 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1971 struct tls_prot_info *prot = &tls_ctx->prot_info;
1972 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
1973 struct strp_msg *rxm = strp_msg(skb);
1974 size_t cipher_overhead;
1975 size_t data_len = 0;
1976 int ret;
1977
1978 /* Verify that we have a full TLS header, or wait for more data */
1979 if (rxm->offset + prot->prepend_size > skb->len)
1980 return 0;
1981
1982 /* Sanity-check size of on-stack buffer. */
1983 if (WARN_ON(prot->prepend_size > sizeof(header))) {
1984 ret = -EINVAL;
1985 goto read_failure;
1986 }
1987
1988 /* Linearize header to local buffer */
1989 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
1990
1991 if (ret < 0)
1992 goto read_failure;
1993
1994 ctx->control = header[0];
1995
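/* The payload length is a 16-bit big-endian field in bytes 3-4 of
 * the record header.
 */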
1996 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1997
1998 cipher_overhead = prot->tag_size;
1999 if (prot->version != TLS_1_3_VERSION)
2000 cipher_overhead += prot->iv_size;
2001
2002 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2003 prot->tail_size) {
2004 ret = -EMSGSIZE;
2005 goto read_failure;
2006 }
2007 if (data_len < cipher_overhead) {
2008 ret = -EBADMSG;
2009 goto read_failure;
2010 }
2011
2012 /* Note that both TLS 1.3 and TLS 1.2 use the TLS 1.2 version number here */
2013 if (header[1] != TLS_1_2_VERSION_MINOR ||
2014 header[2] != TLS_1_2_VERSION_MAJOR) {
2015 ret = -EINVAL;
2016 goto read_failure;
2017 }
2018
2019 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2020 TCP_SKB_CB(skb)->seq + rxm->offset);
2021 return data_len + TLS_HEADER_SIZE;
2022
2023 read_failure:
2024 tls_err_abort(strp->sk, ret);
2025
2026 return ret;
2027 }
2028
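/* strparser rcv_msg callback: stash the completed record, pause the
 * parser until the record has been consumed, then wake up readers.
 */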
2029 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2030 {
2031 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2032 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2033
2034 ctx->decrypted = false;
2035
2036 ctx->recv_pkt = skb;
2037 strp_pause(strp);
2038
2039 ctx->saved_data_ready(strp->sk);
2040 }
2041
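/* Replacement for sk->sk_data_ready: feed newly arrived data to the
 * strparser, and also run the original callback if BPF has queued
 * ingress messages on this socket.
 */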
2042 static void tls_data_ready(struct sock *sk)
2043 {
2044 struct tls_context *tls_ctx = tls_get_ctx(sk);
2045 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2046 struct sk_psock *psock;
2047
2048 strp_data_ready(&ctx->strp);
2049
2050 psock = sk_psock_get(sk);
2051 if (psock && !list_empty(&psock->ingress_msg)) {
2052 ctx->saved_data_ready(sk);
2053 sk_psock_put(sk, psock);
2054 }
2055 }
2056
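/* Keep tx_work_handler from running again and wait for an already
 * scheduled instance to finish.
 */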
2057 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2058 {
2059 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2060
2061 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2062 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2063 cancel_delayed_work_sync(&ctx->tx_work.work);
2064 }
2065
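/* Tear down the transmit state: wait for outstanding async
 * encryptions, transmit what is already encrypted, then free every
 * record still sitting on tx_list along with the AEAD transform.
 */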
2066 void tls_sw_release_resources_tx(struct sock *sk)
2067 {
2068 struct tls_context *tls_ctx = tls_get_ctx(sk);
2069 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2070 struct tls_rec *rec, *tmp;
2071
2072 /* Wait for any pending async encryptions to complete */
2073 smp_store_mb(ctx->async_notify, true);
2074 if (atomic_read(&ctx->encrypt_pending))
2075 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2076
2077 tls_tx_records(sk, -1);
2078
2079 /* Free up unsent records in tx_list. First, free the
2080 * partially sent record, if any, at the head of tx_list.
2081 */
2082 if (tls_free_partial_record(sk, tls_ctx)) {
2083 rec = list_first_entry(&ctx->tx_list,
2084 struct tls_rec, list);
2085 list_del(&rec->list);
2086 sk_msg_free(sk, &rec->msg_plaintext);
2087 kfree(rec);
2088 }
2089
2090 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2091 list_del(&rec->list);
2092 sk_msg_free(sk, &rec->msg_encrypted);
2093 sk_msg_free(sk, &rec->msg_plaintext);
2094 kfree(rec);
2095 }
2096
2097 crypto_free_aead(ctx->aead_send);
2098 tls_free_open_rec(sk);
2099 }
2100
2101 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2102 {
2103 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2104
2105 kfree(ctx);
2106 }
2107
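/* Tear down the receive state: free the key material, drop any
 * pending and queued records, free the AEAD transform, stop the
 * strparser and restore the original data_ready callback.
 */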
2108 void tls_sw_release_resources_rx(struct sock *sk)
2109 {
2110 struct tls_context *tls_ctx = tls_get_ctx(sk);
2111 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2112
2113 kfree(tls_ctx->rx.rec_seq);
2114 kfree(tls_ctx->rx.iv);
2115
2116 if (ctx->aead_recv) {
2117 kfree_skb(ctx->recv_pkt);
2118 ctx->recv_pkt = NULL;
2119 skb_queue_purge(&ctx->rx_list);
2120 crypto_free_aead(ctx->aead_recv);
2121 strp_stop(&ctx->strp);
2122 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2123 * we still want to strp_stop(), but sk->sk_data_ready was
2124 * never swapped.
2125 */
2126 if (ctx->saved_data_ready) {
2127 write_lock_bh(&sk->sk_callback_lock);
2128 sk->sk_data_ready = ctx->saved_data_ready;
2129 write_unlock_bh(&sk->sk_callback_lock);
2130 }
2131 }
2132 }
2133
2134 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2135 {
2136 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2137
2138 strp_done(&ctx->strp);
2139 }
2140
2141 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2142 {
2143 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2144
2145 kfree(ctx);
2146 }
2147
2148 void tls_sw_free_resources_rx(struct sock *sk)
2149 {
2150 struct tls_context *tls_ctx = tls_get_ctx(sk);
2151
2152 tls_sw_release_resources_rx(sk);
2153 tls_sw_free_ctx_rx(tls_ctx);
2154 }
2155
2156 /* The work handler that transmits the encrypted records in tx_list */
2157 static void tx_work_handler(struct work_struct *work)
2158 {
2159 struct delayed_work *delayed_work = to_delayed_work(work);
2160 struct tx_work *tx_work = container_of(delayed_work,
2161 struct tx_work, work);
2162 struct sock *sk = tx_work->sk;
2163 struct tls_context *tls_ctx = tls_get_ctx(sk);
2164 struct tls_sw_context_tx *ctx;
2165
2166 if (unlikely(!tls_ctx))
2167 return;
2168
2169 ctx = tls_sw_ctx_tx(tls_ctx);
2170 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2171 return;
2172
2173 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2174 return;
2175 mutex_lock(&tls_ctx->tx_lock);
2176 lock_sock(sk);
2177 tls_tx_records(sk, -1);
2178 release_sock(sk);
2179 mutex_unlock(&tls_ctx->tx_lock);
2180 }
2181
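/* Called when transmit space becomes available: kick the tx worker
 * if encrypted records are waiting and no work is scheduled yet.
 */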
2182 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2183 {
2184 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2185
2186 /* Schedule the transmission if the tx list is ready */
2187 if (is_tx_ready(tx_ctx) &&
2188 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2189 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2190 }
2191
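/* Hook the socket's data_ready callback, saving the original one,
 * and prod the strparser in case data has already arrived.
 */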
2192 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2193 {
2194 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2195
2196 write_lock_bh(&sk->sk_callback_lock);
2197 rx_ctx->saved_data_ready = sk->sk_data_ready;
2198 sk->sk_data_ready = tls_data_ready;
2199 write_unlock_bh(&sk->sk_callback_lock);
2200
2201 strp_check_rcv(&rx_ctx->strp);
2202 }
2203
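/* Set up software crypto state for one direction of the socket
 * (tx != 0 selects transmit): allocate the per-direction context,
 * copy IV/salt and the record sequence out of crypto_info, allocate
 * and key the AEAD transform, and for receive also initialize the
 * strparser.
 */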
2204 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2205 {
2206 struct tls_context *tls_ctx = tls_get_ctx(sk);
2207 struct tls_prot_info *prot = &tls_ctx->prot_info;
2208 struct tls_crypto_info *crypto_info;
2209 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2210 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2211 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2212 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2213 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2214 struct cipher_context *cctx;
2215 struct crypto_aead **aead;
2216 struct strp_callbacks cb;
2217 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2218 struct crypto_tfm *tfm;
2219 char *iv, *rec_seq, *key, *salt, *cipher_name;
2220 size_t keysize;
2221 int rc = 0;
2222
2223 if (!ctx) {
2224 rc = -EINVAL;
2225 goto out;
2226 }
2227
2228 if (tx) {
2229 if (!ctx->priv_ctx_tx) {
2230 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2231 if (!sw_ctx_tx) {
2232 rc = -ENOMEM;
2233 goto out;
2234 }
2235 ctx->priv_ctx_tx = sw_ctx_tx;
2236 } else {
2237 sw_ctx_tx =
2238 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2239 }
2240 } else {
2241 if (!ctx->priv_ctx_rx) {
2242 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2243 if (!sw_ctx_rx) {
2244 rc = -ENOMEM;
2245 goto out;
2246 }
2247 ctx->priv_ctx_rx = sw_ctx_rx;
2248 } else {
2249 sw_ctx_rx =
2250 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2251 }
2252 }
2253
2254 if (tx) {
2255 crypto_init_wait(&sw_ctx_tx->async_wait);
2256 crypto_info = &ctx->crypto_send.info;
2257 cctx = &ctx->tx;
2258 aead = &sw_ctx_tx->aead_send;
2259 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2260 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2261 sw_ctx_tx->tx_work.sk = sk;
2262 } else {
2263 crypto_init_wait(&sw_ctx_rx->async_wait);
2264 crypto_info = &ctx->crypto_recv.info;
2265 cctx = &ctx->rx;
2266 skb_queue_head_init(&sw_ctx_rx->rx_list);
2267 aead = &sw_ctx_rx->aead_recv;
2268 }
2269
2270 switch (crypto_info->cipher_type) {
2271 case TLS_CIPHER_AES_GCM_128: {
2272 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2273 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2274 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2275 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2276 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2277 rec_seq =
2278 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2279 gcm_128_info =
2280 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
2281 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2282 key = gcm_128_info->key;
2283 salt = gcm_128_info->salt;
2284 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2285 cipher_name = "gcm(aes)";
2286 break;
2287 }
2288 case TLS_CIPHER_AES_GCM_256: {
2289 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2290 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2291 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2292 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2293 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2294 rec_seq =
2295 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2296 gcm_256_info =
2297 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2298 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2299 key = gcm_256_info->key;
2300 salt = gcm_256_info->salt;
2301 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2302 cipher_name = "gcm(aes)";
2303 break;
2304 }
2305 case TLS_CIPHER_AES_CCM_128: {
2306 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2307 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2308 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2309 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2310 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2311 rec_seq =
2312 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2313 ccm_128_info =
2314 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2315 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2316 key = ccm_128_info->key;
2317 salt = ccm_128_info->salt;
2318 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2319 cipher_name = "ccm(aes)";
2320 break;
2321 }
2322 default:
2323 rc = -EINVAL;
2324 goto free_priv;
2325 }
2326
2327 /* Sanity-check the sizes for stack allocations. */
2328 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2329 rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
2330 rc = -EINVAL;
2331 goto free_priv;
2332 }
2333
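/* TLS 1.3 records carry no explicit nonce after the header and
 * append a one-byte inner content type to the plaintext; the AAD
 * is just the 5-byte record header.
 */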
2334 if (crypto_info->version == TLS_1_3_VERSION) {
2335 nonce_size = 0;
2336 prot->aad_size = TLS_HEADER_SIZE;
2337 prot->tail_size = 1;
2338 } else {
2339 prot->aad_size = TLS_AAD_SPACE_SIZE;
2340 prot->tail_size = 0;
2341 }
2342
2343 prot->version = crypto_info->version;
2344 prot->cipher_type = crypto_info->cipher_type;
2345 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2346 prot->tag_size = tag_size;
2347 prot->overhead_size = prot->prepend_size +
2348 prot->tag_size + prot->tail_size;
2349 prot->iv_size = iv_size;
2350 prot->salt_size = salt_size;
2351 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2352 if (!cctx->iv) {
2353 rc = -ENOMEM;
2354 goto free_priv;
2355 }
2356 /* Note: the 128-bit and 256-bit salts are the same size */
2357 prot->rec_seq_size = rec_seq_size;
2358 memcpy(cctx->iv, salt, salt_size);
2359 memcpy(cctx->iv + salt_size, iv, iv_size);
2360 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2361 if (!cctx->rec_seq) {
2362 rc = -ENOMEM;
2363 goto free_iv;
2364 }
2365
2366 if (!*aead) {
2367 *aead = crypto_alloc_aead(cipher_name, 0, 0);
2368 if (IS_ERR(*aead)) {
2369 rc = PTR_ERR(*aead);
2370 *aead = NULL;
2371 goto free_rec_seq;
2372 }
2373 }
2374
2375 ctx->push_pending_record = tls_sw_push_pending_record;
2376
2377 rc = crypto_aead_setkey(*aead, key, keysize);
2378
2379 if (rc)
2380 goto free_aead;
2381
2382 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2383 if (rc)
2384 goto free_aead;
2385
2386 if (sw_ctx_rx) {
2387 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2388
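/* Async decrypt is only used with TLS 1.2; TLS 1.3 records are
 * always decrypted synchronously.
 */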
2389 if (crypto_info->version == TLS_1_3_VERSION)
2390 sw_ctx_rx->async_capable = false;
2391 else
2392 sw_ctx_rx->async_capable =
2393 tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
2394
2395 /* Set up strparser */
2396 memset(&cb, 0, sizeof(cb));
2397 cb.rcv_msg = tls_queue;
2398 cb.parse_msg = tls_read_size;
2399
2400 strp_init(&sw_ctx_rx->strp, sk, &cb);
2401 }
2402
2403 goto out;
2404
2405 free_aead:
2406 crypto_free_aead(*aead);
2407 *aead = NULL;
2408 free_rec_seq:
2409 kfree(cctx->rec_seq);
2410 cctx->rec_seq = NULL;
2411 free_iv:
2412 kfree(cctx->iv);
2413 cctx->iv = NULL;
2414 free_priv:
2415 if (tx) {
2416 kfree(ctx->priv_ctx_tx);
2417 ctx->priv_ctx_tx = NULL;
2418 } else {
2419 kfree(ctx->priv_ctx_rx);
2420 ctx->priv_ctx_rx = NULL;
2421 }
2422 out:
2423 return rc;
2424 }
2425