/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

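/* Record a fatal error on the socket. Callers pass a negative errno;
 * sk->sk_err holds the positive value, hence the sign flip.
 */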
noinline void tls_err_abort(struct sock *sk, int err)
{
        WARN_ON_ONCE(err >= 0);
        /* sk->sk_err should contain a positive error code. */
        sk->sk_err = -err;
        sk_error_report(sk);
}

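/* Count the scatterlist entries needed to map @len bytes of @skb
 * starting at @offset: the linear head, each page fragment, and any
 * frag_list skbs, walked recursively with a depth cap to bound stack
 * usage.
 */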
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
                     unsigned int recursion_level)
{
        int start = skb_headlen(skb);
        int i, chunk = start - offset;
        struct sk_buff *frag_iter;
        int elt = 0;

        if (unlikely(recursion_level >= 24))
                return -EMSGSIZE;

        if (chunk > 0) {
                if (chunk > len)
                        chunk = len;
                elt++;
                len -= chunk;
                if (len == 0)
                        return elt;
                offset += chunk;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
                chunk = end - offset;
                if (chunk > 0) {
                        if (chunk > len)
                                chunk = len;
                        elt++;
                        len -= chunk;
                        if (len == 0)
                                return elt;
                        offset += chunk;
                }
                start = end;
        }

        if (unlikely(skb_has_frag_list(skb))) {
                skb_walk_frags(skb, frag_iter) {
                        int end, ret;

                        WARN_ON(start > offset + len);

                        end = start + frag_iter->len;
                        chunk = end - offset;
                        if (chunk > 0) {
                                if (chunk > len)
                                        chunk = len;
                                ret = __skb_nsg(frag_iter, offset - start, chunk,
                                                recursion_level + 1);
                                if (unlikely(ret < 0))
                                        return ret;
                                elt += ret;
                                len -= chunk;
                                if (len == 0)
                                        return elt;
                                offset += chunk;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
        return __skb_nsg(skb, offset, len, 0);
}

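/* TLS 1.3 hides the real record type behind zero padding; the inner
 * plaintext is laid out as:
 *
 *     content || content-type (1 byte) || 0x00 ... 0x00
 *
 * Walk backwards from just past the 16-byte auth tag (back = 17)
 * until a non-zero byte, the content type, is found; stash it in
 * ctx->control and return the number of padding bytes to trim.
 */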
static int padding_length(struct tls_sw_context_rx *ctx,
                          struct tls_prot_info *prot, struct sk_buff *skb)
{
        struct strp_msg *rxm = strp_msg(skb);
        int sub = 0;

        /* Determine zero-padding length */
        if (prot->version == TLS_1_3_VERSION) {
                char content_type = 0;
                int err;
                int back = 17;

                while (content_type == 0) {
                        if (back > rxm->full_len - prot->prepend_size)
                                return -EBADMSG;
                        err = skb_copy_bits(skb,
                                            rxm->offset + rxm->full_len - back,
                                            &content_type, 1);
                        if (err)
                                return err;
                        if (content_type)
                                break;
                        sub++;
                        back++;
                }
                ctx->control = content_type;
        }
        return sub;
}

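/* Completion callback for an async decrypt. Runs from the crypto
 * layer: propagate any error to the socket, otherwise trim TLS 1.3
 * padding and strip the record header, release the zero-copy
 * destination pages if decryption was not done in place, and wake the
 * waiter in tls_sw_recvmsg() once the last pending request finishes.
 */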
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
        struct aead_request *aead_req = (struct aead_request *)req;
        struct scatterlist *sgout = aead_req->dst;
        struct scatterlist *sgin = aead_req->src;
        struct tls_sw_context_rx *ctx;
        struct tls_context *tls_ctx;
        struct tls_prot_info *prot;
        struct scatterlist *sg;
        struct sk_buff *skb;
        unsigned int pages;
        int pending;

        skb = (struct sk_buff *)req->data;
        tls_ctx = tls_get_ctx(skb->sk);
        ctx = tls_sw_ctx_rx(tls_ctx);
        prot = &tls_ctx->prot_info;

        /* Propagate the error if there was one */
        if (err) {
                if (err == -EBADMSG)
                        TLS_INC_STATS(sock_net(skb->sk),
                                      LINUX_MIB_TLSDECRYPTERROR);
                ctx->async_wait.err = err;
                tls_err_abort(skb->sk, err);
        } else {
                struct strp_msg *rxm = strp_msg(skb);
                int pad;

                pad = padding_length(ctx, prot, skb);
                if (pad < 0) {
                        ctx->async_wait.err = pad;
                        tls_err_abort(skb->sk, pad);
                } else {
                        rxm->full_len -= pad;
                        rxm->offset += prot->prepend_size;
                        rxm->full_len -= prot->overhead_size;
                }
        }

        /* After using skb->sk to propagate sk through crypto async callback
         * we need to NULL it again.
         */
        skb->sk = NULL;

        /* Free the destination pages if skb was not decrypted in place */
        if (sgout != sgin) {
                /* Skip the first S/G entry as it points to AAD */
                for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
                        if (!sg)
                                break;
                        put_page(sg_page(sg));
                }
        }

        kfree(aead_req);

        spin_lock_bh(&ctx->decrypt_compl_lock);
        pending = atomic_dec_return(&ctx->decrypt_pending);

        if (!pending && ctx->async_notify)
                complete(&ctx->async_wait.completion);
        spin_unlock_bh(&ctx->decrypt_compl_lock);
}

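/* Build and submit the AEAD decrypt request. In async mode the skb
 * carries the socket pointer into tls_decrypt_done() and the call
 * returns -EINPROGRESS; in sync mode we wait for completion here.
 */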
static int tls_do_decryption(struct sock *sk,
                             struct sk_buff *skb,
                             struct scatterlist *sgin,
                             struct scatterlist *sgout,
                             char *iv_recv,
                             size_t data_len,
                             struct aead_request *aead_req,
                             bool async)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        int ret;

        aead_request_set_tfm(aead_req, ctx->aead_recv);
        aead_request_set_ad(aead_req, prot->aad_size);
        aead_request_set_crypt(aead_req, sgin, sgout,
                               data_len + prot->tag_size,
                               (u8 *)iv_recv);

        if (async) {
                /* Using skb->sk to push sk through to crypto async callback
                 * handler. This allows propagating errors up to the socket
                 * if needed. It _must_ be cleared in the async handler
                 * before consume_skb is called. We _know_ skb->sk is NULL
                 * because it is a clone from strparser.
                 */
                skb->sk = sk;
                aead_request_set_callback(aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          tls_decrypt_done, skb);
                atomic_inc(&ctx->decrypt_pending);
        } else {
                aead_request_set_callback(aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &ctx->async_wait);
        }

        ret = crypto_aead_decrypt(aead_req);
        if (ret == -EINPROGRESS) {
                if (async)
                        return ret;

                ret = crypto_wait_req(ret, &ctx->async_wait);
        }

        if (async)
                atomic_dec(&ctx->decrypt_pending);

        return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;

        sk_msg_trim(sk, &rec->msg_plaintext, target_size);
        if (target_size > 0)
                target_size += prot->overhead_size;
        sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_en = &rec->msg_encrypted;

        return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_pl = &rec->msg_plaintext;
        struct sk_msg *msg_en = &rec->msg_encrypted;
        int skip, len;

        /* We add page references worth len bytes from encrypted sg
         * at the end of plaintext sg. It is guaranteed that msg_en
         * has enough required room (ensured by caller).
         */
        len = required - msg_pl->sg.size;

        /* Skip initial bytes in msg_en's data to be able to use
         * same offset of both plain and encrypted data.
         */
        skip = prot->prepend_size + msg_pl->sg.size;

        return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

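/* Allocate a fresh open record: the tls_rec itself (with trailing room
 * for the AEAD request) plus two-slot scatterlists whose first slot
 * holds the AAD. The second slot is chained to the plaintext and
 * ciphertext sg lists later, in tls_push_record().
 */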
static struct tls_rec *tls_get_rec(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct sk_msg *msg_pl, *msg_en;
        struct tls_rec *rec;
        int mem_size;

        mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

        rec = kzalloc(mem_size, sk->sk_allocation);
        if (!rec)
                return NULL;

        msg_pl = &rec->msg_plaintext;
        msg_en = &rec->msg_encrypted;

        sk_msg_init(msg_pl);
        sk_msg_init(msg_en);

        sg_init_table(rec->sg_aead_in, 2);
        sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
        sg_unmark_end(&rec->sg_aead_in[1]);

        sg_init_table(rec->sg_aead_out, 2);
        sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
        sg_unmark_end(&rec->sg_aead_out[1]);

        return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
        sk_msg_free(sk, &rec->msg_encrypted);
        sk_msg_free(sk, &rec->msg_plaintext);
        kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;

        if (rec) {
                tls_free_rec(sk, rec);
                ctx->open_rec = NULL;
        }
}

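/* Transmit encrypted records to TCP in order: finish any partially
 * sent record first, then push records from the head of tx_list whose
 * encryption has completed (tx_ready).
 */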
int tls_tx_records(struct sock *sk, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec, *tmp;
        struct sk_msg *msg_en;
        int tx_flags, rc = 0;

        if (tls_is_partially_sent_record(tls_ctx)) {
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);

                if (flags == -1)
                        tx_flags = rec->tx_flags;
                else
                        tx_flags = flags;

                rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
                if (rc)
                        goto tx_err;

                /* Full record has been transmitted.
                 * Remove the head of tx_list
                 */
                list_del(&rec->list);
                sk_msg_free(sk, &rec->msg_plaintext);
                kfree(rec);
        }

        /* Tx all ready records */
        list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
                if (READ_ONCE(rec->tx_ready)) {
                        if (flags == -1)
                                tx_flags = rec->tx_flags;
                        else
                                tx_flags = flags;

                        msg_en = &rec->msg_encrypted;
                        rc = tls_push_sg(sk, tls_ctx,
                                         &msg_en->sg.data[msg_en->sg.curr],
                                         0, tx_flags);
                        if (rc)
                                goto tx_err;

                        list_del(&rec->list);
                        sk_msg_free(sk, &rec->msg_plaintext);
                        kfree(rec);
                } else {
                        break;
                }
        }

tx_err:
        if (rc < 0 && rc != -EAGAIN)
                tls_err_abort(sk, -EBADMSG);

        return rc;
}

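/* Completion callback for an async encrypt. Re-extend the current sg
 * entry to cover the TLS header again, record any error on the socket,
 * mark the record ready for transmission and, if it now sits at the
 * head of tx_list, schedule the tx work to send it.
 */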
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
        struct aead_request *aead_req = (struct aead_request *)req;
        struct sock *sk = req->data;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct scatterlist *sge;
        struct sk_msg *msg_en;
        struct tls_rec *rec;
        bool ready = false;
        int pending;

        rec = container_of(aead_req, struct tls_rec, aead_req);
        msg_en = &rec->msg_encrypted;

        sge = sk_msg_elem(msg_en, msg_en->sg.curr);
        sge->offset -= prot->prepend_size;
        sge->length += prot->prepend_size;

        /* Check if error is previously set on socket */
        if (err || sk->sk_err) {
                rec = NULL;

                /* If err is already set on socket, return the same code */
                if (sk->sk_err) {
                        ctx->async_wait.err = -sk->sk_err;
                } else {
                        ctx->async_wait.err = err;
                        tls_err_abort(sk, err);
                }
        }

        if (rec) {
                struct tls_rec *first_rec;

                /* Mark the record as ready for transmission */
                smp_store_mb(rec->tx_ready, true);

                /* If received record is at head of tx_list, schedule tx */
                first_rec = list_first_entry(&ctx->tx_list,
                                             struct tls_rec, list);
                if (rec == first_rec)
                        ready = true;
        }

        spin_lock_bh(&ctx->encrypt_compl_lock);
        pending = atomic_dec_return(&ctx->encrypt_pending);

        if (!pending && ctx->async_notify)
                complete(&ctx->async_wait.completion);
        spin_unlock_bh(&ctx->encrypt_compl_lock);

        if (!ready)
                return;

        /* Schedule the transmission */
        if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                schedule_delayed_work(&ctx->tx_work.work, 1);
}

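/* Prepare the per-record nonce (salt plus implicit IV, XORed with the
 * record sequence number, with a leading B0 length byte for CCM),
 * queue the record on tx_list and submit the AEAD encrypt request.
 * Returns -EINPROGRESS when the cipher completes asynchronously.
 */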
static int tls_do_encryption(struct sock *sk,
                             struct tls_context *tls_ctx,
                             struct tls_sw_context_tx *ctx,
                             struct aead_request *aead_req,
                             size_t data_len, u32 start)
{
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_en = &rec->msg_encrypted;
        struct scatterlist *sge = sk_msg_elem(msg_en, start);
        int rc, iv_offset = 0;

        /* For CCM based ciphers, first byte of IV is a constant */
        if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
                rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
                iv_offset = 1;
        }

        memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
               prot->iv_size + prot->salt_size);

        xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);

        sge->offset += prot->prepend_size;
        sge->length -= prot->prepend_size;

        msg_en->sg.curr = start;

        aead_request_set_tfm(aead_req, ctx->aead_send);
        aead_request_set_ad(aead_req, prot->aad_size);
        aead_request_set_crypt(aead_req, rec->sg_aead_in,
                               rec->sg_aead_out,
                               data_len, rec->iv_data);

        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tls_encrypt_done, sk);

        /* Add the record in tx_list */
        list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
        atomic_inc(&ctx->encrypt_pending);

        rc = crypto_aead_encrypt(aead_req);
        if (!rc || rc != -EINPROGRESS) {
                atomic_dec(&ctx->encrypt_pending);
                sge->offset -= prot->prepend_size;
                sge->length += prot->prepend_size;
        }

        if (!rc) {
                WRITE_ONCE(rec->tx_ready, true);
        } else if (rc != -EINPROGRESS) {
                list_del(&rec->list);
                return rc;
        }

        /* Unhook the record from the context if encryption did not fail */
        ctx->open_rec = NULL;
        tls_advance_record_sn(sk, prot, &tls_ctx->tx);
        return rc;
}

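/* Split the open record at @split_point so that only the bytes a BPF
 * verdict applies to are pushed now. The boundary page is shared (an
 * extra page reference is taken) and the tail scatterlist entries are
 * moved to a newly allocated record returned through @to.
 */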
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
                                 struct tls_rec **to, struct sk_msg *msg_opl,
                                 struct sk_msg *msg_oen, u32 split_point,
                                 u32 tx_overhead_size, u32 *orig_end)
{
        u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
        struct scatterlist *sge, *osge, *nsge;
        u32 orig_size = msg_opl->sg.size;
        struct scatterlist tmp = { };
        struct sk_msg *msg_npl;
        struct tls_rec *new;
        int ret;

        new = tls_get_rec(sk);
        if (!new)
                return -ENOMEM;
        ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
                           tx_overhead_size, 0);
        if (ret < 0) {
                tls_free_rec(sk, new);
                return ret;
        }

        *orig_end = msg_opl->sg.end;
        i = msg_opl->sg.start;
        sge = sk_msg_elem(msg_opl, i);
        while (apply && sge->length) {
                if (sge->length > apply) {
                        u32 len = sge->length - apply;

                        get_page(sg_page(sge));
                        sg_set_page(&tmp, sg_page(sge), len,
                                    sge->offset + apply);
                        sge->length = apply;
                        bytes += apply;
                        apply = 0;
                } else {
                        apply -= sge->length;
                        bytes += sge->length;
                }

                sk_msg_iter_var_next(i);
                if (i == msg_opl->sg.end)
                        break;
                sge = sk_msg_elem(msg_opl, i);
        }

        msg_opl->sg.end = i;
        msg_opl->sg.curr = i;
        msg_opl->sg.copybreak = 0;
        msg_opl->apply_bytes = 0;
        msg_opl->sg.size = bytes;

        msg_npl = &new->msg_plaintext;
        msg_npl->apply_bytes = apply;
        msg_npl->sg.size = orig_size - bytes;

        j = msg_npl->sg.start;
        nsge = sk_msg_elem(msg_npl, j);
        if (tmp.length) {
                memcpy(nsge, &tmp, sizeof(*nsge));
                sk_msg_iter_var_next(j);
                nsge = sk_msg_elem(msg_npl, j);
        }

        osge = sk_msg_elem(msg_opl, i);
        while (osge->length) {
                memcpy(nsge, osge, sizeof(*nsge));
                sg_unmark_end(nsge);
                sk_msg_iter_var_next(i);
                sk_msg_iter_var_next(j);
                if (i == *orig_end)
                        break;
                osge = sk_msg_elem(msg_opl, i);
                nsge = sk_msg_elem(msg_npl, j);
        }

        msg_npl->sg.end = j;
        msg_npl->sg.curr = j;
        msg_npl->sg.copybreak = 0;

        *to = new;
        return 0;
}

static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
                                  struct tls_rec *from, u32 orig_end)
{
        struct sk_msg *msg_npl = &from->msg_plaintext;
        struct sk_msg *msg_opl = &to->msg_plaintext;
        struct scatterlist *osge, *nsge;
        u32 i, j;

        i = msg_opl->sg.end;
        sk_msg_iter_var_prev(i);
        j = msg_npl->sg.start;

        osge = sk_msg_elem(msg_opl, i);
        nsge = sk_msg_elem(msg_npl, j);

        if (sg_page(osge) == sg_page(nsge) &&
            osge->offset + osge->length == nsge->offset) {
                osge->length += nsge->length;
                put_page(sg_page(nsge));
        }

        msg_opl->sg.end = orig_end;
        msg_opl->sg.curr = orig_end;
        msg_opl->sg.copybreak = 0;
        msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
        msg_opl->sg.size += msg_npl->sg.size;

        sk_msg_free(sk, &to->msg_encrypted);
        sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

        kfree(from);
}

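/* Close the open record and encrypt it: split it first if only part of
 * it should be sent or if the encrypted buffer came up short, chain
 * AAD + plaintext (+ the TLS 1.3 content type byte) into the AEAD
 * scatterlists, write the AAD and record header, then kick off
 * encryption and transmit whatever is ready.
 */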
static int tls_push_record(struct sock *sk, int flags,
                           unsigned char record_type)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
        u32 i, split_point, orig_end;
        struct sk_msg *msg_pl, *msg_en;
        struct aead_request *req;
        bool split;
        int rc;

        if (!rec)
                return 0;

        msg_pl = &rec->msg_plaintext;
        msg_en = &rec->msg_encrypted;

        split_point = msg_pl->apply_bytes;
        split = split_point && split_point < msg_pl->sg.size;
        if (unlikely((!split &&
                      msg_pl->sg.size +
                      prot->overhead_size > msg_en->sg.size) ||
                     (split &&
                      split_point +
                      prot->overhead_size > msg_en->sg.size))) {
                split = true;
                split_point = msg_en->sg.size;
        }
        if (split) {
                rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
                                           split_point, prot->overhead_size,
                                           &orig_end);
                if (rc < 0)
                        return rc;
                /* This can happen if above tls_split_open_record allocates
                 * a single large encryption buffer instead of two smaller
                 * ones. In this case adjust pointers and continue without
                 * split.
                 */
                if (!msg_pl->sg.size) {
                        tls_merge_open_record(sk, rec, tmp, orig_end);
                        msg_pl = &rec->msg_plaintext;
                        msg_en = &rec->msg_encrypted;
                        split = false;
                }
                sk_msg_trim(sk, msg_en, msg_pl->sg.size +
                            prot->overhead_size);
        }

        rec->tx_flags = flags;
        req = &rec->aead_req;

        i = msg_pl->sg.end;
        sk_msg_iter_var_prev(i);

        rec->content_type = record_type;
        if (prot->version == TLS_1_3_VERSION) {
                /* Add content type to end of message. No padding added */
                sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
                sg_mark_end(&rec->sg_content_type);
                sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
                         &rec->sg_content_type);
        } else {
                sg_mark_end(sk_msg_elem(msg_pl, i));
        }

        if (msg_pl->sg.end < msg_pl->sg.start) {
                sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
                         MAX_SKB_FRAGS - msg_pl->sg.start + 1,
                         msg_pl->sg.data);
        }

        i = msg_pl->sg.start;
        sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

        i = msg_en->sg.end;
        sk_msg_iter_var_prev(i);
        sg_mark_end(sk_msg_elem(msg_en, i));

        i = msg_en->sg.start;
        sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

        tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
                     tls_ctx->tx.rec_seq, record_type, prot);

        tls_fill_prepend(tls_ctx,
                         page_address(sg_page(&msg_en->sg.data[i])) +
                         msg_en->sg.data[i].offset,
                         msg_pl->sg.size + prot->tail_size,
                         record_type);

        tls_ctx->pending_open_record_frags = false;

        rc = tls_do_encryption(sk, tls_ctx, ctx, req,
                               msg_pl->sg.size + prot->tail_size, i);
        if (rc < 0) {
                if (rc != -EINPROGRESS) {
                        tls_err_abort(sk, -EBADMSG);
                        if (split) {
                                tls_ctx->pending_open_record_frags = true;
                                tls_merge_open_record(sk, rec, tmp, orig_end);
                        }
                }
                ctx->async_capable = 1;
                return rc;
        } else if (split) {
                msg_pl = &tmp->msg_plaintext;
                msg_en = &tmp->msg_encrypted;
                sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
                tls_ctx->pending_open_record_frags = true;
                ctx->open_rec = tmp;
        }

        return tls_tx_records(sk, flags);
}

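/* Run the sockmap verdict program, if any, on the open record and act
 * on its result: __SK_PASS encrypts and sends as usual, __SK_REDIRECT
 * hands the plaintext to another socket via tcp_bpf_sendmsg_redir(),
 * and __SK_DROP frees it. Without a psock (or with
 * MSG_SENDPAGE_NOPOLICY) the record is simply pushed.
 */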
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
                               bool full_record, u8 record_type,
                               ssize_t *copied, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct sk_msg msg_redir = { };
        struct sk_psock *psock;
        struct sock *sk_redir;
        struct tls_rec *rec;
        bool enospc, policy;
        int err = 0, send;
        u32 delta = 0;

        policy = !(flags & MSG_SENDPAGE_NOPOLICY);
        psock = sk_psock_get(sk);
        if (!psock || !policy) {
                err = tls_push_record(sk, flags, record_type);
                if (err && sk->sk_err == EBADMSG) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                        err = -sk->sk_err;
                }
                if (psock)
                        sk_psock_put(sk, psock);
                return err;
        }
more_data:
        enospc = sk_msg_full(msg);
        if (psock->eval == __SK_NONE) {
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
                delta -= msg->sg.size;
        }
        if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
            !enospc && !full_record) {
                err = -ENOSPC;
                goto out_err;
        }
        msg->cork_bytes = 0;
        send = msg->sg.size;
        if (msg->apply_bytes && msg->apply_bytes < send)
                send = msg->apply_bytes;

        switch (psock->eval) {
        case __SK_PASS:
                err = tls_push_record(sk, flags, record_type);
                if (err && sk->sk_err == EBADMSG) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                        err = -sk->sk_err;
                        goto out_err;
                }
                break;
        case __SK_REDIRECT:
                sk_redir = psock->sk_redir;
                memcpy(&msg_redir, msg, sizeof(*msg));
                if (msg->apply_bytes < send)
                        msg->apply_bytes = 0;
                else
                        msg->apply_bytes -= send;
                sk_msg_return_zero(sk, msg, send);
                msg->sg.size -= send;
                release_sock(sk);
                err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
                lock_sock(sk);
                if (err < 0) {
                        *copied -= sk_msg_free_nocharge(sk, &msg_redir);
                        msg->sg.size = 0;
                }
                if (msg->sg.size == 0)
                        tls_free_open_rec(sk);
                break;
        case __SK_DROP:
        default:
                sk_msg_free_partial(sk, msg, send);
                if (msg->apply_bytes < send)
                        msg->apply_bytes = 0;
                else
                        msg->apply_bytes -= send;
                if (msg->sg.size == 0)
                        tls_free_open_rec(sk);
                *copied -= (send + delta);
                err = -EACCES;
        }

        if (likely(!err)) {
                bool reset_eval = !ctx->open_rec;

                rec = ctx->open_rec;
                if (rec) {
                        msg = &rec->msg_plaintext;
                        if (!msg->apply_bytes)
                                reset_eval = true;
                }
                if (reset_eval) {
                        psock->eval = __SK_NONE;
                        if (psock->sk_redir) {
                                sock_put(psock->sk_redir);
                                psock->sk_redir = NULL;
                        }
                }
                if (rec)
                        goto more_data;
        }
out_err:
        sk_psock_put(sk, psock);
        return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_pl;
        size_t copied;

        if (!rec)
                return 0;

        msg_pl = &rec->msg_plaintext;
        copied = msg_pl->sg.size;
        if (!copied)
                return 0;

        return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
                                   &copied, flags);
}

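/* sendmsg() handler for a TLS_SW transmit socket. Data is copied (or,
 * when possible, zero-copy mapped) into the open record, which is
 * encrypted and pushed once full or at end-of-record (no MSG_MORE).
 * A non-data record type can be requested from userspace via a control
 * message; a rough sketch of such a caller, error handling omitted:
 *
 *     char buf[CMSG_SPACE(1)];
 *     struct iovec iov = { .iov_base = payload, .iov_len = plen };
 *     struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *                         .msg_control = buf,
 *                         .msg_controllen = sizeof(buf) };
 *     struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *     c->cmsg_level = SOL_TLS;
 *     c->cmsg_type = TLS_SET_RECORD_TYPE;
 *     c->cmsg_len = CMSG_LEN(1);
 *     *CMSG_DATA(c) = 21;     (21 == alert)
 *     sendmsg(fd, &m, 0);
 */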
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        bool async_capable = ctx->async_capable;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        bool eor = !(msg->msg_flags & MSG_MORE);
        size_t try_to_copy;
        ssize_t copied = 0;
        struct sk_msg *msg_pl, *msg_en;
        struct tls_rec *rec;
        int required_size;
        int num_async = 0;
        bool full_record;
        int record_room;
        int num_zc = 0;
        int orig_size;
        int ret = 0;
        int pending;

        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                               MSG_CMSG_COMPAT))
                return -EOPNOTSUPP;

        mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);

        if (unlikely(msg->msg_controllen)) {
                ret = tls_proccess_cmsg(sk, msg, &record_type);
                if (ret) {
                        if (ret == -EINPROGRESS)
                                num_async++;
                        else if (ret != -EAGAIN)
                                goto send_end;
                }
        }

        while (msg_data_left(msg)) {
                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto send_end;
                }

                if (ctx->open_rec)
                        rec = ctx->open_rec;
                else
                        rec = ctx->open_rec = tls_get_rec(sk);
                if (!rec) {
                        ret = -ENOMEM;
                        goto send_end;
                }

                msg_pl = &rec->msg_plaintext;
                msg_en = &rec->msg_encrypted;

                orig_size = msg_pl->sg.size;
                full_record = false;
                try_to_copy = msg_data_left(msg);
                record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
                if (try_to_copy >= record_room) {
                        try_to_copy = record_room;
                        full_record = true;
                }

                required_size = msg_pl->sg.size + try_to_copy +
                                prot->overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;

alloc_encrypted:
                ret = tls_alloc_encrypted_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        try_to_copy -= required_size - msg_en->sg.size;
                        full_record = true;
                }

                if (!is_kvec && (full_record || eor) && !async_capable) {
                        u32 first = msg_pl->sg.end;

                        ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
                                                        msg_pl, try_to_copy);
                        if (ret)
                                goto fallback_to_reg_send;

                        num_zc++;
                        copied += try_to_copy;

                        sk_msg_sg_copy_set(msg_pl, first);
                        ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
                                                  record_type, &copied,
                                                  msg->msg_flags);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret == -ENOMEM)
                                        goto wait_for_memory;
                                else if (ctx->open_rec && ret == -ENOSPC)
                                        goto rollback_iter;
                                else if (ret != -EAGAIN)
                                        goto send_end;
                        }
                        continue;
rollback_iter:
                        copied -= try_to_copy;
                        sk_msg_sg_copy_clear(msg_pl, first);
                        iov_iter_revert(&msg->msg_iter,
                                        msg_pl->sg.size - orig_size);
fallback_to_reg_send:
                        sk_msg_trim(sk, msg_pl, orig_size);
                }

                required_size = msg_pl->sg.size + try_to_copy;

                ret = tls_clone_plaintext_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto send_end;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        try_to_copy -= required_size - msg_pl->sg.size;
                        full_record = true;
                        sk_msg_trim(sk, msg_en,
                                    msg_pl->sg.size + prot->overhead_size);
                }

                if (try_to_copy) {
                        ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
                                                       msg_pl, try_to_copy);
                        if (ret < 0)
                                goto trim_sgl;
                }

                /* Open records defined only if successfully copied, otherwise
                 * we would trim the sg but not reset the open record frags.
                 */
                tls_ctx->pending_open_record_frags = true;
                copied += try_to_copy;
                if (full_record || eor) {
                        ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
                                                  record_type, &copied,
                                                  msg->msg_flags);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret == -ENOMEM)
                                        goto wait_for_memory;
                                else if (ret != -EAGAIN) {
                                        if (ret == -ENOSPC)
                                                ret = 0;
                                        goto send_end;
                                }
                        }
                }

                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
trim_sgl:
                        if (ctx->open_rec)
                                tls_trim_both_msgs(sk, orig_size);
                        goto send_end;
                }

                if (ctx->open_rec && msg_en->sg.size < required_size)
                        goto alloc_encrypted;
        }

        if (!num_async) {
                goto send_end;
        } else if (num_zc) {
                /* Wait for pending encryptions to get completed */
                spin_lock_bh(&ctx->encrypt_compl_lock);
                ctx->async_notify = true;

                pending = atomic_read(&ctx->encrypt_pending);
                spin_unlock_bh(&ctx->encrypt_compl_lock);
                if (pending)
                        crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
                else
                        reinit_completion(&ctx->async_wait.completion);

                /* There can be no concurrent accesses, since we have no
                 * pending encrypt operations
                 */
                WRITE_ONCE(ctx->async_notify, false);

                if (ctx->async_wait.err) {
                        ret = ctx->async_wait.err;
                        copied = 0;
                }
        }

        /* Transmit if any encryptions have completed */
        if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
                cancel_delayed_work(&ctx->tx_work.work);
                tls_tx_records(sk, msg->msg_flags);
        }

send_end:
        ret = sk_stream_error(sk, msg->msg_flags, ret);

        release_sock(sk);
        mutex_unlock(&tls_ctx->tx_lock);
        return copied > 0 ? copied : ret;
}

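/* Common sendpage path: append page fragments to the open record and
 * push it once it is full, once the scatterlist fills up, or on the
 * last page (no MSG_SENDPAGE_NOTLAST).
 */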
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
                              int offset, size_t size, int flags)
{
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        struct sk_msg *msg_pl;
        struct tls_rec *rec;
        int num_async = 0;
        ssize_t copied = 0;
        bool full_record;
        int record_room;
        int ret = 0;
        bool eor;

        eor = !(flags & MSG_SENDPAGE_NOTLAST);
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        /* Call the sk_stream functions to manage the sndbuf mem. */
        while (size > 0) {
                size_t copy, required_size;

                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto sendpage_end;
                }

                if (ctx->open_rec)
                        rec = ctx->open_rec;
                else
                        rec = ctx->open_rec = tls_get_rec(sk);
                if (!rec) {
                        ret = -ENOMEM;
                        goto sendpage_end;
                }

                msg_pl = &rec->msg_plaintext;

                full_record = false;
                record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
                copy = size;
                if (copy >= record_room) {
                        copy = record_room;
                        full_record = true;
                }

                required_size = msg_pl->sg.size + copy + prot->overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
alloc_payload:
                ret = tls_alloc_encrypted_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        copy -= required_size - msg_pl->sg.size;
                        full_record = true;
                }

                sk_msg_page_add(msg_pl, page, copy, offset);
                sk_mem_charge(sk, copy);

                offset += copy;
                size -= copy;
                copied += copy;

                tls_ctx->pending_open_record_frags = true;
                if (full_record || eor || sk_msg_full(msg_pl)) {
                        ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
                                                  record_type, &copied, flags);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret == -ENOMEM)
                                        goto wait_for_memory;
                                else if (ret != -EAGAIN) {
                                        if (ret == -ENOSPC)
                                                ret = 0;
                                        goto sendpage_end;
                                }
                        }
                }
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
                        if (ctx->open_rec)
                                tls_trim_both_msgs(sk, msg_pl->sg.size);
                        goto sendpage_end;
                }

                if (ctx->open_rec)
                        goto alloc_payload;
        }

        if (num_async) {
                /* Transmit if any encryptions have completed */
                if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
                        cancel_delayed_work(&ctx->tx_work.work);
                        tls_tx_records(sk, flags);
                }
        }
sendpage_end:
        ret = sk_stream_error(sk, flags, ret);
        return copied > 0 ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
                           int offset, size_t size, int flags)
{
        if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
                      MSG_NO_SHARED_FRAGS))
                return -EOPNOTSUPP;

        return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
                    int offset, size_t size, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        int ret;

        if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
                return -EOPNOTSUPP;

        mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);
        ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
        release_sock(sk);
        mutex_unlock(&tls_ctx->tx_lock);
        return ret;
}

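/* Block until the stream parser has produced a complete record
 * (ctx->recv_pkt) or the psock ingress queue has data. Returns NULL
 * with *err set on socket error, -EAGAIN or a pending signal; returns
 * NULL without touching *err on shutdown or SOCK_DONE.
 */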
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
                                     bool nonblock, long timeo, int *err)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_buff *skb;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
                if (sk->sk_err) {
                        *err = sock_error(sk);
                        return NULL;
                }

                if (!skb_queue_empty(&sk->sk_receive_queue)) {
                        __strp_unpause(&ctx->strp);
                        if (ctx->recv_pkt)
                                return ctx->recv_pkt;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return NULL;

                if (sock_flag(sk, SOCK_DONE))
                        return NULL;

                if (nonblock || !timeo) {
                        *err = -EAGAIN;
                        return NULL;
                }

                add_wait_queue(sk_sleep(sk), &wait);
                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                sk_wait_event(sk, &timeo,
                              ctx->recv_pkt != skb ||
                              !sk_psock_queue_empty(psock),
                              &wait);
                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                remove_wait_queue(sk_sleep(sk), &wait);

                /* Handle signals */
                if (signal_pending(current)) {
                        *err = sock_intr_errno(timeo);
                        return NULL;
                }
        }

        return skb;
}

static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
                               int length, int *pages_used,
                               unsigned int *size_used,
                               struct scatterlist *to,
                               int to_max_pages)
{
        int rc = 0, i = 0, num_elem = *pages_used, maxpages;
        struct page *pages[MAX_SKB_FRAGS];
        unsigned int size = *size_used;
        ssize_t copied, use;
        size_t offset;

        while (length > 0) {
                i = 0;
                maxpages = to_max_pages - num_elem;
                if (maxpages == 0) {
                        rc = -EFAULT;
                        goto out;
                }
                copied = iov_iter_get_pages(from, pages,
                                            length,
                                            maxpages, &offset);
                if (copied <= 0) {
                        rc = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);

                length -= copied;
                size += copied;
                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);

                        sg_set_page(&to[num_elem],
                                    pages[i], use, offset);
                        sg_unmark_end(&to[num_elem]);
                        /* We do not uncharge memory from this API */

                        offset = 0;
                        copied -= use;

                        i++;
                        num_elem++;
                }
        }
        /* Mark the end in the last sg entry if newly added */
        if (num_elem > *pages_used)
                sg_mark_end(&to[num_elem - 1]);
out:
        if (rc)
                iov_iter_revert(from, size - *size_used);
        *size_used = size;
        *pages_used = num_elem;

        return rc;
}

/* This function decrypts the input skb into either out_iov or out_sg,
 * or into the skb buffers themselves. The input parameter 'zc' indicates
 * whether zero-copy mode should be tried. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, the decryption happens inside the skb buffers themselves, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
                            struct iov_iter *out_iov,
                            struct scatterlist *out_sg,
                            int *chunk, bool *zc, bool async)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct strp_msg *rxm = strp_msg(skb);
        int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
        struct aead_request *aead_req;
        struct sk_buff *unused;
        u8 *aad, *iv, *mem = NULL;
        struct scatterlist *sgin = NULL;
        struct scatterlist *sgout = NULL;
        const int data_len = rxm->full_len - prot->overhead_size +
                             prot->tail_size;
        int iv_offset = 0;

        if (*zc && (out_iov || out_sg)) {
                if (out_iov)
                        n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
                else
                        n_sgout = sg_nents(out_sg);
                n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
                                 rxm->full_len - prot->prepend_size);
        } else {
                n_sgout = 0;
                *zc = false;
                n_sgin = skb_cow_data(skb, 0, &unused);
        }

        if (n_sgin < 1)
                return -EBADMSG;

        /* Increment to accommodate AAD */
        n_sgin = n_sgin + 1;

        nsg = n_sgin + n_sgout;

        aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
        mem_size = aead_size + (nsg * sizeof(struct scatterlist));
        mem_size = mem_size + prot->aad_size;
        mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

        /* Allocate a single block of memory which contains
         * aead_req || sgin[] || sgout[] || aad || iv.
         * This order achieves correct alignment for aead_req, sgin, sgout.
         */
        mem = kmalloc(mem_size, sk->sk_allocation);
        if (!mem)
                return -ENOMEM;

        /* Segment the allocated memory */
        aead_req = (struct aead_request *)mem;
        sgin = (struct scatterlist *)(mem + aead_size);
        sgout = sgin + n_sgin;
        aad = (u8 *)(sgout + n_sgout);
        iv = aad + prot->aad_size;

        /* For CCM based ciphers, first byte of nonce+iv is always '2' */
        if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
                iv[0] = 2;
                iv_offset = 1;
        }

        /* Prepare IV */
        err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
                            iv + iv_offset + prot->salt_size,
                            prot->iv_size);
        if (err < 0) {
                kfree(mem);
                return err;
        }
        if (prot->version == TLS_1_3_VERSION ||
            prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
                memcpy(iv + iv_offset, tls_ctx->rx.iv,
                       crypto_aead_ivsize(ctx->aead_recv));
        else
                memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

        xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);

        /* Prepare AAD */
        tls_make_aad(aad, rxm->full_len - prot->overhead_size +
                     prot->tail_size,
                     tls_ctx->rx.rec_seq, ctx->control, prot);

        /* Prepare sgin */
        sg_init_table(sgin, n_sgin);
        sg_set_buf(&sgin[0], aad, prot->aad_size);
        err = skb_to_sgvec(skb, &sgin[1],
                           rxm->offset + prot->prepend_size,
                           rxm->full_len - prot->prepend_size);
        if (err < 0) {
                kfree(mem);
                return err;
        }

        if (n_sgout) {
                if (out_iov) {
                        sg_init_table(sgout, n_sgout);
                        sg_set_buf(&sgout[0], aad, prot->aad_size);

                        *chunk = 0;
                        err = tls_setup_from_iter(sk, out_iov, data_len,
                                                  &pages, chunk, &sgout[1],
                                                  (n_sgout - 1));
                        if (err < 0)
                                goto fallback_to_reg_recv;
                } else if (out_sg) {
                        memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
                } else {
                        goto fallback_to_reg_recv;
                }
        } else {
fallback_to_reg_recv:
                sgout = sgin;
                pages = 0;
                *chunk = data_len;
                *zc = false;
        }

        /* Prepare and submit AEAD request */
        err = tls_do_decryption(sk, skb, sgin, sgout, iv,
                                data_len, aead_req, async);
        if (err == -EINPROGRESS)
                return err;

        /* Release the pages in case iov was mapped to pages */
        for (; pages > 0; pages--)
                put_page(sg_page(&sgout[pages]));

        kfree(mem);
        return err;
}

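/* Decrypt the next record unless the device offload path already did:
 * for TLS_HW sockets try tls_device_decrypted() first, fall back to
 * decrypt_internal(), then strip the padding and record header and
 * advance the expected record sequence number.
 */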
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
                              struct iov_iter *dest, int *chunk, bool *zc,
                              bool async)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct strp_msg *rxm = strp_msg(skb);
        int pad, err = 0;

        if (!ctx->decrypted) {
                if (tls_ctx->rx_conf == TLS_HW) {
                        err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
                        if (err < 0)
                                return err;
                }

                /* Still not decrypted after tls_device */
                if (!ctx->decrypted) {
                        err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
                                               async);
                        if (err < 0) {
                                if (err == -EINPROGRESS)
                                        tls_advance_record_sn(sk, prot,
                                                              &tls_ctx->rx);
                                else if (err == -EBADMSG)
                                        TLS_INC_STATS(sock_net(sk),
                                                      LINUX_MIB_TLSDECRYPTERROR);
                                return err;
                        }
                } else {
                        *zc = false;
                }

                pad = padding_length(ctx, prot, skb);
                if (pad < 0)
                        return pad;

                rxm->full_len -= pad;
                rxm->offset += prot->prepend_size;
                rxm->full_len -= prot->overhead_size;
                tls_advance_record_sn(sk, prot, &tls_ctx->rx);
                ctx->decrypted = 1;
                ctx->saved_data_ready(sk);
        } else {
                *zc = false;
        }

        return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
                struct scatterlist *sgout)
{
        bool zc = true;
        int chunk;

        return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
                               unsigned int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        if (skb) {
                struct strp_msg *rxm = strp_msg(skb);

                if (len < rxm->full_len) {
                        rxm->offset += len;
                        rxm->full_len -= len;
                        return false;
                }
                consume_skb(skb);
        }

        /* Finished with message */
        ctx->recv_pkt = NULL;
        __strp_unpause(&ctx->strp);

        return true;
}

/* This function traverses the rx_list in the TLS receive context to copy
 * the decrypted records into the buffer provided by the caller when zero
 * copy is not in effect. Further, the records are removed from the rx_list
 * if it is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
                           struct msghdr *msg,
                           u8 *control,
                           bool *cmsg,
                           size_t skip,
                           size_t len,
                           bool zc,
                           bool is_peek)
{
        struct sk_buff *skb = skb_peek(&ctx->rx_list);
        u8 ctrl = *control;
        u8 msgc = *cmsg;
        struct tls_msg *tlm;
        ssize_t copied = 0;

        /* Set the record type in 'control' if caller didn't pass it */
        if (!ctrl && skb) {
                tlm = tls_msg(skb);
                ctrl = tlm->control;
        }

        while (skip && skb) {
                struct strp_msg *rxm = strp_msg(skb);
                tlm = tls_msg(skb);

                /* Cannot process a record of different type */
                if (ctrl != tlm->control)
                        return 0;

                if (skip < rxm->full_len)
                        break;

                skip = skip - rxm->full_len;
                skb = skb_peek_next(skb, &ctx->rx_list);
        }

        while (len && skb) {
                struct sk_buff *next_skb;
                struct strp_msg *rxm = strp_msg(skb);
                int chunk = min_t(unsigned int, rxm->full_len - skip, len);

                tlm = tls_msg(skb);

                /* Cannot process a record of different type */
                if (ctrl != tlm->control)
                        return 0;

                /* Set record type if not already done. For a non-data record,
                 * do not proceed if record type could not be copied.
                 */
                if (!msgc) {
                        int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
                                            sizeof(ctrl), &ctrl);
                        msgc = true;
                        if (ctrl != TLS_RECORD_TYPE_DATA) {
                                if (cerr || msg->msg_flags & MSG_CTRUNC)
                                        return -EIO;

                                *cmsg = msgc;
                        }
                }

                if (!zc || (rxm->full_len - skip) > len) {
                        int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
                                                        msg, chunk);
                        if (err < 0)
                                return err;
                }

                len = len - chunk;
                copied = copied + chunk;

                /* Consume the data from the record in the non-peek case */
                if (!is_peek) {
                        rxm->offset = rxm->offset + chunk;
                        rxm->full_len = rxm->full_len - chunk;

                        /* Return if there is unconsumed data in the record */
                        if (rxm->full_len - skip)
                                break;
                }

                /* The remaining skip-bytes must lie in 1st record in rx_list.
                 * So from the 2nd record, 'skip' should be 0.
                 */
                skip = 0;

                if (msg)
                        msg->msg_flags |= MSG_EOR;

                next_skb = skb_peek_next(skb, &ctx->rx_list);

                if (!is_peek) {
                        skb_unlink(skb, &ctx->rx_list);
                        consume_skb(skb);
                }

                skb = next_skb;
        }

        *control = ctrl;
        return copied;
}

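/* recvmsg() handler for a TLS_SW receive socket. Already-decrypted
 * records are drained from rx_list first; further records are then
 * decrypted as they leave the stream parser - straight into the user
 * iov (zero-copy) when a whole TLS 1.2 data record fits - with skbs
 * queued back onto rx_list for MSG_PEEK and async completion.
 */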
tls_sw_recvmsg(struct sock * sk,struct msghdr * msg,size_t len,int nonblock,int flags,int * addr_len)1739 int tls_sw_recvmsg(struct sock *sk,
1740 struct msghdr *msg,
1741 size_t len,
1742 int nonblock,
1743 int flags,
1744 int *addr_len)
1745 {
1746 struct tls_context *tls_ctx = tls_get_ctx(sk);
1747 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1748 struct tls_prot_info *prot = &tls_ctx->prot_info;
1749 struct sk_psock *psock;
1750 unsigned char control = 0;
1751 ssize_t decrypted = 0;
1752 struct strp_msg *rxm;
1753 struct tls_msg *tlm;
1754 struct sk_buff *skb;
1755 ssize_t copied = 0;
1756 bool cmsg = false;
1757 int target, err = 0;
1758 long timeo;
1759 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1760 bool is_peek = flags & MSG_PEEK;
1761 bool bpf_strp_enabled;
1762 int num_async = 0;
1763 int pending;
1764
1765 flags |= nonblock;
1766
1767 if (unlikely(flags & MSG_ERRQUEUE))
1768 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1769
1770 psock = sk_psock_get(sk);
1771 lock_sock(sk);
1772 bpf_strp_enabled = sk_psock_strp_enabled(psock);
1773
1774 /* Process pending decrypted records. It must be non-zero-copy */
1775 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1776 is_peek);
1777 if (err < 0) {
1778 tls_err_abort(sk, err);
1779 goto end;
1780 } else {
1781 copied = err;
1782 }
1783
1784 if (len <= copied)
1785 goto recv_end;
1786
1787 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1788 len = len - copied;
1789 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1790
1791 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1792 bool retain_skb = false;
1793 bool zc = false;
1794 int to_decrypt;
1795 int chunk = 0;
1796 bool async_capable;
1797 bool async = false;
1798
1799 skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
1800 if (!skb) {
1801 if (psock) {
1802 int ret = sk_msg_recvmsg(sk, psock, msg, len,
1803 flags);
1804
1805 if (ret > 0) {
1806 decrypted += ret;
1807 len -= ret;
1808 continue;
1809 }
1810 }
1811 goto recv_end;
1812 } else {
1813 tlm = tls_msg(skb);
1814 if (prot->version == TLS_1_3_VERSION)
1815 tlm->control = 0;
1816 else
1817 tlm->control = ctx->control;
1818 }
1819
1820 rxm = strp_msg(skb);
1821
1822 to_decrypt = rxm->full_len - prot->overhead_size;
1823
1824 if (to_decrypt <= len && !is_kvec && !is_peek &&
1825 ctx->control == TLS_RECORD_TYPE_DATA &&
1826 prot->version != TLS_1_3_VERSION &&
1827 !bpf_strp_enabled)
1828 zc = true;
1829
1830 /* Do not use async mode if record is non-data */
1831 if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
1832 async_capable = ctx->async_capable;
1833 else
1834 async_capable = false;
1835
1836 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1837 &chunk, &zc, async_capable);
1838 if (err < 0 && err != -EINPROGRESS) {
1839 tls_err_abort(sk, -EBADMSG);
1840 goto recv_end;
1841 }
1842
1843 if (err == -EINPROGRESS) {
1844 async = true;
1845 num_async++;
1846 } else if (prot->version == TLS_1_3_VERSION) {
1847 tlm->control = ctx->control;
1848 }
1849
1850 /* If the type of records being processed is not known yet,
1851 * set it to record type just dequeued. If it is already known,
1852 * but does not match the record type just dequeued, go to end.
1853 * We always get record type here since for tls1.2, record type
1854 * is known just after record is dequeued from stream parser.
1855 * For tls1.3, we disable async.
1856 */
1857
1858 if (!control)
1859 control = tlm->control;
1860 else if (control != tlm->control)
1861 goto recv_end;
1862
1863 if (!cmsg) {
1864 int cerr;
1865
1866 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1867 sizeof(control), &control);
1868 cmsg = true;
1869 if (control != TLS_RECORD_TYPE_DATA) {
1870 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1871 err = -EIO;
1872 goto recv_end;
1873 }
1874 }
1875 }
1876
1877 if (async)
1878 goto pick_next_record;
1879
1880 if (!zc) {
1881 if (bpf_strp_enabled) {
1882 err = sk_psock_tls_strp_read(psock, skb);
1883 if (err != __SK_PASS) {
1884 rxm->offset = rxm->offset + rxm->full_len;
1885 rxm->full_len = 0;
1886 if (err == __SK_DROP)
1887 consume_skb(skb);
1888 ctx->recv_pkt = NULL;
1889 __strp_unpause(&ctx->strp);
1890 continue;
1891 }
1892 }
1893
1894 if (rxm->full_len > len) {
1895 retain_skb = true;
1896 chunk = len;
1897 } else {
1898 chunk = rxm->full_len;
1899 }
1900
1901 err = skb_copy_datagram_msg(skb, rxm->offset,
1902 msg, chunk);
1903 if (err < 0)
1904 goto recv_end;
1905
1906 if (!is_peek) {
1907 rxm->offset = rxm->offset + chunk;
1908 rxm->full_len = rxm->full_len - chunk;
1909 }
1910 }
1911
1912 pick_next_record:
1913 if (chunk > len)
1914 chunk = len;
1915
1916 decrypted += chunk;
1917 len -= chunk;
1918
1919 /* For async or peek case, queue the current skb */
1920 if (async || is_peek || retain_skb) {
1921 skb_queue_tail(&ctx->rx_list, skb);
1922 skb = NULL;
1923 }
1924
1925 if (tls_sw_advance_skb(sk, skb, chunk)) {
1926 /* Return full control message to
1927 * userspace before trying to parse
1928 * another message type
1929 */
1930 msg->msg_flags |= MSG_EOR;
1931 if (control != TLS_RECORD_TYPE_DATA)
1932 goto recv_end;
1933 } else {
1934 break;
1935 }
1936 }
1937
1938 recv_end:
1939 if (num_async) {
1940 /* Wait for all previously submitted records to be decrypted */
1941 spin_lock_bh(&ctx->decrypt_compl_lock);
1942 ctx->async_notify = true;
1943 pending = atomic_read(&ctx->decrypt_pending);
1944 spin_unlock_bh(&ctx->decrypt_compl_lock);
1945 if (pending) {
1946 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1947 if (err) {
1948 /* one of async decrypt failed */
1949 tls_err_abort(sk, err);
1950 copied = 0;
1951 decrypted = 0;
1952 goto end;
1953 }
1954 } else {
1955 reinit_completion(&ctx->async_wait.completion);
1956 }
1957
1958 /* There can be no concurrent accesses, since we have no
1959 * pending decrypt operations
1960 */
1961 WRITE_ONCE(ctx->async_notify, false);
1962
1963 /* Drain records from the rx_list & copy if required */
1964 if (is_peek || is_kvec)
1965 err = process_rx_list(ctx, msg, &control, &cmsg, copied,
1966 decrypted, false, is_peek);
1967 else
1968 err = process_rx_list(ctx, msg, &control, &cmsg, 0,
1969 decrypted, true, is_peek);
1970 if (err < 0) {
1971 tls_err_abort(sk, err);
1972 copied = 0;
1973 goto end;
1974 }
1975 }
1976
1977 copied += decrypted;
1978
1979 end:
1980 release_sock(sk);
1981 if (psock)
1982 sk_psock_put(sk, psock);
1983 return copied ? : err;
1984 }
1985
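/* Splice the payload of one decrypted record into a pipe. Control
 * (non-data) records cannot be spliced and fail with -EINVAL; at most
 * one record is consumed per call.
 */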
1986 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1987 struct pipe_inode_info *pipe,
1988 size_t len, unsigned int flags)
1989 {
1990 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1991 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1992 struct strp_msg *rxm = NULL;
1993 struct sock *sk = sock->sk;
1994 struct sk_buff *skb;
1995 ssize_t copied = 0;
1996 int err = 0;
1997 long timeo;
1998 int chunk;
1999 bool zc = false;
2000
2001 lock_sock(sk);
2002
2003 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
2004
2005 skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
2006 if (!skb)
2007 goto splice_read_end;
2008
2009 if (!ctx->decrypted) {
2010 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
2011
2012 /* splice does not support reading control messages */
2013 if (ctx->control != TLS_RECORD_TYPE_DATA) {
2014 err = -EINVAL;
2015 goto splice_read_end;
2016 }
2017
2018 if (err < 0) {
2019 tls_err_abort(sk, -EBADMSG);
2020 goto splice_read_end;
2021 }
2022 ctx->decrypted = 1;
2023 }
2024 rxm = strp_msg(skb);
2025
2026 chunk = min_t(unsigned int, rxm->full_len, len);
2027 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2028 if (copied < 0)
2029 goto splice_read_end;
2030
2031 tls_sw_advance_skb(sk, skb, copied);
2032
2033 splice_read_end:
2034 release_sock(sk);
2035 return copied ? : err;
2036 }
2037
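/* Poll helper: the socket is readable if a BPF psock has queued
 * ingress data, a parsed record awaits decryption (recv_pkt), or
 * decrypted records remain on rx_list.
 */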
2038 bool tls_sw_sock_is_readable(struct sock *sk)
2039 {
2040 struct tls_context *tls_ctx = tls_get_ctx(sk);
2041 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2042 bool ingress_empty = true;
2043 struct sk_psock *psock;
2044
2045 rcu_read_lock();
2046 psock = sk_psock(sk);
2047 if (psock)
2048 ingress_empty = list_empty(&psock->ingress_msg);
2049 rcu_read_unlock();
2050
2051 return !ingress_empty || ctx->recv_pkt ||
2052 !skb_queue_empty(&ctx->rx_list);
2053 }
2054
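/* strparser parse_msg callback: peek at the TLS record header to
 * determine the full record length. Returns header + payload length,
 * 0 if the header has not fully arrived yet, or a negative error
 * (which also aborts the connection) for malformed records.
 */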
2055 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2056 {
2057 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2058 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2059 struct tls_prot_info *prot = &tls_ctx->prot_info;
2060 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2061 struct strp_msg *rxm = strp_msg(skb);
2062 size_t cipher_overhead;
2063 size_t data_len = 0;
2064 int ret;
2065
2066 /* Verify that we have a full TLS header, or wait for more data */
2067 if (rxm->offset + prot->prepend_size > skb->len)
2068 return 0;
2069
2070 /* Sanity-check size of on-stack buffer. */
2071 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2072 ret = -EINVAL;
2073 goto read_failure;
2074 }
2075
2076 /* Linearize header to local buffer */
2077 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2078
2079 if (ret < 0)
2080 goto read_failure;
2081
2082 ctx->control = header[0];
2083
2084 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2085
2086 cipher_overhead = prot->tag_size;
2087 if (prot->version != TLS_1_3_VERSION &&
2088 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2089 cipher_overhead += prot->iv_size;
2090
2091 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2092 prot->tail_size) {
2093 ret = -EMSGSIZE;
2094 goto read_failure;
2095 }
2096 if (data_len < cipher_overhead) {
2097 ret = -EBADMSG;
2098 goto read_failure;
2099 }
2100
2101 /* Note that both TLS 1.3 and TLS 1.2 carry the TLS 1.2 version number on the wire */
2102 if (header[1] != TLS_1_2_VERSION_MINOR ||
2103 header[2] != TLS_1_2_VERSION_MAJOR) {
2104 ret = -EINVAL;
2105 goto read_failure;
2106 }
2107
2108 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2109 TCP_SKB_CB(skb)->seq + rxm->offset);
2110 return data_len + TLS_HEADER_SIZE;
2111
2112 read_failure:
2113 tls_err_abort(strp->sk, ret);
2114
2115 return ret;
2116 }
2117
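/* strparser rcv_msg callback: hold the newly parsed record in
 * ctx->recv_pkt, mark it as not yet decrypted, and pause the parser
 * until a reader consumes it; then wake the reader.
 */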
2118 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2119 {
2120 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2121 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2122
2123 ctx->decrypted = 0;
2124
2125 ctx->recv_pkt = skb;
2126 strp_pause(strp);
2127
2128 ctx->saved_data_ready(strp->sk);
2129 }
2130
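/* data_ready replacement installed by tls_sw_strparser_arm(): feed new
 * TCP data to the stream parser, and also wake the original callback
 * when a BPF psock has ingress messages queued.
 */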
2131 static void tls_data_ready(struct sock *sk)
2132 {
2133 struct tls_context *tls_ctx = tls_get_ctx(sk);
2134 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2135 struct sk_psock *psock;
2136
2137 strp_data_ready(&ctx->strp);
2138
2139 psock = sk_psock_get(sk);
2140 if (psock) {
2141 if (!list_empty(&psock->ingress_msg))
2142 ctx->saved_data_ready(sk);
2143 sk_psock_put(sk, psock);
2144 }
2145 }
2146
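/* Mark the TX path as closing so the transmit worker bails out, then
 * wait for any already-scheduled transmit work to finish.
 */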
2147 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2148 {
2149 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2150
2151 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2152 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2153 cancel_delayed_work_sync(&ctx->tx_work.work);
2154 }
2155
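/* TX teardown: wait for outstanding async encryptions, transmit what
 * is already encrypted, then free partially sent and unsent records
 * along with the send AEAD transform.
 */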
2156 void tls_sw_release_resources_tx(struct sock *sk)
2157 {
2158 struct tls_context *tls_ctx = tls_get_ctx(sk);
2159 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2160 struct tls_rec *rec, *tmp;
2161 int pending;
2162
2163 /* Wait for any pending async encryptions to complete */
2164 spin_lock_bh(&ctx->encrypt_compl_lock);
2165 ctx->async_notify = true;
2166 pending = atomic_read(&ctx->encrypt_pending);
2167 spin_unlock_bh(&ctx->encrypt_compl_lock);
2168
2169 if (pending)
2170 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2171
2172 tls_tx_records(sk, -1);
2173
2174 /* Free up unsent records in tx_list. First, free the partially
2175  * sent record, if there is one, at the head of tx_list.
2176  */
2177 if (tls_ctx->partially_sent_record) {
2178 tls_free_partial_record(sk, tls_ctx);
2179 rec = list_first_entry(&ctx->tx_list,
2180 struct tls_rec, list);
2181 list_del(&rec->list);
2182 sk_msg_free(sk, &rec->msg_plaintext);
2183 kfree(rec);
2184 }
2185
2186 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2187 list_del(&rec->list);
2188 sk_msg_free(sk, &rec->msg_encrypted);
2189 sk_msg_free(sk, &rec->msg_plaintext);
2190 kfree(rec);
2191 }
2192
2193 crypto_free_aead(ctx->aead_send);
2194 tls_free_open_rec(sk);
2195 }
2196
2197 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2198 {
2199 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2200
2201 kfree(ctx);
2202 }
2203
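/* RX teardown: free the IV and record-sequence buffers, drop pending
 * and queued records, free the receive AEAD transform, stop the
 * strparser and restore the original sk_data_ready if it was swapped.
 */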
2204 void tls_sw_release_resources_rx(struct sock *sk)
2205 {
2206 struct tls_context *tls_ctx = tls_get_ctx(sk);
2207 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2208
2209 kfree(tls_ctx->rx.rec_seq);
2210 kfree(tls_ctx->rx.iv);
2211
2212 if (ctx->aead_recv) {
2213 kfree_skb(ctx->recv_pkt);
2214 ctx->recv_pkt = NULL;
2215 skb_queue_purge(&ctx->rx_list);
2216 crypto_free_aead(ctx->aead_recv);
2217 strp_stop(&ctx->strp);
2218 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2219 * we still want to strp_stop(), but sk->sk_data_ready was
2220 * never swapped.
2221 */
2222 if (ctx->saved_data_ready) {
2223 write_lock_bh(&sk->sk_callback_lock);
2224 sk->sk_data_ready = ctx->saved_data_ready;
2225 write_unlock_bh(&sk->sk_callback_lock);
2226 }
2227 }
2228 }
2229
2230 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2231 {
2232 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2233
2234 strp_done(&ctx->strp);
2235 }
2236
2237 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2238 {
2239 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2240
2241 kfree(ctx);
2242 }
2243
2244 void tls_sw_free_resources_rx(struct sock *sk)
2245 {
2246 struct tls_context *tls_ctx = tls_get_ctx(sk);
2247
2248 tls_sw_release_resources_rx(sk);
2249 tls_sw_free_ctx_rx(tls_ctx);
2250 }
2251
2252 /* The work handler to transmit the encrypted records in tx_list */
2253 static void tx_work_handler(struct work_struct *work)
2254 {
2255 struct delayed_work *delayed_work = to_delayed_work(work);
2256 struct tx_work *tx_work = container_of(delayed_work,
2257 struct tx_work, work);
2258 struct sock *sk = tx_work->sk;
2259 struct tls_context *tls_ctx = tls_get_ctx(sk);
2260 struct tls_sw_context_tx *ctx;
2261
2262 if (unlikely(!tls_ctx))
2263 return;
2264
2265 ctx = tls_sw_ctx_tx(tls_ctx);
2266 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2267 return;
2268
2269 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2270 return;
2271 mutex_lock(&tls_ctx->tx_lock);
2272 lock_sock(sk);
2273 tls_tx_records(sk, -1);
2274 release_sock(sk);
2275 mutex_unlock(&tls_ctx->tx_lock);
2276 }
2277
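/* Called when transmit queue space frees up: if encrypted records are
 * already waiting in tx_list, schedule the transmit worker to push
 * them out.
 */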
2278 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2279 {
2280 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2281
2282 /* Schedule the transmission if the tx list is ready */
2283 if (is_tx_ready(tx_ctx) &&
2284 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2285 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2286 }
2287
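/* Install tls_data_ready() as the socket's data_ready callback and
 * have the strparser check for data that was queued before TLS was
 * enabled.
 */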
2288 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2289 {
2290 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2291
2292 write_lock_bh(&sk->sk_callback_lock);
2293 rx_ctx->saved_data_ready = sk->sk_data_ready;
2294 sk->sk_data_ready = tls_data_ready;
2295 write_unlock_bh(&sk->sk_callback_lock);
2296
2297 strp_check_rcv(&rx_ctx->strp);
2298 }
2299
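/* Set up the software crypto context for one direction (tx != 0 for
 * transmit). This validates the cipher parameters from setsockopt(),
 * builds the salt+IV buffer and a copy of the record sequence number,
 * allocates and keys the AEAD transform, and, for RX, initializes the
 * strparser callbacks.
 */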
2300 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2301 {
2302 struct tls_context *tls_ctx = tls_get_ctx(sk);
2303 struct tls_prot_info *prot = &tls_ctx->prot_info;
2304 struct tls_crypto_info *crypto_info;
2305 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2306 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2307 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2308 struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2309 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2310 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2311 struct cipher_context *cctx;
2312 struct crypto_aead **aead;
2313 struct strp_callbacks cb;
2314 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2315 struct crypto_tfm *tfm;
2316 char *iv, *rec_seq, *key, *salt, *cipher_name;
2317 size_t keysize;
2318 int rc = 0;
2319
2320 if (!ctx) {
2321 rc = -EINVAL;
2322 goto out;
2323 }
2324
2325 if (tx) {
2326 if (!ctx->priv_ctx_tx) {
2327 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2328 if (!sw_ctx_tx) {
2329 rc = -ENOMEM;
2330 goto out;
2331 }
2332 ctx->priv_ctx_tx = sw_ctx_tx;
2333 } else {
2334 sw_ctx_tx =
2335 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2336 }
2337 } else {
2338 if (!ctx->priv_ctx_rx) {
2339 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2340 if (!sw_ctx_rx) {
2341 rc = -ENOMEM;
2342 goto out;
2343 }
2344 ctx->priv_ctx_rx = sw_ctx_rx;
2345 } else {
2346 sw_ctx_rx =
2347 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2348 }
2349 }
2350
2351 if (tx) {
2352 crypto_init_wait(&sw_ctx_tx->async_wait);
2353 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2354 crypto_info = &ctx->crypto_send.info;
2355 cctx = &ctx->tx;
2356 aead = &sw_ctx_tx->aead_send;
2357 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2358 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2359 sw_ctx_tx->tx_work.sk = sk;
2360 } else {
2361 crypto_init_wait(&sw_ctx_rx->async_wait);
2362 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2363 crypto_info = &ctx->crypto_recv.info;
2364 cctx = &ctx->rx;
2365 skb_queue_head_init(&sw_ctx_rx->rx_list);
2366 aead = &sw_ctx_rx->aead_recv;
2367 }
2368
2369 switch (crypto_info->cipher_type) {
2370 case TLS_CIPHER_AES_GCM_128: {
2371 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2372 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2373 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2374 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2375 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2376 rec_seq =
2377 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2378 gcm_128_info =
2379 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
2380 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2381 key = gcm_128_info->key;
2382 salt = gcm_128_info->salt;
2383 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2384 cipher_name = "gcm(aes)";
2385 break;
2386 }
2387 case TLS_CIPHER_AES_GCM_256: {
2388 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2389 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2390 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2391 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2392 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2393 rec_seq =
2394 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2395 gcm_256_info =
2396 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2397 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2398 key = gcm_256_info->key;
2399 salt = gcm_256_info->salt;
2400 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2401 cipher_name = "gcm(aes)";
2402 break;
2403 }
2404 case TLS_CIPHER_AES_CCM_128: {
2405 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2406 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2407 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2408 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2409 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2410 rec_seq =
2411 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2412 ccm_128_info =
2413 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2414 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2415 key = ccm_128_info->key;
2416 salt = ccm_128_info->salt;
2417 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2418 cipher_name = "ccm(aes)";
2419 break;
2420 }
2421 case TLS_CIPHER_CHACHA20_POLY1305: {
2422 chacha20_poly1305_info = (void *)crypto_info;
2423 nonce_size = 0;
2424 tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2425 iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2426 iv = chacha20_poly1305_info->iv;
2427 rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2428 rec_seq = chacha20_poly1305_info->rec_seq;
2429 keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2430 key = chacha20_poly1305_info->key;
2431 salt = chacha20_poly1305_info->salt;
2432 salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2433 cipher_name = "rfc7539(chacha20,poly1305)";
2434 break;
2435 }
2436 default:
2437 rc = -EINVAL;
2438 goto free_priv;
2439 }
2440
2441 /* Sanity-check the sizes for stack allocations. */
2442 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2443 rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
2444 rc = -EINVAL;
2445 goto free_priv;
2446 }
2447
2448 if (crypto_info->version == TLS_1_3_VERSION) {
2449 nonce_size = 0;
2450 prot->aad_size = TLS_HEADER_SIZE;
2451 prot->tail_size = 1;
2452 } else {
2453 prot->aad_size = TLS_AAD_SPACE_SIZE;
2454 prot->tail_size = 0;
2455 }
2456
2457 prot->version = crypto_info->version;
2458 prot->cipher_type = crypto_info->cipher_type;
2459 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2460 prot->tag_size = tag_size;
2461 prot->overhead_size = prot->prepend_size +
2462 prot->tag_size + prot->tail_size;
2463 prot->iv_size = iv_size;
2464 prot->salt_size = salt_size;
2465 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2466 if (!cctx->iv) {
2467 rc = -ENOMEM;
2468 goto free_priv;
2469 }
2470 /* Note: 128 & 256 bit salt are the same size */
2471 prot->rec_seq_size = rec_seq_size;
2472 memcpy(cctx->iv, salt, salt_size);
2473 memcpy(cctx->iv + salt_size, iv, iv_size);
2474 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2475 if (!cctx->rec_seq) {
2476 rc = -ENOMEM;
2477 goto free_iv;
2478 }
2479
2480 if (!*aead) {
2481 *aead = crypto_alloc_aead(cipher_name, 0, 0);
2482 if (IS_ERR(*aead)) {
2483 rc = PTR_ERR(*aead);
2484 *aead = NULL;
2485 goto free_rec_seq;
2486 }
2487 }
2488
2489 ctx->push_pending_record = tls_sw_push_pending_record;
2490
2491 rc = crypto_aead_setkey(*aead, key, keysize);
2492
2493 if (rc)
2494 goto free_aead;
2495
2496 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2497 if (rc)
2498 goto free_aead;
2499
2500 if (sw_ctx_rx) {
2501 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2502
2503 if (crypto_info->version == TLS_1_3_VERSION)
2504 sw_ctx_rx->async_capable = 0;
2505 else
2506 sw_ctx_rx->async_capable =
2507 !!(tfm->__crt_alg->cra_flags &
2508 CRYPTO_ALG_ASYNC);
2509
2510 /* Set up strparser */
2511 memset(&cb, 0, sizeof(cb));
2512 cb.rcv_msg = tls_queue;
2513 cb.parse_msg = tls_read_size;
2514
2515 strp_init(&sw_ctx_rx->strp, sk, &cb);
2516 }
2517
2518 goto out;
2519
2520 free_aead:
2521 crypto_free_aead(*aead);
2522 *aead = NULL;
2523 free_rec_seq:
2524 kfree(cctx->rec_seq);
2525 cctx->rec_seq = NULL;
2526 free_iv:
2527 kfree(cctx->iv);
2528 cctx->iv = NULL;
2529 free_priv:
2530 if (tx) {
2531 kfree(ctx->priv_ctx_tx);
2532 ctx->priv_ctx_tx = NULL;
2533 } else {
2534 kfree(ctx->priv_ctx_rx);
2535 ctx->priv_ctx_rx = NULL;
2536 }
2537 out:
2538 return rc;
2539 }
2540