Lines Matching +full:key +full:- +full:release (crypto/af_alg.c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * af_alg: User-space algorithm interface
5 * This file provides the user-space API for algorithms.
15 #include <linux/key.h>
16 #include <linux/key-type.h>
25 #include <keys/user-type.h>
26 #include <keys/trusted-type.h>
27 #include <keys/encrypted-type.h>
45 const struct af_alg_type *type = ERR_PTR(-ENOENT); in alg_get_type()
50 if (strcmp(node->type->name, name)) in alg_get_type()
53 if (try_module_get(node->type->owner)) in alg_get_type()
54 type = node->type; in alg_get_type()
65 int err = -EEXIST; in af_alg_register_type()
69 if (!strcmp(node->type->name, type->name)) in af_alg_register_type()
74 err = -ENOMEM; in af_alg_register_type()
78 type->ops->owner = THIS_MODULE; in af_alg_register_type()
79 if (type->ops_nokey) in af_alg_register_type()
80 type->ops_nokey->owner = THIS_MODULE; in af_alg_register_type()
81 node->type = type; in af_alg_register_type()
82 list_add(&node->list, &alg_types); in af_alg_register_type()
95 int err = -ENOENT; in af_alg_unregister_type()
99 if (strcmp(node->type->name, type->name)) in af_alg_unregister_type()
102 list_del(&node->list); in af_alg_unregister_type()
118 type->release(private); in alg_do_release()
119 module_put(type->owner); in alg_do_release()
124 if (sock->sk) { in af_alg_release()
125 sock_put(sock->sk); in af_alg_release()
126 sock->sk = NULL; in af_alg_release()
135 unsigned int nokey = atomic_read(&ask->nokey_refcnt); in af_alg_release_parent()
137 sk = ask->parent; in af_alg_release_parent()
141 atomic_dec(&ask->nokey_refcnt); in af_alg_release_parent()
143 if (atomic_dec_and_test(&ask->refcnt)) in af_alg_release_parent()
151 struct sock *sk = sock->sk; in alg_bind()
158 if (sock->state == SS_CONNECTED) in alg_bind()
159 return -EINVAL; in alg_bind()
166 return -EINVAL; in alg_bind()
168 /* If caller uses non-allowed flag, return error. */ in alg_bind()
169 if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) in alg_bind()
170 return -EINVAL; in alg_bind()
172 sa->salg_type[sizeof(sa->salg_type) - 1] = 0; in alg_bind()
173 sa->salg_name[addr_len - sizeof(*sa) - 1] = 0; in alg_bind()
175 type = alg_get_type(sa->salg_type); in alg_bind()
176 if (PTR_ERR(type) == -ENOENT) { in alg_bind()
177 request_module("algif-%s", sa->salg_type); in alg_bind()
178 type = alg_get_type(sa->salg_type); in alg_bind()
184 private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); in alg_bind()
186 module_put(type->owner); in alg_bind()
190 err = -EBUSY; in alg_bind()
192 if (atomic_read(&ask->refcnt)) in alg_bind()
195 swap(ask->type, type); in alg_bind()
196 swap(ask->private, private); in alg_bind()
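For orientation, a minimal user-space sketch of the bind step that the alg_bind() lines above service; the "skcipher" type and the "cbc(aes)" algorithm are illustrative assumptions, and error handling is trimmed:

#include <sys/socket.h>
#include <linux/if_alg.h>

static int alg_bind_example(void)
{
	/* salg_type selects the af_alg_type ("skcipher", "hash", "aead", "rng");
	 * an unknown type triggers request_module("algif-%s") as seen above */
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfmfd < 0)
		return -1;
	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;
	return tfmfd;	/* the "tfm" socket later passed to accept(2) */
}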
211 const struct af_alg_type *type = ask->type; in alg_setkey()
212 u8 *key; in alg_setkey() local
215 key = sock_kmalloc(sk, keylen, GFP_KERNEL); in alg_setkey()
216 if (!key) in alg_setkey()
217 return -ENOMEM; in alg_setkey()
219 err = -EFAULT; in alg_setkey()
220 if (copy_from_sockptr(key, ukey, keylen)) in alg_setkey()
223 err = type->setkey(ask->private, key, keylen); in alg_setkey()
226 sock_kzfree_s(sk, key, keylen); in alg_setkey()
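A matching sketch for the alg_setkey() path just above, using a placeholder all-zero AES-128 key; tfmfd is the bound socket from the previous sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

static int alg_set_key_example(int tfmfd)
{
	static const unsigned char aes_key[16];	/* placeholder key bytes */

	/* ends up in alg_setkey(): copy_from_sockptr() into a sock_kmalloc()
	 * buffer, type->setkey(), then sock_kzfree_s() */
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, aes_key, sizeof(aes_key)) < 0) {
		perror("ALG_SET_KEY");
		return -1;
	}
	return 0;
}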
233 static const u8 *key_data_ptr_user(const struct key *key, in key_data_ptr_user() argument
238 ukp = user_key_payload_locked(key); in key_data_ptr_user()
240 return ERR_PTR(-EKEYREVOKED); in key_data_ptr_user()
242 *datalen = key->datalen; in key_data_ptr_user()
244 return ukp->data; in key_data_ptr_user()
247 static const u8 *key_data_ptr_encrypted(const struct key *key, in key_data_ptr_encrypted() argument
252 ekp = dereference_key_locked(key); in key_data_ptr_encrypted()
254 return ERR_PTR(-EKEYREVOKED); in key_data_ptr_encrypted()
256 *datalen = ekp->decrypted_datalen; in key_data_ptr_encrypted()
258 return ekp->decrypted_data; in key_data_ptr_encrypted()
261 static const u8 *key_data_ptr_trusted(const struct key *key, in key_data_ptr_trusted() argument
266 tkp = dereference_key_locked(key); in key_data_ptr_trusted()
268 return ERR_PTR(-EKEYREVOKED); in key_data_ptr_trusted()
270 *datalen = tkp->key_len; in key_data_ptr_trusted()
272 return tkp->key; in key_data_ptr_trusted()
275 static struct key *lookup_key(key_serial_t serial) in lookup_key()
289 const struct af_alg_type *type = ask->type; in alg_setkey_by_key_serial()
293 struct key *key; in alg_setkey_by_key_serial() local
298 return -EINVAL; in alg_setkey_by_key_serial()
301 return -EFAULT; in alg_setkey_by_key_serial()
303 key = lookup_key(serial); in alg_setkey_by_key_serial()
304 if (IS_ERR(key)) in alg_setkey_by_key_serial()
305 return PTR_ERR(key); in alg_setkey_by_key_serial()
307 down_read(&key->sem); in alg_setkey_by_key_serial()
309 ret = ERR_PTR(-ENOPROTOOPT); in alg_setkey_by_key_serial()
310 if (!strcmp(key->type->name, "user") || in alg_setkey_by_key_serial()
311 !strcmp(key->type->name, "logon")) { in alg_setkey_by_key_serial()
312 ret = key_data_ptr_user(key, &key_datalen); in alg_setkey_by_key_serial()
314 !strcmp(key->type->name, "encrypted")) { in alg_setkey_by_key_serial()
315 ret = key_data_ptr_encrypted(key, &key_datalen); in alg_setkey_by_key_serial()
317 !strcmp(key->type->name, "trusted")) { in alg_setkey_by_key_serial()
318 ret = key_data_ptr_trusted(key, &key_datalen); in alg_setkey_by_key_serial()
322 up_read(&key->sem); in alg_setkey_by_key_serial()
323 key_put(key); in alg_setkey_by_key_serial()
327 key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL); in alg_setkey_by_key_serial()
329 up_read(&key->sem); in alg_setkey_by_key_serial()
330 key_put(key); in alg_setkey_by_key_serial()
331 return -ENOMEM; in alg_setkey_by_key_serial()
336 up_read(&key->sem); in alg_setkey_by_key_serial()
337 key_put(key); in alg_setkey_by_key_serial()
339 err = type->setkey(ask->private, key_data, key_datalen); in alg_setkey_by_key_serial()
341 sock_kzfree_s(&ask->sk, key_data, key_datalen); in alg_setkey_by_key_serial()
352 return -ENOPROTOOPT; in alg_setkey_by_key_serial()
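With recent kernel headers, the same setsockopt() interface can reference a keyring key instead of raw key bytes, which is what alg_setkey_by_key_serial() resolves. A hedged sketch, assuming the serial comes from add_key(2)/keyctl(2) and the kernel was built with CONFIG_KEYS (otherwise -ENOPROTOOPT is returned, as above):

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

static int alg_set_key_by_serial_example(int tfmfd, int32_t serial)
{
	/* only "user", "logon", "encrypted" and "trusted" key types are
	 * accepted, per the strcmp() checks above */
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY_BY_KEY_SERIAL,
		       &serial, sizeof(serial)) < 0) {
		perror("ALG_SET_KEY_BY_KEY_SERIAL");
		return -1;
	}
	return 0;
}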
360 struct sock *sk = sock->sk; in alg_setsockopt()
363 int err = -EBUSY; in alg_setsockopt()
366 if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) in alg_setsockopt()
369 type = ask->type; in alg_setsockopt()
371 err = -ENOPROTOOPT; in alg_setsockopt()
378 if (sock->state == SS_CONNECTED) in alg_setsockopt()
380 if (!type->setkey) in alg_setsockopt()
389 if (sock->state == SS_CONNECTED) in alg_setsockopt()
391 if (!type->setauthsize) in alg_setsockopt()
393 err = type->setauthsize(ask->private, optlen); in alg_setsockopt()
396 if (sock->state == SS_CONNECTED) in alg_setsockopt()
398 if (!type->setentropy) in alg_setsockopt()
401 err = type->setentropy(ask->private, optval, optlen); in alg_setsockopt()
419 type = ask->type; in af_alg_accept()
421 err = -EINVAL; in af_alg_accept()
426 err = -ENOMEM; in af_alg_accept()
435 * newsock->ops assigned here to allow type->accept call to override in af_alg_accept()
438 newsock->ops = type->ops; in af_alg_accept()
439 err = type->accept(ask->private, sk2); in af_alg_accept()
441 nokey = err == -ENOKEY; in af_alg_accept()
442 if (nokey && type->accept_nokey) in af_alg_accept()
443 err = type->accept_nokey(ask->private, sk2); in af_alg_accept()
448 if (atomic_inc_return_relaxed(&ask->refcnt) == 1) in af_alg_accept()
451 atomic_inc(&ask->nokey_refcnt); in af_alg_accept()
452 atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); in af_alg_accept()
454 alg_sk(sk2)->parent = sk; in af_alg_accept()
455 alg_sk(sk2)->type = type; in af_alg_accept()
457 newsock->state = SS_CONNECTED; in af_alg_accept()
460 newsock->ops = type->ops_nokey; in af_alg_accept()
474 return af_alg_accept(sock->sk, newsock, kern); in alg_accept()
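The accept step handled by af_alg_accept()/alg_accept() above looks like this from user space; the returned descriptor is the operation socket, wired to type->ops (or type->ops_nokey while no key has been set):

#include <stdio.h>
#include <sys/socket.h>

static int alg_accept_example(int tfmfd)
{
	int opfd = accept(tfmfd, NULL, NULL);	/* operation/request socket */

	if (opfd < 0)
		perror("accept");
	return opfd;
}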
492 .release = af_alg_release,
501 alg_do_release(ask->type, ask->private); in alg_sock_destruct()
510 if (sock->type != SOCK_SEQPACKET) in alg_create()
511 return -ESOCKTNOSUPPORT; in alg_create()
513 return -EPROTONOSUPPORT; in alg_create()
515 err = -ENOMEM; in alg_create()
520 sock->ops = &alg_proto_ops; in alg_create()
523 sk->sk_destruct = alg_sock_destruct; in alg_create()
539 sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1); in af_alg_link_sg()
540 sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl); in af_alg_link_sg()
547 if (sgl->sgt.sgl) { in af_alg_free_sg()
548 if (sgl->need_unpin) in af_alg_free_sg()
549 for (i = 0; i < sgl->sgt.nents; i++) in af_alg_free_sg()
550 unpin_user_page(sg_page(&sgl->sgt.sgl[i])); in af_alg_free_sg()
551 if (sgl->sgt.sgl != sgl->sgl) in af_alg_free_sg()
552 kvfree(sgl->sgt.sgl); in af_alg_free_sg()
553 sgl->sgt.sgl = NULL; in af_alg_free_sg()
564 return -EINVAL; in af_alg_cmsg_send()
565 if (cmsg->cmsg_level != SOL_ALG) in af_alg_cmsg_send()
568 switch (cmsg->cmsg_type) { in af_alg_cmsg_send()
570 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) in af_alg_cmsg_send()
571 return -EINVAL; in af_alg_cmsg_send()
572 con->iv = (void *)CMSG_DATA(cmsg); in af_alg_cmsg_send()
573 if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen + in af_alg_cmsg_send()
574 sizeof(*con->iv))) in af_alg_cmsg_send()
575 return -EINVAL; in af_alg_cmsg_send()
579 if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) in af_alg_cmsg_send()
580 return -EINVAL; in af_alg_cmsg_send()
581 con->op = *(u32 *)CMSG_DATA(cmsg); in af_alg_cmsg_send()
585 if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) in af_alg_cmsg_send()
586 return -EINVAL; in af_alg_cmsg_send()
587 con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg); in af_alg_cmsg_send()
591 return -EINVAL; in af_alg_cmsg_send()
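The control messages that af_alg_cmsg_send() parses are built with the usual CMSG macros. A hedged sketch for an skcipher encryption; the 16-byte IV size is an assumption for cbc(aes), and AEAD callers would append an ALG_SET_AEAD_ASSOCLEN u32 the same way:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

#define EXAMPLE_IVLEN 16	/* assumed cipher IV size */

static ssize_t alg_send_example(int opfd, const void *in, size_t inlen,
				const unsigned char iv[EXAMPLE_IVLEN])
{
	union {
		char buf[CMSG_SPACE(sizeof(__u32)) +
			 CMSG_SPACE(sizeof(struct af_alg_iv) + EXAMPLE_IVLEN)];
		struct cmsghdr align;	/* keep the buffer cmsg-aligned */
	} cbuf = { 0 };
	struct iovec iov = { .iov_base = (void *)in, .iov_len = inlen };
	struct msghdr msg = {
		.msg_control	= cbuf.buf,
		.msg_controllen	= sizeof(cbuf.buf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	struct af_alg_iv *aiv;

	/* ALG_SET_OP: a single u32, mirroring the CMSG_LEN(sizeof(u32)) check */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_OP;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* ALG_SET_IV: struct af_alg_iv (ivlen) followed by the IV bytes */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_IV;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(struct af_alg_iv) + EXAMPLE_IVLEN);
	aiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	aiv->ivlen = EXAMPLE_IVLEN;
	memcpy(aiv->iv, iv, EXAMPLE_IVLEN);

	return sendmsg(opfd, &msg, 0);
}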
599 * af_alg_alloc_tsgl - allocate the TX SGL
607 struct af_alg_ctx *ctx = ask->private; in af_alg_alloc_tsgl()
611 sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); in af_alg_alloc_tsgl()
612 if (!list_empty(&ctx->tsgl_list)) in af_alg_alloc_tsgl()
613 sg = sgl->sg; in af_alg_alloc_tsgl()
615 if (!sg || sgl->cur >= MAX_SGL_ENTS) { in af_alg_alloc_tsgl()
620 return -ENOMEM; in af_alg_alloc_tsgl()
622 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); in af_alg_alloc_tsgl()
623 sgl->cur = 0; in af_alg_alloc_tsgl()
626 sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); in af_alg_alloc_tsgl()
628 list_add_tail(&sgl->list, &ctx->tsgl_list); in af_alg_alloc_tsgl()
635 * af_alg_count_tsgl - Count number of TX SG entries
648 const struct af_alg_ctx *ctx = ask->private; in af_alg_count_tsgl()
656 list_for_each_entry(sgl, &ctx->tsgl_list, list) { in af_alg_count_tsgl()
657 const struct scatterlist *sg = sgl->sg; in af_alg_count_tsgl()
659 for (i = 0; i < sgl->cur; i++) { in af_alg_count_tsgl()
664 offset -= sg[i].length; in af_alg_count_tsgl()
665 bytes -= sg[i].length; in af_alg_count_tsgl()
669 bytes_count = sg[i].length - offset; in af_alg_count_tsgl()
678 bytes -= bytes_count; in af_alg_count_tsgl()
687 * af_alg_pull_tsgl - Release the specified buffers from TX SGL
689 * If @dst is non-null, reassign the pages to @dst. The caller must release
696 * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
697 * caller must release the buffers in dst.
705 struct af_alg_ctx *ctx = ask->private; in af_alg_pull_tsgl()
710 while (!list_empty(&ctx->tsgl_list)) { in af_alg_pull_tsgl()
711 sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, in af_alg_pull_tsgl()
713 sg = sgl->sg; in af_alg_pull_tsgl()
715 for (i = 0; i < sgl->cur; i++) { in af_alg_pull_tsgl()
729 dst_offset -= plen; in af_alg_pull_tsgl()
734 plen - dst_offset, in af_alg_pull_tsgl()
741 sg[i].length -= plen; in af_alg_pull_tsgl()
744 used -= plen; in af_alg_pull_tsgl()
745 ctx->used -= plen; in af_alg_pull_tsgl()
754 list_del(&sgl->list); in af_alg_pull_tsgl()
758 if (!ctx->used) in af_alg_pull_tsgl()
759 ctx->merge = 0; in af_alg_pull_tsgl()
760 ctx->init = ctx->more; in af_alg_pull_tsgl()
765 * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
771 struct sock *sk = areq->sk; in af_alg_free_areq_sgls()
773 struct af_alg_ctx *ctx = ask->private; in af_alg_free_areq_sgls()
779 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { in af_alg_free_areq_sgls()
780 atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused); in af_alg_free_areq_sgls()
781 af_alg_free_sg(&rsgl->sgl); in af_alg_free_areq_sgls()
782 list_del(&rsgl->list); in af_alg_free_areq_sgls()
783 if (rsgl != &areq->first_rsgl) in af_alg_free_areq_sgls()
787 tsgl = areq->tsgl; in af_alg_free_areq_sgls()
789 for_each_sg(tsgl, sg, areq->tsgl_entries, i) { in af_alg_free_areq_sgls()
795 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); in af_alg_free_areq_sgls()
800 * af_alg_wait_for_wmem - wait for availability of writable memory
809 int err = -ERESTARTSYS; in af_alg_wait_for_wmem()
813 return -EAGAIN; in af_alg_wait_for_wmem()
833 * af_alg_wmem_wakeup - wakeup caller when writable memory is available
845 wq = rcu_dereference(sk->sk_wq); in af_alg_wmem_wakeup()
847 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | in af_alg_wmem_wakeup()
856 * af_alg_wait_for_data - wait for availability of TX data
867 struct af_alg_ctx *ctx = ask->private; in af_alg_wait_for_data()
869 int err = -ERESTARTSYS; in af_alg_wait_for_data()
872 return -EAGAIN; in af_alg_wait_for_data()
882 ctx->init && (!ctx->more || in af_alg_wait_for_data()
883 (min && ctx->used >= min)), in af_alg_wait_for_data()
898 * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
905 struct af_alg_ctx *ctx = ask->private; in af_alg_data_wakeup()
908 if (!ctx->used) in af_alg_data_wakeup()
912 wq = rcu_dereference(sk->sk_wq); in af_alg_data_wakeup()
914 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | in af_alg_data_wakeup()
922 * af_alg_sendmsg - implementation of sendmsg system call handler
925 * in ctx->tsgl_list. This implies allocation of the required numbers of
934 * user-space-provided IV has the right size
940 struct sock *sk = sock->sk; in af_alg_sendmsg()
942 struct af_alg_ctx *ctx = ask->private; in af_alg_sendmsg()
950 if (msg->msg_controllen) { in af_alg_sendmsg()
964 return -EINVAL; in af_alg_sendmsg()
967 if (con.iv && con.iv->ivlen != ivsize) in af_alg_sendmsg()
968 return -EINVAL; in af_alg_sendmsg()
972 if (ctx->init && !ctx->more) { in af_alg_sendmsg()
973 if (ctx->used) { in af_alg_sendmsg()
974 err = -EINVAL; in af_alg_sendmsg()
980 current->comm); in af_alg_sendmsg()
982 ctx->init = true; in af_alg_sendmsg()
985 ctx->enc = enc; in af_alg_sendmsg()
987 memcpy(ctx->iv, con.iv->iv, ivsize); in af_alg_sendmsg()
989 ctx->aead_assoclen = con.aead_assoclen; in af_alg_sendmsg()
998 if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) { in af_alg_sendmsg()
999 sgl = list_entry(ctx->tsgl_list.prev, in af_alg_sendmsg()
1001 sg = sgl->sg + sgl->cur - 1; in af_alg_sendmsg()
1003 PAGE_SIZE - sg->offset - sg->length); in af_alg_sendmsg()
1006 sg->offset + sg->length, in af_alg_sendmsg()
1011 sg->length += len; in af_alg_sendmsg()
1012 ctx->merge = (sg->offset + sg->length) & in af_alg_sendmsg()
1013 (PAGE_SIZE - 1); in af_alg_sendmsg()
1015 ctx->used += len; in af_alg_sendmsg()
1017 size -= len; in af_alg_sendmsg()
1022 err = af_alg_wait_for_wmem(sk, msg->msg_flags); in af_alg_sendmsg()
1034 sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, in af_alg_sendmsg()
1036 sg = sgl->sg; in af_alg_sendmsg()
1037 if (sgl->cur) in af_alg_sendmsg()
1038 sg_unmark_end(sg + sgl->cur - 1); in af_alg_sendmsg()
1040 if (msg->msg_flags & MSG_SPLICE_PAGES) { in af_alg_sendmsg()
1043 .nents = sgl->cur, in af_alg_sendmsg()
1044 .orig_nents = sgl->cur, in af_alg_sendmsg()
1047 plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable, in af_alg_sendmsg()
1048 MAX_SGL_ENTS - sgl->cur, 0); in af_alg_sendmsg()
1054 for (; sgl->cur < sgtable.nents; sgl->cur++) in af_alg_sendmsg()
1055 get_page(sg_page(&sg[sgl->cur])); in af_alg_sendmsg()
1056 len -= plen; in af_alg_sendmsg()
1057 ctx->used += plen; in af_alg_sendmsg()
1059 size -= plen; in af_alg_sendmsg()
1060 ctx->merge = 0; in af_alg_sendmsg()
1064 unsigned int i = sgl->cur; in af_alg_sendmsg()
1070 err = -ENOMEM; in af_alg_sendmsg()
1086 len -= plen; in af_alg_sendmsg()
1087 ctx->used += plen; in af_alg_sendmsg()
1089 size -= plen; in af_alg_sendmsg()
1090 sgl->cur++; in af_alg_sendmsg()
1091 } while (len && sgl->cur < MAX_SGL_ENTS); in af_alg_sendmsg()
1093 ctx->merge = plen & (PAGE_SIZE - 1); in af_alg_sendmsg()
1097 sg_mark_end(sg + sgl->cur - 1); in af_alg_sendmsg()
1102 ctx->more = msg->msg_flags & MSG_MORE; in af_alg_sendmsg()
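From user space, the TX SGL that af_alg_sendmsg() fills is simply fed by further writes; MSG_MORE maps onto ctx->more, and the processed result is read back from the same operation socket. A rough sketch in which the split into two chunks is only illustrative:

#include <unistd.h>
#include <sys/socket.h>

static ssize_t alg_stream_example(int opfd, const void *a, size_t alen,
				  const void *b, size_t blen, void *out)
{
	/* the first chunk normally travels in the sendmsg() that also carries
	 * the ALG_SET_* control messages; later chunks can use plain send() */
	if (send(opfd, a, alen, MSG_MORE) < 0)	/* more input will follow */
		return -1;
	if (send(opfd, b, blen, 0) < 0)		/* final chunk */
		return -1;
	return read(opfd, out, alen + blen);	/* collect the result */
}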
1113 * af_alg_free_resources - release resources required for crypto request
1118 struct sock *sk = areq->sk; in af_alg_free_resources()
1121 sock_kfree_s(sk, areq, areq->areqlen); in af_alg_free_resources()
1126 * af_alg_async_cb - AIO callback handler
1128 * @err: if non-zero, error result to be returned via ki_complete();
1135 * in areq->outlen before the AIO callback handler is invoked.
1140 struct sock *sk = areq->sk; in af_alg_async_cb()
1141 struct kiocb *iocb = areq->iocb; in af_alg_async_cb()
1145 resultlen = areq->outlen; in af_alg_async_cb()
1150 iocb->ki_complete(iocb, err ? err : (int)resultlen); in af_alg_async_cb()
1155 * af_alg_poll - poll system call handler
1163 struct sock *sk = sock->sk; in af_alg_poll()
1165 struct af_alg_ctx *ctx = ask->private; in af_alg_poll()
1171 if (!ctx->more || ctx->used) in af_alg_poll()
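The operation socket can also be polled, which is what af_alg_poll() above answers; a trivial sketch:

#include <poll.h>

static int alg_poll_example(int opfd)
{
	struct pollfd pfd = { .fd = opfd, .events = POLLIN | POLLOUT };

	/* POLLIN: a read/recv would make progress; POLLOUT: more input fits */
	return poll(&pfd, 1, -1);
}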
1182 * af_alg_alloc_areq - allocate struct af_alg_async_req
1194 return ERR_PTR(-ENOMEM); in af_alg_alloc_areq()
1196 areq->areqlen = areqlen; in af_alg_alloc_areq()
1197 areq->sk = sk; in af_alg_alloc_areq()
1198 areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl; in af_alg_alloc_areq()
1199 areq->last_rsgl = NULL; in af_alg_alloc_areq()
1200 INIT_LIST_HEAD(&areq->rsgl_list); in af_alg_alloc_areq()
1201 areq->tsgl = NULL; in af_alg_alloc_areq()
1202 areq->tsgl_entries = 0; in af_alg_alloc_areq()
1209 * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
1225 struct af_alg_ctx *ctx = ask->private; in af_alg_get_rsgl()
1237 seglen = min_t(size_t, (maxsize - len), in af_alg_get_rsgl()
1240 if (list_empty(&areq->rsgl_list)) { in af_alg_get_rsgl()
1241 rsgl = &areq->first_rsgl; in af_alg_get_rsgl()
1245 return -ENOMEM; in af_alg_get_rsgl()
1248 rsgl->sgl.need_unpin = in af_alg_get_rsgl()
1249 iov_iter_extract_will_pin(&msg->msg_iter); in af_alg_get_rsgl()
1250 rsgl->sgl.sgt.sgl = rsgl->sgl.sgl; in af_alg_get_rsgl()
1251 rsgl->sgl.sgt.nents = 0; in af_alg_get_rsgl()
1252 rsgl->sgl.sgt.orig_nents = 0; in af_alg_get_rsgl()
1253 list_add_tail(&rsgl->list, &areq->rsgl_list); in af_alg_get_rsgl()
1255 sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES); in af_alg_get_rsgl()
1256 err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt, in af_alg_get_rsgl()
1259 rsgl->sg_num_bytes = 0; in af_alg_get_rsgl()
1263 sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1); in af_alg_get_rsgl()
1266 if (areq->last_rsgl) in af_alg_get_rsgl()
1267 af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl); in af_alg_get_rsgl()
1269 areq->last_rsgl = rsgl; in af_alg_get_rsgl()
1271 atomic_add(err, &ctx->rcvused); in af_alg_get_rsgl()
1272 rsgl->sg_num_bytes = err; in af_alg_get_rsgl()