Lines Matching +full:input +full:- +full:depth
1 // SPDX-License-Identifier: GPL-2.0
17 #include <linux/dma-mapping.h>
62 #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) argument
69 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth)) argument
77 #define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM) argument
78 #define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \ argument
79 SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
80 #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ argument
81 SEC_PBUF_LEFT_SZ(depth))
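These fragments appear to come from the HiSilicon SEC2 crypto driver (sec_crypto.c). The SEC_PBUF_* macros above size one coherent "pbuf" region per queue: full pages hold SEC_PBUF_NUM packages of SEC_PBUF_PKG bytes each, and whatever part of the queue depth does not fill a whole page is added as a tail of loose packages. A minimal userspace sketch of that arithmetic; PAGE_SIZE, SEC_PBUF_PKG and SEC_PBUF_NUM below are illustrative stand-ins, not the driver's real values.

#include <stdio.h>

/* Illustrative values only; the driver takes these from its own headers. */
#define PAGE_SIZE     4096UL
#define SEC_PBUF_PKG  512UL                      /* assumed per-request package size */
#define SEC_PBUF_NUM  (PAGE_SIZE / SEC_PBUF_PKG) /* packages that fit in one page    */

#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)  (SEC_PBUF_PKG * ((depth) - \
				  SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
				  SEC_PBUF_LEFT_SZ(depth))

int main(void)
{
	unsigned long depth = 1024;	/* queue depth, e.g. qp->sq_depth */

	printf("full pages: %lu, tail bytes: %lu, total pbuf bytes: %lu\n",
	       SEC_PBUF_PAGE_NUM(depth), SEC_PBUF_LEFT_SZ(depth),
	       SEC_TOTAL_PBUF_SZ(depth));
	return 0;
}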
117 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
120 if (req->c_req.encrypt) in sec_alloc_queue_id()
121 return (u32)atomic_inc_return(&ctx->enc_qcyclic) % in sec_alloc_queue_id()
122 ctx->hlf_q_num; in sec_alloc_queue_id()
124 return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + in sec_alloc_queue_id()
125 ctx->hlf_q_num; in sec_alloc_queue_id()
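sec_alloc_queue_id() above splits a TFM's queues in half: encrypt requests cycle through queues [0, hlf_q_num) and decrypt requests through [hlf_q_num, 2 * hlf_q_num), each driven by its own atomic counter. A standalone model of that split, assuming nothing beyond what the fragment shows (the "+ 1" mirrors atomic_inc_return(), which yields the post-increment value):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace model of the enc/dec split: encrypt requests cycle through
 * queues [0, hlf_q_num), decrypt requests through [hlf_q_num, 2*hlf_q_num). */
struct qsel {
	unsigned int hlf_q_num;
	atomic_uint enc_qcyclic;
	atomic_uint dec_qcyclic;
};

static unsigned int alloc_queue_id(struct qsel *s, int encrypt)
{
	if (encrypt)
		return (atomic_fetch_add(&s->enc_qcyclic, 1) + 1) % s->hlf_q_num;

	return (atomic_fetch_add(&s->dec_qcyclic, 1) + 1) % s->hlf_q_num +
	       s->hlf_q_num;
}

int main(void)
{
	struct qsel s = { .hlf_q_num = 4 };
	int i;

	for (i = 0; i < 6; i++)
		printf("enc -> q%u, dec -> q%u\n",
		       alloc_queue_id(&s, 1), alloc_queue_id(&s, 0));
	return 0;
}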
130 if (req->c_req.encrypt) in sec_free_queue_id()
131 atomic_dec(&ctx->enc_qcyclic); in sec_free_queue_id()
133 atomic_dec(&ctx->dec_qcyclic); in sec_free_queue_id()
140 spin_lock_bh(&qp_ctx->req_lock); in sec_alloc_req_id()
141 req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC); in sec_alloc_req_id()
142 spin_unlock_bh(&qp_ctx->req_lock); in sec_alloc_req_id()
144 dev_err(req->ctx->dev, "alloc req id fail!\n"); in sec_alloc_req_id()
148 req->qp_ctx = qp_ctx; in sec_alloc_req_id()
149 qp_ctx->req_list[req_id] = req; in sec_alloc_req_id()
156 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_free_req_id()
157 int req_id = req->req_id; in sec_free_req_id()
159 if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) { in sec_free_req_id()
160 dev_err(req->ctx->dev, "free request id invalid!\n"); in sec_free_req_id()
164 qp_ctx->req_list[req_id] = NULL; in sec_free_req_id()
165 req->qp_ctx = NULL; in sec_free_req_id()
167 spin_lock_bh(&qp_ctx->req_lock); in sec_free_req_id()
168 idr_remove(&qp_ctx->req_idr, req_id); in sec_free_req_id()
169 spin_unlock_bh(&qp_ctx->req_lock); in sec_free_req_id()
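sec_alloc_req_id()/sec_free_req_id() above hand out per-queue request slots with idr_alloc_cyclic() bounded by the hardware queue depth, store the request pointer in req_list[id], and validate the id on free. The toy allocator below models only the cyclic-slot idea in plain C; it is not the kernel IDR API, and the spinlock protecting the real allocator is omitted.

#include <stdio.h>

/* Toy cyclic slot allocator modelling the idr_alloc_cyclic() usage above:
 * ids are bounded by the queue depth and handed out from a moving hint so
 * recently freed ids are not reused immediately. Locking is omitted. */
#define SQ_DEPTH 8

struct req_table {
	void *req_list[SQ_DEPTH];
	int next;		/* cyclic hint, like the IDR's internal cursor */
};

static int alloc_req_id(struct req_table *t, void *req)
{
	int i;

	for (i = 0; i < SQ_DEPTH; i++) {
		int id = (t->next + i) % SQ_DEPTH;

		if (!t->req_list[id]) {
			t->req_list[id] = req;
			t->next = (id + 1) % SQ_DEPTH;
			return id;
		}
	}
	return -1;		/* queue full: the driver logs and fails here */
}

static void free_req_id(struct req_table *t, int id)
{
	if (id < 0 || id >= SQ_DEPTH)
		return;		/* mirrors the "free request id invalid" check */
	t->req_list[id] = NULL;
}

int main(void)
{
	struct req_table t = { .next = 0 };
	int d1, d2, d3;
	int a = alloc_req_id(&t, &d1);
	int b = alloc_req_id(&t, &d2);

	free_req_id(&t, a);
	printf("ids: %d %d, next alloc: %d\n", a, b, alloc_req_id(&t, &d3));
	return 0;
}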
176 status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; in pre_parse_finished_bd()
177 status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1; in pre_parse_finished_bd()
178 status->flag = (le16_to_cpu(bd->type2.done_flag) & in pre_parse_finished_bd()
180 status->tag = le16_to_cpu(bd->type2.tag); in pre_parse_finished_bd()
181 status->err_type = bd->type2.error_type; in pre_parse_finished_bd()
183 return bd->type_cipher_auth & SEC_TYPE_MASK; in pre_parse_finished_bd()
190 status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK; in pre_parse_finished_bd3()
191 status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1; in pre_parse_finished_bd3()
192 status->flag = (le16_to_cpu(bd3->done_flag) & in pre_parse_finished_bd3()
194 status->tag = le64_to_cpu(bd3->tag); in pre_parse_finished_bd3()
195 status->err_type = bd3->error_type; in pre_parse_finished_bd3()
197 return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK; in pre_parse_finished_bd3()
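Both completion parsers unpack done, icv and flag from a little-endian 16-bit done_flag word by mask-and-shift, then pick up a tag that identifies the originating request and an error_type byte. A hedged sketch of that unpacking; the bit layout and mask values below are placeholders, since the listing truncates the real SEC_FLAG_MASK/SEC_FLAG_OFFSET definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder layout: bit 0 = done, bits 1-3 = icv, bits 7-10 = flag.
 * The driver's real SEC_DONE_MASK/SEC_ICV_MASK/SEC_FLAG_MASK values may
 * differ; the point is the mask-and-shift pattern. */
#define DONE_MASK   0x0001u
#define ICV_MASK    0x000eu
#define FLAG_MASK   0x0780u
#define FLAG_OFFSET 7

struct bd_status {
	uint8_t done;
	uint8_t icv;
	uint8_t flag;
};

static void parse_done_flag(uint16_t done_flag, struct bd_status *st)
{
	/* done_flag is already converted from little endian, as with
	 * le16_to_cpu() in the driver */
	st->done = done_flag & DONE_MASK;
	st->icv  = (done_flag & ICV_MASK) >> 1;
	st->flag = (done_flag & FLAG_MASK) >> FLAG_OFFSET;
}

int main(void)
{
	struct bd_status st;

	parse_done_flag(0x0081, &st);	/* done = 1, flag = 1 in this layout */
	printf("done=%u icv=%u flag=%u\n", st.done, st.icv, st.flag);
	return 0;
}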
203 struct sec_ctx *ctx = req->ctx; in sec_cb_status_check()
205 if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) { in sec_cb_status_check()
206 dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n", in sec_cb_status_check()
207 req->err_type, status->done); in sec_cb_status_check()
208 return -EIO; in sec_cb_status_check()
211 if (unlikely(ctx->alg_type == SEC_SKCIPHER)) { in sec_cb_status_check()
212 if (unlikely(status->flag != SEC_SQE_CFLAG)) { in sec_cb_status_check()
213 dev_err_ratelimited(ctx->dev, "flag[%u]\n", in sec_cb_status_check()
214 status->flag); in sec_cb_status_check()
215 return -EIO; in sec_cb_status_check()
217 } else if (unlikely(ctx->alg_type == SEC_AEAD)) { in sec_cb_status_check()
218 if (unlikely(status->flag != SEC_SQE_AEAD_FLAG || in sec_cb_status_check()
219 status->icv == SEC_ICV_ERR)) { in sec_cb_status_check()
220 dev_err_ratelimited(ctx->dev, in sec_cb_status_check()
222 status->flag, status->icv); in sec_cb_status_check()
223 return -EBADMSG; in sec_cb_status_check()
232 struct sec_qp_ctx *qp_ctx = qp->qp_ctx; in sec_req_cb()
233 struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; in sec_req_cb()
234 u8 type_supported = qp_ctx->ctx->type_supported; in sec_req_cb()
243 req = qp_ctx->req_list[status.tag]; in sec_req_cb()
250 atomic64_inc(&dfx->err_bd_cnt); in sec_req_cb()
256 atomic64_inc(&dfx->invalid_req_cnt); in sec_req_cb()
257 atomic_inc(&qp->qp_status.used); in sec_req_cb()
261 req->err_type = status.err_type; in sec_req_cb()
262 ctx = req->ctx; in sec_req_cb()
265 atomic64_inc(&dfx->done_flag_cnt); in sec_req_cb()
267 atomic64_inc(&dfx->recv_cnt); in sec_req_cb()
269 ctx->req_op->buf_unmap(ctx, req); in sec_req_cb()
271 ctx->req_op->callback(ctx, req, err); in sec_req_cb()
276 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_bd_send()
279 if (ctx->fake_req_limit <= in sec_bd_send()
280 atomic_read(&qp_ctx->qp->qp_status.used) && in sec_bd_send()
281 !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) in sec_bd_send()
282 return -EBUSY; in sec_bd_send()
284 spin_lock_bh(&qp_ctx->req_lock); in sec_bd_send()
285 ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); in sec_bd_send()
287 if (ctx->fake_req_limit <= in sec_bd_send()
288 atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { in sec_bd_send()
289 list_add_tail(&req->backlog_head, &qp_ctx->backlog); in sec_bd_send()
290 atomic64_inc(&ctx->sec->debug.dfx.send_cnt); in sec_bd_send()
291 atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); in sec_bd_send()
292 spin_unlock_bh(&qp_ctx->req_lock); in sec_bd_send()
293 return -EBUSY; in sec_bd_send()
295 spin_unlock_bh(&qp_ctx->req_lock); in sec_bd_send()
297 if (unlikely(ret == -EBUSY)) in sec_bd_send()
298 return -ENOBUFS; in sec_bd_send()
301 ret = -EINPROGRESS; in sec_bd_send()
302 atomic64_inc(&ctx->sec->debug.dfx.send_cnt); in sec_bd_send()
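sec_bd_send() above applies a soft backpressure policy: once the in-flight count reaches fake_req_limit, a request without CRYPTO_TFM_REQ_MAY_BACKLOG is rejected with -EBUSY up front; a MAY_BACKLOG request is still submitted but parked on the qp_ctx backlog list and reported as -EBUSY; a genuine -EBUSY from hisi_qp_send() (ring full) becomes -ENOBUFS; everything else completes asynchronously as -EINPROGRESS. A compact model of that return-code policy, with the lock and the real send call left out:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the sec_bd_send() return policy. 'used' is the in-flight count,
 * 'hw_send_ret' stands in for hisi_qp_send() (0 on success, -EBUSY when the
 * hardware ring is full). Locking and the backlog list itself are omitted. */
static int bd_send_policy(int used, int fake_req_limit, bool may_backlog,
			  int hw_send_ret)
{
	if (fake_req_limit <= used && !may_backlog)
		return -EBUSY;		/* refuse early, caller may not backlog */

	if (fake_req_limit <= used && !hw_send_ret)
		return -EBUSY;		/* sent, but parked on the backlog list */

	if (hw_send_ret == -EBUSY)
		return -ENOBUFS;	/* hardware ring is really full         */

	return -EINPROGRESS;		/* normal asynchronous submission       */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       bd_send_policy(64, 64, false, 0),	/* -EBUSY       */
	       bd_send_policy(64, 64, true, 0),		/* -EBUSY       */
	       bd_send_policy(10, 64, true, -EBUSY),	/* -ENOBUFS     */
	       bd_send_policy(10, 64, true, 0));	/* -EINPROGRESS */
	return 0;
}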
311 u16 q_depth = res->depth; in sec_alloc_civ_resource()
314 res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), in sec_alloc_civ_resource()
315 &res->c_ivin_dma, GFP_KERNEL); in sec_alloc_civ_resource()
316 if (!res->c_ivin) in sec_alloc_civ_resource()
317 return -ENOMEM; in sec_alloc_civ_resource()
320 res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; in sec_alloc_civ_resource()
321 res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; in sec_alloc_civ_resource()
329 if (res->c_ivin) in sec_free_civ_resource()
330 dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), in sec_free_civ_resource()
331 res->c_ivin, res->c_ivin_dma); in sec_free_civ_resource()
336 u16 q_depth = res->depth; in sec_alloc_aiv_resource()
339 res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), in sec_alloc_aiv_resource()
340 &res->a_ivin_dma, GFP_KERNEL); in sec_alloc_aiv_resource()
341 if (!res->a_ivin) in sec_alloc_aiv_resource()
342 return -ENOMEM; in sec_alloc_aiv_resource()
345 res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE; in sec_alloc_aiv_resource()
346 res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE; in sec_alloc_aiv_resource()
354 if (res->a_ivin) in sec_free_aiv_resource()
355 dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), in sec_free_aiv_resource()
356 res->a_ivin, res->a_ivin_dma); in sec_free_aiv_resource()
361 u16 q_depth = res->depth; in sec_alloc_mac_resource()
364 res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1, in sec_alloc_mac_resource()
365 &res->out_mac_dma, GFP_KERNEL); in sec_alloc_mac_resource()
366 if (!res->out_mac) in sec_alloc_mac_resource()
367 return -ENOMEM; in sec_alloc_mac_resource()
370 res[i].out_mac_dma = res->out_mac_dma + in sec_alloc_mac_resource()
372 res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); in sec_alloc_mac_resource()
380 if (res->out_mac) in sec_free_mac_resource()
381 dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1, in sec_free_mac_resource()
382 res->out_mac, res->out_mac_dma); in sec_free_mac_resource()
387 if (res->pbuf) in sec_free_pbuf_resource()
388 dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth), in sec_free_pbuf_resource()
389 res->pbuf, res->pbuf_dma); in sec_free_pbuf_resource()
398 u16 q_depth = res->depth; in sec_alloc_pbuf_resource()
403 res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth), in sec_alloc_pbuf_resource()
404 &res->pbuf_dma, GFP_KERNEL); in sec_alloc_pbuf_resource()
405 if (!res->pbuf) in sec_alloc_pbuf_resource()
406 return -ENOMEM; in sec_alloc_pbuf_resource()
422 res[k].pbuf = res->pbuf + in sec_alloc_pbuf_resource()
424 res[k].pbuf_dma = res->pbuf_dma + in sec_alloc_pbuf_resource()
435 struct sec_alg_res *res = qp_ctx->res; in sec_alg_resource_alloc()
436 struct device *dev = ctx->dev; in sec_alg_resource_alloc()
443 if (ctx->alg_type == SEC_AEAD) { in sec_alg_resource_alloc()
452 if (ctx->pbuf_supported) { in sec_alg_resource_alloc()
463 if (ctx->alg_type == SEC_AEAD) in sec_alg_resource_alloc()
464 sec_free_mac_resource(dev, qp_ctx->res); in sec_alg_resource_alloc()
466 if (ctx->alg_type == SEC_AEAD) in sec_alg_resource_alloc()
476 struct device *dev = ctx->dev; in sec_alg_resource_free()
478 sec_free_civ_resource(dev, qp_ctx->res); in sec_alg_resource_free()
480 if (ctx->pbuf_supported) in sec_alg_resource_free()
481 sec_free_pbuf_resource(dev, qp_ctx->res); in sec_alg_resource_free()
482 if (ctx->alg_type == SEC_AEAD) in sec_alg_resource_free()
483 sec_free_mac_resource(dev, qp_ctx->res); in sec_alg_resource_free()
489 u16 q_depth = qp_ctx->qp->sq_depth; in sec_alloc_qp_ctx_resource()
490 struct device *dev = ctx->dev; in sec_alloc_qp_ctx_resource()
491 int ret = -ENOMEM; in sec_alloc_qp_ctx_resource()
493 qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL); in sec_alloc_qp_ctx_resource()
494 if (!qp_ctx->req_list) in sec_alloc_qp_ctx_resource()
497 qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL); in sec_alloc_qp_ctx_resource()
498 if (!qp_ctx->res) in sec_alloc_qp_ctx_resource()
500 qp_ctx->res->depth = q_depth; in sec_alloc_qp_ctx_resource()
502 qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); in sec_alloc_qp_ctx_resource()
503 if (IS_ERR(qp_ctx->c_in_pool)) { in sec_alloc_qp_ctx_resource()
504 dev_err(dev, "fail to create sgl pool for input!\n"); in sec_alloc_qp_ctx_resource()
508 qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); in sec_alloc_qp_ctx_resource()
509 if (IS_ERR(qp_ctx->c_out_pool)) { in sec_alloc_qp_ctx_resource()
521 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); in sec_alloc_qp_ctx_resource()
523 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); in sec_alloc_qp_ctx_resource()
525 kfree(qp_ctx->res); in sec_alloc_qp_ctx_resource()
527 kfree(qp_ctx->req_list); in sec_alloc_qp_ctx_resource()
533 struct device *dev = ctx->dev; in sec_free_qp_ctx_resource()
536 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); in sec_free_qp_ctx_resource()
537 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); in sec_free_qp_ctx_resource()
538 kfree(qp_ctx->res); in sec_free_qp_ctx_resource()
539 kfree(qp_ctx->req_list); in sec_free_qp_ctx_resource()
549 qp_ctx = &ctx->qp_ctx[qp_ctx_id]; in sec_create_qp_ctx()
550 qp = ctx->qps[qp_ctx_id]; in sec_create_qp_ctx()
551 qp->req_type = 0; in sec_create_qp_ctx()
552 qp->qp_ctx = qp_ctx; in sec_create_qp_ctx()
553 qp_ctx->qp = qp; in sec_create_qp_ctx()
554 qp_ctx->ctx = ctx; in sec_create_qp_ctx()
556 qp->req_cb = sec_req_cb; in sec_create_qp_ctx()
558 spin_lock_init(&qp_ctx->req_lock); in sec_create_qp_ctx()
559 idr_init(&qp_ctx->req_idr); in sec_create_qp_ctx()
560 INIT_LIST_HEAD(&qp_ctx->backlog); in sec_create_qp_ctx()
575 idr_destroy(&qp_ctx->req_idr); in sec_create_qp_ctx()
582 hisi_qm_stop_qp(qp_ctx->qp); in sec_release_qp_ctx()
584 idr_destroy(&qp_ctx->req_idr); in sec_release_qp_ctx()
592 ctx->qps = sec_create_qps(); in sec_ctx_base_init()
593 if (!ctx->qps) { in sec_ctx_base_init()
595 return -ENODEV; in sec_ctx_base_init()
598 sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); in sec_ctx_base_init()
599 ctx->sec = sec; in sec_ctx_base_init()
600 ctx->dev = &sec->qm.pdev->dev; in sec_ctx_base_init()
601 ctx->hlf_q_num = sec->ctx_q_num >> 1; in sec_ctx_base_init()
603 ctx->pbuf_supported = ctx->sec->iommu_used; in sec_ctx_base_init()
605 /* Half of queue depth is taken as fake requests limit in the queue. */ in sec_ctx_base_init()
606 ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1; in sec_ctx_base_init()
607 ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), in sec_ctx_base_init()
609 if (!ctx->qp_ctx) { in sec_ctx_base_init()
610 ret = -ENOMEM; in sec_ctx_base_init()
614 for (i = 0; i < sec->ctx_q_num; i++) { in sec_ctx_base_init()
615 ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); in sec_ctx_base_init()
623 for (i = i - 1; i >= 0; i--) in sec_ctx_base_init()
624 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); in sec_ctx_base_init()
625 kfree(ctx->qp_ctx); in sec_ctx_base_init()
627 sec_destroy_qps(ctx->qps, sec->ctx_q_num); in sec_ctx_base_init()
635 for (i = 0; i < ctx->sec->ctx_q_num; i++) in sec_ctx_base_uninit()
636 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); in sec_ctx_base_uninit()
638 sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num); in sec_ctx_base_uninit()
639 kfree(ctx->qp_ctx); in sec_ctx_base_uninit()
644 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_cipher_init()
646 c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, in sec_cipher_init()
647 &c_ctx->c_key_dma, GFP_KERNEL); in sec_cipher_init()
648 if (!c_ctx->c_key) in sec_cipher_init()
649 return -ENOMEM; in sec_cipher_init()
656 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_cipher_uninit()
658 memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); in sec_cipher_uninit()
659 dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, in sec_cipher_uninit()
660 c_ctx->c_key, c_ctx->c_key_dma); in sec_cipher_uninit()
665 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_auth_init()
667 a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, in sec_auth_init()
668 &a_ctx->a_key_dma, GFP_KERNEL); in sec_auth_init()
669 if (!a_ctx->a_key) in sec_auth_init()
670 return -ENOMEM; in sec_auth_init()
677 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_auth_uninit()
679 memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); in sec_auth_uninit()
680 dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, in sec_auth_uninit()
681 a_ctx->a_key, a_ctx->a_key_dma); in sec_auth_uninit()
686 const char *alg = crypto_tfm_alg_name(&tfm->base); in sec_skcipher_fbtfm_init()
688 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_fbtfm_init()
690 c_ctx->fallback = false; in sec_skcipher_fbtfm_init()
696 c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, in sec_skcipher_fbtfm_init()
698 if (IS_ERR(c_ctx->fbtfm)) { in sec_skcipher_fbtfm_init()
700 return PTR_ERR(c_ctx->fbtfm); in sec_skcipher_fbtfm_init()
711 ctx->alg_type = SEC_SKCIPHER; in sec_skcipher_init()
713 ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); in sec_skcipher_init()
714 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { in sec_skcipher_init()
716 return -EINVAL; in sec_skcipher_init()
744 if (ctx->c_ctx.fbtfm) in sec_skcipher_uninit()
745 crypto_free_sync_skcipher(ctx->c_ctx.fbtfm); in sec_skcipher_uninit()
756 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_3des_setkey()
765 c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; in sec_skcipher_3des_setkey()
768 c_ctx->c_key_len = SEC_CKEY_3DES_3KEY; in sec_skcipher_3des_setkey()
771 return -EINVAL; in sec_skcipher_3des_setkey()
784 c_ctx->c_key_len = SEC_CKEY_128BIT; in sec_skcipher_aes_sm4_setkey()
787 c_ctx->fallback = true; in sec_skcipher_aes_sm4_setkey()
790 c_ctx->c_key_len = SEC_CKEY_256BIT; in sec_skcipher_aes_sm4_setkey()
794 return -EINVAL; in sec_skcipher_aes_sm4_setkey()
797 if (c_ctx->c_alg == SEC_CALG_SM4 && in sec_skcipher_aes_sm4_setkey()
800 return -EINVAL; in sec_skcipher_aes_sm4_setkey()
804 c_ctx->c_key_len = SEC_CKEY_128BIT; in sec_skcipher_aes_sm4_setkey()
807 c_ctx->c_key_len = SEC_CKEY_192BIT; in sec_skcipher_aes_sm4_setkey()
810 c_ctx->c_key_len = SEC_CKEY_256BIT; in sec_skcipher_aes_sm4_setkey()
814 return -EINVAL; in sec_skcipher_aes_sm4_setkey()
827 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_setkey()
828 struct device *dev = ctx->dev; in sec_skcipher_setkey()
839 c_ctx->c_alg = c_alg; in sec_skcipher_setkey()
840 c_ctx->c_mode = c_mode; in sec_skcipher_setkey()
851 return -EINVAL; in sec_skcipher_setkey()
859 memcpy(c_ctx->c_key, key, keylen); in sec_skcipher_setkey()
860 if (c_ctx->fallback && c_ctx->fbtfm) { in sec_skcipher_setkey()
861 ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); in sec_skcipher_setkey()
894 struct sec_aead_req *a_req = &req->aead_req; in GEN_SEC_SETKEY_FUNC()
895 struct aead_request *aead_req = a_req->aead_req; in GEN_SEC_SETKEY_FUNC()
896 struct sec_cipher_req *c_req = &req->c_req; in GEN_SEC_SETKEY_FUNC()
897 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in GEN_SEC_SETKEY_FUNC()
898 struct device *dev = ctx->dev; in GEN_SEC_SETKEY_FUNC()
900 int req_id = req->req_id; in GEN_SEC_SETKEY_FUNC()
905 if (ctx->alg_type == SEC_AEAD) in GEN_SEC_SETKEY_FUNC()
906 copy_size = aead_req->cryptlen + aead_req->assoclen; in GEN_SEC_SETKEY_FUNC()
908 copy_size = c_req->c_len; in GEN_SEC_SETKEY_FUNC()
911 qp_ctx->res[req_id].pbuf, copy_size); in GEN_SEC_SETKEY_FUNC()
914 return -EINVAL; in GEN_SEC_SETKEY_FUNC()
916 if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { in GEN_SEC_SETKEY_FUNC()
919 mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize; in GEN_SEC_SETKEY_FUNC()
920 memcpy(a_req->out_mac, mac_offset, authsize); in GEN_SEC_SETKEY_FUNC()
923 req->in_dma = qp_ctx->res[req_id].pbuf_dma; in GEN_SEC_SETKEY_FUNC()
924 c_req->c_out_dma = req->in_dma; in GEN_SEC_SETKEY_FUNC()
932 struct aead_request *aead_req = req->aead_req.aead_req; in sec_cipher_pbuf_unmap()
933 struct sec_cipher_req *c_req = &req->c_req; in sec_cipher_pbuf_unmap()
934 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_cipher_pbuf_unmap()
936 int req_id = req->req_id; in sec_cipher_pbuf_unmap()
938 if (ctx->alg_type == SEC_AEAD) in sec_cipher_pbuf_unmap()
939 copy_size = c_req->c_len + aead_req->assoclen; in sec_cipher_pbuf_unmap()
941 copy_size = c_req->c_len; in sec_cipher_pbuf_unmap()
944 qp_ctx->res[req_id].pbuf, copy_size); in sec_cipher_pbuf_unmap()
946 dev_err(ctx->dev, "copy pbuf data to dst error!\n"); in sec_cipher_pbuf_unmap()
951 struct aead_request *aead_req = req->aead_req; in sec_aead_mac_init()
954 u8 *mac_out = req->out_mac; in sec_aead_mac_init()
955 struct scatterlist *sgl = aead_req->src; in sec_aead_mac_init()
959 /* Copy input mac */ in sec_aead_mac_init()
960 skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; in sec_aead_mac_init()
964 return -EINVAL; in sec_aead_mac_init()
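sec_aead_mac_init() above locates the expected ICV for AEAD decryption at the tail of the source data: skip_size = assoclen + cryptlen - authsize, then authsize bytes are copied out of the src scatterlist into out_mac. A flat-buffer illustration of that offset arithmetic (the real code walks an SGL):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* AEAD decrypt layout: [ AAD (assoclen) | ciphertext | ICV (authsize) ].
 * The expected MAC starts at assoclen + cryptlen - authsize, with cryptlen
 * counting ciphertext plus ICV as the AEAD API does for decryption. */
static void copy_input_mac(uint8_t *mac_out, const uint8_t *src,
			   size_t assoclen, size_t cryptlen, size_t authsize)
{
	size_t skip = assoclen + cryptlen - authsize;

	memcpy(mac_out, src + skip, authsize);
}

int main(void)
{
	uint8_t buf[64], mac[16];
	size_t i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)i;
	copy_input_mac(mac, buf, 16, 48, 16);	/* ICV = last 16 bytes */
	printf("mac[0]=0x%02x\n", mac[0]);	/* 0x30 (offset 48)    */
	return 0;
}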
972 struct sec_cipher_req *c_req = &req->c_req; in sec_cipher_map()
973 struct sec_aead_req *a_req = &req->aead_req; in sec_cipher_map()
974 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_cipher_map()
975 struct sec_alg_res *res = &qp_ctx->res[req->req_id]; in sec_cipher_map()
976 struct device *dev = ctx->dev; in sec_cipher_map()
979 if (req->use_pbuf) { in sec_cipher_map()
980 c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; in sec_cipher_map()
981 c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; in sec_cipher_map()
982 if (ctx->alg_type == SEC_AEAD) { in sec_cipher_map()
983 a_req->a_ivin = res->a_ivin; in sec_cipher_map()
984 a_req->a_ivin_dma = res->a_ivin_dma; in sec_cipher_map()
985 a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET; in sec_cipher_map()
986 a_req->out_mac_dma = res->pbuf_dma + in sec_cipher_map()
993 c_req->c_ivin = res->c_ivin; in sec_cipher_map()
994 c_req->c_ivin_dma = res->c_ivin_dma; in sec_cipher_map()
995 if (ctx->alg_type == SEC_AEAD) { in sec_cipher_map()
996 a_req->a_ivin = res->a_ivin; in sec_cipher_map()
997 a_req->a_ivin_dma = res->a_ivin_dma; in sec_cipher_map()
998 a_req->out_mac = res->out_mac; in sec_cipher_map()
999 a_req->out_mac_dma = res->out_mac_dma; in sec_cipher_map()
1002 req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, in sec_cipher_map()
1003 qp_ctx->c_in_pool, in sec_cipher_map()
1004 req->req_id, in sec_cipher_map()
1005 &req->in_dma); in sec_cipher_map()
1006 if (IS_ERR(req->in)) { in sec_cipher_map()
1007 dev_err(dev, "fail to dma map input sgl buffers!\n"); in sec_cipher_map()
1008 return PTR_ERR(req->in); in sec_cipher_map()
1011 if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { in sec_cipher_map()
1020 c_req->c_out = req->in; in sec_cipher_map()
1021 c_req->c_out_dma = req->in_dma; in sec_cipher_map()
1023 c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, in sec_cipher_map()
1024 qp_ctx->c_out_pool, in sec_cipher_map()
1025 req->req_id, in sec_cipher_map()
1026 &c_req->c_out_dma); in sec_cipher_map()
1028 if (IS_ERR(c_req->c_out)) { in sec_cipher_map()
1030 hisi_acc_sg_buf_unmap(dev, src, req->in); in sec_cipher_map()
1031 return PTR_ERR(c_req->c_out); in sec_cipher_map()
1041 struct sec_cipher_req *c_req = &req->c_req; in sec_cipher_unmap()
1042 struct device *dev = ctx->dev; in sec_cipher_unmap()
1044 if (req->use_pbuf) { in sec_cipher_unmap()
1048 hisi_acc_sg_buf_unmap(dev, src, req->in); in sec_cipher_unmap()
1050 hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); in sec_cipher_unmap()
1056 struct skcipher_request *sq = req->c_req.sk_req; in sec_skcipher_sgl_map()
1058 return sec_cipher_map(ctx, req, sq->src, sq->dst); in sec_skcipher_sgl_map()
1063 struct skcipher_request *sq = req->c_req.sk_req; in sec_skcipher_sgl_unmap()
1065 sec_cipher_unmap(ctx, req, sq->src, sq->dst); in sec_skcipher_sgl_unmap()
1071 switch (keys->enckeylen) { in sec_aead_aes_set_key()
1073 c_ctx->c_key_len = SEC_CKEY_128BIT; in sec_aead_aes_set_key()
1076 c_ctx->c_key_len = SEC_CKEY_192BIT; in sec_aead_aes_set_key()
1079 c_ctx->c_key_len = SEC_CKEY_256BIT; in sec_aead_aes_set_key()
1083 return -EINVAL; in sec_aead_aes_set_key()
1085 memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen); in sec_aead_aes_set_key()
1093 struct crypto_shash *hash_tfm = ctx->hash_tfm; in sec_aead_auth_set_key()
1096 if (!keys->authkeylen) { in sec_aead_auth_set_key()
1098 return -EINVAL; in sec_aead_auth_set_key()
1103 if (keys->authkeylen > blocksize) { in sec_aead_auth_set_key()
1104 ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, in sec_aead_auth_set_key()
1105 keys->authkeylen, ctx->a_key); in sec_aead_auth_set_key()
1108 return -EINVAL; in sec_aead_auth_set_key()
1110 ctx->a_key_len = digestsize; in sec_aead_auth_set_key()
1112 memcpy(ctx->a_key, keys->authkey, keys->authkeylen); in sec_aead_auth_set_key()
1113 ctx->a_key_len = keys->authkeylen; in sec_aead_auth_set_key()
1123 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_setauthsize()
1125 if (unlikely(a_ctx->fallback_aead_tfm)) in sec_aead_setauthsize()
1126 return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); in sec_aead_setauthsize()
1135 crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK); in sec_aead_fallback_setkey()
1136 crypto_aead_set_flags(a_ctx->fallback_aead_tfm, in sec_aead_fallback_setkey()
1138 return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen); in sec_aead_fallback_setkey()
1148 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_aead_setkey()
1149 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_setkey()
1150 struct device *dev = ctx->dev; in sec_aead_setkey()
1154 ctx->a_ctx.a_alg = a_alg; in sec_aead_setkey()
1155 ctx->c_ctx.c_alg = c_alg; in sec_aead_setkey()
1156 ctx->a_ctx.mac_len = mac_len; in sec_aead_setkey()
1157 c_ctx->c_mode = c_mode; in sec_aead_setkey()
1165 memcpy(c_ctx->c_key, key, keylen); in sec_aead_setkey()
1167 if (unlikely(a_ctx->fallback_aead_tfm)) { in sec_aead_setkey()
1185 ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); in sec_aead_setkey()
1191 if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) || in sec_aead_setkey()
1192 (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) { in sec_aead_setkey()
1201 return -EINVAL; in sec_aead_setkey()
1229 struct aead_request *aq = req->aead_req.aead_req; in GEN_SEC_AEAD_SETKEY_FUNC()
1231 return sec_cipher_map(ctx, req, aq->src, aq->dst); in GEN_SEC_AEAD_SETKEY_FUNC()
1236 struct aead_request *aq = req->aead_req.aead_req; in sec_aead_sgl_unmap()
1238 sec_cipher_unmap(ctx, req, aq->src, aq->dst); in sec_aead_sgl_unmap()
1245 ret = ctx->req_op->buf_map(ctx, req); in sec_request_transfer()
1249 ctx->req_op->do_transfer(ctx, req); in sec_request_transfer()
1251 ret = ctx->req_op->bd_fill(ctx, req); in sec_request_transfer()
1258 ctx->req_op->buf_unmap(ctx, req); in sec_request_transfer()
1264 ctx->req_op->buf_unmap(ctx, req); in sec_request_untransfer()
1269 struct skcipher_request *sk_req = req->c_req.sk_req; in sec_skcipher_copy_iv()
1270 struct sec_cipher_req *c_req = &req->c_req; in sec_skcipher_copy_iv()
1272 memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); in sec_skcipher_copy_iv()
1277 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_bd_fill()
1278 struct sec_cipher_req *c_req = &req->c_req; in sec_skcipher_bd_fill()
1279 struct sec_sqe *sec_sqe = &req->sec_sqe; in sec_skcipher_bd_fill()
1286 sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); in sec_skcipher_bd_fill()
1287 sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); in sec_skcipher_bd_fill()
1288 sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); in sec_skcipher_bd_fill()
1289 sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); in sec_skcipher_bd_fill()
1291 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << in sec_skcipher_bd_fill()
1293 sec_sqe->type2.c_alg = c_ctx->c_alg; in sec_skcipher_bd_fill()
1294 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) << in sec_skcipher_bd_fill()
1298 if (c_req->encrypt) in sec_skcipher_bd_fill()
1302 sec_sqe->type_cipher_auth = bd_type | cipher; in sec_skcipher_bd_fill()
1305 if (req->use_pbuf) { in sec_skcipher_bd_fill()
1313 sec_sqe->sdm_addr_type |= da_type; in sec_skcipher_bd_fill()
1315 if (req->in_dma != c_req->c_out_dma) in sec_skcipher_bd_fill()
1318 sec_sqe->sds_sa_type = (de | scene | sa_type); in sec_skcipher_bd_fill()
1320 sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); in sec_skcipher_bd_fill()
1321 sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); in sec_skcipher_bd_fill()
1328 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; in sec_skcipher_bd_fill_v3()
1329 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_bd_fill_v3()
1330 struct sec_cipher_req *c_req = &req->c_req; in sec_skcipher_bd_fill_v3()
1336 sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma); in sec_skcipher_bd_fill_v3()
1337 sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); in sec_skcipher_bd_fill_v3()
1338 sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); in sec_skcipher_bd_fill_v3()
1339 sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); in sec_skcipher_bd_fill_v3()
1341 sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) | in sec_skcipher_bd_fill_v3()
1342 c_ctx->c_mode; in sec_skcipher_bd_fill_v3()
1343 sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) << in sec_skcipher_bd_fill_v3()
1346 if (c_req->encrypt) in sec_skcipher_bd_fill_v3()
1350 sec_sqe3->c_icv_key |= cpu_to_le16(cipher); in sec_skcipher_bd_fill_v3()
1353 sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER << in sec_skcipher_bd_fill_v3()
1356 if (req->use_pbuf) { in sec_skcipher_bd_fill_v3()
1365 if (req->in_dma != c_req->c_out_dma) in sec_skcipher_bd_fill_v3()
1369 sec_sqe3->bd_param = cpu_to_le32(bd_param); in sec_skcipher_bd_fill_v3()
1371 sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len); in sec_skcipher_bd_fill_v3()
1372 sec_sqe3->tag = cpu_to_le64(req); in sec_skcipher_bd_fill_v3()
1377 /* increment counter (128-bit int) */
1381 --bits; in ctr_iv_inc()
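Only the comment and the --bits step of ctr_iv_inc() survive the match filter, so the sketch below fills in a plausible byte-wise carry loop as an assumption: add a 32-bit amount to a big-endian counter (for example a 16-byte CTR IV), propagating the carry from the last byte upward.

#include <stdint.h>
#include <stdio.h>

/* Add 'nums' to a big-endian counter of 'len' bytes, carrying from the
 * least significant (last) byte upward. Userspace sketch only; the exact
 * body of the driver's ctr_iv_inc() is not visible in this listing. */
static void ctr_iv_inc(uint8_t *counter, unsigned int len, uint32_t nums)
{
	while (len && nums) {
		--len;
		nums += counter[len];
		counter[len] = nums & 0xff;
		nums >>= 8;
	}
}

int main(void)
{
	uint8_t iv[16] = { [14] = 0xff, [15] = 0xff };

	ctr_iv_inc(iv, sizeof(iv), 2);	/* 0x...ffff + 2 = 0x...010001 */
	printf("%02x %02x %02x\n", iv[13], iv[14], iv[15]);
	return 0;
}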
1390 struct aead_request *aead_req = req->aead_req.aead_req; in sec_update_iv()
1391 struct skcipher_request *sk_req = req->c_req.sk_req; in sec_update_iv()
1392 u32 iv_size = req->ctx->c_ctx.ivsize; in sec_update_iv()
1398 if (req->c_req.encrypt) in sec_update_iv()
1399 sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; in sec_update_iv()
1401 sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src; in sec_update_iv()
1404 iv = sk_req->iv; in sec_update_iv()
1405 cryptlen = sk_req->cryptlen; in sec_update_iv()
1407 iv = aead_req->iv; in sec_update_iv()
1408 cryptlen = aead_req->cryptlen; in sec_update_iv()
1411 if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) { in sec_update_iv()
1413 cryptlen - iv_size); in sec_update_iv()
1415 dev_err(req->ctx->dev, "copy output iv error!\n"); in sec_update_iv()
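For CBC, sec_update_iv() above takes the next chaining IV from the last ciphertext block: iv_size bytes at offset cryptlen - iv_size of the ciphertext side, which is dst when encrypting and src when decrypting. The listing truncates the SGL copy call; the flat-buffer sketch below shows the same rule.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IV_SIZE 16	/* AES block size, the CBC IV size */

/* CBC chaining rule: the next IV is the last ciphertext block. The driver
 * copies it out of the dst (encrypt) or src (decrypt) scatterlist; a flat
 * ciphertext buffer stands in for the SGL here. */
static void cbc_next_iv(uint8_t *iv, const uint8_t *ct, size_t cryptlen)
{
	if (cryptlen < IV_SIZE)
		return;
	memcpy(iv, ct + cryptlen - IV_SIZE, IV_SIZE);
}

int main(void)
{
	uint8_t ct[32], iv[IV_SIZE];
	size_t i;

	for (i = 0; i < sizeof(ct); i++)
		ct[i] = (uint8_t)i;
	cbc_next_iv(iv, ct, sizeof(ct));
	printf("next iv starts with 0x%02x\n", iv[0]);	/* 0x10 */
	return 0;
}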
1429 spin_lock_bh(&qp_ctx->req_lock); in sec_back_req_clear()
1430 if (ctx->fake_req_limit >= in sec_back_req_clear()
1431 atomic_read(&qp_ctx->qp->qp_status.used) && in sec_back_req_clear()
1432 !list_empty(&qp_ctx->backlog)) { in sec_back_req_clear()
1433 backlog_req = list_first_entry(&qp_ctx->backlog, in sec_back_req_clear()
1435 list_del(&backlog_req->backlog_head); in sec_back_req_clear()
1437 spin_unlock_bh(&qp_ctx->req_lock); in sec_back_req_clear()
1445 struct skcipher_request *sk_req = req->c_req.sk_req; in sec_skcipher_callback()
1446 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_skcipher_callback()
1453 if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || in sec_skcipher_callback()
1454 ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) in sec_skcipher_callback()
1462 backlog_sk_req = backlog_req->c_req.sk_req; in sec_skcipher_callback()
1463 backlog_sk_req->base.complete(&backlog_sk_req->base, in sec_skcipher_callback()
1464 -EINPROGRESS); in sec_skcipher_callback()
1465 atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); in sec_skcipher_callback()
1468 sk_req->base.complete(&sk_req->base, err); in sec_skcipher_callback()
1473 struct aead_request *aead_req = req->aead_req.aead_req; in set_aead_auth_iv()
1474 struct sec_cipher_req *c_req = &req->c_req; in set_aead_auth_iv()
1475 struct sec_aead_req *a_req = &req->aead_req; in set_aead_auth_iv()
1476 size_t authsize = ctx->a_ctx.mac_len; in set_aead_auth_iv()
1477 u32 data_size = aead_req->cryptlen; in set_aead_auth_iv()
1482 cl = c_req->c_ivin[0] + 1; in set_aead_auth_iv()
1483 c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00; in set_aead_auth_iv()
1484 memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl); in set_aead_auth_iv()
1485 c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT; in set_aead_auth_iv()
1488 flage |= c_req->c_ivin[0] & IV_CL_MASK; in set_aead_auth_iv()
1491 cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM; in set_aead_auth_iv()
1493 if (aead_req->assoclen) in set_aead_auth_iv()
1496 memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize); in set_aead_auth_iv()
1497 a_req->a_ivin[0] = flage; in set_aead_auth_iv()
1504 if (!c_req->encrypt) in set_aead_auth_iv()
1505 data_size = aead_req->cryptlen - authsize; in set_aead_auth_iv()
1507 a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = in set_aead_auth_iv()
1510 a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] = in set_aead_auth_iv()
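set_aead_auth_iv() above looks like RFC 3610 CCM formatting: cl (derived from iv[0] + 1) is the width of the length field, cm encodes the MAC length, a flag bit is set when there is associated data, and the message length is written into the trailing bytes of the authentication IV. The sketch below builds the RFC-level B0 block under those assumptions; the driver's IV_* constants and exact offsets are not visible here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 3610 B0 block: flags = Adata<<6 | ((M-2)/2)<<3 | (L-1), then the
 * nonce, then a big-endian L-byte message length. Offsets and constants
 * follow the RFC, not the driver's IV_* definitions. */
static void ccm_build_b0(uint8_t b0[16], const uint8_t *nonce, unsigned int l,
			 unsigned int mac_len, uint64_t msglen, int has_aad)
{
	unsigned int i;

	b0[0] = (uint8_t)(((has_aad ? 1 : 0) << 6) |
			  (((mac_len - 2) / 2) << 3) | (l - 1));
	memcpy(&b0[1], nonce, 15 - l);
	for (i = 0; i < l; i++)			/* big-endian length field */
		b0[15 - i] = (uint8_t)(msglen >> (8 * i));
}

int main(void)
{
	uint8_t nonce[13] = { 0 };
	uint8_t b0[16];

	ccm_build_b0(b0, nonce, 2, 16, 1024, 1);
	printf("flags=0x%02x len=%02x%02x\n", b0[0], b0[14], b0[15]);
	return 0;
}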
1516 struct aead_request *aead_req = req->aead_req.aead_req; in sec_aead_set_iv()
1519 struct sec_cipher_req *c_req = &req->c_req; in sec_aead_set_iv()
1520 struct sec_aead_req *a_req = &req->aead_req; in sec_aead_set_iv()
1522 memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); in sec_aead_set_iv()
1524 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) { in sec_aead_set_iv()
1529 ctx->a_ctx.mac_len = authsize; in sec_aead_set_iv()
1535 if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { in sec_aead_set_iv()
1536 ctx->a_ctx.mac_len = authsize; in sec_aead_set_iv()
1537 memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); in sec_aead_set_iv()
1544 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_xcm()
1545 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_xcm()
1548 sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len); in sec_auth_bd_fill_xcm()
1551 sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; in sec_auth_bd_fill_xcm()
1552 sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); in sec_auth_bd_fill_xcm()
1553 sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET; in sec_auth_bd_fill_xcm()
1556 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; in sec_auth_bd_fill_xcm()
1558 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; in sec_auth_bd_fill_xcm()
1560 sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen); in sec_auth_bd_fill_xcm()
1561 sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0); in sec_auth_bd_fill_xcm()
1562 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_xcm()
1564 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_xcm()
1570 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_xcm_v3()
1571 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_xcm_v3()
1574 sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3); in sec_auth_bd_fill_xcm_v3()
1577 sqe3->a_key_addr = sqe3->c_key_addr; in sec_auth_bd_fill_xcm_v3()
1578 sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); in sec_auth_bd_fill_xcm_v3()
1579 sqe3->auth_mac_key |= SEC_NO_AUTH; in sec_auth_bd_fill_xcm_v3()
1582 sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; in sec_auth_bd_fill_xcm_v3()
1584 sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; in sec_auth_bd_fill_xcm_v3()
1586 sqe3->a_len_key = cpu_to_le32(aq->assoclen); in sec_auth_bd_fill_xcm_v3()
1587 sqe3->auth_src_offset = cpu_to_le16(0x0); in sec_auth_bd_fill_xcm_v3()
1588 sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_xcm_v3()
1589 sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_xcm_v3()
1595 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_ex()
1596 struct sec_cipher_req *c_req = &req->c_req; in sec_auth_bd_fill_ex()
1597 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_ex()
1599 sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); in sec_auth_bd_fill_ex()
1601 sec_sqe->type2.mac_key_alg = in sec_auth_bd_fill_ex()
1602 cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); in sec_auth_bd_fill_ex()
1604 sec_sqe->type2.mac_key_alg |= in sec_auth_bd_fill_ex()
1605 cpu_to_le32((u32)((ctx->a_key_len) / in sec_auth_bd_fill_ex()
1608 sec_sqe->type2.mac_key_alg |= in sec_auth_bd_fill_ex()
1609 cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); in sec_auth_bd_fill_ex()
1612 sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; in sec_auth_bd_fill_ex()
1613 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; in sec_auth_bd_fill_ex()
1615 sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET; in sec_auth_bd_fill_ex()
1616 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; in sec_auth_bd_fill_ex()
1618 sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen); in sec_auth_bd_fill_ex()
1620 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_ex()
1622 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_ex()
1627 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; in sec_aead_bd_fill()
1628 struct sec_sqe *sec_sqe = &req->sec_sqe; in sec_aead_bd_fill()
1633 dev_err(ctx->dev, "skcipher bd fill is error!\n"); in sec_aead_bd_fill()
1637 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || in sec_aead_bd_fill()
1638 ctx->c_ctx.c_mode == SEC_CMODE_GCM) in sec_aead_bd_fill()
1639 sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe); in sec_aead_bd_fill()
1641 sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); in sec_aead_bd_fill()
1649 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_ex_v3()
1650 struct sec_cipher_req *c_req = &req->c_req; in sec_auth_bd_fill_ex_v3()
1651 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_ex_v3()
1653 sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); in sec_auth_bd_fill_ex_v3()
1655 sqe3->auth_mac_key |= in sec_auth_bd_fill_ex_v3()
1656 cpu_to_le32((u32)(ctx->mac_len / in sec_auth_bd_fill_ex_v3()
1659 sqe3->auth_mac_key |= in sec_auth_bd_fill_ex_v3()
1660 cpu_to_le32((u32)(ctx->a_key_len / in sec_auth_bd_fill_ex_v3()
1663 sqe3->auth_mac_key |= in sec_auth_bd_fill_ex_v3()
1664 cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); in sec_auth_bd_fill_ex_v3()
1667 sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); in sec_auth_bd_fill_ex_v3()
1668 sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; in sec_auth_bd_fill_ex_v3()
1670 sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2); in sec_auth_bd_fill_ex_v3()
1671 sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; in sec_auth_bd_fill_ex_v3()
1673 sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen); in sec_auth_bd_fill_ex_v3()
1675 sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_ex_v3()
1677 sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_ex_v3()
1682 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; in sec_aead_bd_fill_v3()
1683 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; in sec_aead_bd_fill_v3()
1688 dev_err(ctx->dev, "skcipher bd3 fill is error!\n"); in sec_aead_bd_fill_v3()
1692 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || in sec_aead_bd_fill_v3()
1693 ctx->c_ctx.c_mode == SEC_CMODE_GCM) in sec_aead_bd_fill_v3()
1694 sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt, in sec_aead_bd_fill_v3()
1697 sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt, in sec_aead_bd_fill_v3()
1705 struct aead_request *a_req = req->aead_req.aead_req; in sec_aead_callback()
1707 struct sec_aead_req *aead_req = &req->aead_req; in sec_aead_callback()
1708 struct sec_cipher_req *c_req = &req->c_req; in sec_aead_callback()
1710 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_aead_callback()
1715 if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) in sec_aead_callback()
1719 if (!err && c_req->encrypt) { in sec_aead_callback()
1720 struct scatterlist *sgl = a_req->dst; in sec_aead_callback()
1723 aead_req->out_mac, in sec_aead_callback()
1724 authsize, a_req->cryptlen + in sec_aead_callback()
1725 a_req->assoclen); in sec_aead_callback()
1727 dev_err(c->dev, "copy out mac err!\n"); in sec_aead_callback()
1728 err = -EINVAL; in sec_aead_callback()
1739 backlog_aead_req = backlog_req->aead_req.aead_req; in sec_aead_callback()
1740 backlog_aead_req->base.complete(&backlog_aead_req->base, in sec_aead_callback()
1741 -EINPROGRESS); in sec_aead_callback()
1742 atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); in sec_aead_callback()
1745 a_req->base.complete(&a_req->base, err); in sec_aead_callback()
1761 qp_ctx = &ctx->qp_ctx[queue_id]; in sec_request_init()
1763 req->req_id = sec_alloc_req_id(req, qp_ctx); in sec_request_init()
1764 if (unlikely(req->req_id < 0)) { in sec_request_init()
1766 return req->req_id; in sec_request_init()
1774 struct sec_cipher_req *c_req = &req->c_req; in sec_process()
1786 if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || in sec_process()
1787 ctx->c_ctx.c_mode == SEC_CMODE_CTR)) in sec_process()
1788 sec_update_iv(req, ctx->alg_type); in sec_process()
1790 ret = ctx->req_op->bd_send(ctx, req); in sec_process()
1791 if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || in sec_process()
1792 (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { in sec_process()
1793 dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); in sec_process()
1801 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { in sec_process()
1802 if (ctx->alg_type == SEC_SKCIPHER) in sec_process()
1803 memcpy(req->c_req.sk_req->iv, c_req->c_ivin, in sec_process()
1804 ctx->c_ctx.ivsize); in sec_process()
1806 memcpy(req->aead_req.aead_req->iv, c_req->c_ivin, in sec_process()
1807 ctx->c_ctx.ivsize); in sec_process()
1865 if (ctx->sec->qm.ver < QM_HW_V3) { in sec_skcipher_ctx_init()
1866 ctx->type_supported = SEC_BD_TYPE2; in sec_skcipher_ctx_init()
1867 ctx->req_op = &sec_skcipher_req_ops; in sec_skcipher_ctx_init()
1869 ctx->type_supported = SEC_BD_TYPE3; in sec_skcipher_ctx_init()
1870 ctx->req_op = &sec_skcipher_req_ops_v3; in sec_skcipher_ctx_init()
1887 ctx->alg_type = SEC_AEAD; in sec_aead_init()
1888 ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); in sec_aead_init()
1889 if (ctx->c_ctx.ivsize < SEC_AIV_SIZE || in sec_aead_init()
1890 ctx->c_ctx.ivsize > SEC_IV_SIZE) { in sec_aead_init()
1892 return -EINVAL; in sec_aead_init()
1898 if (ctx->sec->qm.ver < QM_HW_V3) { in sec_aead_init()
1899 ctx->type_supported = SEC_BD_TYPE2; in sec_aead_init()
1900 ctx->req_op = &sec_aead_req_ops; in sec_aead_init()
1902 ctx->type_supported = SEC_BD_TYPE3; in sec_aead_init()
1903 ctx->req_op = &sec_aead_req_ops_v3; in sec_aead_init()
1935 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; in sec_aead_ctx_init()
1944 auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); in sec_aead_ctx_init()
1945 if (IS_ERR(auth_ctx->hash_tfm)) { in sec_aead_ctx_init()
1946 dev_err(ctx->dev, "aead alloc shash error!\n"); in sec_aead_ctx_init()
1948 return PTR_ERR(auth_ctx->hash_tfm); in sec_aead_ctx_init()
1958 crypto_free_shash(ctx->a_ctx.hash_tfm); in sec_aead_ctx_exit()
1966 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_xcm_ctx_init()
1967 const char *aead_name = alg->base.cra_name; in sec_aead_xcm_ctx_init()
1972 dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n"); in sec_aead_xcm_ctx_init()
1976 a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, in sec_aead_xcm_ctx_init()
1979 if (IS_ERR(a_ctx->fallback_aead_tfm)) { in sec_aead_xcm_ctx_init()
1980 dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); in sec_aead_xcm_ctx_init()
1982 return PTR_ERR(a_ctx->fallback_aead_tfm); in sec_aead_xcm_ctx_init()
1984 a_ctx->fallback = false; in sec_aead_xcm_ctx_init()
1993 crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); in sec_aead_xcm_ctx_exit()
2015 u32 cryptlen = sreq->c_req.sk_req->cryptlen; in sec_skcipher_cryptlen_ckeck()
2016 struct device *dev = ctx->dev; in sec_skcipher_cryptlen_ckeck()
2017 u8 c_mode = ctx->c_ctx.c_mode; in sec_skcipher_cryptlen_ckeck()
2023 dev_err(dev, "skcipher XTS mode input length error!\n"); in sec_skcipher_cryptlen_ckeck()
2024 ret = -EINVAL; in sec_skcipher_cryptlen_ckeck()
2029 if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) { in sec_skcipher_cryptlen_ckeck()
2030 dev_err(dev, "skcipher AES input length error!\n"); in sec_skcipher_cryptlen_ckeck()
2031 ret = -EINVAL; in sec_skcipher_cryptlen_ckeck()
2037 if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { in sec_skcipher_cryptlen_ckeck()
2039 ret = -EINVAL; in sec_skcipher_cryptlen_ckeck()
2043 ret = -EINVAL; in sec_skcipher_cryptlen_ckeck()
2051 struct skcipher_request *sk_req = sreq->c_req.sk_req; in sec_skcipher_param_check()
2052 struct device *dev = ctx->dev; in sec_skcipher_param_check()
2053 u8 c_alg = ctx->c_ctx.c_alg; in sec_skcipher_param_check()
2055 if (unlikely(!sk_req->src || !sk_req->dst || in sec_skcipher_param_check()
2056 sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { in sec_skcipher_param_check()
2057 dev_err(dev, "skcipher input param error!\n"); in sec_skcipher_param_check()
2058 return -EINVAL; in sec_skcipher_param_check()
2060 sreq->c_req.c_len = sk_req->cryptlen; in sec_skcipher_param_check()
2062 if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) in sec_skcipher_param_check()
2063 sreq->use_pbuf = true; in sec_skcipher_param_check()
2065 sreq->use_pbuf = false; in sec_skcipher_param_check()
2068 if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) { in sec_skcipher_param_check()
2069 dev_err(dev, "skcipher 3des input length error!\n"); in sec_skcipher_param_check()
2070 return -EINVAL; in sec_skcipher_param_check()
2079 return -EINVAL; in sec_skcipher_param_check()
2085 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_soft_crypto()
2086 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); in sec_skcipher_soft_crypto()
2087 struct device *dev = ctx->dev; in sec_skcipher_soft_crypto()
2090 if (!c_ctx->fbtfm) { in sec_skcipher_soft_crypto()
2092 return -EINVAL; in sec_skcipher_soft_crypto()
2095 skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm); in sec_skcipher_soft_crypto()
2098 skcipher_request_set_callback(subreq, sreq->base.flags, in sec_skcipher_soft_crypto()
2100 skcipher_request_set_crypt(subreq, sreq->src, sreq->dst, in sec_skcipher_soft_crypto()
2101 sreq->cryptlen, sreq->iv); in sec_skcipher_soft_crypto()
2119 if (!sk_req->cryptlen) { in sec_skcipher_crypto()
2120 if (ctx->c_ctx.c_mode == SEC_CMODE_XTS) in sec_skcipher_crypto()
2121 return -EINVAL; in sec_skcipher_crypto()
2125 req->flag = sk_req->base.flags; in sec_skcipher_crypto()
2126 req->c_req.sk_req = sk_req; in sec_skcipher_crypto()
2127 req->c_req.encrypt = encrypt; in sec_skcipher_crypto()
2128 req->ctx = ctx; in sec_skcipher_crypto()
2132 return -EINVAL; in sec_skcipher_crypto()
2134 if (unlikely(ctx->c_ctx.fallback)) in sec_skcipher_crypto()
2137 return ctx->req_op->process(ctx, req); in sec_skcipher_crypto()
2251 cl = aead_req->iv[0] + 1; in aead_iv_demension_check()
2253 return -EINVAL; in aead_iv_demension_check()
2255 if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl)) in aead_iv_demension_check()
2256 return -EOVERFLOW; in aead_iv_demension_check()
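aead_iv_demension_check() above enforces the CCM length-field constraint: with L = iv[0] + 1 bytes reserved for the length, the payload must fit in 2^(8*L) - 1 bytes, otherwise the counter block overflows. A standalone check along those lines; the 2..8 bounds stand in for the driver's IV_CL_* limits.

#include <stdint.h>
#include <stdio.h>

/* CCM: iv[0] holds L - 1, where L is the byte width of the length field,
 * so the payload must be smaller than 2^(8*L). The 2..8 range below is
 * the RFC 3610 range, used as an assumption about IV_CL_MIN/MID/MAX. */
static int ccm_len_ok(uint8_t iv0, uint64_t cryptlen)
{
	unsigned int l = iv0 + 1;

	if (l < 2 || l > 8)
		return 0;
	if (l < 8 && (cryptlen >> (8 * l)))
		return 0;	/* payload does not fit in the length field */
	return 1;
}

int main(void)
{
	printf("%d %d\n",
	       ccm_len_ok(1, 70000),	/* L = 2: limit 65535, rejected */
	       ccm_len_ok(2, 70000));	/* L = 3: fits, accepted        */
	return 0;
}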
2263 struct aead_request *req = sreq->aead_req.aead_req; in sec_aead_spec_check()
2266 u8 c_mode = ctx->c_ctx.c_mode; in sec_aead_spec_check()
2267 struct device *dev = ctx->dev; in sec_aead_spec_check()
2270 if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || in sec_aead_spec_check()
2271 req->assoclen > SEC_MAX_AAD_LEN)) { in sec_aead_spec_check()
2272 dev_err(dev, "aead input spec error!\n"); in sec_aead_spec_check()
2273 return -EINVAL; in sec_aead_spec_check()
2279 dev_err(dev, "aead input mac length error!\n"); in sec_aead_spec_check()
2280 return -EINVAL; in sec_aead_spec_check()
2284 if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { in sec_aead_spec_check()
2285 dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); in sec_aead_spec_check()
2286 return -EINVAL; in sec_aead_spec_check()
2290 dev_err(dev, "aead input iv param error!\n"); in sec_aead_spec_check()
2295 if (sreq->c_req.encrypt) in sec_aead_spec_check()
2296 sreq->c_req.c_len = req->cryptlen; in sec_aead_spec_check()
2298 sreq->c_req.c_len = req->cryptlen - authsize; in sec_aead_spec_check()
2300 if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { in sec_aead_spec_check()
2302 return -EINVAL; in sec_aead_spec_check()
2311 struct aead_request *req = sreq->aead_req.aead_req; in sec_aead_param_check()
2314 struct device *dev = ctx->dev; in sec_aead_param_check()
2315 u8 c_alg = ctx->c_ctx.c_alg; in sec_aead_param_check()
2317 if (unlikely(!req->src || !req->dst)) { in sec_aead_param_check()
2318 dev_err(dev, "aead input param error!\n"); in sec_aead_param_check()
2319 return -EINVAL; in sec_aead_param_check()
2322 if (ctx->sec->qm.ver == QM_HW_V2) { in sec_aead_param_check()
2323 if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && in sec_aead_param_check()
2324 req->cryptlen <= authsize))) { in sec_aead_param_check()
2325 ctx->a_ctx.fallback = true; in sec_aead_param_check()
2326 return -EINVAL; in sec_aead_param_check()
2333 return -EINVAL; in sec_aead_param_check()
2337 return -EINVAL; in sec_aead_param_check()
2339 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= in sec_aead_param_check()
2341 sreq->use_pbuf = true; in sec_aead_param_check()
2343 sreq->use_pbuf = false; in sec_aead_param_check()
2352 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_soft_crypto()
2353 struct device *dev = ctx->dev; in sec_aead_soft_crypto()
2357 /* Kunpeng920 aead mode does not support 0-size input */ in sec_aead_soft_crypto()
2358 if (!a_ctx->fallback_aead_tfm) { in sec_aead_soft_crypto()
2360 return -EINVAL; in sec_aead_soft_crypto()
2363 subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); in sec_aead_soft_crypto()
2365 return -ENOMEM; in sec_aead_soft_crypto()
2367 aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); in sec_aead_soft_crypto()
2368 aead_request_set_callback(subreq, aead_req->base.flags, in sec_aead_soft_crypto()
2369 aead_req->base.complete, aead_req->base.data); in sec_aead_soft_crypto()
2370 aead_request_set_crypt(subreq, aead_req->src, aead_req->dst, in sec_aead_soft_crypto()
2371 aead_req->cryptlen, aead_req->iv); in sec_aead_soft_crypto()
2372 aead_request_set_ad(subreq, aead_req->assoclen); in sec_aead_soft_crypto()
2390 req->flag = a_req->base.flags; in sec_aead_crypto()
2391 req->aead_req.aead_req = a_req; in sec_aead_crypto()
2392 req->c_req.encrypt = encrypt; in sec_aead_crypto()
2393 req->ctx = ctx; in sec_aead_crypto()
2397 if (ctx->a_ctx.fallback) in sec_aead_crypto()
2399 return -EINVAL; in sec_aead_crypto()
2402 return ctx->req_op->process(ctx, req); in sec_aead_crypto()