Lines matching "+full:aes +full:-gcm" in mtk-aes.c (MediaTek EIP97 AES driver); non-matching context is elided below as /* ... */.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for EIP97 AES acceleration.
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
/* ... */
#include "mtk-platform.h"

/* ... */
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
/* AES-CBC/ECB/CTR/OFB/CFB command token */
/* ... */
/* AES-GCM command token */
/* ... */
/* AES transform information word 0 fields */
/* ... */
/* AES transform information word 1 fields */
/* ... */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
/* ... */

/* AES flags */
/*
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm
 * @state:	contains keys and initial vectors
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | an all-zero 128-bit string encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine needs all of this information to perform:
 * - Commands decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
 */
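/*
 * Illustrative sketch (not the driver's own definition): a C view of the
 * GCM state layout pictured above. The struct name is hypothetical; the
 * field widths follow the diagram, assuming SIZE_IN_WORDS() from
 * mtk-platform.h and the key/block size constants from <crypto/aes.h>.
 */
struct example_gcm_state_layout {
	__le32 key[SIZE_IN_WORDS(AES_KEYSIZE_256)];	/* 128/192/256-bit AES key */
	__le32 hash_key[SIZE_IN_WORDS(AES_BLOCK_SIZE)];	/* H = E_K(0^128) for GHASH */
	__le32 iv[4];					/* 4 * 4 bytes of IVs */
};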
static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
/* in mtk_aes_find_dev() */
	if (!ctx->cryp) {
		/* ... */
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
static inline int mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
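/*
 * Worked example: mtk_aes_padlen() rounds a request length up to the
 * 16-byte AES block size. len = 30 gives 30 & 15 = 14, so padlen =
 * 16 - 14 = 2; len = 32 is already block-aligned and gives padlen = 0.
 */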
/* in mtk_aes_check_aligned() */
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			/* ... */
			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);
	/* ... */
	sg->length += dma->remainder;
}
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);

	/* Make sure all descriptor updates are visible before starting DMA */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
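/*
 * Note: the two PREP_COUNT writes above act as the engine's doorbell.
 * Telling the command ring (CDR) and result ring (RDR) how many
 * descriptors were just prepared starts the DMA transfer, and
 * -EINPROGRESS tells the crypto core that the request completes
 * asynchronously, from the ring interrupt.
 */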
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
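/*
 * Note: when the aligned bounce buffer stood in for an unaligned
 * destination, the final sg_copy_from_buffer() above copies the engine's
 * output from aes->buf back into the caller's real scatterlist.
 */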
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}
/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		return;
	}

	memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
ctr:
	le32_add_cpu(&info->tfm[0],
		     le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);
		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}
		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
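/*
 * Note: the fallback above trades one memcpy for DMA-ability. If either
 * scatterlist fails the alignment check, the whole request is bounced
 * through the driver-owned aes->buf, padded out to a block boundary, and
 * presented to the hardware as the single-entry aes->aligned_sg.
 */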
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into transform state buffer. */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}
static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32-bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
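/*
 * Worked example of the overflow split: with iv[3] = 0xfffffffd and six
 * blocks left, end = start + 6 - 1 wraps past 0xffffffff, so this pass is
 * capped at datalen = AES_BLOCK_SIZE * -start = 3 blocks. crypto_inc()
 * then carries the increment into the upper IV words, and the remaining
 * blocks are submitted on the next mtk_aes_ctr_transfer() resume.
 */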
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}
/* Check and set the AES key into the transform state buffer */
/* in mtk_aes_setkey() */
	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	memcpy(ctx->key, key, keylen);
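/*
 * Example: an AES-128 key has keylen = SIZE_IN_WORDS(16) = 4, so the key
 * occupies state words 0..3 and mtk_aes_info_init() stores the IV right
 * after it, at info->state + ctx->keylen.
 */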
/* in mtk_aes_crypt() */
	if (!cryp)
		return -ENODEV;

	rctx->mode = mode;

	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
/* in mtk_aes_init_tfm() */
	ctx->base.start = mtk_aes_start;

/* in mtk_aes_ctr_init_tfm() */
	ctx->base.start = mtk_aes_ctr_start;
/* in aes_algs[] */
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mtk",
	/* ... */
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-mtk",
	/* ... */
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-mtk",
	/* ... */
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "ofb-aes-mtk",
	/* ... */
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "cfb-aes-mtk",
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	__le32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}
/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
	       req->iv, ivsize);
}
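/*
 * Size arithmetic example: for AES-256, AES_TFM_SIZE() covers
 * ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)
 * = 8 + (16 + 12) / 4 = 15 words: the key, then the hash key H, then the
 * three IV words loaded by AES_TFM_3IV (GCM_AES_IV_SIZE is 12 bytes).
 */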
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}
		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Hardware will append the authenticated tag to the output */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
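/*
 * Length bookkeeping: len = assoclen + cryptlen covers AAD plus payload.
 * On encryption the engine appends the gctx->authsize-byte tag, so
 * aes->total reserves room for it in the destination; on decryption
 * aes->total stays at len and the tag check is deferred to the
 * mtk_aes_gcm_tag_verify() resume handler.
 */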
/* in mtk_aes_gcm_crypt(); enc = !!(mode & AES_FLAGS_ENCRYPT) */
	if (!cryp)
		return -ENODEV;

	/* Compute text length. */
	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(cryp, enc, &req->base);
/*
 * Because of a hardware limitation, the GHASH key H (the all-zero block
 * encrypted with the AES key) must be pre-calculated in software and
 * stored in the transform state buffer.
 */
/* in mtk_aes_gcm_setkey() */
	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	/* ... compute hash, the encrypted all-zero block ... */
	memcpy(ctx->key, key, keylen);
	/* ... */
	memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);
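/*
 * Hedged sketch: one software way to derive the GHASH key H = E_K(0^128)
 * that the comment above calls for, using the kernel AES library from
 * <crypto/aes.h>. The function name is hypothetical and this is not
 * necessarily how this driver computes it.
 */
static int example_compute_ghash_key(const u8 *key, unsigned int keylen,
				     u8 hash[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes_ctx;
	int err;

	err = aes_expandkey(&aes_ctx, key, keylen);
	if (err)
		return err;

	memset(hash, 0, AES_BLOCK_SIZE);	/* the all-zero block */
	aes_encrypt(&aes_ctx, hash, hash);	/* H = E_K(0^128), in place */
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));	/* wipe round keys */
	return 0;
}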
/* in mtk_aes_gcm_setauthsize() */
	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
/* in mtk_aes_gcm_init() */
	ctx->base.start = mtk_aes_gcm_start;

/* in aes_gcm_alg */
	.cra_name		= "gcm(aes)",
	.cra_driver_name	= "gcm-aes-mtk",
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}
	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}
/* in mtk_aes_register_algs(), error path */
	for (; i--; )
		crypto_unregister_skcipher(&aes_algs[i]);
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);
	/* ... */

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* ... */
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	/* ... */

err_algs:
	list_del(&cryp->aes_list);
	/* ... */
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}
/* in mtk_cipher_alg_release() */
	list_del(&cryp->aes_list);