Lines matching refs: areq_ctx
55 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_copy_mac() local
58 cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src, in cc_copy_mac()
59 (skip - areq_ctx->req_authsize), skip, dir); in cc_copy_mac()
289 cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx, in cc_set_aead_conf_buf() argument
295 sg_init_one(&areq_ctx->ccm_adata_sg, config_data, in cc_set_aead_conf_buf()
296 AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size); in cc_set_aead_conf_buf()
297 if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) { in cc_set_aead_conf_buf()
302 &sg_dma_address(&areq_ctx->ccm_adata_sg), in cc_set_aead_conf_buf()
303 sg_page(&areq_ctx->ccm_adata_sg), in cc_set_aead_conf_buf()
304 sg_virt(&areq_ctx->ccm_adata_sg), in cc_set_aead_conf_buf()
305 areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length); in cc_set_aead_conf_buf()
308 cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg, in cc_set_aead_conf_buf()
309 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size), in cc_set_aead_conf_buf()
315 static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx, in cc_set_hash_buf() argument
321 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt); in cc_set_hash_buf()
322 if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) { in cc_set_hash_buf()
327 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg), in cc_set_hash_buf()
328 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset, in cc_set_hash_buf()
329 areq_ctx->buff_sg->length); in cc_set_hash_buf()
330 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; in cc_set_hash_buf()
331 areq_ctx->curr_sg = areq_ctx->buff_sg; in cc_set_hash_buf()
332 areq_ctx->in_nents = 0; in cc_set_hash_buf()
334 cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0, in cc_set_hash_buf()
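
The two helpers above, cc_set_aead_conf_buf() and cc_set_hash_buf(), share one pattern: wrap a contiguous driver-owned buffer in a single-entry scatterlist with sg_init_one(), DMA-map it with dma_map_sg(), log the mapping, and hand it to cc_add_sg_entry() as a single (DLLI) descriptor. Below is a minimal sketch of just the mapping step, using only the generic scatterlist/DMA API; the helper name and error handling are illustrative, not part of the ccree driver.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hedged sketch: map one contiguous buffer for device input, the way
 * cc_set_aead_conf_buf()/cc_set_hash_buf() do before queueing it as a
 * single-entry (DLLI) descriptor.  Returns 0 on success, -ENOMEM on failure.
 */
static int map_single_buf_sg(struct device *dev, struct scatterlist *sg,
			     void *buf, unsigned int len)
{
	sg_init_one(sg, buf, len);	/* one-entry scatterlist over buf */
	if (dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() of %u bytes failed\n", len);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapped buffer: dma=%pad virt=%p len=%u\n",
		&sg_dma_address(sg), sg_virt(sg), sg->length);
	return 0;
}
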
459 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_unmap_aead_request() local
460 unsigned int hw_iv_size = areq_ctx->hw_iv_size; in cc_unmap_aead_request()
464 if (areq_ctx->mac_buf_dma_addr) { in cc_unmap_aead_request()
465 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, in cc_unmap_aead_request()
469 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { in cc_unmap_aead_request()
470 if (areq_ctx->hkey_dma_addr) { in cc_unmap_aead_request()
471 dma_unmap_single(dev, areq_ctx->hkey_dma_addr, in cc_unmap_aead_request()
475 if (areq_ctx->gcm_block_len_dma_addr) { in cc_unmap_aead_request()
476 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr, in cc_unmap_aead_request()
480 if (areq_ctx->gcm_iv_inc1_dma_addr) { in cc_unmap_aead_request()
481 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, in cc_unmap_aead_request()
485 if (areq_ctx->gcm_iv_inc2_dma_addr) { in cc_unmap_aead_request()
486 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, in cc_unmap_aead_request()
491 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { in cc_unmap_aead_request()
492 if (areq_ctx->ccm_iv0_dma_addr) { in cc_unmap_aead_request()
493 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, in cc_unmap_aead_request()
497 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE); in cc_unmap_aead_request()
499 if (areq_ctx->gen_ctx.iv_dma_addr) { in cc_unmap_aead_request()
500 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, in cc_unmap_aead_request()
502 kfree_sensitive(areq_ctx->gen_ctx.iv); in cc_unmap_aead_request()
506 if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || in cc_unmap_aead_request()
507 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && in cc_unmap_aead_request()
508 (areq_ctx->mlli_params.mlli_virt_addr)) { in cc_unmap_aead_request()
510 &areq_ctx->mlli_params.mlli_dma_addr, in cc_unmap_aead_request()
511 areq_ctx->mlli_params.mlli_virt_addr); in cc_unmap_aead_request()
512 dma_pool_free(areq_ctx->mlli_params.curr_pool, in cc_unmap_aead_request()
513 areq_ctx->mlli_params.mlli_virt_addr, in cc_unmap_aead_request()
514 areq_ctx->mlli_params.mlli_dma_addr); in cc_unmap_aead_request()
518 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, in cc_unmap_aead_request()
519 areq_ctx->assoclen, req->cryptlen); in cc_unmap_aead_request()
521 dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); in cc_unmap_aead_request()
525 dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); in cc_unmap_aead_request()
528 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && in cc_unmap_aead_request()
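
cc_unmap_aead_request() is the teardown path: each dma_unmap_single() is guarded by a check that the corresponding DMA handle was actually set (the map path zeroes these fields on its failure branches), the optional MLLI table is returned to its DMA pool, and the kmemdup()'d IV copy is wiped with kfree_sensitive(). A minimal sketch of that pattern, with an illustrative context struct rather than the driver's aead_req_ctx:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative stand-in for the fields cc_unmap_aead_request() tears down. */
struct example_ctx {
	dma_addr_t mac_buf_dma_addr;	/* 0 if the MAC buffer was never mapped */
	u8 *iv;				/* kmemdup()'d IV copy, or NULL */
	dma_addr_t iv_dma_addr;
	struct dma_pool *mlli_pool;	/* pool the MLLI table came from, or NULL */
	void *mlli_virt;
	dma_addr_t mlli_dma;
};

static void example_unmap(struct device *dev, struct example_ctx *ctx,
			  unsigned int mac_size, unsigned int iv_size)
{
	if (ctx->mac_buf_dma_addr)
		dma_unmap_single(dev, ctx->mac_buf_dma_addr, mac_size,
				 DMA_BIDIRECTIONAL);

	if (ctx->iv_dma_addr)
		dma_unmap_single(dev, ctx->iv_dma_addr, iv_size,
				 DMA_BIDIRECTIONAL);
	kfree_sensitive(ctx->iv);	/* NULL-safe; zeroes the IV copy */

	if (ctx->mlli_pool && ctx->mlli_virt)
		dma_pool_free(ctx->mlli_pool, ctx->mlli_virt, ctx->mlli_dma);
}
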
549 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_aead_chain_iv() local
550 unsigned int hw_iv_size = areq_ctx->hw_iv_size; in cc_aead_chain_iv()
556 areq_ctx->gen_ctx.iv_dma_addr = 0; in cc_aead_chain_iv()
557 areq_ctx->gen_ctx.iv = NULL; in cc_aead_chain_iv()
561 areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags); in cc_aead_chain_iv()
562 if (!areq_ctx->gen_ctx.iv) in cc_aead_chain_iv()
565 areq_ctx->gen_ctx.iv_dma_addr = in cc_aead_chain_iv()
566 dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size, in cc_aead_chain_iv()
568 if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { in cc_aead_chain_iv()
571 kfree_sensitive(areq_ctx->gen_ctx.iv); in cc_aead_chain_iv()
572 areq_ctx->gen_ctx.iv = NULL; in cc_aead_chain_iv()
578 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr); in cc_aead_chain_iv()
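
cc_aead_chain_iv() never maps the caller's req->iv directly; it duplicates it into DMA-safe memory with kmemdup(), maps the copy, and on a mapping error frees the copy with kfree_sensitive() and clears the pointer. A minimal sketch of that copy-then-map step (the function and output-parameter names are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hedged sketch of the IV handling in cc_aead_chain_iv(). */
static int map_iv_copy(struct device *dev, const u8 *req_iv,
		       unsigned int hw_iv_size, gfp_t flags,
		       u8 **iv_out, dma_addr_t *iv_dma_out)
{
	u8 *iv;
	dma_addr_t dma;

	iv = kmemdup(req_iv, hw_iv_size, flags);	/* DMA-safe copy of the IV */
	if (!iv)
		return -ENOMEM;

	dma = dma_map_single(dev, iv, hw_iv_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		dev_err(dev, "Mapping %u B IV failed\n", hw_iv_size);
		kfree_sensitive(iv);	/* wipe and free the copy */
		return -ENOMEM;
	}

	*iv_out = iv;
	*iv_dma_out = dma;
	return 0;
}
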
589 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_aead_chain_assoc() local
599 if (areq_ctx->assoclen == 0) { in cc_aead_chain_assoc()
600 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; in cc_aead_chain_assoc()
601 areq_ctx->assoc.nents = 0; in cc_aead_chain_assoc()
602 areq_ctx->assoc.mlli_nents = 0; in cc_aead_chain_assoc()
604 cc_dma_buf_type(areq_ctx->assoc_buff_type), in cc_aead_chain_assoc()
605 areq_ctx->assoc.nents); in cc_aead_chain_assoc()
609 mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen); in cc_aead_chain_assoc()
618 areq_ctx->assoc.nents = mapped_nents; in cc_aead_chain_assoc()
623 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { in cc_aead_chain_assoc()
626 (areq_ctx->assoc.nents + 1), in cc_aead_chain_assoc()
633 if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null) in cc_aead_chain_assoc()
634 areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI; in cc_aead_chain_assoc()
636 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; in cc_aead_chain_assoc()
638 if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { in cc_aead_chain_assoc()
640 cc_dma_buf_type(areq_ctx->assoc_buff_type), in cc_aead_chain_assoc()
641 areq_ctx->assoc.nents); in cc_aead_chain_assoc()
642 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src, in cc_aead_chain_assoc()
643 areq_ctx->assoclen, 0, is_last, in cc_aead_chain_assoc()
644 &areq_ctx->assoc.mlli_nents); in cc_aead_chain_assoc()
645 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; in cc_aead_chain_assoc()
655 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_prepare_aead_data_dlli() local
656 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; in cc_prepare_aead_data_dlli()
657 unsigned int authsize = areq_ctx->req_authsize; in cc_prepare_aead_data_dlli()
661 areq_ctx->is_icv_fragmented = false; in cc_prepare_aead_data_dlli()
664 sg = areq_ctx->src_sgl; in cc_prepare_aead_data_dlli()
667 sg = areq_ctx->dst_sgl; in cc_prepare_aead_data_dlli()
671 areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset; in cc_prepare_aead_data_dlli()
672 areq_ctx->icv_virt_addr = sg_virt(sg) + offset; in cc_prepare_aead_data_dlli()
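
In cc_prepare_aead_data_dlli() the data is a single contiguous DMA segment, so the ICV (authentication tag) can be addressed directly: it occupies the last authsize bytes of the mapped data entry, taken from the source scatterlist on decrypt (the tag arrives with the ciphertext) or the destination on encrypt (where the engine will write it). A small sketch of that address computation; the names are illustrative and the offset is simply data length minus tag length:

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Point icv_dma/icv_virt at the last 'authsize' bytes of a mapped SG entry. */
static void locate_icv(struct scatterlist *sg, unsigned int data_len,
		       unsigned int authsize, dma_addr_t *icv_dma,
		       u8 **icv_virt)
{
	unsigned int offset = data_len - authsize;	/* tag sits at the end */

	*icv_dma = sg_dma_address(sg) + offset;
	*icv_virt = (u8 *)sg_virt(sg) + offset;
}
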
681 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_prepare_aead_data_mlli() local
682 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; in cc_prepare_aead_data_mlli()
683 unsigned int authsize = areq_ctx->req_authsize; in cc_prepare_aead_data_mlli()
689 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, in cc_prepare_aead_data_mlli()
690 areq_ctx->src_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
691 areq_ctx->src_offset, is_last_table, in cc_prepare_aead_data_mlli()
692 &areq_ctx->src.mlli_nents); in cc_prepare_aead_data_mlli()
694 areq_ctx->is_icv_fragmented = in cc_prepare_aead_data_mlli()
695 cc_is_icv_frag(areq_ctx->src.nents, authsize, in cc_prepare_aead_data_mlli()
698 if (areq_ctx->is_icv_fragmented) { in cc_prepare_aead_data_mlli()
712 areq_ctx->icv_virt_addr = areq_ctx->backup_mac; in cc_prepare_aead_data_mlli()
714 areq_ctx->icv_virt_addr = areq_ctx->mac_buf; in cc_prepare_aead_data_mlli()
715 areq_ctx->icv_dma_addr = in cc_prepare_aead_data_mlli()
716 areq_ctx->mac_buf_dma_addr; in cc_prepare_aead_data_mlli()
719 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; in cc_prepare_aead_data_mlli()
721 areq_ctx->icv_dma_addr = sg_dma_address(sg) + in cc_prepare_aead_data_mlli()
723 areq_ctx->icv_virt_addr = sg_virt(sg) + in cc_prepare_aead_data_mlli()
729 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, in cc_prepare_aead_data_mlli()
730 areq_ctx->src_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
731 areq_ctx->src_offset, is_last_table, in cc_prepare_aead_data_mlli()
732 &areq_ctx->src.mlli_nents); in cc_prepare_aead_data_mlli()
733 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, in cc_prepare_aead_data_mlli()
734 areq_ctx->dst_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
735 areq_ctx->dst_offset, is_last_table, in cc_prepare_aead_data_mlli()
736 &areq_ctx->dst.mlli_nents); in cc_prepare_aead_data_mlli()
738 areq_ctx->is_icv_fragmented = in cc_prepare_aead_data_mlli()
739 cc_is_icv_frag(areq_ctx->src.nents, authsize, in cc_prepare_aead_data_mlli()
746 if (areq_ctx->is_icv_fragmented) { in cc_prepare_aead_data_mlli()
748 areq_ctx->icv_virt_addr = areq_ctx->backup_mac; in cc_prepare_aead_data_mlli()
751 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; in cc_prepare_aead_data_mlli()
753 areq_ctx->icv_dma_addr = sg_dma_address(sg) + in cc_prepare_aead_data_mlli()
755 areq_ctx->icv_virt_addr = sg_virt(sg) + in cc_prepare_aead_data_mlli()
761 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, in cc_prepare_aead_data_mlli()
762 areq_ctx->dst_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
763 areq_ctx->dst_offset, is_last_table, in cc_prepare_aead_data_mlli()
764 &areq_ctx->dst.mlli_nents); in cc_prepare_aead_data_mlli()
765 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, in cc_prepare_aead_data_mlli()
766 areq_ctx->src_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
767 areq_ctx->src_offset, is_last_table, in cc_prepare_aead_data_mlli()
768 &areq_ctx->src.mlli_nents); in cc_prepare_aead_data_mlli()
770 areq_ctx->is_icv_fragmented = in cc_prepare_aead_data_mlli()
771 cc_is_icv_frag(areq_ctx->dst.nents, authsize, in cc_prepare_aead_data_mlli()
774 if (!areq_ctx->is_icv_fragmented) { in cc_prepare_aead_data_mlli()
775 sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]; in cc_prepare_aead_data_mlli()
777 areq_ctx->icv_dma_addr = sg_dma_address(sg) + in cc_prepare_aead_data_mlli()
779 areq_ctx->icv_virt_addr = sg_virt(sg) + in cc_prepare_aead_data_mlli()
782 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; in cc_prepare_aead_data_mlli()
783 areq_ctx->icv_virt_addr = areq_ctx->mac_buf; in cc_prepare_aead_data_mlli()
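
In the MLLI case cc_is_icv_frag() decides whether the tag straddles scatterlist entries. If it does, it cannot be addressed as one contiguous region: on decrypt the driver copies the received tag out to the backup_mac bounce buffer (that is what cc_copy_mac() at the top of this listing does, via cc_copy_sg_portion()), and on encrypt it points the ICV at the driver-owned mac_buf instead. A sketch of the decrypt-side copy using the generic sg_pcopy_to_buffer() helper rather than the driver's wrapper:

#include <linux/scatterlist.h>
#include <linux/types.h>

/*
 * Copy a possibly fragmented ICV out of the source scatterlist into a
 * contiguous bounce buffer; 'skip' is the byte offset of the tag within
 * the scatterlist data.  Illustrative sketch, not the driver's helper.
 */
static void copy_fragmented_icv(struct scatterlist *src, unsigned int nents,
				u8 *backup_mac, unsigned int authsize,
				unsigned int skip)
{
	sg_pcopy_to_buffer(src, nents, backup_mac, authsize, skip);
}
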
793 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_aead_chain_data() local
795 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; in cc_aead_chain_data()
796 unsigned int authsize = areq_ctx->req_authsize; in cc_aead_chain_data()
812 areq_ctx->src_sgl = req->src; in cc_aead_chain_data()
813 areq_ctx->dst_sgl = req->dst; in cc_aead_chain_data()
819 sg_index = areq_ctx->src_sgl->length; in cc_aead_chain_data()
823 offset -= areq_ctx->src_sgl->length; in cc_aead_chain_data()
824 sgl = sg_next(areq_ctx->src_sgl); in cc_aead_chain_data()
827 areq_ctx->src_sgl = sgl; in cc_aead_chain_data()
828 sg_index += areq_ctx->src_sgl->length; in cc_aead_chain_data()
836 areq_ctx->src.nents = src_mapped_nents; in cc_aead_chain_data()
838 areq_ctx->src_offset = offset; in cc_aead_chain_data()
849 &areq_ctx->dst.mapped_nents, in cc_aead_chain_data()
858 sg_index = areq_ctx->dst_sgl->length; in cc_aead_chain_data()
864 offset -= areq_ctx->dst_sgl->length; in cc_aead_chain_data()
865 sgl = sg_next(areq_ctx->dst_sgl); in cc_aead_chain_data()
868 areq_ctx->dst_sgl = sgl; in cc_aead_chain_data()
869 sg_index += areq_ctx->dst_sgl->length; in cc_aead_chain_data()
876 areq_ctx->dst.nents = dst_mapped_nents; in cc_aead_chain_data()
877 areq_ctx->dst_offset = offset; in cc_aead_chain_data()
881 areq_ctx->data_buff_type = CC_DMA_BUF_MLLI; in cc_aead_chain_data()
886 areq_ctx->data_buff_type = CC_DMA_BUF_DLLI; in cc_aead_chain_data()
898 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_update_aead_mlli_nents() local
901 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { in cc_update_aead_mlli_nents()
902 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr; in cc_update_aead_mlli_nents()
903 curr_mlli_size = areq_ctx->assoc.mlli_nents * in cc_update_aead_mlli_nents()
907 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { in cc_update_aead_mlli_nents()
910 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents; in cc_update_aead_mlli_nents()
911 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + in cc_update_aead_mlli_nents()
913 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr; in cc_update_aead_mlli_nents()
914 if (!areq_ctx->is_single_pass) in cc_update_aead_mlli_nents()
915 areq_ctx->assoc.mlli_nents += in cc_update_aead_mlli_nents()
916 areq_ctx->src.mlli_nents; in cc_update_aead_mlli_nents()
918 if (areq_ctx->gen_ctx.op_type == in cc_update_aead_mlli_nents()
920 areq_ctx->src.sram_addr = in cc_update_aead_mlli_nents()
923 areq_ctx->dst.sram_addr = in cc_update_aead_mlli_nents()
924 areq_ctx->src.sram_addr + in cc_update_aead_mlli_nents()
925 areq_ctx->src.mlli_nents * in cc_update_aead_mlli_nents()
927 if (!areq_ctx->is_single_pass) in cc_update_aead_mlli_nents()
928 areq_ctx->assoc.mlli_nents += in cc_update_aead_mlli_nents()
929 areq_ctx->src.mlli_nents; in cc_update_aead_mlli_nents()
931 areq_ctx->dst.sram_addr = in cc_update_aead_mlli_nents()
934 areq_ctx->src.sram_addr = in cc_update_aead_mlli_nents()
935 areq_ctx->dst.sram_addr + in cc_update_aead_mlli_nents()
936 areq_ctx->dst.mlli_nents * in cc_update_aead_mlli_nents()
938 if (!areq_ctx->is_single_pass) in cc_update_aead_mlli_nents()
939 areq_ctx->assoc.mlli_nents += in cc_update_aead_mlli_nents()
940 areq_ctx->dst.mlli_nents; in cc_update_aead_mlli_nents()
948 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); in cc_map_aead_request() local
949 struct mlli_params *mlli_params = &areq_ctx->mlli_params; in cc_map_aead_request()
952 unsigned int authsize = areq_ctx->req_authsize; in cc_map_aead_request()
967 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && in cc_map_aead_request()
972 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type == in cc_map_aead_request()
977 dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE, in cc_map_aead_request()
981 MAX_MAC_SIZE, areq_ctx->mac_buf); in cc_map_aead_request()
985 areq_ctx->mac_buf_dma_addr = dma_addr; in cc_map_aead_request()
987 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { in cc_map_aead_request()
988 void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; in cc_map_aead_request()
996 areq_ctx->ccm_iv0_dma_addr = 0; in cc_map_aead_request()
1000 areq_ctx->ccm_iv0_dma_addr = dma_addr; in cc_map_aead_request()
1002 rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, in cc_map_aead_request()
1003 &sg_data, areq_ctx->assoclen); in cc_map_aead_request()
1008 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { in cc_map_aead_request()
1009 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE, in cc_map_aead_request()
1013 AES_BLOCK_SIZE, areq_ctx->hkey); in cc_map_aead_request()
1017 areq_ctx->hkey_dma_addr = dma_addr; in cc_map_aead_request()
1019 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block, in cc_map_aead_request()
1023 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); in cc_map_aead_request()
1027 areq_ctx->gcm_block_len_dma_addr = dma_addr; in cc_map_aead_request()
1029 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1, in cc_map_aead_request()
1034 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1)); in cc_map_aead_request()
1035 areq_ctx->gcm_iv_inc1_dma_addr = 0; in cc_map_aead_request()
1039 areq_ctx->gcm_iv_inc1_dma_addr = dma_addr; in cc_map_aead_request()
1041 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2, in cc_map_aead_request()
1046 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2)); in cc_map_aead_request()
1047 areq_ctx->gcm_iv_inc2_dma_addr = 0; in cc_map_aead_request()
1051 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; in cc_map_aead_request()
1056 if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && in cc_map_aead_request()
1063 &areq_ctx->src.mapped_nents, in cc_map_aead_request()
1070 if (areq_ctx->is_single_pass) { in cc_map_aead_request()
1121 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || in cc_map_aead_request()
1122 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { in cc_map_aead_request()
1130 areq_ctx->assoc.mlli_nents); in cc_map_aead_request()
1131 dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents); in cc_map_aead_request()
1132 dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents); in cc_map_aead_request()
1145 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; in cc_map_hash_request_final() local
1147 u8 *curr_buff = cc_hash_buf(areq_ctx); in cc_map_hash_request_final()
1148 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx); in cc_map_hash_request_final()
1149 struct mlli_params *mlli_params = &areq_ctx->mlli_params; in cc_map_hash_request_final()
1156 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); in cc_map_hash_request_final()
1158 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; in cc_map_hash_request_final()
1161 areq_ctx->in_nents = 0; in cc_map_hash_request_final()
1170 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, in cc_map_hash_request_final()
1178 &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, in cc_map_hash_request_final()
1183 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { in cc_map_hash_request_final()
1184 memcpy(areq_ctx->buff_sg, src, in cc_map_hash_request_final()
1186 areq_ctx->buff_sg->length = nbytes; in cc_map_hash_request_final()
1187 areq_ctx->curr_sg = areq_ctx->buff_sg; in cc_map_hash_request_final()
1188 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; in cc_map_hash_request_final()
1190 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; in cc_map_hash_request_final()
1195 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { in cc_map_hash_request_final()
1198 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, in cc_map_hash_request_final()
1199 0, true, &areq_ctx->mlli_nents); in cc_map_hash_request_final()
1205 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1); in cc_map_hash_request_final()
1207 cc_dma_buf_type(areq_ctx->data_dma_buf_type)); in cc_map_hash_request_final()
1211 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); in cc_map_hash_request_final()
1215 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); in cc_map_hash_request_final()
1224 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; in cc_map_hash_request_update() local
1226 u8 *curr_buff = cc_hash_buf(areq_ctx); in cc_map_hash_request_update()
1227 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx); in cc_map_hash_request_update()
1228 u8 *next_buff = cc_next_buf(areq_ctx); in cc_map_hash_request_update()
1229 u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx); in cc_map_hash_request_update()
1230 struct mlli_params *mlli_params = &areq_ctx->mlli_params; in cc_map_hash_request_update()
1240 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); in cc_map_hash_request_update()
1242 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; in cc_map_hash_request_update()
1244 areq_ctx->curr_sg = NULL; in cc_map_hash_request_update()
1246 areq_ctx->in_nents = 0; in cc_map_hash_request_update()
1251 areq_ctx->in_nents = sg_nents_for_len(src, nbytes); in cc_map_hash_request_update()
1252 sg_copy_to_buffer(src, areq_ctx->in_nents, in cc_map_hash_request_update()
1279 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, in cc_map_hash_request_update()
1289 DMA_TO_DEVICE, &areq_ctx->in_nents, in cc_map_hash_request_update()
1295 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { in cc_map_hash_request_update()
1297 memcpy(areq_ctx->buff_sg, src, in cc_map_hash_request_update()
1299 areq_ctx->buff_sg->length = update_data_len; in cc_map_hash_request_update()
1300 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; in cc_map_hash_request_update()
1301 areq_ctx->curr_sg = areq_ctx->buff_sg; in cc_map_hash_request_update()
1303 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; in cc_map_hash_request_update()
1307 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { in cc_map_hash_request_update()
1310 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, in cc_map_hash_request_update()
1312 &areq_ctx->mlli_nents); in cc_map_hash_request_update()
1317 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); in cc_map_hash_request_update()
1322 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); in cc_map_hash_request_update()
1326 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); in cc_map_hash_request_update()
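
cc_map_hash_request_update() only DMA-maps data when there is at least one full hash block to feed to the engine. A short update that still fits in the block-sized staging buffer is simply copied in with sg_copy_to_buffer() (the sg_nents_for_len()/sg_copy_to_buffer() pair shown above) and hashed on a later call. A minimal sketch of that buffering step; the function name and return convention are illustrative:

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Stash a short update in the staging buffer instead of DMA-mapping it. */
static int buffer_small_update(struct scatterlist *src, unsigned int nbytes,
			       u8 *curr_buff, u32 *curr_buff_cnt)
{
	int nents = sg_nents_for_len(src, nbytes);

	if (nents < 0)
		return nents;	/* malformed scatterlist */

	sg_copy_to_buffer(src, nents, &curr_buff[*curr_buff_cnt], nbytes);
	*curr_buff_cnt += nbytes;
	return 0;
}
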
1334 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; in cc_unmap_hash_request() local
1335 u32 *prev_len = cc_next_buf_cnt(areq_ctx); in cc_unmap_hash_request()
1340 if (areq_ctx->mlli_params.curr_pool) { in cc_unmap_hash_request()
1342 &areq_ctx->mlli_params.mlli_dma_addr, in cc_unmap_hash_request()
1343 areq_ctx->mlli_params.mlli_virt_addr); in cc_unmap_hash_request()
1344 dma_pool_free(areq_ctx->mlli_params.curr_pool, in cc_unmap_hash_request()
1345 areq_ctx->mlli_params.mlli_virt_addr, in cc_unmap_hash_request()
1346 areq_ctx->mlli_params.mlli_dma_addr); in cc_unmap_hash_request()
1349 if (src && areq_ctx->in_nents) { in cc_unmap_hash_request()
1353 areq_ctx->in_nents, DMA_TO_DEVICE); in cc_unmap_hash_request()
1358 sg_virt(areq_ctx->buff_sg), in cc_unmap_hash_request()
1359 &sg_dma_address(areq_ctx->buff_sg), in cc_unmap_hash_request()
1360 sg_dma_len(areq_ctx->buff_sg)); in cc_unmap_hash_request()
1361 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); in cc_unmap_hash_request()
1368 areq_ctx->buff_index ^= 1; in cc_unmap_hash_request()