Lines matching refs: edesc

124 	struct ahash_edesc *edesc;  member
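(These matches are against the CAAM ahash driver, drivers/crypto/caam/caamhash.c in the Linux kernel. Every hit below touches a member of struct ahash_edesc, so as a reading aid, here is a sketch of the struct reconstructed only from the members the matched lines use. Types, field order, and the hw_desc sizing are assumptions, not a copy of the driver's definition.)

struct ahash_edesc {
	dma_addr_t sec4_sg_dma;		/* bus address of the S/G table (561) */
	int src_nents;			/* segments mapped from req->src (557) */
	int sec4_sg_bytes;		/* table size; 0 means no table (560) */
	bool bklog;			/* went through crypto-engine backlog (600) */
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)];	/* CAAM job descriptor (717) */
	struct sec4_sg_entry sec4_sg[];	/* flexible array, sized at 711 via struct_size() */
};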
552 struct ahash_edesc *edesc, in ahash_unmap() argument
557 if (edesc->src_nents) in ahash_unmap()
558 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); in ahash_unmap()
560 if (edesc->sec4_sg_bytes) in ahash_unmap()
561 dma_unmap_single(dev, edesc->sec4_sg_dma, in ahash_unmap()
562 edesc->sec4_sg_bytes, DMA_TO_DEVICE); in ahash_unmap()
572 struct ahash_edesc *edesc, in ahash_unmap_ctx() argument
581 ahash_unmap(dev, edesc, req, dst_len); in ahash_unmap_ctx()
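Lines 552-581 form the teardown pair. A minimal sketch of what the matched lines do; parameter names and the lines of the real functions that never mention edesc (result-buffer and context-buffer cleanup, for instance) are paraphrased or elided:

static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	/* 557-558: release the mapping of the source scatterlist, if any */
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	/* 560-562: release the hardware S/G table, if one was built */
	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	/* non-edesc cleanup elided */
}

/* 572-581: the _ctx variant additionally unmaps the running hash context,
 * then delegates all edesc work to ahash_unmap() (581) */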
589 struct ahash_edesc *edesc; in ahash_done_cpy() local
599 edesc = state->edesc; in ahash_done_cpy()
600 has_bklog = edesc->bklog; in ahash_done_cpy()
605 ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir); in ahash_done_cpy()
607 kfree(edesc); in ahash_done_cpy()
640 struct ahash_edesc *edesc; in ahash_done_switch() local
650 edesc = state->edesc; in ahash_done_switch()
651 has_bklog = edesc->bklog; in ahash_done_switch()
655 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); in ahash_done_switch()
656 kfree(edesc); in ahash_done_switch()
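ahash_done_cpy() (589-607) and ahash_done_switch() (640-656) are the job-ring completion callbacks, and they treat the edesc identically. The one subtlety the matches capture is at 600/651: bklog is read into a local before the unmap and kfree, because the flag decides how completion is reported after the edesc is gone. Condensed sketch; surrounding names are assumptions:

struct ahash_edesc *edesc = state->edesc;	/* 599 / 650 */
bool has_bklog = edesc->bklog;			/* 600 / 651: latch before freeing */

ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);	/* 605; 655 uses ctx->ctx_len */
kfree(edesc);						/* 607 / 656 */

/* has_bklog then picks between plain completion and finalizing the
 * request through the crypto engine (crypto_finalize_hash_request()) */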
709 struct ahash_edesc *edesc; in ahash_edesc_alloc() local
711 edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); in ahash_edesc_alloc()
712 if (!edesc) in ahash_edesc_alloc()
715 state->edesc = edesc; in ahash_edesc_alloc()
717 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), in ahash_edesc_alloc()
720 return edesc; in ahash_edesc_alloc()
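ahash_edesc_alloc() (709-720) is why sec4_sg must stay the trailing flexible-array member: a single kzalloc() sized with struct_size() covers the edesc plus sg_num S/G entries in one shot, and 715 parks the result in the request state so the completion callbacks above can find it. Sketch; the GFP-flag selection and the header flags passed at 717 are assumptions beyond what the matches show:

edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);	/* 711 */
if (!edesc)
	return NULL;		/* 712: callers unwind and return -ENOMEM */

state->edesc = edesc;		/* 715 */

/* 717: seed hw_desc with a job that executes the shared descriptor */
init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
		     HDR_SHARE_DEFER | HDR_REVERSE);

return edesc;			/* 720 */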
724 struct ahash_edesc *edesc, in ahash_edesc_add_src() argument
733 struct sec4_sg_entry *sg = edesc->sec4_sg; in ahash_edesc_add_src()
745 edesc->sec4_sg_bytes = sgsize; in ahash_edesc_add_src()
746 edesc->sec4_sg_dma = src_dma; in ahash_edesc_add_src()
753 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, in ahash_edesc_add_src()
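ahash_edesc_add_src() (724-753) chooses between a direct pointer and an S/G table for the source data. With more than one mapped segment, req->src is converted into the table at edesc->sec4_sg, the table itself is DMA-mapped, and 745-746 record both so ahash_unmap() can undo it; a single segment skips the table entirely. Either way, 753 points the job's SEQ IN at the result. Paraphrased sketch; the conversion and mapping calls between the matched lines are assumptions:

struct sec4_sg_entry *sg = edesc->sec4_sg;	/* 733 */
dma_addr_t src_dma;
u32 options;

if (nents > 1 || first_sg) {
	sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
	src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
	edesc->sec4_sg_bytes = sgsize;		/* 745 */
	edesc->sec4_sg_dma = src_dma;		/* 746 */
	options = LDST_SGF;			/* tell CAAM it is a table */
} else {
	src_dma = sg_dma_address(req->src);	/* direct pointer, no table */
	options = 0;
}

append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
		  options);			/* 753 */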
765 u32 *desc = state->edesc->hw_desc; in ahash_do_one_req()
768 state->edesc->bklog = true; in ahash_do_one_req()
776 ahash_unmap(jrdev, state->edesc, req, 0); in ahash_do_one_req()
777 kfree(state->edesc); in ahash_do_one_req()
793 struct ahash_edesc *edesc = state->edesc; in ahash_enqueue_req() local
794 u32 *desc = edesc->hw_desc; in ahash_enqueue_req()
811 ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir); in ahash_enqueue_req()
812 kfree(edesc); in ahash_enqueue_req()
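765-777 and 793-812 are the two submission paths, and they share one contract: if caam_jr_enqueue() does not accept the job, the submitter itself unmaps and frees the edesc, because no completion callback will ever run for it. ahash_do_one_req() is the crypto-engine worker and additionally sets bklog at 768 so the eventual callback knows to finalize through the engine. Sketch, with error-code plumbing elided:

u32 *desc = state->edesc->hw_desc;	/* 765 / 794 */

state->edesc->bklog = true;		/* 768: engine-backlogged submission */

ret = caam_jr_enqueue(jrdev, desc, cbk, req);
if (ret != -EINPROGRESS) {
	/* the job never reached hardware, so undo it all here */
	ahash_unmap(jrdev, state->edesc, req, 0);	/* 776; 811 uses ahash_unmap_ctx() */
	kfree(state->edesc);				/* 777 / 812 */
}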
832 struct ahash_edesc *edesc; in ahash_update_ctx() local
878 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update, in ahash_update_ctx()
880 if (!edesc) { in ahash_update_ctx()
885 edesc->src_nents = src_nents; in ahash_update_ctx()
886 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_ctx()
889 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_update_ctx()
893 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_update_ctx()
899 edesc->sec4_sg + sec4_sg_src_index, in ahash_update_ctx()
902 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - in ahash_update_ctx()
905 desc = edesc->hw_desc; in ahash_update_ctx()
907 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_ctx()
910 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_ctx()
916 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + in ahash_update_ctx()
939 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); in ahash_update_ctx()
940 kfree(edesc); in ahash_update_ctx()
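ahash_update_ctx() (832-940) is the fullest example of the pattern every job-builder below repeats: allocate the edesc (878), record src_nents and sec4_sg_bytes (885-886), populate the S/G table with the running context, the partial buffer, and the new source (889-902), DMA-map the table (907), point SEQ IN at it with LDST_SGF (916), and unwind any failure with ahash_unmap_ctx() plus kfree() (939-940). The same shape recurs in ahash_final_ctx() and ahash_finup_ctx() with different table layouts. Skeleton; helper names and the argument continuations not visible in the matches (src_len, to_hash, and so on) are assumptions:

edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
			  ctx->sh_desc_update_dma);		/* 878 */
edesc->src_nents = src_nents;					/* 885 */
edesc->sec4_sg_bytes = sec4_sg_bytes;				/* 886 */

/* table layout: [0] running context, [1] partial buffer, rest = req->src */
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
			 edesc->sec4_sg, DMA_BIDIRECTIONAL);	/* 889 */
ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);	/* 893 */
sg_to_sec4_sg_last(req->src, src_len,
		   edesc->sec4_sg + sec4_sg_src_index, 0);	/* 899 */
/* (902: with no new source, the previous entry is marked last instead) */

edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
				    sec4_sg_bytes, DMA_TO_DEVICE);	/* 907 */
append_seq_in_ptr(desc, edesc->sec4_sg_dma,
		  ctx->ctx_len + to_hash, LDST_SGF);		/* 916 */

/* any failure from 889 onward lands here: */
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);	/* 939 */
kfree(edesc);							/* 940 */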
954 struct ahash_edesc *edesc; in ahash_final_ctx() local
961 edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, in ahash_final_ctx()
963 if (!edesc) in ahash_final_ctx()
966 desc = edesc->hw_desc; in ahash_final_ctx()
968 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_final_ctx()
971 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_final_ctx()
975 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_final_ctx()
979 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); in ahash_final_ctx()
981 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_final_ctx()
983 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_final_ctx()
989 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, in ahash_final_ctx()
1000 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); in ahash_final_ctx()
1001 kfree(edesc); in ahash_final_ctx()
1016 struct ahash_edesc *edesc; in ahash_finup_ctx() local
1039 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, in ahash_finup_ctx()
1041 if (!edesc) { in ahash_finup_ctx()
1046 desc = edesc->hw_desc; in ahash_finup_ctx()
1048 edesc->src_nents = src_nents; in ahash_finup_ctx()
1051 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
1055 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_finup_ctx()
1059 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, in ahash_finup_ctx()
1074 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
1075 kfree(edesc); in ahash_finup_ctx()
1088 struct ahash_edesc *edesc; in ahash_digest() local
1111 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0, in ahash_digest()
1113 if (!edesc) { in ahash_digest()
1118 edesc->src_nents = src_nents; in ahash_digest()
1120 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, in ahash_digest()
1123 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
1124 kfree(edesc); in ahash_digest()
1128 desc = edesc->hw_desc; in ahash_digest()
1132 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
1133 kfree(edesc); in ahash_digest()
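The ternary at 1111 (and the matching one at 1445 in ahash_update_first()) is the allocation-size counterpart of ahash_edesc_add_src(): when the mapped source collapses to a single segment, zero sec4_sg entries are requested, since the hardware will be handed a direct pointer instead of a table. In sketch form; the shared-descriptor arguments are assumptions:

/* 1111: reserve S/G entries only when a table will actually be built */
edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
			  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);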
1156 struct ahash_edesc *edesc; in ahash_final_no_ctx() local
1160 edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, in ahash_final_no_ctx()
1162 if (!edesc) in ahash_final_no_ctx()
1165 desc = edesc->hw_desc; in ahash_final_no_ctx()
1189 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_final_no_ctx()
1190 kfree(edesc); in ahash_final_no_ctx()
1207 struct ahash_edesc *edesc; in ahash_update_no_ctx() local
1253 edesc = ahash_edesc_alloc(req, pad_nents, in ahash_update_no_ctx()
1256 if (!edesc) { in ahash_update_no_ctx()
1261 edesc->src_nents = src_nents; in ahash_update_no_ctx()
1262 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_no_ctx()
1264 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_update_no_ctx()
1268 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); in ahash_update_no_ctx()
1270 desc = edesc->hw_desc; in ahash_update_no_ctx()
1272 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1275 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_no_ctx()
1281 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); in ahash_update_no_ctx()
1310 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_no_ctx()
1311 kfree(edesc); in ahash_update_no_ctx()
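A detail worth noticing across the unwind paths: the context-carrying variants unmap with DMA_BIDIRECTIONAL (939, 1000, 1074) because the CAAM both reads and rewrites the running context, while ahash_update_no_ctx() (1310) and ahash_update_first() (1493) pass DMA_TO_DEVICE, since on those paths the device only consumes the mapped buffers.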
1326 struct ahash_edesc *edesc; in ahash_finup_no_ctx() local
1351 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, in ahash_finup_no_ctx()
1353 if (!edesc) { in ahash_finup_no_ctx()
1358 desc = edesc->hw_desc; in ahash_finup_no_ctx()
1360 edesc->src_nents = src_nents; in ahash_finup_no_ctx()
1361 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_finup_no_ctx()
1363 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_finup_no_ctx()
1367 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, in ahash_finup_no_ctx()
1385 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_finup_no_ctx()
1386 kfree(edesc); in ahash_finup_no_ctx()
1405 struct ahash_edesc *edesc; in ahash_update_first() local
1445 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? in ahash_update_first()
1449 if (!edesc) { in ahash_update_first()
1454 edesc->src_nents = src_nents; in ahash_update_first()
1456 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, in ahash_update_first()
1461 desc = edesc->hw_desc; in ahash_update_first()
1493 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_first()
1494 kfree(edesc); in ahash_update_first()
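Taken together, the matches trace a single edesc lifecycle that every entry point above instantiates. A condensed, hedged recap, with the control flow compressed into comments:

/* 1. build: allocate the edesc and describe the input */
edesc = ahash_edesc_alloc(req, sg_num, sh_desc, sh_desc_dma);	/* 709-720 */
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
			  first_sg, first_bytes, to_hash);	/* 724-753 */

/* 2. submit: ahash_enqueue_req() / ahash_do_one_req() hand edesc->hw_desc
 *    to the job ring; a failed enqueue unmaps and kfrees on the spot */

/* 3. complete: ahash_done_cpy() / ahash_done_switch() retrieve the edesc
 *    from state->edesc, latch bklog, unmap it, and kfree it */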