Lines matching refs: qidev (drivers/crypto/caam/caamalg_qi.c)

69 	struct device *qidev;  member
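
The member at line 69 anchors everything below: each transform context caches the CAAM device pointer once, so every DMA mapping and dev_err() call in the request path can reach it without a lookup. A minimal sketch of the idea (only qidev mirrors the real layout; the driver's actual caam_ctx also carries keys, shared descriptors, and per-CPU driver contexts):

#include <linux/device.h>

/* Illustrative per-transform context, not the driver's exact struct. */
struct example_ctx {
	struct device *qidev;	/* CAAM device used for all DMA and logging */
};
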
854 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); in get_drv_ctx()
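
Line 854 binds a shared descriptor to a QI driver context on a chosen CPU. caam_drv_ctx_init() (from the driver's qi.h) hands back an ERR_PTR on failure, so a caller shaped like get_drv_ctx() would look roughly as follows; the real function's per-operation caching and locking are elided:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>

static struct caam_drv_ctx *example_get_drv_ctx(struct example_ctx *ctx,
						int cpu, u32 *desc)
{
	struct caam_drv_ctx *drv_ctx;

	/* create a driver context for this shared descriptor on this CPU */
	drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
	if (IS_ERR(drv_ctx))
		dev_err(ctx->qidev, "failed to instantiate drv_ctx\n");

	return drv_ctx;	/* caller distinguishes success with IS_ERR() */
}
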
914 struct device *qidev; in aead_done() local
921 qidev = caam_ctx->qidev; in aead_done()
924 ecode = caam_jr_strstatus(qidev, status); in aead_done()
927 aead_unmap(qidev, edesc, aead_req); in aead_done()
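
Lines 914-927 show the completion-callback discipline: decode the CAAM status word with caam_jr_strstatus() (from the driver's error.h), undo every DMA mapping against the same qidev, then complete the request. A hedged sketch of that shape, where example_edesc and example_aead_unmap() are simplified stand-ins for the driver's own type and helper:

#include <linux/device.h>
#include <crypto/internal/aead.h>

struct example_edesc;	/* stand-in for the driver's extended descriptor */

void example_aead_unmap(struct device *qidev, struct example_edesc *edesc,
			struct aead_request *req);

static void example_aead_done(struct device *qidev, u32 status,
			      struct example_edesc *edesc,
			      struct aead_request *req)
{
	int ecode = 0;

	if (status)
		ecode = caam_jr_strstatus(qidev, status); /* decode status word */

	example_aead_unmap(qidev, edesc, req);	/* release all DMA mappings */
	aead_request_complete(req, ecode);	/* report the result upward */
}
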
943 struct device *qidev = ctx->qidev; in aead_edesc_alloc() local
964 dev_err(qidev, "could not allocate extended descriptor\n"); in aead_edesc_alloc()
974 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in aead_edesc_alloc()
980 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in aead_edesc_alloc()
983 dev_err(qidev, "unable to map source\n"); in aead_edesc_alloc()
993 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in aead_edesc_alloc()
1001 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", in aead_edesc_alloc()
1008 mapped_src_nents = dma_map_sg(qidev, req->src, in aead_edesc_alloc()
1011 dev_err(qidev, "unable to map source\n"); in aead_edesc_alloc()
1020 mapped_dst_nents = dma_map_sg(qidev, req->dst, in aead_edesc_alloc()
1024 dev_err(qidev, "unable to map destination\n"); in aead_edesc_alloc()
1025 dma_unmap_sg(qidev, req->src, src_nents, in aead_edesc_alloc()
1063 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", in aead_edesc_alloc()
1065 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in aead_edesc_alloc()
1077 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); in aead_edesc_alloc()
1078 if (dma_mapping_error(qidev, iv_dma)) { in aead_edesc_alloc()
1079 dev_err(qidev, "unable to map IV\n"); in aead_edesc_alloc()
1080 caam_unmap(qidev, req->src, req->dst, src_nents, in aead_edesc_alloc()
1095 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, in aead_edesc_alloc()
1097 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { in aead_edesc_alloc()
1098 dev_err(qidev, "unable to map assoclen\n"); in aead_edesc_alloc()
1099 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
1117 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); in aead_edesc_alloc()
1118 if (dma_mapping_error(qidev, qm_sg_dma)) { in aead_edesc_alloc()
1119 dev_err(qidev, "unable to map S/G table\n"); in aead_edesc_alloc()
1120 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); in aead_edesc_alloc()
1121 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
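
Lines 1077-1121 repeat one mapping discipline three times over (IV, assoclen word, S/G table): every dma_map_single() is checked immediately with dma_mapping_error() against the same qidev, and a failure unwinds whatever was mapped before it, in reverse order. Distilled to two buffers with illustrative names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_map_iv_and_sg(struct device *qidev,
				 void *iv, int ivsize,
				 void *sg_table, int qm_sg_bytes,
				 dma_addr_t *iv_dma, dma_addr_t *qm_sg_dma)
{
	*iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, *iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		return -ENOMEM;
	}

	*qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, *qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		/* unwind in reverse order of mapping */
		dma_unmap_single(qidev, *iv_dma, ivsize, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}
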
1172 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); in aead_crypt()
1176 aead_unmap(ctx->qidev, edesc, req); in aead_crypt()
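
Lines 1172-1176 capture the submission contract: a 0 return from caam_qi_enqueue() (from the driver's qi.h) means the hardware now owns the request and the caller answers -EINPROGRESS; any other return means the caller still owns the descriptor and must unmap it itself, since the done callback will never fire. A sketch under those assumptions, reusing the stand-in helper from above:

static int example_aead_crypt(struct example_ctx *ctx,
			      struct example_edesc *edesc,
			      struct caam_drv_req *drv_req,
			      struct aead_request *req)
{
	int ret;

	ret = caam_qi_enqueue(ctx->qidev, drv_req);
	if (!ret)
		return -EINPROGRESS;	/* example_aead_done() completes req */

	/* enqueue failed: no callback is coming, so clean up synchronously
	 * (the real driver also frees the extended descriptor here) */
	example_aead_unmap(ctx->qidev, edesc, req);
	return ret;
}
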
1211 struct device *qidev = caam_ctx->qidev; in skcipher_done() local
1215 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); in skcipher_done()
1220 ecode = caam_jr_strstatus(qidev, status); in skcipher_done()
1229 skcipher_unmap(qidev, edesc, req); in skcipher_done()
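
skcipher_done() (lines 1211-1229) has one duty aead_done() does not: for chaining modes the next IV must be handed back to the caller, which is why line 1343 below maps the IV DMA_BIDIRECTIONAL rather than DMA_TO_DEVICE. A minimal sketch of that round trip, assuming a driver-owned iv buffer; the real driver's IV bookkeeping sits inside the extended descriptor:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <crypto/skcipher.h>

static int example_iv_map(struct device *qidev, u8 *iv, int ivsize,
			  dma_addr_t *iv_dma)
{
	/* the device reads the input IV and writes the output IV back */
	*iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, *iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		return -ENOMEM;
	}
	return 0;
}

static void example_iv_complete(struct device *qidev,
				struct skcipher_request *req,
				u8 *iv, int ivsize, dma_addr_t iv_dma)
{
	/* unmap before the CPU reads the buffer, then publish the next IV */
	dma_unmap_single(qidev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
	memcpy(req->iv, iv, ivsize);
}
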
1249 struct device *qidev = ctx->qidev; in skcipher_edesc_alloc() local
1267 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in skcipher_edesc_alloc()
1275 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", in skcipher_edesc_alloc()
1280 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in skcipher_edesc_alloc()
1283 dev_err(qidev, "unable to map source\n"); in skcipher_edesc_alloc()
1287 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, in skcipher_edesc_alloc()
1290 dev_err(qidev, "unable to map destination\n"); in skcipher_edesc_alloc()
1291 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); in skcipher_edesc_alloc()
1295 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in skcipher_edesc_alloc()
1298 dev_err(qidev, "unable to map source\n"); in skcipher_edesc_alloc()
1322 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", in skcipher_edesc_alloc()
1324 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1332 dev_err(qidev, "could not allocate extended descriptor\n"); in skcipher_edesc_alloc()
1333 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1343 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); in skcipher_edesc_alloc()
1344 if (dma_mapping_error(qidev, iv_dma)) { in skcipher_edesc_alloc()
1345 dev_err(qidev, "unable to map IV\n"); in skcipher_edesc_alloc()
1346 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1369 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, in skcipher_edesc_alloc()
1371 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { in skcipher_edesc_alloc()
1372 dev_err(qidev, "unable to map S/G table\n"); in skcipher_edesc_alloc()
1373 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in skcipher_edesc_alloc()
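
Lines 1280-1298 encode the source/destination mapping rules: an out-of-place request maps src DMA_TO_DEVICE and dst DMA_FROM_DEVICE, and a failure on dst must release the already-mapped src; an in-place request (req->src == req->dst) is mapped once, DMA_BIDIRECTIONAL. As a sketch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <crypto/skcipher.h>

static int example_map_src_dst(struct device *qidev,
			       struct skcipher_request *req,
			       int src_nents, int dst_nents)
{
	if (req->src != req->dst) {
		if (!dma_map_sg(qidev, req->src, src_nents, DMA_TO_DEVICE)) {
			dev_err(qidev, "unable to map source\n");
			return -ENOMEM;
		}
		if (!dma_map_sg(qidev, req->dst, dst_nents,
				DMA_FROM_DEVICE)) {
			dev_err(qidev, "unable to map destination\n");
			/* failure path must release the source mapping */
			dma_unmap_sg(qidev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}
	} else {
		/* in-place: one bidirectional mapping covers read and write */
		if (!dma_map_sg(qidev, req->src, src_nents,
				DMA_BIDIRECTIONAL)) {
			dev_err(qidev, "unable to map source\n");
			return -ENOMEM;
		}
	}
	return 0;
}
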
1444 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); in skcipher_crypt()
1448 skcipher_unmap(ctx->qidev, edesc, req); in skcipher_crypt()
2480 ctx->qidev = dev; in caam_init_common()
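
Line 2480 is where the pointer from line 69 gets populated: caam_init_common() stores the CAAM device in the context exactly once at tfm init, and every mapping and error print above flows through that cached copy. Sketched, with the rest of the init elided:

static int example_init_common(struct example_ctx *ctx, struct device *dev)
{
	ctx->qidev = dev;	/* every later DMA and logging call uses this */
	/* ... build shared descriptors, set up per-CPU driver contexts ... */
	return 0;
}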