Lines matching refs: qidev
60 struct device *qidev; member
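
Line 60 declares qidev as a member of the driver's per-transform context; every hit below reaches the CAAM queue-interface device through this pointer, either for DMA mapping or for dev_err()/dev_warn() diagnostics. A minimal sketch of the surrounding context (only the qidev member is confirmed by the listing; the other fields are illustrative):

	#include <linux/device.h>

	struct caam_ctx {
		/* ... shared descriptors, keys, per-CPU drv_ctx pointers ... */
		struct device *qidev;	/* CAAM queue-interface device: the dev
					 * passed to every dma_map_*() and
					 * dev_err() call in this file */
	};
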
790 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); in get_drv_ctx()
852 struct device *qidev; in aead_done() local
859 qidev = caam_ctx->qidev; in aead_done()
865 caam_jr_strstatus(qidev, status); in aead_done()
877 aead_unmap(qidev, edesc, aead_req); in aead_done()
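
The hits at 852-877 are aead_done(), the response callback run when the queue interface dequeues a completed job: recover qidev from the request's context, decode a non-zero CAAM status with caam_jr_strstatus(), unmap the DMA resources, and complete the request. A hedged reconstruction of that flow (the container_of() lookup and the -EIO error code are assumptions, not visible in the listing):

	static void aead_done(struct caam_drv_req *drv_req, u32 status)
	{
		struct aead_request *aead_req = drv_req->app_ctx;
		struct caam_ctx *caam_ctx =
			crypto_aead_ctx(crypto_aead_reqtfm(aead_req));
		struct device *qidev = caam_ctx->qidev;
		struct aead_edesc *edesc =
			container_of(drv_req, struct aead_edesc, drv_req);
		int ecode = 0;

		if (unlikely(status)) {
			/* decode the CAAM status word into a readable log line */
			caam_jr_strstatus(qidev, status);
			ecode = -EIO;
		}

		aead_unmap(qidev, edesc, aead_req);	/* release all DMA mappings */
		aead_request_complete(aead_req, ecode);
		qi_cache_free(edesc);
	}
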
893 struct device *qidev = ctx->qidev; in aead_edesc_alloc() local
914 dev_err(qidev, "could not allocate extended descriptor\n"); in aead_edesc_alloc()
923 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in aead_edesc_alloc()
930 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in aead_edesc_alloc()
933 dev_err(qidev, "unable to map source\n"); in aead_edesc_alloc()
941 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in aead_edesc_alloc()
952 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", in aead_edesc_alloc()
960 mapped_src_nents = dma_map_sg(qidev, req->src, in aead_edesc_alloc()
963 dev_err(qidev, "unable to map source\n"); in aead_edesc_alloc()
971 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, in aead_edesc_alloc()
974 dev_err(qidev, "unable to map destination\n"); in aead_edesc_alloc()
975 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); in aead_edesc_alloc()
994 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", in aead_edesc_alloc()
996 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in aead_edesc_alloc()
1008 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); in aead_edesc_alloc()
1009 if (dma_mapping_error(qidev, iv_dma)) { in aead_edesc_alloc()
1010 dev_err(qidev, "unable to map IV\n"); in aead_edesc_alloc()
1011 caam_unmap(qidev, req->src, req->dst, src_nents, in aead_edesc_alloc()
1026 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, in aead_edesc_alloc()
1028 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { in aead_edesc_alloc()
1029 dev_err(qidev, "unable to map assoclen\n"); in aead_edesc_alloc()
1030 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
1049 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); in aead_edesc_alloc()
1050 if (dma_mapping_error(qidev, qm_sg_dma)) { in aead_edesc_alloc()
1051 dev_err(qidev, "unable to map S/G table\n"); in aead_edesc_alloc()
1052 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); in aead_edesc_alloc()
1053 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
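
The dense run of hits above is aead_edesc_alloc()'s mapping sequence: the source/destination S/G lists, the IV, the 4-byte assoclen word, then the QI S/G table, each checked with dma_mapping_error() and, on failure, unwound down to exactly what had been mapped so far. A condensed sketch of that discipline, using a simplified stand-in for the driver's caam_unmap() (the real helper takes more arguments and also releases the QI S/G table):

	/* Simplified stand-in for caam_unmap(); signature is an assumption. */
	static void caam_unmap_sketch(struct device *qidev,
				      struct scatterlist *src,
				      struct scatterlist *dst,
				      int src_nents, int dst_nents,
				      dma_addr_t iv_dma, int ivsize)
	{
		if (dst != src) {
			dma_unmap_sg(qidev, src, src_nents, DMA_TO_DEVICE);
			dma_unmap_sg(qidev, dst, dst_nents, DMA_FROM_DEVICE);
		} else {
			dma_unmap_sg(qidev, src, src_nents, DMA_BIDIRECTIONAL);
		}
		if (iv_dma)
			dma_unmap_single(qidev, iv_dma, ivsize, DMA_TO_DEVICE);
	}

	/* Staged use: at this point only the S/G lists are mapped, so a
	 * failed IV mapping unwinds just those before bailing out. */
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap_sketch(qidev, req->src, req->dst, src_nents,
				  dst_nents, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
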
1104 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); in aead_crypt()
1108 aead_unmap(ctx->qidev, edesc, req); in aead_crypt()
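
aead_crypt() submits the prepared job with caam_qi_enqueue() (line 1104); the same submit-or-unwind shape recurs in ablkcipher_crypt() (1496) and ablkcipher_givencrypt() (1533). On success the request completes asynchronously via the done callback, so the function reports -EINPROGRESS; on a synchronous enqueue failure it must unmap and free immediately. A sketch of that tail (qi_cache_free() assumed as the edesc allocator's counterpart):

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;	/* aead_done() will complete the request */
	} else {
		aead_unmap(ctx->qidev, edesc, req);	/* enqueue failed: undo mappings */
		qi_cache_free(edesc);
	}
	return ret;
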
1147 struct device *qidev = caam_ctx->qidev; in ablkcipher_done() local
1151 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); in ablkcipher_done()
1157 caam_jr_strstatus(qidev, status); in ablkcipher_done()
1168 ablkcipher_unmap(qidev, edesc, req); in ablkcipher_done()
1198 struct device *qidev = ctx->qidev; in ablkcipher_edesc_alloc() local
1217 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in ablkcipher_edesc_alloc()
1225 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", in ablkcipher_edesc_alloc()
1230 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in ablkcipher_edesc_alloc()
1233 dev_err(qidev, "unable to map source\n"); in ablkcipher_edesc_alloc()
1237 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, in ablkcipher_edesc_alloc()
1240 dev_err(qidev, "unable to map destination\n"); in ablkcipher_edesc_alloc()
1241 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); in ablkcipher_edesc_alloc()
1245 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in ablkcipher_edesc_alloc()
1248 dev_err(qidev, "unable to map source\n"); in ablkcipher_edesc_alloc()
1260 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", in ablkcipher_edesc_alloc()
1262 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in ablkcipher_edesc_alloc()
1270 dev_err(qidev, "could not allocate extended descriptor\n"); in ablkcipher_edesc_alloc()
1271 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in ablkcipher_edesc_alloc()
1281 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); in ablkcipher_edesc_alloc()
1282 if (dma_mapping_error(qidev, iv_dma)) { in ablkcipher_edesc_alloc()
1283 dev_err(qidev, "unable to map IV\n"); in ablkcipher_edesc_alloc()
1284 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in ablkcipher_edesc_alloc()
1305 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, in ablkcipher_edesc_alloc()
1307 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { in ablkcipher_edesc_alloc()
1308 dev_err(qidev, "unable to map S/G table\n"); in ablkcipher_edesc_alloc()
1309 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in ablkcipher_edesc_alloc()
1340 struct device *qidev = ctx->qidev; in ablkcipher_giv_edesc_alloc() local
1358 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in ablkcipher_giv_edesc_alloc()
1366 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", in ablkcipher_giv_edesc_alloc()
1371 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in ablkcipher_giv_edesc_alloc()
1374 dev_err(qidev, "unable to map source\n"); in ablkcipher_giv_edesc_alloc()
1378 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, in ablkcipher_giv_edesc_alloc()
1381 dev_err(qidev, "unable to map destination\n"); in ablkcipher_giv_edesc_alloc()
1382 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); in ablkcipher_giv_edesc_alloc()
1386 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in ablkcipher_giv_edesc_alloc()
1389 dev_err(qidev, "unable to map source\n"); in ablkcipher_giv_edesc_alloc()
1404 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", in ablkcipher_giv_edesc_alloc()
1406 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in ablkcipher_giv_edesc_alloc()
1414 dev_err(qidev, "could not allocate extended descriptor\n"); in ablkcipher_giv_edesc_alloc()
1415 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in ablkcipher_giv_edesc_alloc()
1423 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE); in ablkcipher_giv_edesc_alloc()
1424 if (dma_mapping_error(qidev, iv_dma)) { in ablkcipher_giv_edesc_alloc()
1425 dev_err(qidev, "unable to map IV\n"); in ablkcipher_giv_edesc_alloc()
1426 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in ablkcipher_giv_edesc_alloc()
1447 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, in ablkcipher_giv_edesc_alloc()
1449 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { in ablkcipher_giv_edesc_alloc()
1450 dev_err(qidev, "unable to map S/G table\n"); in ablkcipher_giv_edesc_alloc()
1451 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in ablkcipher_giv_edesc_alloc()
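
One asymmetry worth flagging: ablkcipher_giv_edesc_alloc() maps the IV with DMA_FROM_DEVICE (line 1423), while both aead_edesc_alloc() (line 1008) and ablkcipher_edesc_alloc() (line 1281) use DMA_TO_DEVICE. The givencrypt path has the hardware generate the IV and write it back to memory, so the mapping direction flips. Side by side (variable names taken from the listing):

	/* encrypt/decrypt: caller supplies the IV, the device only reads it */
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);

	/* givencrypt: the device generates the IV, the CPU reads it back */
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
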
1496 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); in ablkcipher_crypt()
1500 ablkcipher_unmap(ctx->qidev, edesc, req); in ablkcipher_crypt()
1533 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); in ablkcipher_givencrypt()
1537 ablkcipher_unmap(ctx->qidev, edesc, req); in ablkcipher_givencrypt()
2570 ctx->qidev = priv->qidev; in caam_init_common()
2771 dev_warn(priv->qidev, "%s alg allocation failed\n", in caam_qi_algapi_init()
2778 dev_warn(priv->qidev, "%s alg registration failed\n", in caam_qi_algapi_init()
2836 dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); in caam_qi_algapi_init()
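
The last four hits are setup-time: caam_init_common() (line 2570) stashes priv->qidev into each transform context so the per-request paths above can reach the device, and caam_qi_algapi_init() walks the driver's algorithm templates, logging per-algorithm failures with dev_warn() and a summary dev_info() once registration finishes. A condensed sketch of that loop (the template array, caam_alg_alloc(), and the registered flag are assumptions based on common CAAM driver structure):

	bool registered = false;
	int i, err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg = caam_alg_alloc(&driver_algs[i]);

		if (IS_ERR(t_alg)) {
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;	/* skip this template, try the rest */
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
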