Lines matching refs: edesc

Cross-reference listing for the identifier edesc, apparently from the Linux kernel CAAM RSA driver (drivers/crypto/caam/caampkc.c, given the function names). Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument" and "local" tags mark whether edesc is a function parameter or a local variable at that site.

43 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,  in rsa_io_unmap()  argument
48 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); in rsa_io_unmap()
49 dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); in rsa_io_unmap()
51 if (edesc->sec4_sg_bytes) in rsa_io_unmap()
52 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, in rsa_io_unmap()
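
Lines 43-52 cover essentially all of rsa_io_unmap(), which tears down the DMA mappings made at submission time. A hedged reconstruction from these fragments (the req_ctx local via akcipher_request_ctx() is inferred, not shown in the listing):

    static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
                             struct akcipher_request *req)
    {
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

            /* Undo the scatterlist mappings: destination first, then the
             * (possibly leading-zero-stripped) source. */
            dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
            dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

            /* The sec4 S/G table is mapped only when one was built, so
             * sec4_sg_bytes doubles as the "was it mapped?" flag. */
            if (edesc->sec4_sg_bytes)
                    dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
                                     DMA_TO_DEVICE);
    }
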
56 static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_pub_unmap() argument
62 struct rsa_pub_pdb *pdb = &edesc->pdb.pub; in rsa_pub_unmap()
68 static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_priv_f1_unmap() argument
74 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; in rsa_priv_f1_unmap()
80 static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_priv_f2_unmap() argument
86 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; in rsa_priv_f2_unmap()
97 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_priv_f3_unmap() argument
103 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; in rsa_priv_f3_unmap()
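
Lines 56-103 are four sibling helpers, one per protocol data block (PDB) variant stored in the edesc->pdb union; each unmaps the key material that its matching set_rsa_*_pdb() mapped. A hedged sketch of the simplest, public-key case (the key plumbing and the n_dma/e_dma field names are inferred from the CAAM PDB definitions, not shown above):

    static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
    {
            struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
            struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
            struct caam_rsa_key *key = &ctx->key;
            struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

            /* Release the modulus and public exponent mapped for this job */
            dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
            dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
    }

The f1/f2/f3 versions follow the same shape for progressively larger private-key forms, which is why each form needs its own unmap helper.
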
120 struct rsa_edesc *edesc; in rsa_pub_done() local
126 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); in rsa_pub_done()
128 rsa_pub_unmap(dev, edesc, req); in rsa_pub_done()
129 rsa_io_unmap(dev, edesc, req); in rsa_pub_done()
130 kfree(edesc); in rsa_pub_done()
139 struct rsa_edesc *edesc; in rsa_priv_f1_done() local
145 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); in rsa_priv_f1_done()
147 rsa_priv_f1_unmap(dev, edesc, req); in rsa_priv_f1_done()
148 rsa_io_unmap(dev, edesc, req); in rsa_priv_f1_done()
149 kfree(edesc); in rsa_priv_f1_done()
158 struct rsa_edesc *edesc; in rsa_priv_f2_done() local
164 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); in rsa_priv_f2_done()
166 rsa_priv_f2_unmap(dev, edesc, req); in rsa_priv_f2_done()
167 rsa_io_unmap(dev, edesc, req); in rsa_priv_f2_done()
168 kfree(edesc); in rsa_priv_f2_done()
177 struct rsa_edesc *edesc; in rsa_priv_f3_done() local
183 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); in rsa_priv_f3_done()
185 rsa_priv_f3_unmap(dev, edesc, req); in rsa_priv_f3_done()
186 rsa_io_unmap(dev, edesc, req); in rsa_priv_f3_done()
187 kfree(edesc); in rsa_priv_f3_done()
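
Lines 120-187 show the four job-ring completion callbacks. The key idiom is at lines 126/145/164/183: hw_desc[0] is the first word of the hardware descriptor embedded in the edesc, so container_of() on the descriptor pointer handed back by the job ring recovers the whole edesc. A hedged sketch of one callback (status decoding and the completion call are assumed, since the listing only shows the edesc references):

    static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
    {
            struct akcipher_request *req = context;
            struct rsa_edesc *edesc;

            /* The job ring returns a pointer to the descriptor it ran; that
             * descriptor lives inside the edesc, so walk back to the start. */
            edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

            rsa_pub_unmap(dev, edesc, req);
            rsa_io_unmap(dev, edesc, req);
            kfree(edesc);

            /* Report completion to the crypto API caller (the real callback
             * translates the CAAM hardware status in err first). */
            akcipher_request_complete(req, err);
    }
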
251 struct rsa_edesc *edesc; in rsa_edesc_alloc() local
301 edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, in rsa_edesc_alloc()
303 if (!edesc) in rsa_edesc_alloc()
318 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; in rsa_edesc_alloc()
320 dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, in rsa_edesc_alloc()
325 edesc->sec4_sg + !!diff_size, 0); in rsa_edesc_alloc()
329 edesc->sec4_sg + sec4_sg_index, 0); in rsa_edesc_alloc()
332 edesc->src_nents = src_nents; in rsa_edesc_alloc()
333 edesc->dst_nents = dst_nents; in rsa_edesc_alloc()
336 return edesc; in rsa_edesc_alloc()
338 edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, in rsa_edesc_alloc()
340 if (dma_mapping_error(dev, edesc->sec4_sg_dma)) { in rsa_edesc_alloc()
345 edesc->sec4_sg_bytes = sec4_sg_bytes; in rsa_edesc_alloc()
348 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, in rsa_edesc_alloc()
349 edesc->sec4_sg_bytes, 1); in rsa_edesc_alloc()
351 return edesc; in rsa_edesc_alloc()
358 kfree(edesc); in rsa_edesc_alloc()
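
Lines 251-358 are the allocation path. The notable design choice, visible at lines 301 and 318, is a single kzalloc() carrying three regions back to back: the edesc bookkeeping struct, the hardware job descriptor (desclen bytes), and the sec4 scatter/gather table, so one kfree() releases everything. A hedged sketch of that core logic (the locals flags, diff_size, sec4_sg_index, src_len and dst_len, plus the error handling, are assumptions; the listing shows only the edesc lines):

    /*
     * Single allocation, three regions:
     *
     *   [ struct rsa_edesc | hw_desc (desclen bytes) | sec4 S/G table ]
     */
    edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
    if (!edesc)
            return ERR_PTR(-ENOMEM);            /* assumed error convention */

    edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

    /* Table layout: optional zero-padding entry, then source, then dest.
     * The length arguments below are assumptions; the listing shows only
     * the destination-pointer arguments (lines 320-329). */
    if (diff_size)
            dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, 0);
    if (sec4_sg_index)
            sg_to_sec4_sg_last(req_ctx->fixup_src, src_len,
                               edesc->sec4_sg + !!diff_size, 0);
    if (dst_nents > 1)
            sg_to_sec4_sg_last(req->dst, dst_len,
                               edesc->sec4_sg + sec4_sg_index, 0);

    edesc->src_nents = src_nents;
    edesc->dst_nents = dst_nents;

    if (!sec4_sg_bytes)
            return edesc;                       /* no table needed (line 336) */

    edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
            kfree(edesc);                       /* plus I/O unmaps (line 358) */
            return ERR_PTR(-ENOMEM);
    }

    edesc->sec4_sg_bytes = sec4_sg_bytes;
    return edesc;
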
363 struct rsa_edesc *edesc) in set_rsa_pub_pdb() argument
370 struct rsa_pub_pdb *pdb = &edesc->pdb.pub; in set_rsa_pub_pdb()
386 if (edesc->src_nents > 1) { in set_rsa_pub_pdb()
388 pdb->f_dma = edesc->sec4_sg_dma; in set_rsa_pub_pdb()
389 sec4_sg_index += edesc->src_nents; in set_rsa_pub_pdb()
394 if (edesc->dst_nents > 1) { in set_rsa_pub_pdb()
396 pdb->g_dma = edesc->sec4_sg_dma + in set_rsa_pub_pdb()
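
Lines 363-396 show the public-key PDB setup, and the pattern at lines 386-396 recurs in all four setters: if the source (or destination) needed more than one S/G entry, point the PDB's f_dma (or g_dma) at the shared sec4 table mapped in rsa_edesc_alloc(), offsetting past the source entries for the destination; otherwise point it straight at the single mapped segment. A hedged sketch of that pattern (the SGF flag names and the direct-pointer else-branches are inferred, not in the listing):

    int sec4_sg_index = 0;

    if (edesc->src_nents > 1) {
            /* Source input goes through the shared sec4 S/G table */
            pdb->sgf |= RSA_PDB_SGF_F;              /* assumed flag name */
            pdb->f_dma = edesc->sec4_sg_dma;
            sec4_sg_index += edesc->src_nents;
    } else {
            pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
    }

    if (edesc->dst_nents > 1) {
            /* Destination entries follow the source entries in the table */
            pdb->sgf |= RSA_PDB_SGF_G;              /* assumed flag name */
            pdb->g_dma = edesc->sec4_sg_dma +
                         sec4_sg_index * sizeof(struct sec4_sg_entry);
    } else {
            pdb->g_dma = sg_dma_address(req->dst);
    }
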
409 struct rsa_edesc *edesc) in set_rsa_priv_f1_pdb() argument
415 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; in set_rsa_priv_f1_pdb()
431 if (edesc->src_nents > 1) { in set_rsa_priv_f1_pdb()
433 pdb->g_dma = edesc->sec4_sg_dma; in set_rsa_priv_f1_pdb()
434 sec4_sg_index += edesc->src_nents; in set_rsa_priv_f1_pdb()
441 if (edesc->dst_nents > 1) { in set_rsa_priv_f1_pdb()
443 pdb->f_dma = edesc->sec4_sg_dma + in set_rsa_priv_f1_pdb()
455 struct rsa_edesc *edesc) in set_rsa_priv_f2_pdb() argument
461 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; in set_rsa_priv_f2_pdb()
496 if (edesc->src_nents > 1) { in set_rsa_priv_f2_pdb()
498 pdb->g_dma = edesc->sec4_sg_dma; in set_rsa_priv_f2_pdb()
499 sec4_sg_index += edesc->src_nents; in set_rsa_priv_f2_pdb()
506 if (edesc->dst_nents > 1) { in set_rsa_priv_f2_pdb()
508 pdb->f_dma = edesc->sec4_sg_dma + in set_rsa_priv_f2_pdb()
532 struct rsa_edesc *edesc) in set_rsa_priv_f3_pdb() argument
538 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; in set_rsa_priv_f3_pdb()
585 if (edesc->src_nents > 1) { in set_rsa_priv_f3_pdb()
587 pdb->g_dma = edesc->sec4_sg_dma; in set_rsa_priv_f3_pdb()
588 sec4_sg_index += edesc->src_nents; in set_rsa_priv_f3_pdb()
595 if (edesc->dst_nents > 1) { in set_rsa_priv_f3_pdb()
597 pdb->f_dma = edesc->sec4_sg_dma + in set_rsa_priv_f3_pdb()
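
The three private-key setters at lines 409-597 repeat this src/dst indirection verbatim (lines 431-443, 496-508, 585-597); they differ only in which key components are DMA-mapped into the PDB beforehand: form 1 maps the plain (n, d) pair, form 2 presumably adds the primes p and q plus scratch buffers, and form 3 the full CRT set (p, q, dp, dq, c). That is also why each form has a dedicated unmap helper above.
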
630 struct rsa_edesc *edesc; in caam_rsa_enc() local
643 edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN); in caam_rsa_enc()
644 if (IS_ERR(edesc)) in caam_rsa_enc()
645 return PTR_ERR(edesc); in caam_rsa_enc()
648 ret = set_rsa_pub_pdb(req, edesc); in caam_rsa_enc()
653 init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub); in caam_rsa_enc()
655 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req); in caam_rsa_enc()
659 rsa_pub_unmap(jrdev, edesc, req); in caam_rsa_enc()
662 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_enc()
663 kfree(edesc); in caam_rsa_enc()
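
Lines 630-663 give the complete edesc life cycle in one place: allocate, fill the PDB, build the descriptor around it, and enqueue, unwinding in reverse order on failure. A hedged reconstruction of caam_rsa_enc() (the -EINPROGRESS return, the ctx->dev field, and the goto label are inferred from the unwind lines 659-663):

    static int caam_rsa_enc(struct akcipher_request *req)
    {
            struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
            struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
            struct device *jrdev = ctx->dev;    /* assumed field name */
            struct rsa_edesc *edesc;
            int ret;

            /* Allocate the extended descriptor sized for a public-key job */
            edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
            if (IS_ERR(edesc))
                    return PTR_ERR(edesc);

            /* Map the key material into the protocol data block */
            ret = set_rsa_pub_pdb(req, edesc);
            if (ret)
                    goto init_fail;

            /* Build the job descriptor in place around the PDB */
            init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

            ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
            if (!ret)
                    return -EINPROGRESS;        /* rsa_pub_done() now owns edesc */

            rsa_pub_unmap(jrdev, edesc, req);
    init_fail:
            rsa_io_unmap(jrdev, edesc, req);
            kfree(edesc);
            return ret;
    }
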
672 struct rsa_edesc *edesc; in caam_rsa_dec_priv_f1() local
676 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); in caam_rsa_dec_priv_f1()
677 if (IS_ERR(edesc)) in caam_rsa_dec_priv_f1()
678 return PTR_ERR(edesc); in caam_rsa_dec_priv_f1()
681 ret = set_rsa_priv_f1_pdb(req, edesc); in caam_rsa_dec_priv_f1()
686 init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1); in caam_rsa_dec_priv_f1()
688 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req); in caam_rsa_dec_priv_f1()
692 rsa_priv_f1_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f1()
695 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f1()
696 kfree(edesc); in caam_rsa_dec_priv_f1()
705 struct rsa_edesc *edesc; in caam_rsa_dec_priv_f2() local
709 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN); in caam_rsa_dec_priv_f2()
710 if (IS_ERR(edesc)) in caam_rsa_dec_priv_f2()
711 return PTR_ERR(edesc); in caam_rsa_dec_priv_f2()
714 ret = set_rsa_priv_f2_pdb(req, edesc); in caam_rsa_dec_priv_f2()
719 init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); in caam_rsa_dec_priv_f2()
721 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req); in caam_rsa_dec_priv_f2()
725 rsa_priv_f2_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f2()
728 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f2()
729 kfree(edesc); in caam_rsa_dec_priv_f2()
738 struct rsa_edesc *edesc; in caam_rsa_dec_priv_f3() local
742 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN); in caam_rsa_dec_priv_f3()
743 if (IS_ERR(edesc)) in caam_rsa_dec_priv_f3()
744 return PTR_ERR(edesc); in caam_rsa_dec_priv_f3()
747 ret = set_rsa_priv_f3_pdb(req, edesc); in caam_rsa_dec_priv_f3()
752 init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); in caam_rsa_dec_priv_f3()
754 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req); in caam_rsa_dec_priv_f3()
758 rsa_priv_f3_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f3()
761 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f3()
762 kfree(edesc); in caam_rsa_dec_priv_f3()
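
caam_rsa_dec_priv_f1/f2/f3 (lines 672-762) are line-for-line copies of this flow with the matching descriptor length, PDB setter, descriptor builder, done callback, and unmap helper substituted. Presumably a common decrypt entry point, not matched by this search because it never touches edesc, dispatches among the three based on which private-key form the key carries.
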