Lines matching refs:jrdev (drivers/crypto/caam/caamrng.c):
71 struct device *jrdev; member
81 static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) in rng_unmap_buf() argument
84 dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE, in rng_unmap_buf()
90 struct device *jrdev = ctx->jrdev; in rng_unmap_ctx() local
93 dma_unmap_single(jrdev, ctx->sh_desc_dma, in rng_unmap_ctx()
95 rng_unmap_buf(jrdev, &ctx->bufs[0]); in rng_unmap_ctx()
96 rng_unmap_buf(jrdev, &ctx->bufs[1]); in rng_unmap_ctx()
99 static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) in rng_done() argument
106 caam_jr_strstatus(jrdev, err); in rng_done()
112 dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); in rng_done()
123 struct device *jrdev = ctx->jrdev; in submit_job() local
127 dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf)); in submit_job()
129 err = caam_jr_enqueue(jrdev, desc, rng_done, ctx); in submit_job()
163 dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n", in caam_read()
184 dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf); in caam_read()
193 struct device *jrdev = ctx->jrdev; in rng_create_sh_desc() local
204 ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), in rng_create_sh_desc()
206 if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) { in rng_create_sh_desc()
207 dev_err(jrdev, "unable to map shared descriptor\n"); in rng_create_sh_desc()
219 struct device *jrdev = ctx->jrdev; in rng_create_job_desc() local
227 bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE); in rng_create_job_desc()
228 if (dma_mapping_error(jrdev, bd->addr)) { in rng_create_job_desc()
229 dev_err(jrdev, "unable to map dst\n"); in rng_create_job_desc()
271 static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) in caam_init_rng() argument
275 ctx->jrdev = jrdev; in caam_init_rng()
299 caam_jr_free(rng_ctx->jrdev); in caam_rng_exit()
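
Most of the hits above trace one pattern: DMA-map a destination buffer against jrdev, enqueue a job descriptor on that job ring with a completion callback, decode the status and sync the buffer in the callback, then unmap. The following is a minimal sketch of that pattern, assuming the driver-internal CAAM headers "jr.h" and "error.h" are on the include path; struct example_req, example_done(), example_submit() and EXAMPLE_BUF_SIZE are hypothetical names standing in for the driver's own (buf_data, rng_done(), submit_job(), RN_BUF_SIZE), and the job descriptor is assumed to be built elsewhere.

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "jr.h"		/* caam_jr_enqueue() */
#include "error.h"	/* caam_jr_strstatus() */

#define EXAMPLE_BUF_SIZE	256	/* stand-in for RN_BUF_SIZE */

struct example_req {
	struct device *jrdev;	/* job ring device owned by the caller */
	void *buf;		/* kmalloc'd destination buffer */
	dma_addr_t addr;	/* DMA address from dma_map_single() */
	struct completion done;
};

/* Completion callback, mirroring rng_done(): decode status, sync for CPU. */
static void example_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
	struct example_req *req = context;

	if (err)
		caam_jr_strstatus(jrdev, err);

	dma_sync_single_for_cpu(jrdev, req->addr, EXAMPLE_BUF_SIZE,
				DMA_FROM_DEVICE);
	complete(&req->done);
}

/*
 * Map the destination buffer, enqueue a prebuilt job descriptor and wait.
 * This is a simplified, per-request variant of rng_create_job_desc() +
 * submit_job() + rng_unmap_buf(); the real driver keeps the mapping alive
 * across jobs and only syncs in the callback.
 */
static int example_submit(struct example_req *req, u32 *desc)
{
	struct device *jrdev = req->jrdev;
	int ret;

	req->addr = dma_map_single(jrdev, req->buf, EXAMPLE_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, req->addr)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

	init_completion(&req->done);

	/*
	 * caam_jr_enqueue() reports acceptance as 0 on older kernels and as
	 * -EINPROGRESS on newer ones; anything else means the job was not
	 * queued and the mapping must be undone here.
	 */
	ret = caam_jr_enqueue(jrdev, desc, example_done, req);
	if (ret && ret != -EINPROGRESS) {
		dma_unmap_single(jrdev, req->addr, EXAMPLE_BUF_SIZE,
				 DMA_FROM_DEVICE);
		return ret;
	}

	wait_for_completion(&req->done);
	dma_unmap_single(jrdev, req->addr, EXAMPLE_BUF_SIZE, DMA_FROM_DEVICE);
	return 0;
}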
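
The remaining hits (caam_init_rng(), rng_create_sh_desc(), rng_unmap_ctx(), caam_rng_exit()) show where jrdev comes from and how it is released: the context stores a job ring device, maps the shared descriptor DMA_TO_DEVICE against it, and tears both down on exit. A minimal sketch of that lifecycle, again with hypothetical example_* names and a fixed-size array standing in for the driver's shared descriptor and desc_bytes():

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>

#include "jr.h"		/* caam_jr_alloc(), caam_jr_free() */

struct example_ctx {
	struct device *jrdev;
	u32 sh_desc[16];		/* placeholder for a prebuilt shared descriptor */
	dma_addr_t sh_desc_dma;
};

/* Acquire a job ring and map the shared descriptor, as caam_init_rng() does. */
static struct example_ctx *example_init(void)
{
	struct example_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		int err = PTR_ERR(ctx->jrdev);

		kfree(ctx);
		return ERR_PTR(err);
	}

	/* The real driver maps desc_bytes(desc); a fixed size stands in here. */
	ctx->sh_desc_dma = dma_map_single(ctx->jrdev, ctx->sh_desc,
					  sizeof(ctx->sh_desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->jrdev, ctx->sh_desc_dma)) {
		dev_err(ctx->jrdev, "unable to map shared descriptor\n");
		caam_jr_free(ctx->jrdev);
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}

	return ctx;
}

/* Tear down in reverse order, as rng_unmap_ctx() and caam_rng_exit() do. */
static void example_exit(struct example_ctx *ctx)
{
	dma_unmap_single(ctx->jrdev, ctx->sh_desc_dma, sizeof(ctx->sh_desc),
			 DMA_TO_DEVICE);
	caam_jr_free(ctx->jrdev);
	kfree(ctx);
}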