Lines Matching +full:imx23 +full:-dcp

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
8 #include <linux/dma-mapping.h>
46 /* DCP DMA descriptor. */
70 struct dcp {
95 /* SHA Hash-specific context */
100 /* Crypto-specific context */
123 * There can be only one instance of the MXS DCP due to the
126 static struct dcp *global_sdcp;
128 /* DCP register layout. */
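The search only matches the descriptor comment at line 46, but the descriptor's shape can be read off the assignments matched further down (desc->next_cmd_addr through desc->status). A reconstruction sketch; field order and types are inferred from those assignments, not copied from the source:

/*
 * Sketch of the DMA descriptor implied by the desc-> assignments
 * visible below.  Layout is an assumption reconstructed from this
 * listing.
 */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;	/* 0 terminates the chain */
	uint32_t	control0;	/* MXS_DCP_CONTROL0_* flags */
	uint32_t	control1;	/* cipher/hash select and mode */
	uint32_t	source;		/* DMA address of input buffer */
	uint32_t	destination;	/* DMA address of output buffer */
	uint32_t	size;		/* payload length in bytes */
	uint32_t	payload;	/* key or digest DMA address */
	uint32_t	status;		/* written back by the DCP */
};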
174 struct dcp *sdcp = global_sdcp; in mxs_dcp_start_dma()
175 const int chan = actx->chan; in mxs_dcp_start_dma()
178 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; in mxs_dcp_start_dma()
179 dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc), in mxs_dcp_start_dma()
182 dma_err = dma_mapping_error(sdcp->dev, desc_phys); in mxs_dcp_start_dma()
186 reinit_completion(&sdcp->completion[chan]); in mxs_dcp_start_dma()
189 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan)); in mxs_dcp_start_dma()
192 writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan)); in mxs_dcp_start_dma()
195 writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan)); in mxs_dcp_start_dma()
197 ret = wait_for_completion_timeout(&sdcp->completion[chan], in mxs_dcp_start_dma()
200 dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n", in mxs_dcp_start_dma()
201 chan, readl(sdcp->base + MXS_DCP_STAT)); in mxs_dcp_start_dma()
202 return -ETIMEDOUT; in mxs_dcp_start_dma()
205 stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan)); in mxs_dcp_start_dma()
207 dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n", in mxs_dcp_start_dma()
209 return -EINVAL; in mxs_dcp_start_dma()
212 dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE); in mxs_dcp_start_dma()
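Lines 174-212 are the whole per-channel handshake. Reassembled as one sequence, a minimal sketch: the msecs_to_jiffies(1000) timeout and the 0xff error mask are assumptions, only the register accesses come from the matched lines.

static int dcp_kick_channel_sketch(struct dcp *sdcp, int chan,
				   dma_addr_t desc_phys)
{
	uint32_t stat;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear any stale per-channel status bits. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Point the channel at the descriptor chain... */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* ...and bump the semaphore; this is what starts the engine. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	/* The IRQ handler complete()s us once the channel bit fires. */
	if (!wait_for_completion_timeout(&sdcp->completion[chan],
					 msecs_to_jiffies(1000))) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {		/* assumed error mask */
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}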
224 struct dcp *sdcp = global_sdcp; in mxs_dcp_run_aes()
225 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; in mxs_dcp_run_aes()
229 key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, in mxs_dcp_run_aes()
231 ret = dma_mapping_error(sdcp->dev, key_phys); in mxs_dcp_run_aes()
235 src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, in mxs_dcp_run_aes()
237 ret = dma_mapping_error(sdcp->dev, src_phys); in mxs_dcp_run_aes()
241 dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, in mxs_dcp_run_aes()
243 ret = dma_mapping_error(sdcp->dev, dst_phys); in mxs_dcp_run_aes()
247 if (actx->fill % AES_BLOCK_SIZE) { in mxs_dcp_run_aes()
248 dev_err(sdcp->dev, "Invalid block size!\n"); in mxs_dcp_run_aes()
249 ret = -EINVAL; in mxs_dcp_run_aes()
254 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | in mxs_dcp_run_aes()
259 desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; in mxs_dcp_run_aes()
261 if (rctx->enc) in mxs_dcp_run_aes()
262 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; in mxs_dcp_run_aes()
264 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; in mxs_dcp_run_aes()
266 desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; in mxs_dcp_run_aes()
268 if (rctx->ecb) in mxs_dcp_run_aes()
269 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; in mxs_dcp_run_aes()
271 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; in mxs_dcp_run_aes()
273 desc->next_cmd_addr = 0; in mxs_dcp_run_aes()
274 desc->source = src_phys; in mxs_dcp_run_aes()
275 desc->destination = dst_phys; in mxs_dcp_run_aes()
276 desc->size = actx->fill; in mxs_dcp_run_aes()
277 desc->payload = key_phys; in mxs_dcp_run_aes()
278 desc->status = 0; in mxs_dcp_run_aes()
283 dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); in mxs_dcp_run_aes()
285 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); in mxs_dcp_run_aes()
287 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, in mxs_dcp_run_aes()
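Lines 229-287 bracket mxs_dcp_run_aes() with three dma_map_single() calls and three dma_unmap_single() calls in reverse order; the key buffer is mapped at 2 * AES_KEYSIZE_128 because key and IV share it (see lines 323-327). A sketch of the goto ladder this implies; the label names and exact error handling are assumptions:

static int mxs_dcp_run_aes_sketch(struct dcp *sdcp, struct dcp_async_ctx *actx)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	int ret;

	/* Key at offset 0, IV at AES_KEYSIZE_128: one shared payload. */
	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_unmap_key;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_unmap_src;

	/* ... descriptor setup (lines 254-278) and the DMA kick ... */
	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_unmap_src:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_unmap_key:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	return ret;
}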
295 struct dcp *sdcp = global_sdcp; in mxs_dcp_aes_block_crypt()
298 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); in mxs_dcp_aes_block_crypt()
301 struct scatterlist *dst = req->dst; in mxs_dcp_aes_block_crypt()
302 struct scatterlist *src = req->src; in mxs_dcp_aes_block_crypt()
306 uint8_t *in_buf = sdcp->coh->aes_in_buf; in mxs_dcp_aes_block_crypt()
307 uint8_t *out_buf = sdcp->coh->aes_out_buf; in mxs_dcp_aes_block_crypt()
313 uint8_t *key = sdcp->coh->aes_key; in mxs_dcp_aes_block_crypt()
320 actx->fill = 0; in mxs_dcp_aes_block_crypt()
323 memcpy(key, actx->key, actx->key_len); in mxs_dcp_aes_block_crypt()
325 if (!rctx->ecb) { in mxs_dcp_aes_block_crypt()
327 memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128); in mxs_dcp_aes_block_crypt()
334 for_each_sg(req->src, src, sg_nents(src), i) { in mxs_dcp_aes_block_crypt()
338 limit_hit = tlen > req->cryptlen; in mxs_dcp_aes_block_crypt()
341 len = req->cryptlen - (tlen - len); in mxs_dcp_aes_block_crypt()
344 if (actx->fill + len > out_off) in mxs_dcp_aes_block_crypt()
345 clen = out_off - actx->fill; in mxs_dcp_aes_block_crypt()
349 memcpy(in_buf + actx->fill, src_buf, clen); in mxs_dcp_aes_block_crypt()
350 len -= clen; in mxs_dcp_aes_block_crypt()
352 actx->fill += clen; in mxs_dcp_aes_block_crypt()
358 if (actx->fill == out_off || sg_is_last(src) || in mxs_dcp_aes_block_crypt()
366 actx->fill, dst_off); in mxs_dcp_aes_block_crypt()
367 dst_off += actx->fill; in mxs_dcp_aes_block_crypt()
368 last_out_len = actx->fill; in mxs_dcp_aes_block_crypt()
369 actx->fill = 0; in mxs_dcp_aes_block_crypt()
378 if (!rctx->ecb) { in mxs_dcp_aes_block_crypt()
379 if (rctx->enc) in mxs_dcp_aes_block_crypt()
380 memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE), in mxs_dcp_aes_block_crypt()
383 memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE), in mxs_dcp_aes_block_crypt()
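Lines 378-383 carry the CBC IV across requests, and the asymmetry matters: after encryption the next IV is the last ciphertext block just produced (tail of out_buf); after decryption it is the last ciphertext block consumed (tail of in_buf, since out_buf now holds plaintext). A minimal restatement reusing the function's own locals:

/* req->iv must end up holding the last ciphertext block processed,
 * wherever that block currently lives. */
if (!rctx->ecb) {
	const uint8_t *last_cipher_block = rctx->enc ?
		out_buf + last_out_len - AES_BLOCK_SIZE : /* just produced */
		in_buf + last_out_len - AES_BLOCK_SIZE;   /* just consumed */
	memcpy(req->iv, last_cipher_block, AES_BLOCK_SIZE);
}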
392 struct dcp *sdcp = global_sdcp; in dcp_chan_thread_aes()
403 spin_lock(&sdcp->lock[chan]); in dcp_chan_thread_aes()
404 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_aes()
405 arq = crypto_dequeue_request(&sdcp->queue[chan]); in dcp_chan_thread_aes()
406 spin_unlock(&sdcp->lock[chan]); in dcp_chan_thread_aes()
416 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_aes()
420 arq->complete(arq, ret); in dcp_chan_thread_aes()
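Lines 392-420 are the consumer half of a classic crypto_queue arrangement; the producer half appears at lines 450-467 (enqueue under the same per-channel lock, then wake_up_process()). A sketch of the full worker loop: the TASK_INTERRUPTIBLE/schedule() idling is an assumption, while the locked dequeue and the two completion calls come from the matched lines.

static int dcp_chan_thread_sketch(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;
	struct crypto_async_request *backlog, *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();	/* nothing queued, go idle */
			continue;
		}

		set_current_state(TASK_RUNNING);

		/* A backlogged request may now be resubmitted. */
		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}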
434 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); in mxs_dcp_block_fallback()
435 skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, in mxs_dcp_block_fallback()
436 req->base.complete, req->base.data); in mxs_dcp_block_fallback()
437 skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, in mxs_dcp_block_fallback()
438 req->cryptlen, req->iv); in mxs_dcp_block_fallback()
441 ret = crypto_skcipher_encrypt(&rctx->fallback_req); in mxs_dcp_block_fallback()
443 ret = crypto_skcipher_decrypt(&rctx->fallback_req); in mxs_dcp_block_fallback()
450 struct dcp *sdcp = global_sdcp; in mxs_dcp_aes_enqueue()
451 struct crypto_async_request *arq = &req->base; in mxs_dcp_aes_enqueue()
452 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); in mxs_dcp_aes_enqueue()
456 if (unlikely(actx->key_len != AES_KEYSIZE_128)) in mxs_dcp_aes_enqueue()
459 rctx->enc = enc; in mxs_dcp_aes_enqueue()
460 rctx->ecb = ecb; in mxs_dcp_aes_enqueue()
461 actx->chan = DCP_CHAN_CRYPTO; in mxs_dcp_aes_enqueue()
463 spin_lock(&sdcp->lock[actx->chan]); in mxs_dcp_aes_enqueue()
464 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); in mxs_dcp_aes_enqueue()
465 spin_unlock(&sdcp->lock[actx->chan]); in mxs_dcp_aes_enqueue()
467 wake_up_process(sdcp->thread[actx->chan]); in mxs_dcp_aes_enqueue()
502 actx->key_len = len; in mxs_dcp_aes_setkey()
504 memcpy(actx->key, key, len); in mxs_dcp_aes_setkey()
510 * but is supported by the in-kernel software implementation, we use in mxs_dcp_aes_setkey()
513 crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); in mxs_dcp_aes_setkey()
514 crypto_skcipher_set_flags(actx->fallback, in mxs_dcp_aes_setkey()
515 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); in mxs_dcp_aes_setkey()
516 return crypto_skcipher_setkey(actx->fallback, key, len); in mxs_dcp_aes_setkey()
529 actx->fallback = blk; in mxs_dcp_aes_fallback_init_tfm()
539 crypto_free_skcipher(actx->fallback); in mxs_dcp_aes_fallback_exit_tfm()
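Lines 434-443, 502-516 and 529-539 together are the standard software-fallback pattern: allocate a fallback skcipher at tfm init, mirror the caller's request flags and key onto it in setkey, clone the request wholesale in the crypt path, and free it at exit. The allocation itself is not matched by the search; a sketch of what it presumably looks like:

/*
 * Fallback allocation implied by lines 529/539.  The
 * crypto_alloc_skcipher() call and the reqsize bookkeeping are
 * assumptions based on the common pattern; only the actx->fallback
 * assignment and crypto_free_skcipher() appear in the listing.
 */
static int mxs_dcp_aes_fallback_init_sketch(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;

	/* Request ctx must leave room for the fallback's own request. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}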
547 struct dcp *sdcp = global_sdcp; in mxs_dcp_run_sha()
553 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; in mxs_dcp_run_sha()
556 dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, in mxs_dcp_run_sha()
559 ret = dma_mapping_error(sdcp->dev, buf_phys); in mxs_dcp_run_sha()
564 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | in mxs_dcp_run_sha()
567 if (rctx->init) in mxs_dcp_run_sha()
568 desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT; in mxs_dcp_run_sha()
570 desc->control1 = actx->alg; in mxs_dcp_run_sha()
571 desc->next_cmd_addr = 0; in mxs_dcp_run_sha()
572 desc->source = buf_phys; in mxs_dcp_run_sha()
573 desc->destination = 0; in mxs_dcp_run_sha()
574 desc->size = actx->fill; in mxs_dcp_run_sha()
575 desc->payload = 0; in mxs_dcp_run_sha()
576 desc->status = 0; in mxs_dcp_run_sha()
581 if (rctx->init && rctx->fini && desc->size == 0) { in mxs_dcp_run_sha()
584 (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ? in mxs_dcp_run_sha()
586 memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize); in mxs_dcp_run_sha()
592 if (rctx->fini) { in mxs_dcp_run_sha()
593 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf, in mxs_dcp_run_sha()
595 ret = dma_mapping_error(sdcp->dev, digest_phys); in mxs_dcp_run_sha()
599 desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; in mxs_dcp_run_sha()
600 desc->payload = digest_phys; in mxs_dcp_run_sha()
605 if (rctx->fini) in mxs_dcp_run_sha()
606 dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ, in mxs_dcp_run_sha()
610 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); in mxs_dcp_run_sha()
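Lines 581-586 short-circuit the degenerate init+fini request with no data: instead of pushing an empty buffer through the engine, a precomputed empty-message digest is copied into sha_out_buf. The constant below sketches what such a table entry holds, in normal byte order; since dcp_sha_req_to_buf() reverses the output buffer byte by byte (lines 680-681), the driver presumably keeps its canned digests pre-reversed, which this one is not.

/* SHA-1 of the empty message, for the init+fini, size == 0 case. */
static const uint8_t sha1_empty_digest[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
	0xaf, 0xd8, 0x07, 0x09,
};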
617 struct dcp *sdcp = global_sdcp; in dcp_sha_req_to_buf()
625 uint8_t *in_buf = sdcp->coh->sha_in_buf; in dcp_sha_req_to_buf()
626 uint8_t *out_buf = sdcp->coh->sha_out_buf; in dcp_sha_req_to_buf()
633 int fin = rctx->fini; in dcp_sha_req_to_buf()
635 rctx->fini = 0; in dcp_sha_req_to_buf()
637 src = req->src; in dcp_sha_req_to_buf()
638 len = req->nbytes; in dcp_sha_req_to_buf()
641 if (actx->fill + len > DCP_BUF_SZ) in dcp_sha_req_to_buf()
642 clen = DCP_BUF_SZ - actx->fill; in dcp_sha_req_to_buf()
646 scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, in dcp_sha_req_to_buf()
649 len -= clen; in dcp_sha_req_to_buf()
651 actx->fill += clen; in dcp_sha_req_to_buf()
657 if (len && actx->fill == DCP_BUF_SZ) { in dcp_sha_req_to_buf()
661 actx->fill = 0; in dcp_sha_req_to_buf()
662 rctx->init = 0; in dcp_sha_req_to_buf()
667 rctx->fini = 1; in dcp_sha_req_to_buf()
670 if (!req->result) in dcp_sha_req_to_buf()
671 return -EINVAL; in dcp_sha_req_to_buf()
677 actx->fill = 0; in dcp_sha_req_to_buf()
680 for (i = 0; i < halg->digestsize; i++) in dcp_sha_req_to_buf()
681 req->result[i] = out_buf[halg->digestsize - i - 1]; in dcp_sha_req_to_buf()
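Lines 637-662 stream a scatterlist of arbitrary length through the fixed DCP_BUF_SZ bounce buffer, running an intermediate hash pass whenever the buffer fills with more input pending. A sketch of that loop, reusing dcp_sha_req_to_buf()'s locals; the do/while framing and the placement of the mxs_dcp_run_sha() call are assumptions:

do {
	/* Copy as much as fits into the remaining buffer space. */
	if (actx->fill + len > DCP_BUF_SZ)
		clen = DCP_BUF_SZ - actx->fill;
	else
		clen = len;

	scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, 0);

	len -= clen;
	oft += clen;
	actx->fill += clen;

	/*
	 * Buffer full with more data pending: flush it through the
	 * engine as an intermediate pass.  Only the first pass
	 * carries the HASH_INIT flag.
	 */
	if (len && actx->fill == DCP_BUF_SZ) {
		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;
		actx->fill = 0;
		rctx->init = 0;
	}
} while (len);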
689 struct dcp *sdcp = global_sdcp; in dcp_chan_thread_sha()
699 spin_lock(&sdcp->lock[chan]); in dcp_chan_thread_sha()
700 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_sha()
701 arq = crypto_dequeue_request(&sdcp->queue[chan]); in dcp_chan_thread_sha()
702 spin_unlock(&sdcp->lock[chan]); in dcp_chan_thread_sha()
712 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_sha()
716 arq->complete(arq, ret); in dcp_chan_thread_sha()
736 if (strcmp(halg->base.cra_name, "sha1") == 0) in dcp_sha_init()
737 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1; in dcp_sha_init()
739 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256; in dcp_sha_init()
741 actx->fill = 0; in dcp_sha_init()
742 actx->hot = 0; in dcp_sha_init()
743 actx->chan = DCP_CHAN_HASH_SHA; in dcp_sha_init()
745 mutex_init(&actx->mutex); in dcp_sha_init()
752 struct dcp *sdcp = global_sdcp; in dcp_sha_update_fx()
764 if (!req->nbytes && !fini) in dcp_sha_update_fx()
767 mutex_lock(&actx->mutex); in dcp_sha_update_fx()
769 rctx->fini = fini; in dcp_sha_update_fx()
771 if (!actx->hot) { in dcp_sha_update_fx()
772 actx->hot = 1; in dcp_sha_update_fx()
773 rctx->init = 1; in dcp_sha_update_fx()
776 spin_lock(&sdcp->lock[actx->chan]); in dcp_sha_update_fx()
777 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); in dcp_sha_update_fx()
778 spin_unlock(&sdcp->lock[actx->chan]); in dcp_sha_update_fx()
780 wake_up_process(sdcp->thread[actx->chan]); in dcp_sha_update_fx()
781 mutex_unlock(&actx->mutex); in dcp_sha_update_fx()
793 ahash_request_set_crypt(req, NULL, req->result, 0); in dcp_sha_final()
794 req->nbytes = 0; in dcp_sha_final()
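Lines 752-794 imply that the exported hash entry points are thin shims over dcp_sha_update_fx(req, fini): update passes fini = 0, final drops the data and passes fini = 1. A sketch of the wrapper set; dcp_sha_finup() and dcp_sha_digest() are assumptions based on the usual ahash complement and do not appear in the matched lines.

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	/* No more data; just terminate the hash and emit the digest. */
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);	/* assumed wrapper */
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);		/* assumed wrapper */
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}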
823 memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx)); in dcp_sha_import()
824 memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx)); in dcp_sha_import()
836 memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx)); in dcp_sha_export()
837 memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx)); in dcp_sha_export()
857 .base.cra_driver_name = "ecb-aes-dcp",
875 .base.cra_driver_name = "cbc-aes-dcp",
909 .cra_driver_name = "sha1-dcp",
936 .cra_driver_name = "sha256-dcp",
951 struct dcp *sdcp = context; in mxs_dcp_irq()
955 stat = readl(sdcp->base + MXS_DCP_STAT); in mxs_dcp_irq()
961 writel(stat, sdcp->base + MXS_DCP_STAT_CLR); in mxs_dcp_irq()
966 complete(&sdcp->completion[i]); in mxs_dcp_irq()
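Lines 951-966 show the interrupt path end to end: sample the aggregate status, acknowledge it, and complete() every channel whose bit fired, which is what releases the wait in mxs_dcp_start_dma(). A reassembled sketch; MXS_DCP_STAT_IRQ_MASK and DCP_MAX_CHANS are assumed names for the channel-bit mask and channel count:

static irqreturn_t mxs_dcp_irq_sketch(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;	/* assumed channel-bit mask */
	if (!stat)
		return IRQ_NONE;

	/* Acknowledge exactly the sources we are about to handle. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Wake every waiter whose channel bit fired. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}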
973 struct device *dev = &pdev->dev; in mxs_dcp_probe()
974 struct dcp *sdcp = NULL; in mxs_dcp_probe()
979 dev_err(dev, "Only one DCP instance allowed!\n"); in mxs_dcp_probe()
980 return -ENODEV; in mxs_dcp_probe()
993 return -ENOMEM; in mxs_dcp_probe()
995 sdcp->dev = dev; in mxs_dcp_probe()
996 sdcp->base = devm_platform_ioremap_resource(pdev, 0); in mxs_dcp_probe()
997 if (IS_ERR(sdcp->base)) in mxs_dcp_probe()
998 return PTR_ERR(sdcp->base); in mxs_dcp_probe()
1002 "dcp-vmi-irq", sdcp); in mxs_dcp_probe()
1004 dev_err(dev, "Failed to claim DCP VMI IRQ!\n"); in mxs_dcp_probe()
1009 "dcp-irq", sdcp); in mxs_dcp_probe()
1011 dev_err(dev, "Failed to claim DCP IRQ!\n"); in mxs_dcp_probe()
1016 sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT, in mxs_dcp_probe()
1018 if (!sdcp->coh) in mxs_dcp_probe()
1019 return -ENOMEM; in mxs_dcp_probe()
1021 /* Re-align the structure so it fits the DCP constraints. */ in mxs_dcp_probe()
1022 sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT); in mxs_dcp_probe()
1024 /* DCP clock is optional, only used on some SoCs */ in mxs_dcp_probe()
1025 sdcp->dcp_clk = devm_clk_get(dev, "dcp"); in mxs_dcp_probe()
1026 if (IS_ERR(sdcp->dcp_clk)) { in mxs_dcp_probe()
1027 if (sdcp->dcp_clk != ERR_PTR(-ENOENT)) in mxs_dcp_probe()
1028 return PTR_ERR(sdcp->dcp_clk); in mxs_dcp_probe()
1029 sdcp->dcp_clk = NULL; in mxs_dcp_probe()
1031 ret = clk_prepare_enable(sdcp->dcp_clk); in mxs_dcp_probe()
1035 /* Restart the DCP block. */ in mxs_dcp_probe()
1036 ret = stmp_reset_block(sdcp->base); in mxs_dcp_probe()
1045 sdcp->base + MXS_DCP_CTRL); in mxs_dcp_probe()
1047 /* Enable all DCP DMA channels. */ in mxs_dcp_probe()
1049 sdcp->base + MXS_DCP_CHANNELCTRL); in mxs_dcp_probe()
1054 * inadvertently enabled, the DCP will return an error instead of in mxs_dcp_probe()
1055 * trashing good memory. The DCP DMA cannot access ROM, so any ROM in mxs_dcp_probe()
1058 writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT); in mxs_dcp_probe()
1060 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i)); in mxs_dcp_probe()
1061 writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR); in mxs_dcp_probe()
1068 spin_lock_init(&sdcp->lock[i]); in mxs_dcp_probe()
1069 init_completion(&sdcp->completion[i]); in mxs_dcp_probe()
1070 crypto_init_queue(&sdcp->queue[i], 50); in mxs_dcp_probe()
1074 sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha, in mxs_dcp_probe()
1076 if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { in mxs_dcp_probe()
1078 ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); in mxs_dcp_probe()
1082 sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, in mxs_dcp_probe()
1084 if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) { in mxs_dcp_probe()
1086 ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]); in mxs_dcp_probe()
1091 sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1); in mxs_dcp_probe()
1093 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) { in mxs_dcp_probe()
1103 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) { in mxs_dcp_probe()
1112 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) { in mxs_dcp_probe()
1124 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) in mxs_dcp_probe()
1128 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) in mxs_dcp_probe()
1132 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); in mxs_dcp_probe()
1135 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); in mxs_dcp_probe()
1138 clk_disable_unprepare(sdcp->dcp_clk); in mxs_dcp_probe()
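Lines 1074-1138 show probe registering resources in a fixed order (SHA thread, crypto thread, AES algs, SHA-1, SHA-256) and unwinding them in exactly the reverse order on failure, ending at the clock. A sketch of the tail of that ladder; the label and alg names are invented for illustration:

	/* Last registration step: everything before it must unwind. */
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg); /* assumed name */
		if (ret)
			goto err_unregister_sha1;
	}
	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);	/* assumed name */
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skcipher(&dcp_aes_alg); /* assumed name */
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	clk_disable_unprepare(sdcp->dcp_clk);
	return ret;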
1145 struct dcp *sdcp = platform_get_drvdata(pdev); in mxs_dcp_remove()
1147 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) in mxs_dcp_remove()
1150 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) in mxs_dcp_remove()
1153 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) in mxs_dcp_remove()
1156 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); in mxs_dcp_remove()
1157 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); in mxs_dcp_remove()
1159 clk_disable_unprepare(sdcp->dcp_clk); in mxs_dcp_remove()
1169 { .compatible = "fsl,imx23-dcp", .data = NULL, },
1170 { .compatible = "fsl,imx28-dcp", .data = NULL, },
1180 .name = "mxs-dcp",
1188 MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1190 MODULE_ALIAS("platform:mxs-dcp");
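Lines 1169-1190 bind the driver to its two devicetree compatibles and give the platform driver its name. A sketch of how these pieces presumably hang together; the MODULE_DEVICE_TABLE() and module_platform_driver() boilerplate follows the standard pattern rather than the matched lines:

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};
module_platform_driver(mxs_dcp_driver);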