Lines Matching "big-endian-desc"
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
11 #include <linux/dma-mapping.h>
19 #include "ccp-dev.h"
56 #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
61 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; in ccp_gen_jobid()
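
The helper above is a per-device job-ID generator: an atomic counter masked down to the ID field's width. A minimal userspace sketch of the same pattern (the 6-bit mask mirrors what CCP_JOBID_MASK is assumed to be):

    #include <stdatomic.h>

    #define JOBID_MASK 0x0000003f   /* assumed width of the job-ID field */

    static atomic_uint current_id;

    static unsigned int gen_jobid(void)
    {
            /* increment first, then keep the low bits so IDs wrap */
            return (atomic_fetch_add(&current_id, 1) + 1) & JOBID_MASK;
    }
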
66 if (wa->dma_count) in ccp_sg_free()
67 dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); in ccp_sg_free()
69 wa->dma_count = 0; in ccp_sg_free()
78 wa->sg = sg; in ccp_init_sg_workarea()
82 wa->nents = sg_nents_for_len(sg, len); in ccp_init_sg_workarea()
83 if (wa->nents < 0) in ccp_init_sg_workarea()
84 return wa->nents; in ccp_init_sg_workarea()
86 wa->bytes_left = len; in ccp_init_sg_workarea()
87 wa->sg_used = 0; in ccp_init_sg_workarea()
95 wa->dma_sg = sg; in ccp_init_sg_workarea()
96 wa->dma_sg_head = sg; in ccp_init_sg_workarea()
97 wa->dma_dev = dev; in ccp_init_sg_workarea()
98 wa->dma_dir = dma_dir; in ccp_init_sg_workarea()
99 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); in ccp_init_sg_workarea()
100 if (!wa->dma_count) in ccp_init_sg_workarea()
101 return -ENOMEM; in ccp_init_sg_workarea()
108 unsigned int nbytes = min_t(u64, len, wa->bytes_left); in ccp_update_sg_workarea()
111 if (!wa->sg) in ccp_update_sg_workarea()
114 wa->sg_used += nbytes; in ccp_update_sg_workarea()
115 wa->bytes_left -= nbytes; in ccp_update_sg_workarea()
116 if (wa->sg_used == sg_dma_len(wa->dma_sg)) { in ccp_update_sg_workarea()
118 wa->dma_sg = sg_next(wa->dma_sg); in ccp_update_sg_workarea()
121 * that have been merged, the non-DMA mapped scatterlist in ccp_update_sg_workarea()
123 * This ensures that the current non-DMA mapped entry in ccp_update_sg_workarea()
127 sg_combined_len += wa->sg->length; in ccp_update_sg_workarea()
128 wa->sg = sg_next(wa->sg); in ccp_update_sg_workarea()
129 } while (wa->sg_used > sg_combined_len); in ccp_update_sg_workarea()
131 wa->sg_used = 0; in ccp_update_sg_workarea()
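
The do/while just above exists because dma_map_sg() may coalesce adjacent entries: the un-mapped scatterlist has to be advanced several times to stay in step with one merged DMA entry. A self-contained sketch of that catch-up walk, with plain length arrays standing in for scatterlists:

    /* Advance i through the original entry lengths until they cover the
     * 'used' bytes consumed from the current merged entry. Assumes both
     * lists describe the same underlying bytes, as the driver does. */
    static unsigned int catch_up(const unsigned int *lens, unsigned int i,
                                 unsigned int used)
    {
            unsigned int covered = 0;

            do {
                    covered += lens[i++];
            } while (used > covered);

            return i;   /* first original entry past the merged one */
    }
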
137 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { in ccp_dm_free()
138 if (wa->address) in ccp_dm_free()
139 dma_pool_free(wa->dma_pool, wa->address, in ccp_dm_free()
140 wa->dma.address); in ccp_dm_free()
142 if (wa->dma.address) in ccp_dm_free()
143 dma_unmap_single(wa->dev, wa->dma.address, wa->length, in ccp_dm_free()
144 wa->dma.dir); in ccp_dm_free()
145 kfree(wa->address); in ccp_dm_free()
148 wa->address = NULL; in ccp_dm_free()
149 wa->dma.address = 0; in ccp_dm_free()
162 wa->dev = cmd_q->ccp->dev; in ccp_init_dm_workarea()
163 wa->length = len; in ccp_init_dm_workarea()
166 wa->dma_pool = cmd_q->dma_pool; in ccp_init_dm_workarea()
168 wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, in ccp_init_dm_workarea()
169 &wa->dma.address); in ccp_init_dm_workarea()
170 if (!wa->address) in ccp_init_dm_workarea()
171 return -ENOMEM; in ccp_init_dm_workarea()
173 wa->dma.length = CCP_DMAPOOL_MAX_SIZE; in ccp_init_dm_workarea()
176 wa->address = kzalloc(len, GFP_KERNEL); in ccp_init_dm_workarea()
177 if (!wa->address) in ccp_init_dm_workarea()
178 return -ENOMEM; in ccp_init_dm_workarea()
180 wa->dma.address = dma_map_single(wa->dev, wa->address, len, in ccp_init_dm_workarea()
182 if (dma_mapping_error(wa->dev, wa->dma.address)) in ccp_init_dm_workarea()
183 return -ENOMEM; in ccp_init_dm_workarea()
185 wa->dma.length = len; in ccp_init_dm_workarea()
187 wa->dma.dir = dir; in ccp_init_dm_workarea()
196 WARN_ON(!wa->address); in ccp_set_dm_area()
198 if (len > (wa->length - wa_offset)) in ccp_set_dm_area()
199 return -EINVAL; in ccp_set_dm_area()
201 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, in ccp_set_dm_area()
210 WARN_ON(!wa->address); in ccp_get_dm_area()
212 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, in ccp_get_dm_area()
229 p = wa->address + wa_offset; in ccp_reverse_set_dm_area()
230 q = p + len - 1; in ccp_reverse_set_dm_area()
236 q--; in ccp_reverse_set_dm_area()
249 p = wa->address + wa_offset; in ccp_reverse_get_dm_area()
250 q = p + len - 1; in ccp_reverse_get_dm_area()
256 q--; in ccp_reverse_get_dm_area()
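
Both reverse helpers above are endianness flips: the caller's big-endian numbers are stored byte-reversed for the engine, and results are reversed back on the way out. The core operation, as a standalone sketch:

    static void reverse_bytes(unsigned char *buf, unsigned int len)
    {
            unsigned char *p = buf, *q = buf + len - 1;

            while (p < q) {
                    unsigned char t = *p;

                    *p++ = *q;
                    *q-- = t;
            }
    }
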
264 ccp_dm_free(&data->dm_wa); in ccp_free_data()
265 ccp_sg_free(&data->sg_wa); in ccp_free_data()
277 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, in ccp_init_data()
282 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); in ccp_init_data()
296 struct ccp_sg_workarea *sg_wa = &data->sg_wa; in ccp_queue_buf()
297 struct ccp_dm_workarea *dm_wa = &data->dm_wa; in ccp_queue_buf()
302 memset(dm_wa->address, 0, dm_wa->length); in ccp_queue_buf()
304 if (!sg_wa->sg) in ccp_queue_buf()
308 * nbytes will always be <= UINT_MAX because dm_wa->length is in ccp_queue_buf()
311 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); in ccp_queue_buf()
312 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, in ccp_queue_buf()
317 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { in ccp_queue_buf()
318 nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, in ccp_queue_buf()
319 dm_wa->length - buf_count); in ccp_queue_buf()
320 nbytes = min_t(u64, sg_wa->bytes_left, nbytes); in ccp_queue_buf()
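
The two min calls above clamp each copy to three bounds at once: what is left in the current scatterlist entry, the space left in the bounce buffer, and the bytes left in the whole request. Folded into one helper for clarity (a sketch, not driver code):

    #include <stdint.h>

    static unsigned int clamp_chunk(unsigned int sg_left,
                                    unsigned int buf_space,
                                    uint64_t total_left)
    {
            unsigned int n = sg_left < buf_space ? sg_left : buf_space;

            return (uint64_t)n < total_left ? n : (unsigned int)total_left;
    }
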
350 sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; in ccp_prepare_data()
351 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); in ccp_prepare_data()
354 sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; in ccp_prepare_data()
355 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); in ccp_prepare_data()
368 op->soc = 0; in ccp_prepare_data()
376 op->soc = 1; in ccp_prepare_data()
377 op->src.u.dma.address = src->dm_wa.dma.address; in ccp_prepare_data()
378 op->src.u.dma.offset = 0; in ccp_prepare_data()
379 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len; in ccp_prepare_data()
384 op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); in ccp_prepare_data()
385 op->src.u.dma.offset = src->sg_wa.sg_used; in ccp_prepare_data()
386 op->src.u.dma.length = op_len & ~(block_size - 1); in ccp_prepare_data()
388 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); in ccp_prepare_data()
397 op->soc = 1; in ccp_prepare_data()
398 op->dst.u.dma.address = dst->dm_wa.dma.address; in ccp_prepare_data()
399 op->dst.u.dma.offset = 0; in ccp_prepare_data()
400 op->dst.u.dma.length = op->src.u.dma.length; in ccp_prepare_data()
405 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); in ccp_prepare_data()
406 op->dst.u.dma.offset = dst->sg_wa.sg_used; in ccp_prepare_data()
407 op->dst.u.dma.length = op->src.u.dma.length; in ccp_prepare_data()
415 op->init = 0; in ccp_process_data()
418 if (op->dst.u.dma.address == dst->dm_wa.dma.address) in ccp_process_data()
421 ccp_update_sg_workarea(&dst->sg_wa, in ccp_process_data()
422 op->dst.u.dma.length); in ccp_process_data()
443 op.dst.u.dma.address = wa->dma.address; in ccp_copy_to_from_sb()
444 op.dst.u.dma.length = wa->length; in ccp_copy_to_from_sb()
447 op.src.u.dma.address = wa->dma.address; in ccp_copy_to_from_sb()
448 op.src.u.dma.length = wa->length; in ccp_copy_to_from_sb()
455 return cmd_q->ccp->vdata->perform->passthru(&op); in ccp_copy_to_from_sb()
475 struct ccp_aes_engine *aes = &cmd->u.aes; in ccp_run_aes_cmac_cmd()
482 if (!((aes->key_len == AES_KEYSIZE_128) || in ccp_run_aes_cmac_cmd()
483 (aes->key_len == AES_KEYSIZE_192) || in ccp_run_aes_cmac_cmd()
484 (aes->key_len == AES_KEYSIZE_256))) in ccp_run_aes_cmac_cmd()
485 return -EINVAL; in ccp_run_aes_cmac_cmd()
487 if (aes->src_len & (AES_BLOCK_SIZE - 1)) in ccp_run_aes_cmac_cmd()
488 return -EINVAL; in ccp_run_aes_cmac_cmd()
490 if (aes->iv_len != AES_BLOCK_SIZE) in ccp_run_aes_cmac_cmd()
491 return -EINVAL; in ccp_run_aes_cmac_cmd()
493 if (!aes->key || !aes->iv || !aes->src) in ccp_run_aes_cmac_cmd()
494 return -EINVAL; in ccp_run_aes_cmac_cmd()
496 if (aes->cmac_final) { in ccp_run_aes_cmac_cmd()
497 if (aes->cmac_key_len != AES_BLOCK_SIZE) in ccp_run_aes_cmac_cmd()
498 return -EINVAL; in ccp_run_aes_cmac_cmd()
500 if (!aes->cmac_key) in ccp_run_aes_cmac_cmd()
501 return -EINVAL; in ccp_run_aes_cmac_cmd()
507 ret = -EIO; in ccp_run_aes_cmac_cmd()
510 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_aes_cmac_cmd()
511 op.sb_key = cmd_q->sb_key; in ccp_run_aes_cmac_cmd()
512 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_aes_cmac_cmd()
514 op.u.aes.type = aes->type; in ccp_run_aes_cmac_cmd()
515 op.u.aes.mode = aes->mode; in ccp_run_aes_cmac_cmd()
516 op.u.aes.action = aes->action; in ccp_run_aes_cmac_cmd()
518 /* All supported key sizes fit in a single (32-byte) SB entry in ccp_run_aes_cmac_cmd()
519 * and must be in little endian format. Use the 256-bit byte in ccp_run_aes_cmac_cmd()
520 * swap passthru option to convert from big endian to little in ccp_run_aes_cmac_cmd()
521 * endian. in ccp_run_aes_cmac_cmd()
529 dm_offset = CCP_SB_BYTES - aes->key_len; in ccp_run_aes_cmac_cmd()
530 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); in ccp_run_aes_cmac_cmd()
536 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
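
The dm_offset arithmetic above right-aligns the big-endian key in its 32-byte slot, so that the 256-bit byte-swap passthru mentioned in the comment leaves it little-endian in the low bytes. A sketch of the combined effect, assuming CCP_SB_BYTES == 32:

    #include <string.h>

    #define SB_BYTES 32   /* one 256-bit storage-block entry */

    static void key_slot_be_to_le(unsigned char slot[SB_BYTES],
                                  const unsigned char *key,
                                  unsigned int key_len)
    {
            unsigned int i;

            memset(slot, 0, SB_BYTES);
            memcpy(slot + (SB_BYTES - key_len), key, key_len); /* right-align */

            /* what the hardware's 256-bit byte swap does to the slot: */
            for (i = 0; i < SB_BYTES / 2; i++) {
                    unsigned char t = slot[i];

                    slot[i] = slot[SB_BYTES - 1 - i];
                    slot[SB_BYTES - 1 - i] = t;
            }
            /* the key now occupies slot[0..key_len-1], byte-reversed */
    }
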
540 /* The AES context fits in a single (32-byte) SB entry and in ccp_run_aes_cmac_cmd()
541 * must be in little endian format. Use the 256-bit byte swap in ccp_run_aes_cmac_cmd()
542 * passthru option to convert from big endian to little endian. in ccp_run_aes_cmac_cmd()
550 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmac_cmd()
551 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmac_cmd()
557 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
562 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, in ccp_run_aes_cmac_cmd()
569 if (aes->cmac_final && !src.sg_wa.bytes_left) { in ccp_run_aes_cmac_cmd()
577 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
581 ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, in ccp_run_aes_cmac_cmd()
582 aes->cmac_key_len); in ccp_run_aes_cmac_cmd()
588 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
593 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_cmac_cmd()
595 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
602 /* Retrieve the AES context - convert from LE to BE using in ccp_run_aes_cmac_cmd()
603 * 32-byte (256-bit) byteswapping in ccp_run_aes_cmac_cmd()
608 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
613 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmac_cmd()
614 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmac_cmd()
631 struct ccp_aes_engine *aes = &cmd->u.aes; in ccp_run_aes_gcm_cmd()
649 if (!aes->iv) in ccp_run_aes_gcm_cmd()
650 return -EINVAL; in ccp_run_aes_gcm_cmd()
652 if (!((aes->key_len == AES_KEYSIZE_128) || in ccp_run_aes_gcm_cmd()
653 (aes->key_len == AES_KEYSIZE_192) || in ccp_run_aes_gcm_cmd()
654 (aes->key_len == AES_KEYSIZE_256))) in ccp_run_aes_gcm_cmd()
655 return -EINVAL; in ccp_run_aes_gcm_cmd()
657 if (!aes->key) /* Gotta have a key SGL */ in ccp_run_aes_gcm_cmd()
658 return -EINVAL; in ccp_run_aes_gcm_cmd()
661 authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE; in ccp_run_aes_gcm_cmd()
672 return -EINVAL; in ccp_run_aes_gcm_cmd()
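
The -EINVAL above rejects authentication-tag sizes the hardware cannot produce. The excerpt elides the exact set, but GCM tag lengths are constrained by SP 800-38D to 128 down to 96 bits, plus 64 and 32 in special cases; a typical whitelist looks like:

    static int gcm_authsize_ok(unsigned int authsize)   /* bytes */
    {
            switch (authsize) {
            case 16: case 15: case 14: case 13: case 12: /* 96..128 bits */
            case 8:                                      /* 64 bits */
            case 4:                                      /* 32 bits */
                    return 1;
            default:
                    return 0;
            }
    }
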
681 p_aad = aes->src; in ccp_run_aes_gcm_cmd()
682 p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len); in ccp_run_aes_gcm_cmd()
683 p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len); in ccp_run_aes_gcm_cmd()
684 if (aes->action == CCP_AES_ACTION_ENCRYPT) { in ccp_run_aes_gcm_cmd()
685 ilen = aes->src_len; in ccp_run_aes_gcm_cmd()
689 ilen = aes->src_len - authsize; in ccp_run_aes_gcm_cmd()
693 jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_aes_gcm_cmd()
698 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
699 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
701 op.u.aes.type = aes->type; in ccp_run_aes_gcm_cmd()
710 dm_offset = CCP_SB_BYTES - aes->key_len; in ccp_run_aes_gcm_cmd()
711 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); in ccp_run_aes_gcm_cmd()
717 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
731 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; in ccp_run_aes_gcm_cmd()
732 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_gcm_cmd()
739 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
744 if (aes->aad_len > 0) { in ccp_run_aes_gcm_cmd()
746 ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len, in ccp_run_aes_gcm_cmd()
758 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_gcm_cmd()
760 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
770 op.u.aes.action = aes->action; in ccp_run_aes_gcm_cmd()
802 op.u.aes.size = (nbytes * 8) - 1; in ccp_run_aes_gcm_cmd()
806 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_gcm_cmd()
808 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
821 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
825 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_gcm_cmd()
832 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
844 final[0] = cpu_to_be64(aes->aad_len * 8); in ccp_run_aes_gcm_cmd()
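
final[0] above is the first half of GCM's closing GHASH block: the bit-lengths of the AAD and of the ciphertext as consecutive 64-bit big-endian words (SP 800-38D). Portably, with htobe64 standing in for the kernel's cpu_to_be64:

    #include <endian.h>
    #include <stdint.h>

    static void gcm_len_block(uint64_t out[2], uint64_t aad_bytes,
                              uint64_t text_bytes)
    {
            out[0] = htobe64(aad_bytes * 8);    /* len(AAD) in bits */
            out[1] = htobe64(text_bytes * 8);   /* len(C) in bits */
    }
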
850 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
851 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
853 op.u.aes.type = aes->type; in ccp_run_aes_gcm_cmd()
864 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_gcm_cmd()
868 if (aes->action == CCP_AES_ACTION_ENCRYPT) { in ccp_run_aes_gcm_cmd()
884 authsize) ? -EBADMSG : 0; in ccp_run_aes_gcm_cmd()
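
The -EBADMSG result above comes from the decrypt-side tag check, which must run in constant time; the kernel provides crypto_memneq() for exactly this. The underlying pattern, sketched:

    static int memneq_ct(const unsigned char *a, const unsigned char *b,
                         unsigned int len)
    {
            unsigned char diff = 0;
            unsigned int i;

            for (i = 0; i < len; i++)
                    diff |= a[i] ^ b[i];   /* no early exit on mismatch */

            return diff != 0;              /* nonzero => tag mismatch */
    }
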
900 if (aes->aad_len) in ccp_run_aes_gcm_cmd()
915 struct ccp_aes_engine *aes = &cmd->u.aes; in ccp_run_aes_cmd()
923 if (!((aes->key_len == AES_KEYSIZE_128) || in ccp_run_aes_cmd()
924 (aes->key_len == AES_KEYSIZE_192) || in ccp_run_aes_cmd()
925 (aes->key_len == AES_KEYSIZE_256))) in ccp_run_aes_cmd()
926 return -EINVAL; in ccp_run_aes_cmd()
928 if (((aes->mode == CCP_AES_MODE_ECB) || in ccp_run_aes_cmd()
929 (aes->mode == CCP_AES_MODE_CBC)) && in ccp_run_aes_cmd()
930 (aes->src_len & (AES_BLOCK_SIZE - 1))) in ccp_run_aes_cmd()
931 return -EINVAL; in ccp_run_aes_cmd()
933 if (!aes->key || !aes->src || !aes->dst) in ccp_run_aes_cmd()
934 return -EINVAL; in ccp_run_aes_cmd()
936 if (aes->mode != CCP_AES_MODE_ECB) { in ccp_run_aes_cmd()
937 if (aes->iv_len != AES_BLOCK_SIZE) in ccp_run_aes_cmd()
938 return -EINVAL; in ccp_run_aes_cmd()
940 if (!aes->iv) in ccp_run_aes_cmd()
941 return -EINVAL; in ccp_run_aes_cmd()
947 ret = -EIO; in ccp_run_aes_cmd()
950 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_aes_cmd()
951 op.sb_key = cmd_q->sb_key; in ccp_run_aes_cmd()
952 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_aes_cmd()
953 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; in ccp_run_aes_cmd()
954 op.u.aes.type = aes->type; in ccp_run_aes_cmd()
955 op.u.aes.mode = aes->mode; in ccp_run_aes_cmd()
956 op.u.aes.action = aes->action; in ccp_run_aes_cmd()
958 /* All supported key sizes fit in a single (32-byte) SB entry in ccp_run_aes_cmd()
959 * and must be in little endian format. Use the 256-bit byte in ccp_run_aes_cmd()
960 * swap passthru option to convert from big endian to little in ccp_run_aes_cmd()
961 * endian. in ccp_run_aes_cmd()
969 dm_offset = CCP_SB_BYTES - aes->key_len; in ccp_run_aes_cmd()
970 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); in ccp_run_aes_cmd()
976 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
980 /* The AES context fits in a single (32-byte) SB entry and in ccp_run_aes_cmd()
981 * must be in little endian format. Use the 256-bit byte swap in ccp_run_aes_cmd()
982 * passthru option to convert from big endian to little endian. in ccp_run_aes_cmd()
990 if (aes->mode != CCP_AES_MODE_ECB) { in ccp_run_aes_cmd()
991 /* Load the AES context - convert to LE */ in ccp_run_aes_cmd()
992 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmd()
993 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmd()
999 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
1003 switch (aes->mode) { in ccp_run_aes_cmd()
1006 op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1; in ccp_run_aes_cmd()
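
The size field above appears to encode the CTR counter width as bits minus one, so a full 128-bit block counter yields 16 * 8 - 1 = 127. As a helper:

    static unsigned int ctr_size_field(unsigned int counter_bytes)
    {
            return counter_bytes * 8 - 1;   /* e.g. 16 * 8 - 1 == 127 */
    }
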
1012 /* Prepare the input and output data workareas. For in-place in ccp_run_aes_cmd()
1016 if (sg_virt(aes->src) == sg_virt(aes->dst)) in ccp_run_aes_cmd()
1019 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, in ccp_run_aes_cmd()
1028 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, in ccp_run_aes_cmd()
1044 if (aes->mode == CCP_AES_MODE_ECB) in ccp_run_aes_cmd()
1048 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_cmd()
1050 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
1057 if (aes->mode != CCP_AES_MODE_ECB) { in ccp_run_aes_cmd()
1058 /* Retrieve the AES context - convert from LE to BE using in ccp_run_aes_cmd()
1059 * 32-byte (256-bit) byteswapping in ccp_run_aes_cmd()
1064 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
1069 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmd()
1070 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmd()
1092 struct ccp_xts_aes_engine *xts = &cmd->u.xts; in ccp_run_xts_aes_cmd()
1102 switch (xts->unit_size) { in ccp_run_xts_aes_cmd()
1120 return -EINVAL; in ccp_run_xts_aes_cmd()
1123 if (xts->key_len == AES_KEYSIZE_128) in ccp_run_xts_aes_cmd()
1125 else if (xts->key_len == AES_KEYSIZE_256) in ccp_run_xts_aes_cmd()
1128 return -EINVAL; in ccp_run_xts_aes_cmd()
1130 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) in ccp_run_xts_aes_cmd()
1131 return -EINVAL; in ccp_run_xts_aes_cmd()
1133 if (xts->iv_len != AES_BLOCK_SIZE) in ccp_run_xts_aes_cmd()
1134 return -EINVAL; in ccp_run_xts_aes_cmd()
1136 if (!xts->key || !xts->iv || !xts->src || !xts->dst) in ccp_run_xts_aes_cmd()
1137 return -EINVAL; in ccp_run_xts_aes_cmd()
1142 ret = -EIO; in ccp_run_xts_aes_cmd()
1145 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_xts_aes_cmd()
1146 op.sb_key = cmd_q->sb_key; in ccp_run_xts_aes_cmd()
1147 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_xts_aes_cmd()
1150 op.u.xts.action = xts->action; in ccp_run_xts_aes_cmd()
1151 op.u.xts.unit_size = xts->unit_size; in ccp_run_xts_aes_cmd()
1153 /* A version 3 device only supports 128-bit keys, which fit into a in ccp_run_xts_aes_cmd()
1154 * single SB entry. A version 5 device uses a 512-bit vector, so two in ccp_run_xts_aes_cmd()
1157 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) in ccp_run_xts_aes_cmd()
1167 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { in ccp_run_xts_aes_cmd()
1168 /* All supported key sizes must be in little endian format. in ccp_run_xts_aes_cmd()
1169 * Use the 256-bit byte swap passthru option to convert from in ccp_run_xts_aes_cmd()
1170 * big endian to little endian. in ccp_run_xts_aes_cmd()
1172 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; in ccp_run_xts_aes_cmd()
1173 ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); in ccp_run_xts_aes_cmd()
1176 ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); in ccp_run_xts_aes_cmd()
1180 /* Version 5 CCPs use a 512-bit space for the key: each portion in ccp_run_xts_aes_cmd()
1181 * occupies 256 bits, or one entire slot, and is zero-padded. in ccp_run_xts_aes_cmd()
1186 pad = dm_offset - xts->key_len; in ccp_run_xts_aes_cmd()
1187 ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); in ccp_run_xts_aes_cmd()
1190 ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key, in ccp_run_xts_aes_cmd()
1191 xts->key_len, xts->key_len); in ccp_run_xts_aes_cmd()
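
Laid out flat, the version-5 placement above puts each half of the XTS key at the top of its own 256-bit slot with zero padding below it. A sketch, assuming CCP_SB_BYTES == 32 and key_len bytes per half:

    #include <string.h>

    #define SLOT_BYTES 32

    static void xts_v5_key_slots(unsigned char sb[2 * SLOT_BYTES],
                                 const unsigned char *key,
                                 unsigned int key_len)
    {
            unsigned int pad = SLOT_BYTES - key_len;

            memset(sb, 0, 2 * SLOT_BYTES);
            memcpy(sb + pad, key, key_len);                        /* half 1 */
            memcpy(sb + SLOT_BYTES + pad, key + key_len, key_len); /* half 2 */
    }
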
1198 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1202 /* The AES context fits in a single (32-byte) SB entry and in ccp_run_xts_aes_cmd()
1203 * for XTS is already in little endian format so no byte swapping in ccp_run_xts_aes_cmd()
1212 ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); in ccp_run_xts_aes_cmd()
1218 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1222 /* Prepare the input and output data workareas. For in-place in ccp_run_xts_aes_cmd()
1226 if (sg_virt(xts->src) == sg_virt(xts->dst)) in ccp_run_xts_aes_cmd()
1229 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, in ccp_run_xts_aes_cmd()
1238 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, in ccp_run_xts_aes_cmd()
1250 ret = cmd_q->ccp->vdata->perform->xts_aes(&op); in ccp_run_xts_aes_cmd()
1252 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1259 /* Retrieve the AES context - convert from LE to BE using in ccp_run_xts_aes_cmd()
1260 * 32-byte (256-bit) byteswapping in ccp_run_xts_aes_cmd()
1265 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1270 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_xts_aes_cmd()
1271 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); in ccp_run_xts_aes_cmd()
1292 struct ccp_des3_engine *des3 = &cmd->u.des3; in ccp_run_des3_cmd()
1303 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) in ccp_run_des3_cmd()
1304 return -EINVAL; in ccp_run_des3_cmd()
1306 if (!cmd_q->ccp->vdata->perform->des3) in ccp_run_des3_cmd()
1307 return -EINVAL; in ccp_run_des3_cmd()
1309 if (des3->key_len != DES3_EDE_KEY_SIZE) in ccp_run_des3_cmd()
1310 return -EINVAL; in ccp_run_des3_cmd()
1312 if (((des3->mode == CCP_DES3_MODE_ECB) || in ccp_run_des3_cmd()
1313 (des3->mode == CCP_DES3_MODE_CBC)) && in ccp_run_des3_cmd()
1314 (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1))) in ccp_run_des3_cmd()
1315 return -EINVAL; in ccp_run_des3_cmd()
1317 if (!des3->key || !des3->src || !des3->dst) in ccp_run_des3_cmd()
1318 return -EINVAL; in ccp_run_des3_cmd()
1320 if (des3->mode != CCP_DES3_MODE_ECB) { in ccp_run_des3_cmd()
1321 if (des3->iv_len != DES3_EDE_BLOCK_SIZE) in ccp_run_des3_cmd()
1322 return -EINVAL; in ccp_run_des3_cmd()
1324 if (!des3->iv) in ccp_run_des3_cmd()
1325 return -EINVAL; in ccp_run_des3_cmd()
1328 /* Zero out all the fields of the command desc */ in ccp_run_des3_cmd()
1333 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_des3_cmd()
1334 op.sb_key = cmd_q->sb_key; in ccp_run_des3_cmd()
1336 op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1; in ccp_run_des3_cmd()
1337 op.u.des3.type = des3->type; in ccp_run_des3_cmd()
1338 op.u.des3.mode = des3->mode; in ccp_run_des3_cmd()
1339 op.u.des3.action = des3->action; in ccp_run_des3_cmd()
1342 * All supported key sizes fit in a single (32-byte) KSB entry and in ccp_run_des3_cmd()
1343 * (like AES) must be in little endian format. Use the 256-bit byte in ccp_run_des3_cmd()
1344 * swap passthru option to convert from big endian to little endian. in ccp_run_des3_cmd()
1357 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ in ccp_run_des3_cmd()
1359 len_singlekey = des3->key_len / 3; in ccp_run_des3_cmd()
1361 des3->key, 0, len_singlekey); in ccp_run_des3_cmd()
1365 des3->key, len_singlekey, len_singlekey); in ccp_run_des3_cmd()
1369 des3->key, 2 * len_singlekey, len_singlekey); in ccp_run_des3_cmd()
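
len_singlekey above splits the 24-byte EDE key into its three 8-byte DES keys, while dm_offset leaves 32 - 24 = 8 bytes of zero padding in the slot; the per-sub-key destination offsets are elided in this excerpt. The split itself, sketched:

    #define DES3_EDE_KEY_SIZE 24

    struct des3_subkeys {
            const unsigned char *k1, *k2, *k3;   /* 8 bytes each */
    };

    static struct des3_subkeys split_des3(const unsigned char *key)
    {
            unsigned int len_singlekey = DES3_EDE_KEY_SIZE / 3;

            return (struct des3_subkeys){
                    .k1 = key,
                    .k2 = key + len_singlekey,
                    .k3 = key + 2 * len_singlekey,
            };
    }
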
1377 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1382 * The DES3 context fits in a single (32-byte) KSB entry and in ccp_run_des3_cmd()
1383 * must be in little endian format. Use the 256-bit byte swap in ccp_run_des3_cmd()
1384 * passthru option to convert from big endian to little endian. in ccp_run_des3_cmd()
1386 if (des3->mode != CCP_DES3_MODE_ECB) { in ccp_run_des3_cmd()
1387 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_des3_cmd()
1396 dm_offset = CCP_SB_BYTES - des3->iv_len; in ccp_run_des3_cmd()
1397 ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, in ccp_run_des3_cmd()
1398 des3->iv_len); in ccp_run_des3_cmd()
1405 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1411 * Prepare the input and output data workareas. For in-place in ccp_run_des3_cmd()
1415 if (sg_virt(des3->src) == sg_virt(des3->dst)) in ccp_run_des3_cmd()
1418 ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len, in ccp_run_des3_cmd()
1427 ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len, in ccp_run_des3_cmd()
1446 ret = cmd_q->ccp->vdata->perform->des3(&op); in ccp_run_des3_cmd()
1448 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1455 if (des3->mode != CCP_DES3_MODE_ECB) { in ccp_run_des3_cmd()
1460 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1465 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, in ccp_run_des3_cmd()
1476 if (des3->mode != CCP_DES3_MODE_ECB) in ccp_run_des3_cmd()
1488 struct ccp_sha_engine *sha = &cmd->u.sha; in ccp_run_sha_cmd()
1500 switch (sha->type) { in ccp_run_sha_cmd()
1502 if (sha->ctx_len < SHA1_DIGEST_SIZE) in ccp_run_sha_cmd()
1503 return -EINVAL; in ccp_run_sha_cmd()
1507 if (sha->ctx_len < SHA224_DIGEST_SIZE) in ccp_run_sha_cmd()
1508 return -EINVAL; in ccp_run_sha_cmd()
1512 if (sha->ctx_len < SHA256_DIGEST_SIZE) in ccp_run_sha_cmd()
1513 return -EINVAL; in ccp_run_sha_cmd()
1517 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) in ccp_run_sha_cmd()
1518 || sha->ctx_len < SHA384_DIGEST_SIZE) in ccp_run_sha_cmd()
1519 return -EINVAL; in ccp_run_sha_cmd()
1523 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) in ccp_run_sha_cmd()
1524 || sha->ctx_len < SHA512_DIGEST_SIZE) in ccp_run_sha_cmd()
1525 return -EINVAL; in ccp_run_sha_cmd()
1529 return -EINVAL; in ccp_run_sha_cmd()
1532 if (!sha->ctx) in ccp_run_sha_cmd()
1533 return -EINVAL; in ccp_run_sha_cmd()
1535 if (!sha->final && (sha->src_len & (block_size - 1))) in ccp_run_sha_cmd()
1536 return -EINVAL; in ccp_run_sha_cmd()
1538 /* The version 3 device can't handle zero-length input */ in ccp_run_sha_cmd()
1539 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { in ccp_run_sha_cmd()
1541 if (!sha->src_len) { in ccp_run_sha_cmd()
1546 if (!sha->final) in ccp_run_sha_cmd()
1552 if (sha->msg_bits) in ccp_run_sha_cmd()
1553 return -EINVAL; in ccp_run_sha_cmd()
1555 /* The CCP cannot perform zero-length sha operations in ccp_run_sha_cmd()
1561 switch (sha->type) { in ccp_run_sha_cmd()
1575 return -EINVAL; in ccp_run_sha_cmd()
1578 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, in ccp_run_sha_cmd()
1586 switch (sha->type) { in ccp_run_sha_cmd()
1592 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) in ccp_run_sha_cmd()
1593 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; in ccp_run_sha_cmd()
1603 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) in ccp_run_sha_cmd()
1604 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; in ccp_run_sha_cmd()
1621 ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE; in ccp_run_sha_cmd()
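
The SHA-384/512 state occupies two 32-byte LSB slots (64 bytes), while the SHA-384 digest is only 48 bytes, so the offset above works out to 2 * 32 - 48 = 16. As a compile-time check:

    #include <assert.h>

    #define CCP_SB_BYTES       32
    #define SHA384_DIGEST_SIZE 48

    static_assert(2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE == 16,
                  "SHA-384 digest offset within its two LSB slots");
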
1631 ret = -EINVAL; in ccp_run_sha_cmd()
1635 /* For zero-length plaintext the src pointer is ignored; in ccp_run_sha_cmd()
1638 if (sha->src_len && !sha->src) in ccp_run_sha_cmd()
1639 return -EINVAL; in ccp_run_sha_cmd()
1643 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_sha_cmd()
1644 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ in ccp_run_sha_cmd()
1645 op.u.sha.type = sha->type; in ccp_run_sha_cmd()
1646 op.u.sha.msg_bits = sha->msg_bits; in ccp_run_sha_cmd()
1648 /* For SHA1/224/256 the context fits in a single (32-byte) SB entry; in ccp_run_sha_cmd()
1651 * be in little endian format: use the 256-bit byte swap option. in ccp_run_sha_cmd()
1657 if (sha->first) { in ccp_run_sha_cmd()
1658 switch (sha->type) { in ccp_run_sha_cmd()
1672 ret = -EINVAL; in ccp_run_sha_cmd()
1677 ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0, in ccp_run_sha_cmd()
1686 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1690 if (sha->src) { in ccp_run_sha_cmd()
1692 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, in ccp_run_sha_cmd()
1699 if (sha->final && !src.sg_wa.bytes_left) in ccp_run_sha_cmd()
1702 ret = cmd_q->ccp->vdata->perform->sha(&op); in ccp_run_sha_cmd()
1704 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1712 ret = cmd_q->ccp->vdata->perform->sha(&op); in ccp_run_sha_cmd()
1714 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1719 /* Retrieve the SHA context - convert from LE to BE using in ccp_run_sha_cmd()
1720 * 32-byte (256-bit) byteswapping in ccp_run_sha_cmd()
1725 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1729 if (sha->final) { in ccp_run_sha_cmd()
1731 switch (sha->type) { in ccp_run_sha_cmd()
1736 sha->ctx, 0, in ccp_run_sha_cmd()
1742 sha->ctx, LSB_ITEM_SIZE - ooffset, in ccp_run_sha_cmd()
1745 sha->ctx, 0, in ccp_run_sha_cmd()
1746 LSB_ITEM_SIZE - ooffset); in ccp_run_sha_cmd()
1749 ret = -EINVAL; in ccp_run_sha_cmd()
1754 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, in ccp_run_sha_cmd()
1758 if (sha->final && sha->opad) { in ccp_run_sha_cmd()
1764 if (sha->opad_len != block_size) { in ccp_run_sha_cmd()
1765 ret = -EINVAL; in ccp_run_sha_cmd()
1771 ret = -ENOMEM; in ccp_run_sha_cmd()
1776 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); in ccp_run_sha_cmd()
1777 switch (sha->type) { in ccp_run_sha_cmd()
1791 (LSB_ITEM_SIZE - ooffset), in ccp_run_sha_cmd()
1797 ret = -EINVAL; in ccp_run_sha_cmd()
1803 hmac_cmd.u.sha.type = sha->type; in ccp_run_sha_cmd()
1804 hmac_cmd.u.sha.ctx = sha->ctx; in ccp_run_sha_cmd()
1805 hmac_cmd.u.sha.ctx_len = sha->ctx_len; in ccp_run_sha_cmd()
1816 cmd->engine_error = hmac_cmd.engine_error; in ccp_run_sha_cmd()
1822 if (sha->src) in ccp_run_sha_cmd()
1834 struct ccp_rsa_engine *rsa = &cmd->u.rsa; in ccp_run_rsa_cmd()
1841 if (rsa->key_size > cmd_q->ccp->vdata->rsamax) in ccp_run_rsa_cmd()
1842 return -EINVAL; in ccp_run_rsa_cmd()
1844 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) in ccp_run_rsa_cmd()
1845 return -EINVAL; in ccp_run_rsa_cmd()
1849 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_rsa_cmd()
1855 * must be a multiple of 256 bits). Compute o_len, i_len in bytes. in ccp_run_rsa_cmd()
1859 o_len = 32 * ((rsa->key_size + 255) / 256); in ccp_run_rsa_cmd()
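
o_len above rounds the key size up to whole 256-bit chunks and converts to bytes: a 2048-bit key gives 32 * ((2048 + 255) / 256) = 256 bytes, and 2052 bits would round up to 288. As a helper:

    static unsigned int rsa_o_len(unsigned int key_bits)
    {
            /* round up to a multiple of 256 bits, expressed in bytes */
            return 32 * ((key_bits + 255) / 256);
    }
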
1863 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { in ccp_run_rsa_cmd()
1868 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, in ccp_run_rsa_cmd()
1871 return -EIO; in ccp_run_rsa_cmd()
1877 op.sb_key = cmd_q->sb_key; in ccp_run_rsa_cmd()
1880 /* The RSA exponent must be in little endian format. Reverse its in ccp_run_rsa_cmd()
1887 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); in ccp_run_rsa_cmd()
1891 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { in ccp_run_rsa_cmd()
1893 * as many 32-byte blocks as were allocated above. It's in ccp_run_rsa_cmd()
1894 * already little endian, so no further change is required. in ccp_run_rsa_cmd()
1899 cmd->engine_error = cmd_q->cmd_error; in ccp_run_rsa_cmd()
1909 * the operands must be in little endian format. Since the input in ccp_run_rsa_cmd()
1910 * is in big endian format it must be converted. in ccp_run_rsa_cmd()
1916 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); in ccp_run_rsa_cmd()
1919 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); in ccp_run_rsa_cmd()
1936 op.u.rsa.mod_size = rsa->key_size; in ccp_run_rsa_cmd()
1939 ret = cmd_q->ccp->vdata->perform->rsa(&op); in ccp_run_rsa_cmd()
1941 cmd->engine_error = cmd_q->cmd_error; in ccp_run_rsa_cmd()
1945 ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); in ccp_run_rsa_cmd()
1958 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); in ccp_run_rsa_cmd()
1966 struct ccp_passthru_engine *pt = &cmd->u.passthru; in ccp_run_passthru_cmd()
1974 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) in ccp_run_passthru_cmd()
1975 return -EINVAL; in ccp_run_passthru_cmd()
1977 if (!pt->src || !pt->dst) in ccp_run_passthru_cmd()
1978 return -EINVAL; in ccp_run_passthru_cmd()
1980 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_cmd()
1981 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) in ccp_run_passthru_cmd()
1982 return -EINVAL; in ccp_run_passthru_cmd()
1983 if (!pt->mask) in ccp_run_passthru_cmd()
1984 return -EINVAL; in ccp_run_passthru_cmd()
1991 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_passthru_cmd()
1993 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_cmd()
1995 op.sb_key = cmd_q->sb_key; in ccp_run_passthru_cmd()
2004 ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); in ccp_run_passthru_cmd()
2010 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_cmd()
2015 /* Prepare the input and output data workareas. For in-place in ccp_run_passthru_cmd()
2019 if (sg_virt(pt->src) == sg_virt(pt->dst)) in ccp_run_passthru_cmd()
2022 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, in ccp_run_passthru_cmd()
2031 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, in ccp_run_passthru_cmd()
2048 ret = -EINVAL; in ccp_run_passthru_cmd()
2067 ret = cmd_q->ccp->vdata->perform->passthru(&op); in ccp_run_passthru_cmd()
2069 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_cmd()
2089 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) in ccp_run_passthru_cmd()
2099 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; in ccp_run_passthru_nomap_cmd()
2104 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) in ccp_run_passthru_nomap_cmd()
2105 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2107 if (!pt->src_dma || !pt->dst_dma) in ccp_run_passthru_nomap_cmd()
2108 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2110 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_nomap_cmd()
2111 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) in ccp_run_passthru_nomap_cmd()
2112 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2113 if (!pt->mask) in ccp_run_passthru_nomap_cmd()
2114 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2121 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_passthru_nomap_cmd()
2123 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_nomap_cmd()
2125 op.sb_key = cmd_q->sb_key; in ccp_run_passthru_nomap_cmd()
2127 mask.length = pt->mask_len; in ccp_run_passthru_nomap_cmd()
2128 mask.dma.address = pt->mask; in ccp_run_passthru_nomap_cmd()
2129 mask.dma.length = pt->mask_len; in ccp_run_passthru_nomap_cmd()
2134 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_nomap_cmd()
2144 op.src.u.dma.address = pt->src_dma; in ccp_run_passthru_nomap_cmd()
2146 op.src.u.dma.length = pt->src_len; in ccp_run_passthru_nomap_cmd()
2149 op.dst.u.dma.address = pt->dst_dma; in ccp_run_passthru_nomap_cmd()
2151 op.dst.u.dma.length = pt->src_len; in ccp_run_passthru_nomap_cmd()
2153 ret = cmd_q->ccp->vdata->perform->passthru(&op); in ccp_run_passthru_nomap_cmd()
2155 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_nomap_cmd()
2162 struct ccp_ecc_engine *ecc = &cmd->u.ecc; in ccp_run_ecc_mm_cmd()
2168 if (!ecc->u.mm.operand_1 || in ccp_run_ecc_mm_cmd()
2169 (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_mm_cmd()
2170 return -EINVAL; in ccp_run_ecc_mm_cmd()
2172 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) in ccp_run_ecc_mm_cmd()
2173 if (!ecc->u.mm.operand_2 || in ccp_run_ecc_mm_cmd()
2174 (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_mm_cmd()
2175 return -EINVAL; in ccp_run_ecc_mm_cmd()
2177 if (!ecc->u.mm.result || in ccp_run_ecc_mm_cmd()
2178 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_mm_cmd()
2179 return -EINVAL; in ccp_run_ecc_mm_cmd()
2183 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_ecc_mm_cmd()
2186 * the operands must be in little endian format. Since the input in ccp_run_ecc_mm_cmd()
2187 * is in big endian format it must be converted and placed in a in ccp_run_ecc_mm_cmd()
2201 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); in ccp_run_ecc_mm_cmd()
2207 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0, in ccp_run_ecc_mm_cmd()
2208 ecc->u.mm.operand_1_len); in ccp_run_ecc_mm_cmd()
2213 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { in ccp_run_ecc_mm_cmd()
2215 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0, in ccp_run_ecc_mm_cmd()
2216 ecc->u.mm.operand_2_len); in ccp_run_ecc_mm_cmd()
2239 op.u.ecc.function = cmd->u.ecc.function; in ccp_run_ecc_mm_cmd()
2241 ret = cmd_q->ccp->vdata->perform->ecc(&op); in ccp_run_ecc_mm_cmd()
2243 cmd->engine_error = cmd_q->cmd_error; in ccp_run_ecc_mm_cmd()
2247 ecc->ecc_result = le16_to_cpup( in ccp_run_ecc_mm_cmd()
2249 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { in ccp_run_ecc_mm_cmd()
2250 ret = -EIO; in ccp_run_ecc_mm_cmd()
2255 ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0, in ccp_run_ecc_mm_cmd()
2269 struct ccp_ecc_engine *ecc = &cmd->u.ecc; in ccp_run_ecc_pm_cmd()
2275 if (!ecc->u.pm.point_1.x || in ccp_run_ecc_pm_cmd()
2276 (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || in ccp_run_ecc_pm_cmd()
2277 !ecc->u.pm.point_1.y || in ccp_run_ecc_pm_cmd()
2278 (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2279 return -EINVAL; in ccp_run_ecc_pm_cmd()
2281 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { in ccp_run_ecc_pm_cmd()
2282 if (!ecc->u.pm.point_2.x || in ccp_run_ecc_pm_cmd()
2283 (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || in ccp_run_ecc_pm_cmd()
2284 !ecc->u.pm.point_2.y || in ccp_run_ecc_pm_cmd()
2285 (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2286 return -EINVAL; in ccp_run_ecc_pm_cmd()
2288 if (!ecc->u.pm.domain_a || in ccp_run_ecc_pm_cmd()
2289 (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2290 return -EINVAL; in ccp_run_ecc_pm_cmd()
2292 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) in ccp_run_ecc_pm_cmd()
2293 if (!ecc->u.pm.scalar || in ccp_run_ecc_pm_cmd()
2294 (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2295 return -EINVAL; in ccp_run_ecc_pm_cmd()
2298 if (!ecc->u.pm.result.x || in ccp_run_ecc_pm_cmd()
2299 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || in ccp_run_ecc_pm_cmd()
2300 !ecc->u.pm.result.y || in ccp_run_ecc_pm_cmd()
2301 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2302 return -EINVAL; in ccp_run_ecc_pm_cmd()
2306 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_ecc_pm_cmd()
2309 * the operands must be in little endian format. Since the input in ccp_run_ecc_pm_cmd()
2310 * is in big endian format it must be converted and placed in a in ccp_run_ecc_pm_cmd()
2324 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); in ccp_run_ecc_pm_cmd()
2330 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0, in ccp_run_ecc_pm_cmd()
2331 ecc->u.pm.point_1.x_len); in ccp_run_ecc_pm_cmd()
2335 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0, in ccp_run_ecc_pm_cmd()
2336 ecc->u.pm.point_1.y_len); in ccp_run_ecc_pm_cmd()
2345 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { in ccp_run_ecc_pm_cmd()
2347 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0, in ccp_run_ecc_pm_cmd()
2348 ecc->u.pm.point_2.x_len); in ccp_run_ecc_pm_cmd()
2352 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0, in ccp_run_ecc_pm_cmd()
2353 ecc->u.pm.point_2.y_len); in ccp_run_ecc_pm_cmd()
2363 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0, in ccp_run_ecc_pm_cmd()
2364 ecc->u.pm.domain_a_len); in ccp_run_ecc_pm_cmd()
2369 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { in ccp_run_ecc_pm_cmd()
2372 ecc->u.pm.scalar, 0, in ccp_run_ecc_pm_cmd()
2373 ecc->u.pm.scalar_len); in ccp_run_ecc_pm_cmd()
2397 op.u.ecc.function = cmd->u.ecc.function; in ccp_run_ecc_pm_cmd()
2399 ret = cmd_q->ccp->vdata->perform->ecc(&op); in ccp_run_ecc_pm_cmd()
2401 cmd->engine_error = cmd_q->cmd_error; in ccp_run_ecc_pm_cmd()
2405 ecc->ecc_result = le16_to_cpup( in ccp_run_ecc_pm_cmd()
2407 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { in ccp_run_ecc_pm_cmd()
2408 ret = -EIO; in ccp_run_ecc_pm_cmd()
2418 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0, in ccp_run_ecc_pm_cmd()
2421 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, in ccp_run_ecc_pm_cmd()
2439 struct ccp_ecc_engine *ecc = &cmd->u.ecc; in ccp_run_ecc_cmd()
2441 ecc->ecc_result = 0; in ccp_run_ecc_cmd()
2443 if (!ecc->mod || in ccp_run_ecc_cmd()
2444 (ecc->mod_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_cmd()
2445 return -EINVAL; in ccp_run_ecc_cmd()
2447 switch (ecc->function) { in ccp_run_ecc_cmd()
2459 return -EINVAL; in ccp_run_ecc_cmd()
2467 cmd->engine_error = 0; in ccp_run_cmd()
2468 cmd_q->cmd_error = 0; in ccp_run_cmd()
2469 cmd_q->int_rcvd = 0; in ccp_run_cmd()
2470 cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q); in ccp_run_cmd()
2472 switch (cmd->engine) { in ccp_run_cmd()
2474 switch (cmd->u.aes.mode) { in ccp_run_cmd()
2499 if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP) in ccp_run_cmd()
2508 ret = -EINVAL; in ccp_run_cmd()