Lines matching: big, endian, desc (full-text search)

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
11 #include <linux/dma-mapping.h>
19 #include "ccp-dev.h"
56 #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
61 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; in ccp_gen_jobid()
66 if (wa->dma_count) in ccp_sg_free()
67 dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); in ccp_sg_free()
69 wa->dma_count = 0; in ccp_sg_free()
78 wa->sg = sg; in ccp_init_sg_workarea()
82 wa->nents = sg_nents_for_len(sg, len); in ccp_init_sg_workarea()
83 if (wa->nents < 0) in ccp_init_sg_workarea()
84 return wa->nents; in ccp_init_sg_workarea()
86 wa->bytes_left = len; in ccp_init_sg_workarea()
87 wa->sg_used = 0; in ccp_init_sg_workarea()
95 wa->dma_sg = sg; in ccp_init_sg_workarea()
96 wa->dma_sg_head = sg; in ccp_init_sg_workarea()
97 wa->dma_dev = dev; in ccp_init_sg_workarea()
98 wa->dma_dir = dma_dir; in ccp_init_sg_workarea()
99 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); in ccp_init_sg_workarea()
100 if (!wa->dma_count) in ccp_init_sg_workarea()
101 return -ENOMEM; in ccp_init_sg_workarea()
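
The workarea init above pairs sg_nents_for_len() with dma_map_sg(). A minimal sketch of that pairing follows; example_map_sg() and its out-parameters are hypothetical, not the driver's API. Note that dma_unmap_sg() must later be passed the original nents, while the (possibly smaller, post-merge) count returned by dma_map_sg() is what gets iterated when programming the device.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int example_map_sg(struct device *dev, struct scatterlist *sg,
                              u64 len, enum dma_data_direction dir,
                              int *nents_out, int *dma_count_out)
    {
            int nents = sg_nents_for_len(sg, len); /* entries covering len */

            if (nents < 0)
                    return nents;           /* list shorter than len */

            *dma_count_out = dma_map_sg(dev, sg, nents, dir);
            if (!*dma_count_out)
                    return -ENOMEM;         /* mapping failed */

            *nents_out = nents;     /* save for dma_unmap_sg() at teardown */
            return 0;
    }
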
108 unsigned int nbytes = min_t(u64, len, wa->bytes_left); in ccp_update_sg_workarea()
111 if (!wa->sg) in ccp_update_sg_workarea()
114 wa->sg_used += nbytes; in ccp_update_sg_workarea()
115 wa->bytes_left -= nbytes; in ccp_update_sg_workarea()
116 if (wa->sg_used == sg_dma_len(wa->dma_sg)) { in ccp_update_sg_workarea()
118 wa->dma_sg = sg_next(wa->dma_sg); in ccp_update_sg_workarea()
121 * that have been merged, the non-DMA mapped scatterlist in ccp_update_sg_workarea()
123 * This ensures that the current non-DMA mapped entry in ccp_update_sg_workarea()
127 sg_combined_len += wa->sg->length; in ccp_update_sg_workarea()
128 wa->sg = sg_next(wa->sg); in ccp_update_sg_workarea()
129 } while (wa->sg_used > sg_combined_len); in ccp_update_sg_workarea()
131 wa->sg_used = 0; in ccp_update_sg_workarea()
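
Because dma_map_sg() may merge physically contiguous entries, one DMA descriptor can span several original scatterlist entries; the do/while above advances the original list until its combined length catches up with the bytes consumed from the merged entry. A standalone model of that loop (plain C, assuming a simple singly linked entry type, not the kernel's scatterlist):

    #include <stddef.h>

    struct ent { size_t len; struct ent *next; };

    /* Advance the original list past 'used' bytes consumed from one
     * merged DMA entry that may cover several original entries. */
    static void advance(struct ent **orig, size_t used)
    {
            size_t combined = 0;

            do {
                    combined += (*orig)->len;
                    *orig = (*orig)->next;
            } while (used > combined);
    }
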
137 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { in ccp_dm_free()
138 if (wa->address) in ccp_dm_free()
139 dma_pool_free(wa->dma_pool, wa->address, in ccp_dm_free()
140 wa->dma.address); in ccp_dm_free()
142 if (wa->dma.address) in ccp_dm_free()
143 dma_unmap_single(wa->dev, wa->dma.address, wa->length, in ccp_dm_free()
144 wa->dma.dir); in ccp_dm_free()
145 kfree(wa->address); in ccp_dm_free()
148 wa->address = NULL; in ccp_dm_free()
149 wa->dma.address = 0; in ccp_dm_free()
162 wa->dev = cmd_q->ccp->dev; in ccp_init_dm_workarea()
163 wa->length = len; in ccp_init_dm_workarea()
166 wa->dma_pool = cmd_q->dma_pool; in ccp_init_dm_workarea()
168 wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, in ccp_init_dm_workarea()
169 &wa->dma.address); in ccp_init_dm_workarea()
170 if (!wa->address) in ccp_init_dm_workarea()
171 return -ENOMEM; in ccp_init_dm_workarea()
173 wa->dma.length = CCP_DMAPOOL_MAX_SIZE; in ccp_init_dm_workarea()
176 wa->address = kzalloc(len, GFP_KERNEL); in ccp_init_dm_workarea()
177 if (!wa->address) in ccp_init_dm_workarea()
178 return -ENOMEM; in ccp_init_dm_workarea()
180 wa->dma.address = dma_map_single(wa->dev, wa->address, len, in ccp_init_dm_workarea()
182 if (dma_mapping_error(wa->dev, wa->dma.address)) in ccp_init_dm_workarea()
183 return -ENOMEM; in ccp_init_dm_workarea()
185 wa->dma.length = len; in ccp_init_dm_workarea()
187 wa->dma.dir = dir; in ccp_init_dm_workarea()
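
A sketch of the two allocation paths above; CCP_DMAPOOL_MAX_SIZE is the driver's threshold (taken as given here) and example_alloc_dm() is hypothetical. Small areas come from the queue's coherent DMA pool, larger ones are kzalloc()ed and streaming-mapped, which is why ccp_dm_free() earlier has to distinguish the two cases when tearing down.

    #include <linux/dmapool.h>
    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static void *example_alloc_dm(struct device *dev, struct dma_pool *pool,
                                  size_t len, enum dma_data_direction dir,
                                  dma_addr_t *dma)
    {
            void *buf;

            if (len <= CCP_DMAPOOL_MAX_SIZE)        /* coherent pool path */
                    return dma_pool_zalloc(pool, GFP_KERNEL, dma);

            buf = kzalloc(len, GFP_KERNEL);         /* streaming path */
            if (!buf)
                    return NULL;

            *dma = dma_map_single(dev, buf, len, dir);
            if (dma_mapping_error(dev, *dma)) {
                    kfree(buf);
                    return NULL;
            }
            return buf;
    }
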
196 WARN_ON(!wa->address); in ccp_set_dm_area()
198 if (len > (wa->length - wa_offset)) in ccp_set_dm_area()
199 return -EINVAL; in ccp_set_dm_area()
201 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, in ccp_set_dm_area()
210 WARN_ON(!wa->address); in ccp_get_dm_area()
212 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, in ccp_get_dm_area()
229 p = wa->address + wa_offset; in ccp_reverse_set_dm_area()
230 q = p + len - 1; in ccp_reverse_set_dm_area()
236 q--; in ccp_reverse_set_dm_area()
249 p = wa->address + wa_offset; in ccp_reverse_get_dm_area()
250 q = p + len - 1; in ccp_reverse_get_dm_area()
256 q--; in ccp_reverse_get_dm_area()
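
ccp_reverse_set_dm_area()/ccp_reverse_get_dm_area() walk the buffer from opposite ends because the CCP consumes multi-precision values least-significant-byte first, while callers supply big-endian data. A plain-C illustration of the reversal (not the driver's helper):

    #include <stddef.h>

    /* Copy src into dst with byte order reversed, converting a
     * big-endian operand into the little-endian form the CCP expects. */
    static void reverse_copy(unsigned char *dst, const unsigned char *src,
                             size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    dst[i] = src[len - 1 - i];
    }
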
264 ccp_dm_free(&data->dm_wa); in ccp_free_data()
265 ccp_sg_free(&data->sg_wa); in ccp_free_data()
277 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, in ccp_init_data()
282 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); in ccp_init_data()
296 struct ccp_sg_workarea *sg_wa = &data->sg_wa; in ccp_queue_buf()
297 struct ccp_dm_workarea *dm_wa = &data->dm_wa; in ccp_queue_buf()
302 memset(dm_wa->address, 0, dm_wa->length); in ccp_queue_buf()
304 if (!sg_wa->sg) in ccp_queue_buf()
308 * nbytes will always be <= UINT_MAX because dm_wa->length is in ccp_queue_buf()
311 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); in ccp_queue_buf()
312 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, in ccp_queue_buf()
317 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { in ccp_queue_buf()
318 nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, in ccp_queue_buf()
319 dm_wa->length - buf_count); in ccp_queue_buf()
320 nbytes = min_t(u64, sg_wa->bytes_left, nbytes); in ccp_queue_buf()
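
The min_t(u64, ...) above is what makes the comment about UINT_MAX hold: bytes_left is a 64-bit count, but the bounce-buffer length is an unsigned int, so the clamped result always fits in 32 bits before it is used as a copy size. A standalone model:

    #include <stdint.h>

    static unsigned int clamp_copy_len(uint64_t bytes_left,
                                       unsigned int buf_len)
    {
            /* result is <= buf_len, hence always fits unsigned int */
            return bytes_left < buf_len ? (unsigned int)bytes_left : buf_len;
    }
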
350 sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; in ccp_prepare_data()
351 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); in ccp_prepare_data()
354 sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; in ccp_prepare_data()
355 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); in ccp_prepare_data()
368 op->soc = 0; in ccp_prepare_data()
376 op->soc = 1; in ccp_prepare_data()
377 op->src.u.dma.address = src->dm_wa.dma.address; in ccp_prepare_data()
378 op->src.u.dma.offset = 0; in ccp_prepare_data()
379 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len; in ccp_prepare_data()
384 op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); in ccp_prepare_data()
385 op->src.u.dma.offset = src->sg_wa.sg_used; in ccp_prepare_data()
386 op->src.u.dma.length = op_len & ~(block_size - 1); in ccp_prepare_data()
388 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); in ccp_prepare_data()
397 op->soc = 1; in ccp_prepare_data()
398 op->dst.u.dma.address = dst->dm_wa.dma.address; in ccp_prepare_data()
399 op->dst.u.dma.offset = 0; in ccp_prepare_data()
400 op->dst.u.dma.length = op->src.u.dma.length; in ccp_prepare_data()
405 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); in ccp_prepare_data()
406 op->dst.u.dma.offset = dst->sg_wa.sg_used; in ccp_prepare_data()
407 op->dst.u.dma.length = op->src.u.dma.length; in ccp_prepare_data()
415 op->init = 0; in ccp_process_data()
418 if (op->dst.u.dma.address == dst->dm_wa.dma.address) in ccp_process_data()
421 ccp_update_sg_workarea(&dst->sg_wa, in ccp_process_data()
422 op->dst.u.dma.length); in ccp_process_data()
443 op.dst.u.dma.address = wa->dma.address; in ccp_copy_to_from_sb()
444 op.dst.u.dma.length = wa->length; in ccp_copy_to_from_sb()
447 op.src.u.dma.address = wa->dma.address; in ccp_copy_to_from_sb()
448 op.src.u.dma.length = wa->length; in ccp_copy_to_from_sb()
455 return cmd_q->ccp->vdata->perform->passthru(&op); in ccp_copy_to_from_sb()
475 struct ccp_aes_engine *aes = &cmd->u.aes; in ccp_run_aes_cmac_cmd()
482 if (!((aes->key_len == AES_KEYSIZE_128) || in ccp_run_aes_cmac_cmd()
483 (aes->key_len == AES_KEYSIZE_192) || in ccp_run_aes_cmac_cmd()
484 (aes->key_len == AES_KEYSIZE_256))) in ccp_run_aes_cmac_cmd()
485 return -EINVAL; in ccp_run_aes_cmac_cmd()
487 if (aes->src_len & (AES_BLOCK_SIZE - 1)) in ccp_run_aes_cmac_cmd()
488 return -EINVAL; in ccp_run_aes_cmac_cmd()
490 if (aes->iv_len != AES_BLOCK_SIZE) in ccp_run_aes_cmac_cmd()
491 return -EINVAL; in ccp_run_aes_cmac_cmd()
493 if (!aes->key || !aes->iv || !aes->src) in ccp_run_aes_cmac_cmd()
494 return -EINVAL; in ccp_run_aes_cmac_cmd()
496 if (aes->cmac_final) { in ccp_run_aes_cmac_cmd()
497 if (aes->cmac_key_len != AES_BLOCK_SIZE) in ccp_run_aes_cmac_cmd()
498 return -EINVAL; in ccp_run_aes_cmac_cmd()
500 if (!aes->cmac_key) in ccp_run_aes_cmac_cmd()
501 return -EINVAL; in ccp_run_aes_cmac_cmd()
507 ret = -EIO; in ccp_run_aes_cmac_cmd()
510 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_aes_cmac_cmd()
511 op.sb_key = cmd_q->sb_key; in ccp_run_aes_cmac_cmd()
512 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_aes_cmac_cmd()
514 op.u.aes.type = aes->type; in ccp_run_aes_cmac_cmd()
515 op.u.aes.mode = aes->mode; in ccp_run_aes_cmac_cmd()
516 op.u.aes.action = aes->action; in ccp_run_aes_cmac_cmd()
518 /* All supported key sizes fit in a single (32-byte) SB entry in ccp_run_aes_cmac_cmd()
519 * and must be in little endian format. Use the 256-bit byte in ccp_run_aes_cmac_cmd()
520 * swap passthru option to convert from big endian to little in ccp_run_aes_cmac_cmd()
521 * endian. in ccp_run_aes_cmac_cmd()
529 dm_offset = CCP_SB_BYTES - aes->key_len; in ccp_run_aes_cmac_cmd()
530 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); in ccp_run_aes_cmac_cmd()
536 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
540 /* The AES context fits in a single (32-byte) SB entry and in ccp_run_aes_cmac_cmd()
541 * must be in little endian format. Use the 256-bit byte swap in ccp_run_aes_cmac_cmd()
542 * passthru option to convert from big endian to little endian. in ccp_run_aes_cmac_cmd()
550 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmac_cmd()
551 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmac_cmd()
557 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
562 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, in ccp_run_aes_cmac_cmd()
569 if (aes->cmac_final && !src.sg_wa.bytes_left) { in ccp_run_aes_cmac_cmd()
577 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
581 ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, in ccp_run_aes_cmac_cmd()
582 aes->cmac_key_len); in ccp_run_aes_cmac_cmd()
588 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
593 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_cmac_cmd()
595 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
602 /* Retrieve the AES context - convert from LE to BE using in ccp_run_aes_cmac_cmd()
603 * 32-byte (256-bit) byteswapping in ccp_run_aes_cmac_cmd()
608 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmac_cmd()
613 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmac_cmd()
614 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmac_cmd()
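
The dm_offset = CCP_SB_BYTES - aes->key_len placement above, combined with the 256-bit byte-swap passthru, is what converts the caller's big-endian key into the little-endian form the SB entry needs: written at the tail of the 32-byte image, the reversed key lands at the head, zero-padded above. A standalone sketch (CCP_SB_BYTES is 32 per the comments; place_key_be() is illustrative only):

    #include <stdint.h>
    #include <string.h>

    #define SB_BYTES 32

    static void place_key_be(uint8_t sb[SB_BYTES],
                             const uint8_t *key, size_t key_len)
    {
            memset(sb, 0, SB_BYTES);
            /* dm_offset: big-endian key at the end, so the hardware's
             * 256-bit byte swap leaves a little-endian key at the start */
            memcpy(sb + (SB_BYTES - key_len), key, key_len);
    }
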
631 struct ccp_aes_engine *aes = &cmd->u.aes; in ccp_run_aes_gcm_cmd()
649 if (!aes->iv) in ccp_run_aes_gcm_cmd()
650 return -EINVAL; in ccp_run_aes_gcm_cmd()
652 if (!((aes->key_len == AES_KEYSIZE_128) || in ccp_run_aes_gcm_cmd()
653 (aes->key_len == AES_KEYSIZE_192) || in ccp_run_aes_gcm_cmd()
654 (aes->key_len == AES_KEYSIZE_256))) in ccp_run_aes_gcm_cmd()
655 return -EINVAL; in ccp_run_aes_gcm_cmd()
657 if (!aes->key) /* Gotta have a key SGL */ in ccp_run_aes_gcm_cmd()
658 return -EINVAL; in ccp_run_aes_gcm_cmd()
661 authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE; in ccp_run_aes_gcm_cmd()
672 return -EINVAL; in ccp_run_aes_gcm_cmd()
681 p_aad = aes->src; in ccp_run_aes_gcm_cmd()
682 p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len); in ccp_run_aes_gcm_cmd()
683 p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len); in ccp_run_aes_gcm_cmd()
684 if (aes->action == CCP_AES_ACTION_ENCRYPT) { in ccp_run_aes_gcm_cmd()
685 ilen = aes->src_len; in ccp_run_aes_gcm_cmd()
689 ilen = aes->src_len - authsize; in ccp_run_aes_gcm_cmd()
693 jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_aes_gcm_cmd()
698 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
699 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
701 op.u.aes.type = aes->type; in ccp_run_aes_gcm_cmd()
710 dm_offset = CCP_SB_BYTES - aes->key_len; in ccp_run_aes_gcm_cmd()
711 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); in ccp_run_aes_gcm_cmd()
717 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
731 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; in ccp_run_aes_gcm_cmd()
732 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_gcm_cmd()
739 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
744 if (aes->aad_len > 0) { in ccp_run_aes_gcm_cmd()
746 ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len, in ccp_run_aes_gcm_cmd()
758 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_gcm_cmd()
760 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
770 op.u.aes.action = aes->action; in ccp_run_aes_gcm_cmd()
802 op.u.aes.size = (nbytes * 8) - 1; in ccp_run_aes_gcm_cmd()
806 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_gcm_cmd()
808 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
821 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
825 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_gcm_cmd()
832 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_gcm_cmd()
844 final[0] = cpu_to_be64(aes->aad_len * 8); in ccp_run_aes_gcm_cmd()
850 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
851 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ in ccp_run_aes_gcm_cmd()
853 op.u.aes.type = aes->type; in ccp_run_aes_gcm_cmd()
864 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_gcm_cmd()
868 if (aes->action == CCP_AES_ACTION_ENCRYPT) { in ccp_run_aes_gcm_cmd()
882 authsize) ? -EBADMSG : 0; in ccp_run_aes_gcm_cmd()
898 if (aes->aad_len) in ccp_run_aes_gcm_cmd()
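
The final[0] = cpu_to_be64(aes->aad_len * 8) line above builds the standard GCM length block: len(AAD) || len(ciphertext), each a 64-bit big-endian bit count. A standalone sketch of that block (assuming, as the surrounding code suggests, that final[1] carries ilen * 8):

    #include <stdint.h>

    static void gcm_len_block(uint8_t out[16], uint64_t aad_len,
                              uint64_t ilen)
    {
            uint64_t bits[2] = { aad_len * 8, ilen * 8 };
            int i, j;

            for (i = 0; i < 2; i++)
                    for (j = 0; j < 8; j++) /* store big-endian */
                            out[i * 8 + j] =
                                    (uint8_t)(bits[i] >> (56 - 8 * j));
    }
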
913 struct ccp_aes_engine *aes = &cmd->u.aes; in ccp_run_aes_cmd()
921 if (!((aes->key_len == AES_KEYSIZE_128) || in ccp_run_aes_cmd()
922 (aes->key_len == AES_KEYSIZE_192) || in ccp_run_aes_cmd()
923 (aes->key_len == AES_KEYSIZE_256))) in ccp_run_aes_cmd()
924 return -EINVAL; in ccp_run_aes_cmd()
926 if (((aes->mode == CCP_AES_MODE_ECB) || in ccp_run_aes_cmd()
927 (aes->mode == CCP_AES_MODE_CBC)) && in ccp_run_aes_cmd()
928 (aes->src_len & (AES_BLOCK_SIZE - 1))) in ccp_run_aes_cmd()
929 return -EINVAL; in ccp_run_aes_cmd()
931 if (!aes->key || !aes->src || !aes->dst) in ccp_run_aes_cmd()
932 return -EINVAL; in ccp_run_aes_cmd()
934 if (aes->mode != CCP_AES_MODE_ECB) { in ccp_run_aes_cmd()
935 if (aes->iv_len != AES_BLOCK_SIZE) in ccp_run_aes_cmd()
936 return -EINVAL; in ccp_run_aes_cmd()
938 if (!aes->iv) in ccp_run_aes_cmd()
939 return -EINVAL; in ccp_run_aes_cmd()
945 ret = -EIO; in ccp_run_aes_cmd()
948 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_aes_cmd()
949 op.sb_key = cmd_q->sb_key; in ccp_run_aes_cmd()
950 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_aes_cmd()
951 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; in ccp_run_aes_cmd()
952 op.u.aes.type = aes->type; in ccp_run_aes_cmd()
953 op.u.aes.mode = aes->mode; in ccp_run_aes_cmd()
954 op.u.aes.action = aes->action; in ccp_run_aes_cmd()
956 /* All supported key sizes fit in a single (32-byte) SB entry in ccp_run_aes_cmd()
957 * and must be in little endian format. Use the 256-bit byte in ccp_run_aes_cmd()
958 * swap passthru option to convert from big endian to little in ccp_run_aes_cmd()
959 * endian. in ccp_run_aes_cmd()
967 dm_offset = CCP_SB_BYTES - aes->key_len; in ccp_run_aes_cmd()
968 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); in ccp_run_aes_cmd()
974 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
978 /* The AES context fits in a single (32-byte) SB entry and in ccp_run_aes_cmd()
979 * must be in little endian format. Use the 256-bit byte swap in ccp_run_aes_cmd()
980 * passthru option to convert from big endian to little endian. in ccp_run_aes_cmd()
988 if (aes->mode != CCP_AES_MODE_ECB) { in ccp_run_aes_cmd()
989 /* Load the AES context - convert to LE */ in ccp_run_aes_cmd()
990 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmd()
991 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmd()
997 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
1001 switch (aes->mode) { in ccp_run_aes_cmd()
1004 op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1; in ccp_run_aes_cmd()
1010 /* Prepare the input and output data workareas. For in-place in ccp_run_aes_cmd()
1014 if (sg_virt(aes->src) == sg_virt(aes->dst)) in ccp_run_aes_cmd()
1017 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, in ccp_run_aes_cmd()
1026 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, in ccp_run_aes_cmd()
1042 if (aes->mode == CCP_AES_MODE_ECB) in ccp_run_aes_cmd()
1046 ret = cmd_q->ccp->vdata->perform->aes(&op); in ccp_run_aes_cmd()
1048 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
1055 if (aes->mode != CCP_AES_MODE_ECB) { in ccp_run_aes_cmd()
1056 /* Retrieve the AES context - convert from LE to BE using in ccp_run_aes_cmd()
1057 * 32-byte (256-bit) byteswapping in ccp_run_aes_cmd()
1062 cmd->engine_error = cmd_q->cmd_error; in ccp_run_aes_cmd()
1067 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_aes_cmd()
1068 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); in ccp_run_aes_cmd()
1090 struct ccp_xts_aes_engine *xts = &cmd->u.xts; in ccp_run_xts_aes_cmd()
1100 switch (xts->unit_size) { in ccp_run_xts_aes_cmd()
1118 return -EINVAL; in ccp_run_xts_aes_cmd()
1121 if (xts->key_len == AES_KEYSIZE_128) in ccp_run_xts_aes_cmd()
1123 else if (xts->key_len == AES_KEYSIZE_256) in ccp_run_xts_aes_cmd()
1126 return -EINVAL; in ccp_run_xts_aes_cmd()
1128 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) in ccp_run_xts_aes_cmd()
1129 return -EINVAL; in ccp_run_xts_aes_cmd()
1131 if (xts->iv_len != AES_BLOCK_SIZE) in ccp_run_xts_aes_cmd()
1132 return -EINVAL; in ccp_run_xts_aes_cmd()
1134 if (!xts->key || !xts->iv || !xts->src || !xts->dst) in ccp_run_xts_aes_cmd()
1135 return -EINVAL; in ccp_run_xts_aes_cmd()
1140 ret = -EIO; in ccp_run_xts_aes_cmd()
1143 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_xts_aes_cmd()
1144 op.sb_key = cmd_q->sb_key; in ccp_run_xts_aes_cmd()
1145 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_xts_aes_cmd()
1148 op.u.xts.action = xts->action; in ccp_run_xts_aes_cmd()
1149 op.u.xts.unit_size = xts->unit_size; in ccp_run_xts_aes_cmd()
1151 /* A version 3 device only supports 128-bit keys, which fits into a in ccp_run_xts_aes_cmd()
1152 * single SB entry. A version 5 device uses a 512-bit vector, so two in ccp_run_xts_aes_cmd()
1155 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) in ccp_run_xts_aes_cmd()
1165 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { in ccp_run_xts_aes_cmd()
1166 /* All supported key sizes must be in little endian format. in ccp_run_xts_aes_cmd()
1167 * Use the 256-bit byte swap passthru option to convert from in ccp_run_xts_aes_cmd()
1168 * big endian to little endian. in ccp_run_xts_aes_cmd()
1170 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; in ccp_run_xts_aes_cmd()
1171 ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); in ccp_run_xts_aes_cmd()
1174 ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); in ccp_run_xts_aes_cmd()
1178 /* Version 5 CCPs use a 512-bit space for the key: each portion in ccp_run_xts_aes_cmd()
1179 * occupies 256 bits, or one entire slot, and is zero-padded. in ccp_run_xts_aes_cmd()
1184 pad = dm_offset - xts->key_len; in ccp_run_xts_aes_cmd()
1185 ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); in ccp_run_xts_aes_cmd()
1188 ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key, in ccp_run_xts_aes_cmd()
1189 xts->key_len, xts->key_len); in ccp_run_xts_aes_cmd()
1196 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1200 /* The AES context fits in a single (32-byte) SB entry and in ccp_run_xts_aes_cmd()
1201 * for XTS is already in little endian format so no byte swapping in ccp_run_xts_aes_cmd()
1210 ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); in ccp_run_xts_aes_cmd()
1216 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1220 /* Prepare the input and output data workareas. For in-place in ccp_run_xts_aes_cmd()
1224 if (sg_virt(xts->src) == sg_virt(xts->dst)) in ccp_run_xts_aes_cmd()
1227 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, in ccp_run_xts_aes_cmd()
1236 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, in ccp_run_xts_aes_cmd()
1248 ret = cmd_q->ccp->vdata->perform->xts_aes(&op); in ccp_run_xts_aes_cmd()
1250 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1257 /* Retrieve the AES context - convert from LE to BE using in ccp_run_xts_aes_cmd()
1258 * 32-byte (256-bit) byteswapping in ccp_run_xts_aes_cmd()
1263 cmd->engine_error = cmd_q->cmd_error; in ccp_run_xts_aes_cmd()
1268 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; in ccp_run_xts_aes_cmd()
1269 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); in ccp_run_xts_aes_cmd()
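
For version-5 XTS the key image is 512 bits: each 256-bit slot holds one half of xts->key at the end of the slot, zero-padded in front (pad = dm_offset - xts->key_len above, with dm_offset one 32-byte slot). A standalone sketch of that layout; which half serves as cipher vs. tweak key follows the driver's convention and is shown generically here:

    #include <stdint.h>
    #include <string.h>

    #define SLOT 32

    static void xts_v5_key_image(uint8_t img[2 * SLOT],
                                 const uint8_t *key, size_t half_len)
    {
            size_t pad = SLOT - half_len;

            memset(img, 0, 2 * SLOT);
            memcpy(img + pad, key, half_len);               /* first half  */
            memcpy(img + SLOT + pad, key + half_len,        /* second half */
                   half_len);
    }
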
1290 struct ccp_des3_engine *des3 = &cmd->u.des3; in ccp_run_des3_cmd()
1301 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) in ccp_run_des3_cmd()
1302 return -EINVAL; in ccp_run_des3_cmd()
1304 if (!cmd_q->ccp->vdata->perform->des3) in ccp_run_des3_cmd()
1305 return -EINVAL; in ccp_run_des3_cmd()
1307 if (des3->key_len != DES3_EDE_KEY_SIZE) in ccp_run_des3_cmd()
1308 return -EINVAL; in ccp_run_des3_cmd()
1310 if (((des3->mode == CCP_DES3_MODE_ECB) || in ccp_run_des3_cmd()
1311 (des3->mode == CCP_DES3_MODE_CBC)) && in ccp_run_des3_cmd()
1312 (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1))) in ccp_run_des3_cmd()
1313 return -EINVAL; in ccp_run_des3_cmd()
1315 if (!des3->key || !des3->src || !des3->dst) in ccp_run_des3_cmd()
1316 return -EINVAL; in ccp_run_des3_cmd()
1318 if (des3->mode != CCP_DES3_MODE_ECB) { in ccp_run_des3_cmd()
1319 if (des3->iv_len != DES3_EDE_BLOCK_SIZE) in ccp_run_des3_cmd()
1320 return -EINVAL; in ccp_run_des3_cmd()
1322 if (!des3->iv) in ccp_run_des3_cmd()
1323 return -EINVAL; in ccp_run_des3_cmd()
1326 /* Zero out all the fields of the command desc */ in ccp_run_des3_cmd()
1331 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_des3_cmd()
1332 op.sb_key = cmd_q->sb_key; in ccp_run_des3_cmd()
1334 op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1; in ccp_run_des3_cmd()
1335 op.u.des3.type = des3->type; in ccp_run_des3_cmd()
1336 op.u.des3.mode = des3->mode; in ccp_run_des3_cmd()
1337 op.u.des3.action = des3->action; in ccp_run_des3_cmd()
1340 * All supported key sizes fit in a single (32-byte) KSB entry and in ccp_run_des3_cmd()
1341 * (like AES) must be in little endian format. Use the 256-bit byte in ccp_run_des3_cmd()
1342 * swap passthru option to convert from big endian to little endian. in ccp_run_des3_cmd()
1355 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ in ccp_run_des3_cmd()
1357 len_singlekey = des3->key_len / 3; in ccp_run_des3_cmd()
1359 des3->key, 0, len_singlekey); in ccp_run_des3_cmd()
1363 des3->key, len_singlekey, len_singlekey); in ccp_run_des3_cmd()
1367 des3->key, 2 * len_singlekey, len_singlekey); in ccp_run_des3_cmd()
1375 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1380 * The DES3 context fits in a single (32-byte) KSB entry and in ccp_run_des3_cmd()
1381 * must be in little endian format. Use the 256-bit byte swap in ccp_run_des3_cmd()
1382 * passthru option to convert from big endian to little endian. in ccp_run_des3_cmd()
1384 if (des3->mode != CCP_DES3_MODE_ECB) { in ccp_run_des3_cmd()
1385 op.sb_ctx = cmd_q->sb_ctx; in ccp_run_des3_cmd()
1394 dm_offset = CCP_SB_BYTES - des3->iv_len; in ccp_run_des3_cmd()
1395 ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, in ccp_run_des3_cmd()
1396 des3->iv_len); in ccp_run_des3_cmd()
1403 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1409 * Prepare the input and output data workareas. For in-place in ccp_run_des3_cmd()
1413 if (sg_virt(des3->src) == sg_virt(des3->dst)) in ccp_run_des3_cmd()
1416 ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len, in ccp_run_des3_cmd()
1425 ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len, in ccp_run_des3_cmd()
1444 ret = cmd_q->ccp->vdata->perform->des3(&op); in ccp_run_des3_cmd()
1446 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1453 if (des3->mode != CCP_DES3_MODE_ECB) { in ccp_run_des3_cmd()
1458 cmd->engine_error = cmd_q->cmd_error; in ccp_run_des3_cmd()
1463 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, in ccp_run_des3_cmd()
1474 if (des3->mode != CCP_DES3_MODE_ECB) in ccp_run_des3_cmd()
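
len_singlekey = des3->key_len / 3 above treats the 24-byte EDE key as three 8-byte single-DES keys loaded individually; the destination offsets are elided in this listing, so the sketch below only models the split itself:

    #include <stddef.h>

    #define DES3_EDE_KEY_SIZE 24

    /* Expose the three single-DES keys (K1|K2|K3) inside an EDE key. */
    static void split_des3_key(const unsigned char *key,
                               const unsigned char *k[3])
    {
            size_t single = DES3_EDE_KEY_SIZE / 3;  /* 8 bytes per key */

            k[0] = key;
            k[1] = key + single;
            k[2] = key + 2 * single;
    }
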
1486 struct ccp_sha_engine *sha = &cmd->u.sha; in ccp_run_sha_cmd()
1498 switch (sha->type) { in ccp_run_sha_cmd()
1500 if (sha->ctx_len < SHA1_DIGEST_SIZE) in ccp_run_sha_cmd()
1501 return -EINVAL; in ccp_run_sha_cmd()
1505 if (sha->ctx_len < SHA224_DIGEST_SIZE) in ccp_run_sha_cmd()
1506 return -EINVAL; in ccp_run_sha_cmd()
1510 if (sha->ctx_len < SHA256_DIGEST_SIZE) in ccp_run_sha_cmd()
1511 return -EINVAL; in ccp_run_sha_cmd()
1515 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) in ccp_run_sha_cmd()
1516 || sha->ctx_len < SHA384_DIGEST_SIZE) in ccp_run_sha_cmd()
1517 return -EINVAL; in ccp_run_sha_cmd()
1521 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) in ccp_run_sha_cmd()
1522 || sha->ctx_len < SHA512_DIGEST_SIZE) in ccp_run_sha_cmd()
1523 return -EINVAL; in ccp_run_sha_cmd()
1527 return -EINVAL; in ccp_run_sha_cmd()
1530 if (!sha->ctx) in ccp_run_sha_cmd()
1531 return -EINVAL; in ccp_run_sha_cmd()
1533 if (!sha->final && (sha->src_len & (block_size - 1))) in ccp_run_sha_cmd()
1534 return -EINVAL; in ccp_run_sha_cmd()
1536 /* The version 3 device can't handle zero-length input */ in ccp_run_sha_cmd()
1537 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { in ccp_run_sha_cmd()
1539 if (!sha->src_len) { in ccp_run_sha_cmd()
1544 if (!sha->final) in ccp_run_sha_cmd()
1550 if (sha->msg_bits) in ccp_run_sha_cmd()
1551 return -EINVAL; in ccp_run_sha_cmd()
1553 /* The CCP cannot perform zero-length sha operations in ccp_run_sha_cmd()
1559 switch (sha->type) { in ccp_run_sha_cmd()
1573 return -EINVAL; in ccp_run_sha_cmd()
1576 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, in ccp_run_sha_cmd()
1584 switch (sha->type) { in ccp_run_sha_cmd()
1590 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) in ccp_run_sha_cmd()
1591 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; in ccp_run_sha_cmd()
1601 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) in ccp_run_sha_cmd()
1602 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; in ccp_run_sha_cmd()
1619 ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE; in ccp_run_sha_cmd()
1629 ret = -EINVAL; in ccp_run_sha_cmd()
1633 /* For zero-length plaintext the src pointer is ignored; in ccp_run_sha_cmd()
1636 if (sha->src_len && !sha->src) in ccp_run_sha_cmd()
1637 return -EINVAL; in ccp_run_sha_cmd()
1641 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_sha_cmd()
1642 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ in ccp_run_sha_cmd()
1643 op.u.sha.type = sha->type; in ccp_run_sha_cmd()
1644 op.u.sha.msg_bits = sha->msg_bits; in ccp_run_sha_cmd()
1646 /* For SHA1/224/256 the context fits in a single (32-byte) SB entry; in ccp_run_sha_cmd()
1649 * be in little endian format: use the 256-bit byte swap option. in ccp_run_sha_cmd()
1655 if (sha->first) { in ccp_run_sha_cmd()
1656 switch (sha->type) { in ccp_run_sha_cmd()
1670 ret = -EINVAL; in ccp_run_sha_cmd()
1675 ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0, in ccp_run_sha_cmd()
1684 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1688 if (sha->src) { in ccp_run_sha_cmd()
1690 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, in ccp_run_sha_cmd()
1697 if (sha->final && !src.sg_wa.bytes_left) in ccp_run_sha_cmd()
1700 ret = cmd_q->ccp->vdata->perform->sha(&op); in ccp_run_sha_cmd()
1702 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1710 ret = cmd_q->ccp->vdata->perform->sha(&op); in ccp_run_sha_cmd()
1712 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1717 /* Retrieve the SHA context - convert from LE to BE using in ccp_run_sha_cmd()
1718 * 32-byte (256-bit) byteswapping in ccp_run_sha_cmd()
1723 cmd->engine_error = cmd_q->cmd_error; in ccp_run_sha_cmd()
1727 if (sha->final) { in ccp_run_sha_cmd()
1729 switch (sha->type) { in ccp_run_sha_cmd()
1734 sha->ctx, 0, in ccp_run_sha_cmd()
1740 sha->ctx, LSB_ITEM_SIZE - ooffset, in ccp_run_sha_cmd()
1743 sha->ctx, 0, in ccp_run_sha_cmd()
1744 LSB_ITEM_SIZE - ooffset); in ccp_run_sha_cmd()
1747 ret = -EINVAL; in ccp_run_sha_cmd()
1752 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, in ccp_run_sha_cmd()
1756 if (sha->final && sha->opad) { in ccp_run_sha_cmd()
1762 if (sha->opad_len != block_size) { in ccp_run_sha_cmd()
1763 ret = -EINVAL; in ccp_run_sha_cmd()
1769 ret = -ENOMEM; in ccp_run_sha_cmd()
1774 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); in ccp_run_sha_cmd()
1775 switch (sha->type) { in ccp_run_sha_cmd()
1789 (LSB_ITEM_SIZE - ooffset), in ccp_run_sha_cmd()
1795 ret = -EINVAL; in ccp_run_sha_cmd()
1801 hmac_cmd.u.sha.type = sha->type; in ccp_run_sha_cmd()
1802 hmac_cmd.u.sha.ctx = sha->ctx; in ccp_run_sha_cmd()
1803 hmac_cmd.u.sha.ctx_len = sha->ctx_len; in ccp_run_sha_cmd()
1814 cmd->engine_error = hmac_cmd.engine_error; in ccp_run_sha_cmd()
1820 if (sha->src) in ccp_run_sha_cmd()
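
The ioffset/ooffset values above follow one rule on version-5 parts: the digest is stored at the end of its 32-byte SB entry (two entries once the digest exceeds 32 bytes), so the offset is simply the unused space in front. A standalone model (CCP_SB_BYTES taken as 32, matching the "32-byte SB entry" comments):

    #include <stddef.h>

    #define SB_BYTES 32

    static size_t sha_ctx_offset(size_t digest_size)
    {
            size_t area = digest_size > SB_BYTES ? 2 * SB_BYTES : SB_BYTES;

            return area - digest_size;      /* e.g. SHA-1: 32 - 20 = 12,
                                             * SHA-384: 64 - 48 = 16 */
    }
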
1832 struct ccp_rsa_engine *rsa = &cmd->u.rsa; in ccp_run_rsa_cmd()
1839 if (rsa->key_size > cmd_q->ccp->vdata->rsamax) in ccp_run_rsa_cmd()
1840 return -EINVAL; in ccp_run_rsa_cmd()
1842 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) in ccp_run_rsa_cmd()
1843 return -EINVAL; in ccp_run_rsa_cmd()
1847 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_rsa_cmd()
1853 * must be a multiple of 256-bits). Compute o_len, i_len in bytes. in ccp_run_rsa_cmd()
1857 o_len = 32 * ((rsa->key_size + 255) / 256); in ccp_run_rsa_cmd()
1861 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { in ccp_run_rsa_cmd()
1866 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, in ccp_run_rsa_cmd()
1869 return -EIO; in ccp_run_rsa_cmd()
1875 op.sb_key = cmd_q->sb_key; in ccp_run_rsa_cmd()
1878 /* The RSA exponent must be in little endian format. Reverse its in ccp_run_rsa_cmd()
1885 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); in ccp_run_rsa_cmd()
1889 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { in ccp_run_rsa_cmd()
1891 * as many 32-byte blocks as were allocated above. It's in ccp_run_rsa_cmd()
1892 * already little endian, so no further change is required. in ccp_run_rsa_cmd()
1897 cmd->engine_error = cmd_q->cmd_error; in ccp_run_rsa_cmd()
1907 * the operands must be in little endian format. Since the input in ccp_run_rsa_cmd()
1908 * is in big endian format it must be converted. in ccp_run_rsa_cmd()
1914 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); in ccp_run_rsa_cmd()
1917 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); in ccp_run_rsa_cmd()
1934 op.u.rsa.mod_size = rsa->key_size; in ccp_run_rsa_cmd()
1937 ret = cmd_q->ccp->vdata->perform->rsa(&op); in ccp_run_rsa_cmd()
1939 cmd->engine_error = cmd_q->cmd_error; in ccp_run_rsa_cmd()
1943 ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); in ccp_run_rsa_cmd()
1956 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); in ccp_run_rsa_cmd()
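
o_len = 32 * ((rsa->key_size + 255) / 256) above rounds the modulus length up to whole 256-bit units; the input area then holds the modulus at offset 0 and the message at offset o_len, matching the two ccp_reverse_set_dm_area() calls. The sizing in standalone form:

    /* Bytes needed for one RSA operand: key size in bits, rounded up
     * to a multiple of 256 bits, expressed in bytes. */
    static unsigned int rsa_o_len(unsigned int key_size_bits)
    {
            return 32 * ((key_size_bits + 255) / 256);
    }
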
1964 struct ccp_passthru_engine *pt = &cmd->u.passthru; in ccp_run_passthru_cmd()
1972 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) in ccp_run_passthru_cmd()
1973 return -EINVAL; in ccp_run_passthru_cmd()
1975 if (!pt->src || !pt->dst) in ccp_run_passthru_cmd()
1976 return -EINVAL; in ccp_run_passthru_cmd()
1978 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_cmd()
1979 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) in ccp_run_passthru_cmd()
1980 return -EINVAL; in ccp_run_passthru_cmd()
1981 if (!pt->mask) in ccp_run_passthru_cmd()
1982 return -EINVAL; in ccp_run_passthru_cmd()
1989 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_passthru_cmd()
1991 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_cmd()
1993 op.sb_key = cmd_q->sb_key; in ccp_run_passthru_cmd()
2002 ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); in ccp_run_passthru_cmd()
2008 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_cmd()
2013 /* Prepare the input and output data workareas. For in-place in ccp_run_passthru_cmd()
2017 if (sg_virt(pt->src) == sg_virt(pt->dst)) in ccp_run_passthru_cmd()
2020 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, in ccp_run_passthru_cmd()
2029 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, in ccp_run_passthru_cmd()
2046 ret = -EINVAL; in ccp_run_passthru_cmd()
2065 ret = cmd_q->ccp->vdata->perform->passthru(&op); in ccp_run_passthru_cmd()
2067 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_cmd()
2087 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) in ccp_run_passthru_cmd()
2097 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; in ccp_run_passthru_nomap_cmd()
2102 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) in ccp_run_passthru_nomap_cmd()
2103 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2105 if (!pt->src_dma || !pt->dst_dma) in ccp_run_passthru_nomap_cmd()
2106 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2108 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_nomap_cmd()
2109 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) in ccp_run_passthru_nomap_cmd()
2110 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2111 if (!pt->mask) in ccp_run_passthru_nomap_cmd()
2112 return -EINVAL; in ccp_run_passthru_nomap_cmd()
2119 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_passthru_nomap_cmd()
2121 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { in ccp_run_passthru_nomap_cmd()
2123 op.sb_key = cmd_q->sb_key; in ccp_run_passthru_nomap_cmd()
2125 mask.length = pt->mask_len; in ccp_run_passthru_nomap_cmd()
2126 mask.dma.address = pt->mask; in ccp_run_passthru_nomap_cmd()
2127 mask.dma.length = pt->mask_len; in ccp_run_passthru_nomap_cmd()
2132 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_nomap_cmd()
2142 op.src.u.dma.address = pt->src_dma; in ccp_run_passthru_nomap_cmd()
2144 op.src.u.dma.length = pt->src_len; in ccp_run_passthru_nomap_cmd()
2147 op.dst.u.dma.address = pt->dst_dma; in ccp_run_passthru_nomap_cmd()
2149 op.dst.u.dma.length = pt->src_len; in ccp_run_passthru_nomap_cmd()
2151 ret = cmd_q->ccp->vdata->perform->passthru(&op); in ccp_run_passthru_nomap_cmd()
2153 cmd->engine_error = cmd_q->cmd_error; in ccp_run_passthru_nomap_cmd()
2160 struct ccp_ecc_engine *ecc = &cmd->u.ecc; in ccp_run_ecc_mm_cmd()
2166 if (!ecc->u.mm.operand_1 || in ccp_run_ecc_mm_cmd()
2167 (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_mm_cmd()
2168 return -EINVAL; in ccp_run_ecc_mm_cmd()
2170 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) in ccp_run_ecc_mm_cmd()
2171 if (!ecc->u.mm.operand_2 || in ccp_run_ecc_mm_cmd()
2172 (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_mm_cmd()
2173 return -EINVAL; in ccp_run_ecc_mm_cmd()
2175 if (!ecc->u.mm.result || in ccp_run_ecc_mm_cmd()
2176 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_mm_cmd()
2177 return -EINVAL; in ccp_run_ecc_mm_cmd()
2181 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_ecc_mm_cmd()
2184 * the operands must be in little endian format. Since the input in ccp_run_ecc_mm_cmd()
2185 * is in big endian format it must be converted and placed in a in ccp_run_ecc_mm_cmd()
2199 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); in ccp_run_ecc_mm_cmd()
2205 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0, in ccp_run_ecc_mm_cmd()
2206 ecc->u.mm.operand_1_len); in ccp_run_ecc_mm_cmd()
2211 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { in ccp_run_ecc_mm_cmd()
2213 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0, in ccp_run_ecc_mm_cmd()
2214 ecc->u.mm.operand_2_len); in ccp_run_ecc_mm_cmd()
2237 op.u.ecc.function = cmd->u.ecc.function; in ccp_run_ecc_mm_cmd()
2239 ret = cmd_q->ccp->vdata->perform->ecc(&op); in ccp_run_ecc_mm_cmd()
2241 cmd->engine_error = cmd_q->cmd_error; in ccp_run_ecc_mm_cmd()
2245 ecc->ecc_result = le16_to_cpup( in ccp_run_ecc_mm_cmd()
2247 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { in ccp_run_ecc_mm_cmd()
2248 ret = -EIO; in ccp_run_ecc_mm_cmd()
2253 ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0, in ccp_run_ecc_mm_cmd()
2267 struct ccp_ecc_engine *ecc = &cmd->u.ecc; in ccp_run_ecc_pm_cmd()
2273 if (!ecc->u.pm.point_1.x || in ccp_run_ecc_pm_cmd()
2274 (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || in ccp_run_ecc_pm_cmd()
2275 !ecc->u.pm.point_1.y || in ccp_run_ecc_pm_cmd()
2276 (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2277 return -EINVAL; in ccp_run_ecc_pm_cmd()
2279 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { in ccp_run_ecc_pm_cmd()
2280 if (!ecc->u.pm.point_2.x || in ccp_run_ecc_pm_cmd()
2281 (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || in ccp_run_ecc_pm_cmd()
2282 !ecc->u.pm.point_2.y || in ccp_run_ecc_pm_cmd()
2283 (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2284 return -EINVAL; in ccp_run_ecc_pm_cmd()
2286 if (!ecc->u.pm.domain_a || in ccp_run_ecc_pm_cmd()
2287 (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2288 return -EINVAL; in ccp_run_ecc_pm_cmd()
2290 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) in ccp_run_ecc_pm_cmd()
2291 if (!ecc->u.pm.scalar || in ccp_run_ecc_pm_cmd()
2292 (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2293 return -EINVAL; in ccp_run_ecc_pm_cmd()
2296 if (!ecc->u.pm.result.x || in ccp_run_ecc_pm_cmd()
2297 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || in ccp_run_ecc_pm_cmd()
2298 !ecc->u.pm.result.y || in ccp_run_ecc_pm_cmd()
2299 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_pm_cmd()
2300 return -EINVAL; in ccp_run_ecc_pm_cmd()
2304 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); in ccp_run_ecc_pm_cmd()
2307 * the operands must be in little endian format. Since the input in ccp_run_ecc_pm_cmd()
2308 * is in big endian format it must be converted and placed in a in ccp_run_ecc_pm_cmd()
2322 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); in ccp_run_ecc_pm_cmd()
2328 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0, in ccp_run_ecc_pm_cmd()
2329 ecc->u.pm.point_1.x_len); in ccp_run_ecc_pm_cmd()
2333 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0, in ccp_run_ecc_pm_cmd()
2334 ecc->u.pm.point_1.y_len); in ccp_run_ecc_pm_cmd()
2343 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { in ccp_run_ecc_pm_cmd()
2345 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0, in ccp_run_ecc_pm_cmd()
2346 ecc->u.pm.point_2.x_len); in ccp_run_ecc_pm_cmd()
2350 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0, in ccp_run_ecc_pm_cmd()
2351 ecc->u.pm.point_2.y_len); in ccp_run_ecc_pm_cmd()
2361 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0, in ccp_run_ecc_pm_cmd()
2362 ecc->u.pm.domain_a_len); in ccp_run_ecc_pm_cmd()
2367 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { in ccp_run_ecc_pm_cmd()
2370 ecc->u.pm.scalar, 0, in ccp_run_ecc_pm_cmd()
2371 ecc->u.pm.scalar_len); in ccp_run_ecc_pm_cmd()
2395 op.u.ecc.function = cmd->u.ecc.function; in ccp_run_ecc_pm_cmd()
2397 ret = cmd_q->ccp->vdata->perform->ecc(&op); in ccp_run_ecc_pm_cmd()
2399 cmd->engine_error = cmd_q->cmd_error; in ccp_run_ecc_pm_cmd()
2403 ecc->ecc_result = le16_to_cpup( in ccp_run_ecc_pm_cmd()
2405 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { in ccp_run_ecc_pm_cmd()
2406 ret = -EIO; in ccp_run_ecc_pm_cmd()
2416 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0, in ccp_run_ecc_pm_cmd()
2419 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, in ccp_run_ecc_pm_cmd()
2438 struct ccp_ecc_engine *ecc = &cmd->u.ecc; in ccp_run_ecc_cmd()
2440 ecc->ecc_result = 0; in ccp_run_ecc_cmd()
2442 if (!ecc->mod || in ccp_run_ecc_cmd()
2443 (ecc->mod_len > CCP_ECC_MODULUS_BYTES)) in ccp_run_ecc_cmd()
2444 return -EINVAL; in ccp_run_ecc_cmd()
2446 switch (ecc->function) { in ccp_run_ecc_cmd()
2458 return -EINVAL; in ccp_run_ecc_cmd()
2466 cmd->engine_error = 0; in ccp_run_cmd()
2467 cmd_q->cmd_error = 0; in ccp_run_cmd()
2468 cmd_q->int_rcvd = 0; in ccp_run_cmd()
2469 cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q); in ccp_run_cmd()
2471 switch (cmd->engine) { in ccp_run_cmd()
2473 switch (cmd->u.aes.mode) { in ccp_run_cmd()
2498 if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP) in ccp_run_cmd()
2507 ret = -EINVAL; in ccp_run_cmd()