Lines Matching +full:engine +full:- +full:specific
1 /* SPDX-License-Identifier: GPL-2.0 */
8 #include <linux/dma-direction.h>
70 * in Errata 4.12. It looks like that it was part of an IRQ-controller in FPGA
124 * /-----------\ 0
126 * |-----------| 0x20
128 * |-----------| 0x40
130 * |-----------| 0x40 (inplace)
132 * |-----------| 0x80
133 * | DATA IN | 16 * x (max ->max_req_size)
134 * |-----------| 0x80 (inplace operation)
135 * | DATA OUT | 16 * x (max ->max_req_size)
136 * \-----------/ SRAM size
141 * /-----------\ 0
143 * |-----------| 0x20
145 * |-----------| 0x40
147 * |-----------| 0x60
149 * |-----------| 0x80
150 * | DATA IN | 64 * x (max ->max_req_size)
151 * \-----------/ SRAM size
196 #define CESA_SA_SRAM_PAYLOAD_SIZE (cesa_dev->sram_size - \
202 #define CESA_SA_SRAM_MSK (2048 - 1)
205 #define CESA_HASH_BLOCK_SIZE_MSK (CESA_MAX_HASH_BLOCK_SIZE - 1)
208 * struct mv_cesa_sec_accel_desc - security accelerator descriptor
209 * @config: engine config
218 * Structure passed to the CESA engine to describe the crypto operation
233 * struct mv_cesa_skcipher_op_ctx - cipher operation context
245 * struct mv_cesa_hash_op_ctx - hash or hmac operation context
257 * struct mv_cesa_op_ctx - crypto operation context
284 * struct mv_cesa_tdma_desc - TDMA descriptor
322 * struct mv_cesa_sg_dma_iter - scatter-gather iterator
339 * struct mv_cesa_dma_iter - crypto operation iterator
342 * @op_len: sub-operation length (the crypto engine can only act on 2kb
354 * struct mv_cesa_tdma_chain - TDMA chain
358 * Stores a TDMA chain for a specific crypto operation.
368 * struct mv_cesa_caps - CESA device capabilities
388 * struct mv_cesa_dev_dma - DMA pools
406 * struct mv_cesa_dev - CESA device
427 * struct mv_cesa_engine - CESA engine
428 * @id: engine id
429 * @regs: engine registers
433 * @lock: engine lock
435 * @clk: engine clk
436 * @zclk: engine zclk
442 * @load: engine load counter, useful for load balancing
444 * by this engine.
445 * @complete_queue: fifo of the processed requests by the engine
447 * Structure storing CESA engine information.
472 * struct mv_cesa_req_ops - CESA request operations
474 * operation, -EINPROGRESS if it needs more steps or an error
489 * struct mv_cesa_ctx - CESA operation context
492 * Base context structure inherited by operation specific ones.
499 * struct mv_cesa_hash_ctx - CESA hash operation context
 509 * struct mv_cesa_hmac_ctx - CESA hmac operation context
521 * enum mv_cesa_req_type - request type definitions
531 * struct mv_cesa_req - CESA request
532 * @engine: engine associated with this request
536 struct mv_cesa_engine *engine; member
541 * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard
552 * struct mv_cesa_skcipher_std_req - cipher standard request
565 * struct mv_cesa_skcipher_req - cipher request
566 * @req: type specific request information
578 * struct mv_cesa_ahash_std_req - standard hash request
586 * struct mv_cesa_ahash_dma_req - DMA hash request
599 * struct mv_cesa_ahash_req - hash request
600 * @req: type specific request information
631 mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine, in mv_cesa_engine_enqueue_complete_request() argument
634 list_add_tail(&req->list, &engine->complete_queue); in mv_cesa_engine_enqueue_complete_request()
638 mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine) in mv_cesa_engine_dequeue_complete_request() argument
642 req = list_first_entry_or_null(&engine->complete_queue, in mv_cesa_engine_dequeue_complete_request()
646 list_del(&req->list); in mv_cesa_engine_dequeue_complete_request()
655 return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ; in mv_cesa_req_get_type()
661 op->desc.config &= cpu_to_le32(~mask); in mv_cesa_update_op_cfg()
662 op->desc.config |= cpu_to_le32(cfg); in mv_cesa_update_op_cfg()
667 return le32_to_cpu(op->desc.config); in mv_cesa_get_op_cfg()
672 op->desc.config = cpu_to_le32(cfg); in mv_cesa_set_op_cfg()
675 static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine, in mv_cesa_adjust_op() argument
678 u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK; in mv_cesa_adjust_op()
680 op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset); in mv_cesa_adjust_op()
681 op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset); in mv_cesa_adjust_op()
682 op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset); in mv_cesa_adjust_op()
683 op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK; in mv_cesa_adjust_op()
684 op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset); in mv_cesa_adjust_op()
685 op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK; in mv_cesa_adjust_op()
686 op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset); in mv_cesa_adjust_op()
687 op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset); in mv_cesa_adjust_op()
692 op->desc.enc_len = cpu_to_le32(len); in mv_cesa_set_crypt_op_len()
698 op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK; in mv_cesa_set_mac_op_total_len()
699 op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len); in mv_cesa_set_mac_op_total_len()
705 op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK; in mv_cesa_set_mac_op_frag_len()
706 op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len); in mv_cesa_set_mac_op_frag_len()
709 static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine, in mv_cesa_set_int_mask() argument
712 if (int_mask == engine->int_mask) in mv_cesa_set_int_mask()
715 writel_relaxed(int_mask, engine->regs + CESA_SA_INT_MSK); in mv_cesa_set_int_mask()
716 engine->int_mask = int_mask; in mv_cesa_set_int_mask()
719 static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) in mv_cesa_get_int_mask() argument
721 return engine->int_mask; in mv_cesa_get_int_mask()
734 mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
743 for (i = 0; i < cesa_dev->caps->nengines; i++) { in mv_cesa_select_engine()
744 struct mv_cesa_engine *engine = cesa_dev->engines + i; in mv_cesa_select_engine() local
745 u32 load = atomic_read(&engine->load); in mv_cesa_select_engine()
749 selected = engine; in mv_cesa_select_engine()
753 atomic_add(weight, &selected->load); in mv_cesa_select_engine()
769 if (ret == -EINPROGRESS) in mv_cesa_req_needs_cleanup()
778 if (ret == -EBUSY) in mv_cesa_req_needs_cleanup()
790 iter->len = len; in mv_cesa_req_dma_iter_init()
791 iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE); in mv_cesa_req_dma_iter_init()
792 iter->offset = 0; in mv_cesa_req_dma_iter_init()
799 iter->op_offset = 0; in mv_cesa_sg_dma_iter_init()
800 iter->offset = 0; in mv_cesa_sg_dma_iter_init()
801 iter->sg = sg; in mv_cesa_sg_dma_iter_init()
802 iter->dir = dir; in mv_cesa_sg_dma_iter_init()
809 return min(iter->op_len - sgiter->op_offset, in mv_cesa_req_dma_iter_transfer_len()
810 sg_dma_len(sgiter->sg) - sgiter->offset); in mv_cesa_req_dma_iter_transfer_len()
819 iter->offset += iter->op_len; in mv_cesa_req_dma_iter_next_op()
820 iter->op_len = min(iter->len - iter->offset, in mv_cesa_req_dma_iter_next_op()
823 return iter->op_len; in mv_cesa_req_dma_iter_next_op()
832 return -EINPROGRESS; in mv_cesa_dma_process()
835 return -EINVAL; in mv_cesa_dma_process()
841 struct mv_cesa_engine *engine);
843 void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
845 int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
874 size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
879 static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine, in mv_cesa_sg_copy_to_sram() argument
885 return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip, in mv_cesa_sg_copy_to_sram()
889 static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine, in mv_cesa_sg_copy_from_sram() argument
895 return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip, in mv_cesa_sg_copy_from_sram()