/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

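/*
 * Iterator used to walk the src and dst scatterlists one SRAM-sized
 * operation at a time while building a TDMA descriptor chain.
 */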
struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

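/*
 * Standard (CPU-driven) mode: copy the op descriptor and the next chunk of
 * payload into the engine SRAM, then kick accelerator 0.
 */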
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

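	/*
	 * The engine updates the op context (e.g. the IV) in place in SRAM,
	 * so the full op is only copied for the first chunk; later chunks
	 * rewrite just the descriptor part.
	 */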
	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

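/*
 * Standard mode completion path: copy the chunk processed by the engine back
 * from SRAM, then return -EINPROGRESS if another chunk is pending or 0 once
 * the whole request has been handled.
 */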
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

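	/*
	 * Retrieve the updated IV: in DMA mode it lives in the context of the
	 * last op descriptor of the chain, in standard mode it is read back
	 * from the engine SRAM.
	 */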
	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

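	/*
	 * For decryption the engine needs the last Nk (i.e. key_length / 4)
	 * words of the expanded encryption key. key_dec[0..3] already holds
	 * the last round key (set up by crypto_aes_expand_key); append the
	 * (Nk - 4) words that precede it in the encryption schedule. For
	 * AES-128 nothing needs to be copied.
	 */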
	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;
	unsigned int ivsize;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

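	/*
	 * Build the TDMA chain in SRAM-sized chunks: each iteration adds an
	 * op descriptor, the transfers feeding the input data into SRAM, a
	 * dummy descriptor launching the crypto operation, and the transfers
	 * fetching the result back out.
	 */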
	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

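	/* The engine only handles full cipher blocks. */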
	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG\n");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG\n");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

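/*
 * Common entry point for all encrypt/decrypt operations: initialize the
 * request, select an engine based on the current load, and queue the
 * request for processing.
 */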
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

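	/*
	 * The engine expects the key as an array of little-endian 32-bit
	 * words.
	 */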
	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};