// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode, as well as for DES and 3DES in CBC and
 * ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"
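
/*
 * The skciphers implemented here are reached through the generic kernel
 * crypto API. A minimal caller sketch (assuming a synchronous request and
 * omitting error handling; whether this driver or another "cbc(aes)"
 * implementation services the request depends on algorithm priority):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);
 */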
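/*
 * sun4i_ss_opti_poll() - optimized polling path, usable only when every
 * source and destination SG entry is 4-byte aligned and a multiple of
 * 4 bytes long, so whole 32-bit words can be moved to and from the
 * device FIFOs without intermediate buffering.
 */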
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	void *backup_iv = NULL;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * For CBC decryption, save the last ciphertext block now: it is the
	 * IV to report back, and areq->src may be overwritten when source
	 * and destination buffers overlap.
	 */
	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		backup_iv = kzalloc(ivsize, GFP_KERNEL);
		if (!backup_iv)
			return -ENOMEM;
		scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_opti++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
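
	/*
	 * The FIFOs are driven by polling: write up to rx_cnt words into the
	 * RX FIFO, re-read SS_FCSR to learn how many words may be written
	 * and read next, then drain up to tx_cnt words from the TX FIFO,
	 * until all output has been collected.
	 */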
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		/*
		 * SS_FCSR tells how many 32-bit words we may write to the
		 * RX FIFO and read from the TX FIFO.
		 */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);

	/*
	 * For CBC, the API expects areq->iv to be updated to the last
	 * ciphertext block: restored from the backup for decryption, read
	 * back from the destination for encryption.
	 */
	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, backup_iv, ivsize);
			kfree_sensitive(backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
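/*
 * sun4i_ss_cipher_poll_fallback() - hand the request over to the software
 * fallback tfm allocated in sun4i_ss_cipher_init(), for the cases the
 * hardware cannot handle (cryptlen not a multiple of the block size).
 */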
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	int err;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_fb++;
	}

	skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&ctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&ctx->fallback_req);

	return err;
}

/* Generic function that supports SGs with sizes that are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	void *backup_iv = NULL;
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	unsigned int oi, oo; /* offset for in and out */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * If all SG lengths and offsets are multiples of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		backup_iv = kzalloc(ivsize, GFP_KERNEL);
		if (!backup_iv)
			return -ENOMEM;
		scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt->stat_req++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;
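	/*
	 * The main loop interleaves feeding the RX FIFO from the source SGs
	 * and draining the TX FIFO into the destination SGs, using ss->buf
	 * and ss->bufo to linearize pieces that are not 4-byte aligned.
	 */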
	while (oleft) {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf. Here todo is in bytes. After
				 * the copy, once we have a multiple of 4, we
				 * must be able to write all of buf in one
				 * pass, which is why we min() with rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(ss->buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, ss->buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		if (!tx_cnt)
			continue;
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);

		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				po += mo.length;
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo: read as much as
			 * possible to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size and
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, ss->bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					po += mo.length;
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully consumed here */
		}
		sg_miter_stop(&mo);
	}
	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, backup_iv, ivsize);
			kfree_sensitive(backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
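/*
 * The entry points below only build the SS_CTL mode word (algorithm,
 * chaining mode, direction, keysize) in the request context and then
 * delegate to sun4i_ss_cipher_poll().
 */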
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
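/*
 * sun4i_ss_cipher_init() - allocate the software fallback tfm, size the
 * request context so it can also hold the fallback request, and take a
 * runtime PM reference on the device.
 */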
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}
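/*
 * sun4i_ss_cipher_exit() - free the fallback tfm and drop the runtime PM
 * reference taken in sun4i_ss_cipher_init().
 */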
void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put(op->ss->dev);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}