// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode, and for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"

static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

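	/*
	 * Program the engine while holding ss->slock: load the key and IV,
	 * then write the mode word to SS_CTL to start the operation.
	 */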
	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

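	/*
	 * Walk the source and destination scatterlists with atomic
	 * (kmap-based) iterators; sg_miter_next() maps the next SG entry.
	 */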
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
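	/*
	 * Poll the engine: push input words whenever the RX FIFO has room,
	 * pull output words whenever the TX FIFO has data, until all output
	 * has been read. SS_FCSR reports the free/filled FIFO slots.
	 */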
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

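	/*
	 * For chaining modes (CBC), read back the IV left in the device so
	 * that subsequent requests can be chained.
	 */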
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}

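/*
 * Fall back to the software implementation for requests the hardware
 * cannot handle, e.g. a cryptlen that is not a multiple of the block size.
 */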
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
	int err;

	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL,
				      NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(subreq);
	else
		err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}

/* Generic function that supports SGs with sizes that are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * If all the SGs have a length that is a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

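	/*
	 * Main loop: same FIFO polling as in sun4i_ss_opti_poll(), but
	 * input/output that is not 4-byte aligned within an SG entry is
	 * staged through the buf/bufo linearization buffers.
	 */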
	while (oleft) {
		if (ileft) {
			char buf[4 * SS_RX_MAX];	/* buffer for linearizing the source SG */

			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG.
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; here todo is in bytes. After
				 * the copy, once buf holds a multiple of 4
				 * bytes, we must be able to write all of it
				 * in one pass, which is why we min() with
				 * rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;
		/* todo in 4-byte words */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the destination SG */

			/*
			 * Read obl bytes into bufo; read as much as possible
			 * in order to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * the remaining SG size, no more than the
				 * remaining buffer; no need to test against
				 * oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
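	/* Read back the updated IV for chaining, as in sun4i_ss_opti_poll() */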
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

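/*
 * The helpers below only build the SS_CTL mode word (algorithm, chaining
 * mode, direction and key size) and delegate to sun4i_ss_cipher_poll().
 */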
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

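/*
 * Per-tfm setup: record the SS device, reserve room for the request
 * context and allocate the software fallback skcipher.
 */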
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	return 0;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(op->fallback_tfm);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

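	/* Mirror the request flags and the new key onto the fallback tfm */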
	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}