1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Driver for EIP97 AES acceleration.
6  *
7  * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
8  *
 * Some ideas are from the atmel-aes.c driver.
10  */
11 
12 #include <crypto/aes.h>
13 #include <crypto/gcm.h>
14 #include "mtk-platform.h"
15 
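/*
 * Each AES record keeps a bounce buffer of AES_BUF_SIZE bytes (four pages,
 * rounded down to a whole number of AES blocks) for requests whose
 * scatterlists are not suitably aligned.  AES_MAX_STATE_BUF_SIZE is the size
 * of the per-transform state in 32-bit words: a 256-bit key plus two further
 * 16-byte blocks (GHASH key and IVs).
 */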
16 #define AES_QUEUE_SIZE		512
17 #define AES_BUF_ORDER		2
18 #define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
19 				& ~(AES_BLOCK_SIZE - 1))
20 #define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
21 				AES_BLOCK_SIZE * 2)
22 #define AES_MAX_CT_SIZE		6
23 
24 #define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
25 
26 /* AES-CBC/ECB/CTR/OFB/CFB command token */
27 #define AES_CMD0		cpu_to_le32(0x05000000)
28 #define AES_CMD1		cpu_to_le32(0x2d060000)
29 #define AES_CMD2		cpu_to_le32(0xe4a63806)
30 /* AES-GCM command token */
31 #define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
32 #define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
33 #define AES_GCM_CMD2		cpu_to_le32(0x25000010)
34 #define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
35 #define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
36 #define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
37 #define AES_GCM_CMD6		cpu_to_le32(0xd0070000)
38 
39 /* AES transform information word 0 fields */
40 #define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
41 #define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
42 #define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
43 #define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
44 #define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
45 #define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
46 #define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
47 #define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
48 #define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
49 #define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
50 /* AES transform information word 1 fields */
51 #define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
52 #define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
53 #define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
54 #define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
55 #define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
56 #define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
57 #define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
58 #define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
59 #define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
60 #define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
61 
62 /* AES flags */
63 #define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
64 #define AES_FLAGS_ECB		BIT(0)
65 #define AES_FLAGS_CBC		BIT(1)
66 #define AES_FLAGS_CTR		BIT(2)
67 #define AES_FLAGS_OFB		BIT(3)
68 #define AES_FLAGS_CFB128	BIT(4)
69 #define AES_FLAGS_GCM		BIT(5)
70 #define AES_FLAGS_ENCRYPT	BIT(6)
71 #define AES_FLAGS_BUSY		BIT(7)
72 
73 #define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
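
/*
 * The low bits of aes->flags select the cipher mode (AES_FLAGS_CIPHER_MSK
 * covers ECB/CBC/CTR/OFB/CFB); GCM, ENCRYPT and BUSY are independent state
 * bits, and only BUSY survives across requests (see mtk_aes_set_mode()).
 * AES_AUTH_TAG_ERR is the tag-mismatch bit of the GCM result descriptor.
 */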
74 
75 /**
76  * mtk_aes_info - hardware information of AES
77  * @cmd:	command token, hardware instruction
78  * @tfm:	transform state of cipher algorithm.
79  * @state:	contains keys and initial vectors.
80  *
81  * Memory layout of GCM buffer:
82  * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
86  * |-----------|
87  * |    IVs    | 4 * 4 bytes
88  * \-----------/
89  *
 * The engine requires all of this information for:
 * - Command decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
94  */
95 struct mtk_aes_info {
96 	__le32 cmd[AES_MAX_CT_SIZE];
97 	__le32 tfm[2];
98 	__le32 state[AES_MAX_STATE_BUF_SIZE];
99 };
100 
101 struct mtk_aes_reqctx {
102 	u64 mode;
103 };
104 
105 struct mtk_aes_base_ctx {
106 	struct mtk_cryp *cryp;
107 	u32 keylen;
108 	__le32 key[12];
109 	__le32 keymode;
110 
111 	mtk_aes_fn start;
112 
113 	struct mtk_aes_info info;
114 	dma_addr_t ct_dma;
115 	dma_addr_t tfm_dma;
116 
117 	__le32 ct_hdr;
118 	u32 ct_size;
119 };
120 
121 struct mtk_aes_ctx {
122 	struct mtk_aes_base_ctx	base;
123 };
124 
125 struct mtk_aes_ctr_ctx {
126 	struct mtk_aes_base_ctx base;
127 
128 	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
129 	size_t offset;
130 	struct scatterlist src[2];
131 	struct scatterlist dst[2];
132 };
133 
134 struct mtk_aes_gcm_ctx {
135 	struct mtk_aes_base_ctx base;
136 
137 	u32 authsize;
138 	size_t textlen;
139 
140 	struct crypto_skcipher *ctr;
141 };
142 
143 struct mtk_aes_drv {
144 	struct list_head dev_list;
145 	/* Device list lock */
146 	spinlock_t lock;
147 };
148 
149 static struct mtk_aes_drv mtk_aes = {
150 	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
151 	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
152 };
153 
static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
155 {
156 	return readl_relaxed(cryp->base + offset);
157 }
158 
static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
161 {
162 	writel_relaxed(value, cryp->base + offset);
163 }
164 
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
166 {
167 	struct mtk_cryp *cryp = NULL;
168 	struct mtk_cryp *tmp;
169 
170 	spin_lock_bh(&mtk_aes.lock);
171 	if (!ctx->cryp) {
172 		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
173 			cryp = tmp;
174 			break;
175 		}
176 		ctx->cryp = cryp;
177 	} else {
178 		cryp = ctx->cryp;
179 	}
180 	spin_unlock_bh(&mtk_aes.lock);
181 
182 	return cryp;
183 }
184 
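/*
 * Number of padding bytes needed to round @len up to the next AES block
 * boundary, e.g. len = 20 -> 12 and len = 32 -> 0.
 */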
static inline size_t mtk_aes_padlen(size_t len)
186 {
187 	len &= AES_BLOCK_SIZE - 1;
188 	return len ? AES_BLOCK_SIZE - len : 0;
189 }
190 
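/*
 * Walk the scatterlist and check that every entry is 32-bit aligned and a
 * multiple of the AES block size.  On success the last entry is trimmed to
 * end exactly at @len and the cut-off remainder is recorded so that
 * mtk_aes_restore_sg() can undo the trim once the transfer has completed.
 */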
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
193 {
194 	int nents;
195 
196 	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
197 		return false;
198 
199 	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
200 		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
201 			return false;
202 
203 		if (len <= sg->length) {
204 			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
205 				return false;
206 
207 			dma->nents = nents + 1;
208 			dma->remainder = sg->length - len;
209 			sg->length = len;
210 			return true;
211 		}
212 
213 		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
214 			return false;
215 
216 		len -= sg->length;
217 	}
218 
219 	return false;
220 }
221 
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
224 {
225 	/* Clear all but persistent flags and set request flags. */
226 	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
227 }
228 
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
230 {
231 	struct scatterlist *sg = dma->sg;
232 	int nents = dma->nents;
233 
234 	if (!dma->remainder)
235 		return;
236 
237 	while (--nents > 0 && sg)
238 		sg = sg_next(sg);
239 
240 	if (!sg)
241 		return;
242 
243 	sg->length += dma->remainder;
244 }
245 
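/*
 * Copy key/IV material into the transform state as 32-bit words.  The
 * little-endian variant is used for keys and IVs; the big-endian variant is
 * only used for the GHASH key in mtk_aes_gcm_setkey().
 */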
static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
247 {
248 	int i;
249 
250 	for (i = 0; i < SIZE_IN_WORDS(size); i++)
251 		dst[i] = cpu_to_le32(src[i]);
252 }
253 
static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
255 {
256 	int i;
257 
258 	for (i = 0; i < SIZE_IN_WORDS(size); i++)
259 		dst[i] = cpu_to_be32(src[i]);
260 }
261 
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
265 {
266 	aes->flags &= ~AES_FLAGS_BUSY;
267 	aes->areq->complete(aes->areq, err);
268 	/* Handle new request */
269 	tasklet_schedule(&aes->queue_task);
270 	return err;
271 }
272 
273 /*
274  * Write descriptors for processing. This will configure the engine, load
275  * the transform information and then start the packet processing.
276  */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
278 {
279 	struct mtk_ring *ring = cryp->ring[aes->id];
280 	struct mtk_desc *cmd = NULL, *res = NULL;
281 	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
282 	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
283 	int nents;
284 
285 	/* Write command descriptors */
286 	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
287 		cmd = ring->cmd_next;
288 		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
289 		cmd->buf = cpu_to_le32(sg_dma_address(ssg));
290 
291 		if (nents == 0) {
292 			cmd->hdr |= MTK_DESC_FIRST |
293 				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
294 			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
295 			cmd->ct_hdr = aes->ctx->ct_hdr;
296 			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
297 		}
298 
299 		/* Shift ring buffer and check boundary */
300 		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
301 			ring->cmd_next = ring->cmd_base;
302 	}
303 	cmd->hdr |= MTK_DESC_LAST;
304 
305 	/* Prepare result descriptors */
306 	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
307 		res = ring->res_next;
308 		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
309 		res->buf = cpu_to_le32(sg_dma_address(dsg));
310 
311 		if (nents == 0)
312 			res->hdr |= MTK_DESC_FIRST;
313 
314 		/* Shift ring buffer and check boundary */
315 		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
316 			ring->res_next = ring->res_base;
317 	}
318 	res->hdr |= MTK_DESC_LAST;
319 
320 	/* Pointer to current result descriptor */
321 	ring->res_prev = res;
322 
323 	/* Prepare enough space for authenticated tag */
324 	if (aes->flags & AES_FLAGS_GCM)
325 		res->hdr += AES_BLOCK_SIZE;
326 
327 	/*
328 	 * Make sure that all changes to the DMA ring are done before we
329 	 * start engine.
330 	 */
331 	wmb();
332 	/* Start DMA transfer */
333 	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
334 	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));
335 
336 	return -EINPROGRESS;
337 }
338 
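/*
 * Undo the DMA mappings of a completed request: unmap the transform info and
 * the source/destination scatterlists, restore any entry trimmed by
 * mtk_aes_check_aligned() and, if the bounce buffer was used for the
 * destination, copy the result back into the caller's scatterlist.
 */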
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
340 {
341 	struct mtk_aes_base_ctx *ctx = aes->ctx;
342 
343 	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
344 			 DMA_TO_DEVICE);
345 
346 	if (aes->src.sg == aes->dst.sg) {
347 		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
348 			     DMA_BIDIRECTIONAL);
349 
350 		if (aes->src.sg != &aes->aligned_sg)
351 			mtk_aes_restore_sg(&aes->src);
352 	} else {
353 		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
354 			     DMA_FROM_DEVICE);
355 
356 		if (aes->dst.sg != &aes->aligned_sg)
357 			mtk_aes_restore_sg(&aes->dst);
358 
359 		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
360 			     DMA_TO_DEVICE);
361 
362 		if (aes->src.sg != &aes->aligned_sg)
363 			mtk_aes_restore_sg(&aes->src);
364 	}
365 
366 	if (aes->dst.sg == &aes->aligned_sg)
367 		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
368 				    aes->buf, aes->total);
369 }
370 
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
372 {
373 	struct mtk_aes_base_ctx *ctx = aes->ctx;
374 	struct mtk_aes_info *info = &ctx->info;
375 
376 	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
377 				     DMA_TO_DEVICE);
378 	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
379 		goto exit;
380 
381 	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
382 
383 	if (aes->src.sg == aes->dst.sg) {
384 		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
385 					     aes->src.nents,
386 					     DMA_BIDIRECTIONAL);
387 		aes->dst.sg_len = aes->src.sg_len;
388 		if (unlikely(!aes->src.sg_len))
389 			goto sg_map_err;
390 	} else {
391 		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
392 					     aes->src.nents, DMA_TO_DEVICE);
393 		if (unlikely(!aes->src.sg_len))
394 			goto sg_map_err;
395 
396 		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
397 					     aes->dst.nents, DMA_FROM_DEVICE);
398 		if (unlikely(!aes->dst.sg_len)) {
399 			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
400 				     DMA_TO_DEVICE);
401 			goto sg_map_err;
402 		}
403 	}
404 
405 	return mtk_aes_xmit(cryp, aes);
406 
407 sg_map_err:
408 	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
409 exit:
410 	return mtk_aes_complete(cryp, aes, -EINVAL);
411 }
412 
413 /* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
416 {
417 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
418 	struct mtk_aes_base_ctx *ctx = aes->ctx;
419 	struct mtk_aes_info *info = &ctx->info;
420 	u32 cnt = 0;
421 
422 	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
423 	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
424 	info->cmd[cnt++] = AES_CMD1;
425 
426 	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
427 	if (aes->flags & AES_FLAGS_ENCRYPT)
428 		info->tfm[0] |= AES_TFM_BASIC_OUT;
429 	else
430 		info->tfm[0] |= AES_TFM_BASIC_IN;
431 
432 	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
433 	case AES_FLAGS_CBC:
434 		info->tfm[1] = AES_TFM_CBC;
435 		break;
436 	case AES_FLAGS_ECB:
437 		info->tfm[1] = AES_TFM_ECB;
438 		goto ecb;
439 	case AES_FLAGS_CTR:
440 		info->tfm[1] = AES_TFM_CTR_LOAD;
441 		goto ctr;
442 	case AES_FLAGS_OFB:
443 		info->tfm[1] = AES_TFM_OFB;
444 		break;
445 	case AES_FLAGS_CFB128:
446 		info->tfm[1] = AES_TFM_CFB128;
447 		break;
448 	default:
449 		/* Should not happen... */
450 		return;
451 	}
452 
453 	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
454 			       AES_BLOCK_SIZE);
455 ctr:
456 	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
457 	info->tfm[1] |= AES_TFM_FULL_IV;
458 	info->cmd[cnt++] = AES_CMD2;
459 ecb:
460 	ctx->ct_size = cnt;
461 }
462 
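/*
 * Set up the source/destination DMA lists for a request.  If either side is
 * not suitably aligned, the data is linearized into the record's bounce
 * buffer (padded up to a whole number of AES blocks) and a single-entry
 * scatterlist is used instead; mtk_aes_unmap() copies the result back later.
 */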
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
466 {
467 	size_t padlen = 0;
468 	bool src_aligned, dst_aligned;
469 
470 	aes->total = len;
471 	aes->src.sg = src;
472 	aes->dst.sg = dst;
473 	aes->real_dst = dst;
474 
475 	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
476 	if (src == dst)
477 		dst_aligned = src_aligned;
478 	else
479 		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
480 
481 	if (!src_aligned || !dst_aligned) {
482 		padlen = mtk_aes_padlen(len);
483 
484 		if (len + padlen > AES_BUF_SIZE)
485 			return mtk_aes_complete(cryp, aes, -ENOMEM);
486 
487 		if (!src_aligned) {
488 			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
489 			aes->src.sg = &aes->aligned_sg;
490 			aes->src.nents = 1;
491 			aes->src.remainder = 0;
492 		}
493 
494 		if (!dst_aligned) {
495 			aes->dst.sg = &aes->aligned_sg;
496 			aes->dst.nents = 1;
497 			aes->dst.remainder = 0;
498 		}
499 
500 		sg_init_table(&aes->aligned_sg, 1);
501 		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
502 	}
503 
504 	mtk_aes_info_init(cryp, aes, len + padlen);
505 
506 	return mtk_aes_map(cryp, aes);
507 }
508 
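/*
 * Enqueue a new request (if any) on the record's queue.  When the record is
 * idle, dequeue the next request, mark the record busy, notify a backlogged
 * request, copy the key into the transform state buffer and hand the request
 * to the mode-specific ->start() handler.
 */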
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
511 {
512 	struct mtk_aes_rec *aes = cryp->aes[id];
513 	struct crypto_async_request *areq, *backlog;
514 	struct mtk_aes_base_ctx *ctx;
515 	unsigned long flags;
516 	int ret = 0;
517 
518 	spin_lock_irqsave(&aes->lock, flags);
519 	if (new_areq)
520 		ret = crypto_enqueue_request(&aes->queue, new_areq);
521 	if (aes->flags & AES_FLAGS_BUSY) {
522 		spin_unlock_irqrestore(&aes->lock, flags);
523 		return ret;
524 	}
525 	backlog = crypto_get_backlog(&aes->queue);
526 	areq = crypto_dequeue_request(&aes->queue);
527 	if (areq)
528 		aes->flags |= AES_FLAGS_BUSY;
529 	spin_unlock_irqrestore(&aes->lock, flags);
530 
531 	if (!areq)
532 		return ret;
533 
534 	if (backlog)
535 		backlog->complete(backlog, -EINPROGRESS);
536 
537 	ctx = crypto_tfm_ctx(areq->tfm);
538 	/* Write key into state buffer */
539 	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));
540 
541 	aes->areq = areq;
542 	aes->ctx = ctx;
543 
544 	return ctx->start(cryp, aes);
545 }
546 
static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
549 {
550 	return mtk_aes_complete(cryp, aes, 0);
551 }
552 
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
554 {
555 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
556 	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
557 
558 	mtk_aes_set_mode(aes, rctx);
559 	aes->resume = mtk_aes_transfer_complete;
560 
561 	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
562 }
563 
static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
566 {
567 	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
568 }
569 
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
571 {
572 	struct mtk_aes_base_ctx *ctx = aes->ctx;
573 	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
574 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
575 	struct scatterlist *src, *dst;
576 	u32 start, end, ctr, blocks;
577 	size_t datalen;
578 	bool fragmented = false;
579 
580 	/* Check for transfer completion. */
581 	cctx->offset += aes->total;
582 	if (cctx->offset >= req->nbytes)
583 		return mtk_aes_transfer_complete(cryp, aes);
584 
585 	/* Compute data length. */
586 	datalen = req->nbytes - cctx->offset;
587 	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
588 	ctr = be32_to_cpu(cctx->iv[3]);
589 
590 	/* Check 32bit counter overflow. */
591 	start = ctr;
592 	end = start + blocks - 1;
593 	if (end < start) {
594 		ctr |= 0xffffffff;
595 		datalen = AES_BLOCK_SIZE * -start;
596 		fragmented = true;
597 	}
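	/*
	 * In the fragmented case only the blocks left before the 32-bit
	 * counter wraps are processed now; in unsigned 32-bit arithmetic
	 * -start equals 2^32 - start, i.e. the number of remaining counter
	 * values.
	 */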
598 
599 	/* Jump to offset. */
600 	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
601 	dst = ((req->src == req->dst) ? src :
602 	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
603 
604 	/* Write IVs into transform state buffer. */
605 	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
606 			       AES_BLOCK_SIZE);
607 
608 	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
613 		cctx->iv[3] = cpu_to_be32(ctr);
614 		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
615 	}
616 
617 	return mtk_aes_dma(cryp, aes, src, dst, datalen);
618 }
619 
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
621 {
622 	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
623 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
624 	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
625 
626 	mtk_aes_set_mode(aes, rctx);
627 
628 	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
629 	cctx->offset = 0;
630 	aes->total = 0;
631 	aes->resume = mtk_aes_ctr_transfer;
632 
633 	return mtk_aes_ctr_transfer(cryp, aes);
634 }
635 
/* Check and set the AES key for the transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
639 {
640 	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
641 
642 	switch (keylen) {
643 	case AES_KEYSIZE_128:
644 		ctx->keymode = AES_TFM_128BITS;
645 		break;
646 	case AES_KEYSIZE_192:
647 		ctx->keymode = AES_TFM_192BITS;
648 		break;
649 	case AES_KEYSIZE_256:
650 		ctx->keymode = AES_TFM_256BITS;
651 		break;
652 
653 	default:
654 		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
655 		return -EINVAL;
656 	}
657 
658 	ctx->keylen = SIZE_IN_WORDS(keylen);
659 	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
660 
661 	return 0;
662 }
663 
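/*
 * Common entry point for the ablkcipher operations: record the requested mode
 * and queue the request.  Encryption requests are dispatched to record/ring 0
 * and decryption requests to record/ring 1, so both directions can be
 * processed in parallel.
 */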
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
665 {
666 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
667 	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
668 	struct mtk_aes_reqctx *rctx;
669 	struct mtk_cryp *cryp;
670 
671 	cryp = mtk_aes_find_dev(ctx);
672 	if (!cryp)
673 		return -ENODEV;
674 
675 	rctx = ablkcipher_request_ctx(req);
676 	rctx->mode = mode;
677 
678 	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
679 				    &req->base);
680 }
681 
static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
683 {
684 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
685 }
686 
static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
688 {
689 	return mtk_aes_crypt(req, AES_FLAGS_ECB);
690 }
691 
static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
693 {
694 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
695 }
696 
static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
698 {
699 	return mtk_aes_crypt(req, AES_FLAGS_CBC);
700 }
701 
static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
703 {
704 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
705 }
706 
static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
708 {
709 	return mtk_aes_crypt(req, AES_FLAGS_CTR);
710 }
711 
static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req)
713 {
714 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
715 }
716 
static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req)
718 {
719 	return mtk_aes_crypt(req, AES_FLAGS_OFB);
720 }
721 
static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req)
723 {
724 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
725 }
726 
static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req)
728 {
729 	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
730 }
731 
static int mtk_aes_cra_init(struct crypto_tfm *tfm)
733 {
734 	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
735 
736 	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
737 	ctx->base.start = mtk_aes_start;
738 	return 0;
739 }
740 
static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
742 {
743 	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
744 
745 	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
746 	ctx->base.start = mtk_aes_ctr_start;
747 	return 0;
748 }
749 
750 static struct crypto_alg aes_algs[] = {
751 {
752 	.cra_name		= "cbc(aes)",
753 	.cra_driver_name	= "cbc-aes-mtk",
754 	.cra_priority		= 400,
755 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
756 				  CRYPTO_ALG_ASYNC,
757 	.cra_init		= mtk_aes_cra_init,
758 	.cra_blocksize		= AES_BLOCK_SIZE,
759 	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
760 	.cra_alignmask		= 0xf,
761 	.cra_type		= &crypto_ablkcipher_type,
762 	.cra_module		= THIS_MODULE,
763 	.cra_u.ablkcipher = {
764 		.min_keysize	= AES_MIN_KEY_SIZE,
765 		.max_keysize	= AES_MAX_KEY_SIZE,
766 		.setkey		= mtk_aes_setkey,
767 		.encrypt	= mtk_aes_cbc_encrypt,
768 		.decrypt	= mtk_aes_cbc_decrypt,
769 		.ivsize		= AES_BLOCK_SIZE,
770 	}
771 },
772 {
773 	.cra_name		= "ecb(aes)",
774 	.cra_driver_name	= "ecb-aes-mtk",
775 	.cra_priority		= 400,
776 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
777 				  CRYPTO_ALG_ASYNC,
778 	.cra_init		= mtk_aes_cra_init,
779 	.cra_blocksize		= AES_BLOCK_SIZE,
780 	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
781 	.cra_alignmask		= 0xf,
782 	.cra_type		= &crypto_ablkcipher_type,
783 	.cra_module		= THIS_MODULE,
784 	.cra_u.ablkcipher = {
785 		.min_keysize	= AES_MIN_KEY_SIZE,
786 		.max_keysize	= AES_MAX_KEY_SIZE,
787 		.setkey		= mtk_aes_setkey,
788 		.encrypt	= mtk_aes_ecb_encrypt,
789 		.decrypt	= mtk_aes_ecb_decrypt,
790 	}
791 },
792 {
793 	.cra_name		= "ctr(aes)",
794 	.cra_driver_name	= "ctr-aes-mtk",
795 	.cra_priority		= 400,
796 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
797 				  CRYPTO_ALG_ASYNC,
798 	.cra_init		= mtk_aes_ctr_cra_init,
799 	.cra_blocksize		= 1,
800 	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
801 	.cra_alignmask		= 0xf,
802 	.cra_type		= &crypto_ablkcipher_type,
803 	.cra_module		= THIS_MODULE,
804 	.cra_u.ablkcipher = {
805 		.min_keysize	= AES_MIN_KEY_SIZE,
806 		.max_keysize	= AES_MAX_KEY_SIZE,
807 		.ivsize		= AES_BLOCK_SIZE,
808 		.setkey		= mtk_aes_setkey,
809 		.encrypt	= mtk_aes_ctr_encrypt,
810 		.decrypt	= mtk_aes_ctr_decrypt,
811 	}
812 },
813 {
814 	.cra_name		= "ofb(aes)",
815 	.cra_driver_name	= "ofb-aes-mtk",
816 	.cra_priority		= 400,
817 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
818 				  CRYPTO_ALG_ASYNC,
819 	.cra_init		= mtk_aes_cra_init,
820 	.cra_blocksize		= 1,
821 	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
822 	.cra_alignmask		= 0xf,
823 	.cra_type		= &crypto_ablkcipher_type,
824 	.cra_module		= THIS_MODULE,
825 	.cra_u.ablkcipher = {
826 		.min_keysize	= AES_MIN_KEY_SIZE,
827 		.max_keysize	= AES_MAX_KEY_SIZE,
828 		.ivsize		= AES_BLOCK_SIZE,
829 		.setkey		= mtk_aes_setkey,
830 		.encrypt	= mtk_aes_ofb_encrypt,
831 		.decrypt	= mtk_aes_ofb_decrypt,
832 	}
833 },
834 {
835 	.cra_name		= "cfb(aes)",
836 	.cra_driver_name	= "cfb-aes-mtk",
837 	.cra_priority		= 400,
838 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
839 				  CRYPTO_ALG_ASYNC,
840 	.cra_init		= mtk_aes_cra_init,
841 	.cra_blocksize		= 1,
842 	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
843 	.cra_alignmask		= 0xf,
844 	.cra_type		= &crypto_ablkcipher_type,
845 	.cra_module		= THIS_MODULE,
846 	.cra_u.ablkcipher = {
847 		.min_keysize	= AES_MIN_KEY_SIZE,
848 		.max_keysize	= AES_MAX_KEY_SIZE,
849 		.ivsize		= AES_BLOCK_SIZE,
850 		.setkey		= mtk_aes_setkey,
851 		.encrypt	= mtk_aes_cfb_encrypt,
852 		.decrypt	= mtk_aes_cfb_decrypt,
853 	}
854 },
855 };
856 
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
859 {
860 	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
861 }
862 
/*
 * The engine verifies and compares the tag automatically, so we only need
 * to check the returned status stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
869 {
870 	u32 status = cryp->ring[aes->id]->res_prev->ct;
871 
872 	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
873 				-EBADMSG : 0);
874 }
875 
876 /* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
880 {
881 	struct aead_request *req = aead_request_cast(aes->areq);
882 	struct mtk_aes_base_ctx *ctx = aes->ctx;
883 	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
884 	struct mtk_aes_info *info = &ctx->info;
885 	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
886 	u32 cnt = 0;
887 
	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
889 
890 	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
891 	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
892 	info->cmd[cnt++] = AES_GCM_CMD2;
893 	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
894 
895 	if (aes->flags & AES_FLAGS_ENCRYPT) {
896 		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
897 		info->tfm[0] = AES_TFM_GCM_OUT;
898 	} else {
899 		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
900 		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
901 		info->tfm[0] = AES_TFM_GCM_IN;
902 	}
903 	ctx->ct_size = cnt;
904 
905 	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
906 			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
907 			ctx->keymode;
908 	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
909 		       AES_TFM_ENC_HASH;
910 
911 	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
912 			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
913 }
914 
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
918 {
919 	bool src_aligned, dst_aligned;
920 
921 	aes->src.sg = src;
922 	aes->dst.sg = dst;
923 	aes->real_dst = dst;
924 
925 	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
926 	if (src == dst)
927 		dst_aligned = src_aligned;
928 	else
929 		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
930 
931 	if (!src_aligned || !dst_aligned) {
932 		if (aes->total > AES_BUF_SIZE)
933 			return mtk_aes_complete(cryp, aes, -ENOMEM);
934 
935 		if (!src_aligned) {
936 			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
937 			aes->src.sg = &aes->aligned_sg;
938 			aes->src.nents = 1;
939 			aes->src.remainder = 0;
940 		}
941 
942 		if (!dst_aligned) {
943 			aes->dst.sg = &aes->aligned_sg;
944 			aes->dst.nents = 1;
945 			aes->dst.remainder = 0;
946 		}
947 
948 		sg_init_table(&aes->aligned_sg, 1);
949 		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
950 	}
951 
952 	mtk_aes_gcm_info_init(cryp, aes, len);
953 
954 	return mtk_aes_map(cryp, aes);
955 }
956 
957 /* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
959 {
960 	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
961 	struct aead_request *req = aead_request_cast(aes->areq);
962 	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
963 	u32 len = req->assoclen + req->cryptlen;
964 
965 	mtk_aes_set_mode(aes, rctx);
966 
967 	if (aes->flags & AES_FLAGS_ENCRYPT) {
968 		u32 tag[4];
969 
970 		aes->resume = mtk_aes_transfer_complete;
971 		/* Compute total process length. */
972 		aes->total = len + gctx->authsize;
		/* Hardware will append the authentication tag to the output buffer */
974 		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
975 	} else {
976 		aes->resume = mtk_aes_gcm_tag_verify;
977 		aes->total = len;
978 	}
979 
980 	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
981 }
982 
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
984 {
985 	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
986 	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
987 	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
988 	struct mtk_cryp *cryp;
989 	bool enc = !!(mode & AES_FLAGS_ENCRYPT);
990 
991 	cryp = mtk_aes_find_dev(ctx);
992 	if (!cryp)
993 		return -ENODEV;
994 
995 	/* Compute text length. */
996 	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
997 
998 	/* Empty messages are not supported yet */
999 	if (!gctx->textlen && !req->assoclen)
1000 		return -EINVAL;
1001 
1002 	rctx->mode = AES_FLAGS_GCM | mode;
1003 
1004 	return mtk_aes_handle_queue(cryp, enc, &req->base);
1005 }
1006 
/*
 * Because of a hardware limitation, we need to pre-calculate the hash key H
 * for the GHASH operation. The result of this encryption operation needs to
 * be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
1014 {
1015 	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
1016 	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
1017 	struct crypto_skcipher *ctr = gctx->ctr;
1018 	struct {
1019 		u32 hash[4];
1020 		u8 iv[8];
1021 
1022 		struct crypto_wait wait;
1023 
1024 		struct scatterlist sg[1];
1025 		struct skcipher_request req;
1026 	} *data;
1027 	int err;
1028 
1029 	switch (keylen) {
1030 	case AES_KEYSIZE_128:
1031 		ctx->keymode = AES_TFM_128BITS;
1032 		break;
1033 	case AES_KEYSIZE_192:
1034 		ctx->keymode = AES_TFM_192BITS;
1035 		break;
1036 	case AES_KEYSIZE_256:
1037 		ctx->keymode = AES_TFM_256BITS;
1038 		break;
1039 
1040 	default:
1041 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1042 		return -EINVAL;
1043 	}
1044 
1045 	ctx->keylen = SIZE_IN_WORDS(keylen);
1046 
1047 	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
1048 	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
1049 	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
1050 				  CRYPTO_TFM_REQ_MASK);
1051 	err = crypto_skcipher_setkey(ctr, key, keylen);
1052 	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
1053 			      CRYPTO_TFM_RES_MASK);
1054 	if (err)
1055 		return err;
1056 
1057 	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
1058 		       GFP_KERNEL);
1059 	if (!data)
1060 		return -ENOMEM;
1061 
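	/*
	 * data is zero-initialized, so encrypting the all-zero hash[] block
	 * in CTR mode with an all-zero counter yields the GHASH key
	 * H = E_K(0^128), which is stored after the key in big-endian form.
	 */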
1062 	crypto_init_wait(&data->wait);
1063 	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
1064 	skcipher_request_set_tfm(&data->req, ctr);
1065 	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
1066 				      CRYPTO_TFM_REQ_MAY_BACKLOG,
1067 				      crypto_req_done, &data->wait);
1068 	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
1069 				   AES_BLOCK_SIZE, data->iv);
1070 
1071 	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
1072 			      &data->wait);
1073 	if (err)
1074 		goto out;
1075 
1076 	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
1077 	mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
1078 			       AES_BLOCK_SIZE);
1079 out:
1080 	kzfree(data);
1081 	return err;
1082 }
1083 
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
1086 {
1087 	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
1088 	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
1089 
1090 	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
1091 	switch (authsize) {
1092 	case 8:
1093 	case 12:
1094 	case 16:
1095 		break;
1096 	default:
1097 		return -EINVAL;
1098 	}
1099 
1100 	gctx->authsize = authsize;
1101 	return 0;
1102 }
1103 
static int mtk_aes_gcm_encrypt(struct aead_request *req)
1105 {
1106 	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
1107 }
1108 
static int mtk_aes_gcm_decrypt(struct aead_request *req)
1110 {
1111 	return mtk_aes_gcm_crypt(req, 0);
1112 }
1113 
static int mtk_aes_gcm_init(struct crypto_aead *aead)
1115 {
1116 	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
1117 
1118 	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
1119 					 CRYPTO_ALG_ASYNC);
1120 	if (IS_ERR(ctx->ctr)) {
1121 		pr_err("Error allocating ctr(aes)\n");
1122 		return PTR_ERR(ctx->ctr);
1123 	}
1124 
1125 	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
1126 	ctx->base.start = mtk_aes_gcm_start;
1127 	return 0;
1128 }
1129 
static void mtk_aes_gcm_exit(struct crypto_aead *aead)
1131 {
1132 	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
1133 
1134 	crypto_free_skcipher(ctx->ctr);
1135 }
1136 
1137 static struct aead_alg aes_gcm_alg = {
1138 	.setkey		= mtk_aes_gcm_setkey,
1139 	.setauthsize	= mtk_aes_gcm_setauthsize,
1140 	.encrypt	= mtk_aes_gcm_encrypt,
1141 	.decrypt	= mtk_aes_gcm_decrypt,
1142 	.init		= mtk_aes_gcm_init,
1143 	.exit		= mtk_aes_gcm_exit,
1144 	.ivsize		= GCM_AES_IV_SIZE,
1145 	.maxauthsize	= AES_BLOCK_SIZE,
1146 
1147 	.base = {
1148 		.cra_name		= "gcm(aes)",
1149 		.cra_driver_name	= "gcm-aes-mtk",
1150 		.cra_priority		= 400,
1151 		.cra_flags		= CRYPTO_ALG_ASYNC,
1152 		.cra_blocksize		= 1,
1153 		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
1154 		.cra_alignmask		= 0xf,
1155 		.cra_module		= THIS_MODULE,
1156 	},
1157 };
1158 
static void mtk_aes_queue_task(unsigned long data)
1160 {
1161 	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
1162 
1163 	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
1164 }
1165 
static void mtk_aes_done_task(unsigned long data)
1167 {
1168 	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
1169 	struct mtk_cryp *cryp = aes->cryp;
1170 
1171 	mtk_aes_unmap(cryp, aes);
1172 	aes->resume(cryp, aes);
1173 }
1174 
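/*
 * Per-ring interrupt handler: acknowledge the result-ring status, clear the
 * processed-descriptor count, re-arm the result-ring threshold and defer the
 * actual completion work to the done tasklet.
 */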
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
1176 {
1177 	struct mtk_aes_rec *aes  = (struct mtk_aes_rec *)dev_id;
1178 	struct mtk_cryp *cryp = aes->cryp;
1179 	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
1180 
1181 	mtk_aes_write(cryp, RDR_STAT(aes->id), val);
1182 
1183 	if (likely(AES_FLAGS_BUSY & aes->flags)) {
1184 		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
1185 		mtk_aes_write(cryp, RDR_THRESH(aes->id),
1186 			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
1187 
1188 		tasklet_schedule(&aes->done_task);
1189 	} else {
1190 		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
1191 	}
1192 	return IRQ_HANDLED;
1193 }
1194 
/*
 * The purpose of creating encryption and decryption records is
 * to process outbound/inbound data in parallel, which improves
 * performance in most use cases, such as IPsec VPN, especially
 * under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
1202 {
1203 	struct mtk_aes_rec **aes = cryp->aes;
1204 	int i, err = -ENOMEM;
1205 
1206 	for (i = 0; i < MTK_REC_NUM; i++) {
1207 		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
1208 		if (!aes[i])
1209 			goto err_cleanup;
1210 
1211 		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
1212 						AES_BUF_ORDER);
1213 		if (!aes[i]->buf)
1214 			goto err_cleanup;
1215 
1216 		aes[i]->cryp = cryp;
1217 
1218 		spin_lock_init(&aes[i]->lock);
1219 		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
1220 
1221 		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
1222 			     (unsigned long)aes[i]);
1223 		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
1224 			     (unsigned long)aes[i]);
1225 	}
1226 
1227 	/* Link to ring0 and ring1 respectively */
1228 	aes[0]->id = MTK_RING0;
1229 	aes[1]->id = MTK_RING1;
1230 
1231 	return 0;
1232 
1233 err_cleanup:
1234 	for (; i--; ) {
1235 		free_page((unsigned long)aes[i]->buf);
1236 		kfree(aes[i]);
1237 	}
1238 
1239 	return err;
1240 }
1241 
static void mtk_aes_record_free(struct mtk_cryp *cryp)
1243 {
1244 	int i;
1245 
1246 	for (i = 0; i < MTK_REC_NUM; i++) {
1247 		tasklet_kill(&cryp->aes[i]->done_task);
1248 		tasklet_kill(&cryp->aes[i]->queue_task);
1249 
1250 		free_page((unsigned long)cryp->aes[i]->buf);
1251 		kfree(cryp->aes[i]);
1252 	}
1253 }
1254 
static void mtk_aes_unregister_algs(void)
1256 {
1257 	int i;
1258 
1259 	crypto_unregister_aead(&aes_gcm_alg);
1260 
1261 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1262 		crypto_unregister_alg(&aes_algs[i]);
1263 }
1264 
static int mtk_aes_register_algs(void)
1266 {
1267 	int err, i;
1268 
1269 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1270 		err = crypto_register_alg(&aes_algs[i]);
1271 		if (err)
1272 			goto err_aes_algs;
1273 	}
1274 
1275 	err = crypto_register_aead(&aes_gcm_alg);
1276 	if (err)
1277 		goto err_aes_algs;
1278 
1279 	return 0;
1280 
1281 err_aes_algs:
1282 	for (; i--; )
1283 		crypto_unregister_alg(&aes_algs[i]);
1284 
1285 	return err;
1286 }
1287 
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
1289 {
1290 	int ret;
1291 
1292 	INIT_LIST_HEAD(&cryp->aes_list);
1293 
1294 	/* Initialize two cipher records */
1295 	ret = mtk_aes_record_init(cryp);
1296 	if (ret)
1297 		goto err_record;
1298 
1299 	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
1300 			       0, "mtk-aes", cryp->aes[0]);
1301 	if (ret) {
1302 		dev_err(cryp->dev, "unable to request AES irq.\n");
1303 		goto err_res;
1304 	}
1305 
1306 	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
1307 			       0, "mtk-aes", cryp->aes[1]);
1308 	if (ret) {
1309 		dev_err(cryp->dev, "unable to request AES irq.\n");
1310 		goto err_res;
1311 	}
1312 
1313 	/* Enable ring0 and ring1 interrupt */
1314 	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
1315 	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
1316 
1317 	spin_lock(&mtk_aes.lock);
1318 	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
1319 	spin_unlock(&mtk_aes.lock);
1320 
1321 	ret = mtk_aes_register_algs();
1322 	if (ret)
1323 		goto err_algs;
1324 
1325 	return 0;
1326 
1327 err_algs:
1328 	spin_lock(&mtk_aes.lock);
1329 	list_del(&cryp->aes_list);
1330 	spin_unlock(&mtk_aes.lock);
1331 err_res:
1332 	mtk_aes_record_free(cryp);
err_record:
1335 	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
1336 	return ret;
1337 }
1338 
void mtk_cipher_alg_release(struct mtk_cryp *cryp)
1340 {
1341 	spin_lock(&mtk_aes.lock);
1342 	list_del(&cryp->aes_list);
1343 	spin_unlock(&mtk_aes.lock);
1344 
1345 	mtk_aes_unregister_algs();
1346 	mtk_aes_record_free(cryp);
1347 }
1348