1 /*
2  * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
17  */
18 #include <crypto/internal/aead.h>
19 #include <crypto/aes.h>
20 #include <crypto/algapi.h>
21 #include <crypto/authenc.h>
22 #include <crypto/des.h>
23 #include <crypto/md5.h>
24 #include <crypto/sha.h>
25 #include <crypto/internal/skcipher.h>
26 #include <linux/clk.h>
27 #include <linux/crypto.h>
28 #include <linux/delay.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/dmapool.h>
31 #include <linux/err.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/io.h>
35 #include <linux/list.h>
36 #include <linux/module.h>
37 #include <linux/of.h>
38 #include <linux/platform_device.h>
39 #include <linux/pm.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/scatterlist.h>
42 #include <linux/sched.h>
43 #include <linux/sizes.h>
44 #include <linux/slab.h>
45 #include <linux/timer.h>
46 
47 #include "picoxcell_crypto_regs.h"
48 
49 /*
50  * The threshold for the number of entries in the CMD FIFO available before
51  * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
52  * number of interrupts raised to the CPU.
53  */
54 #define CMD0_IRQ_THRESHOLD   1
55 
56 /*
57  * The timeout period (in jiffies) for a PDU. When the number of PDUs in
58  * flight is greater than the STAT_IRQ_THRESHOLD or is zero, the timer is disabled.
59  * When there are packets in flight but lower than the threshold, we enable
60  * the timer and at expiry, attempt to remove any processed packets from the
61  * queue and if there are still packets left, schedule the timer again.
62  */
63 #define PACKET_TIMEOUT	    1
64 
65 /* The priority to register each algorithm with. */
66 #define SPACC_CRYPTO_ALG_PRIORITY	10000
67 
68 #define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
69 #define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
70 #define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
71 #define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
72 #define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
73 #define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
74 #define SPACC_CRYPTO_L2_HASH_PG_SZ	64
75 #define SPACC_CRYPTO_L2_MAX_CTXS	128
76 #define SPACC_CRYPTO_L2_FIFO_SZ		128
77 
78 #define MAX_DDT_LEN			16
79 
80 /* DDT format. This must match the hardware DDT format exactly. */
81 struct spacc_ddt {
82 	dma_addr_t	p;
83 	u32		len;
84 };
85 
86 /*
87  * Asynchronous crypto request structure.
88  *
89  * This structure defines a request that is either queued for processing or
90  * being processed.
91  */
92 struct spacc_req {
93 	struct list_head		list;
94 	struct spacc_engine		*engine;
95 	struct crypto_async_request	*req;
96 	int				result;
97 	bool				is_encrypt;
98 	unsigned			ctx_id;
99 	dma_addr_t			src_addr, dst_addr;
100 	struct spacc_ddt		*src_ddt, *dst_ddt;
101 	void				(*complete)(struct spacc_req *req);
102 };
103 
104 struct spacc_aead {
105 	unsigned long			ctrl_default;
106 	unsigned long			type;
107 	struct aead_alg			alg;
108 	struct spacc_engine		*engine;
109 	struct list_head		entry;
110 	int				key_offs;
111 	int				iv_offs;
112 };
113 
114 struct spacc_engine {
115 	void __iomem			*regs;
116 	struct list_head		pending;
117 	int				next_ctx;
118 	spinlock_t			hw_lock;
119 	int				in_flight;
120 	struct list_head		completed;
121 	struct list_head		in_progress;
122 	struct tasklet_struct		complete;
123 	unsigned long			fifo_sz;
124 	void __iomem			*cipher_ctx_base;
125 	void __iomem			*hash_key_base;
126 	struct spacc_alg		*algs;
127 	unsigned			num_algs;
128 	struct list_head		registered_algs;
129 	struct spacc_aead		*aeads;
130 	unsigned			num_aeads;
131 	struct list_head		registered_aeads;
132 	size_t				cipher_pg_sz;
133 	size_t				hash_pg_sz;
134 	const char			*name;
135 	struct clk			*clk;
136 	struct device			*dev;
137 	unsigned			max_ctxs;
138 	struct timer_list		packet_timeout;
139 	unsigned			stat_irq_thresh;
140 	struct dma_pool			*req_pool;
141 };
142 
143 /* Algorithm type mask. */
144 #define SPACC_CRYPTO_ALG_MASK		0x7
145 
146 /* SPACC definition of a crypto algorithm. */
147 struct spacc_alg {
148 	unsigned long			ctrl_default;
149 	unsigned long			type;
150 	struct crypto_alg		alg;
151 	struct spacc_engine		*engine;
152 	struct list_head		entry;
153 	int				key_offs;
154 	int				iv_offs;
155 };
156 
157 /* Generic context structure for any algorithm type. */
158 struct spacc_generic_ctx {
159 	struct spacc_engine		*engine;
160 	int				flags;
161 	int				key_offs;
162 	int				iv_offs;
163 };
164 
165 /* Block cipher context. */
166 struct spacc_ablk_ctx {
167 	struct spacc_generic_ctx	generic;
168 	u8				key[AES_MAX_KEY_SIZE];
169 	u8				key_len;
170 	/*
171 	 * The fallback cipher. If the operation can't be done in hardware,
172 	 * fallback to a software version.
173 	 */
174 	struct crypto_skcipher		*sw_cipher;
175 };
176 
177 /* AEAD cipher context. */
178 struct spacc_aead_ctx {
179 	struct spacc_generic_ctx	generic;
180 	u8				cipher_key[AES_MAX_KEY_SIZE];
181 	u8				hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
182 	u8				cipher_key_len;
183 	u8				hash_key_len;
184 	struct crypto_aead		*sw_cipher;
185 };
186 
187 static int spacc_ablk_submit(struct spacc_req *req);
188 
189 static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
190 {
191 	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
192 }
193 
194 static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
195 {
196 	return container_of(alg, struct spacc_aead, alg);
197 }
198 
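/* Return non-zero if the command FIFO cannot accept another entry. */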
199 static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
200 {
201 	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
202 
203 	return fifo_stat & SPA_FIFO_CMD_FULL;
204 }
205 
206 /*
207  * Given a cipher context, and a context number, get the base address of the
208  * context page.
209  *
210  * Returns the address of the context page where the key/context may
211  * be written.
212  */
213 static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
214 						unsigned indx,
215 						bool is_cipher_ctx)
216 {
217 	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
218 			(indx * ctx->engine->cipher_pg_sz) :
219 		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
220 }
221 
222 /* The context pages can only be written with 32-bit accesses. */
223 static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
224 				 unsigned count)
225 {
226 	const u32 *src32 = (const u32 *) src;
227 
228 	while (count--)
229 		writel(*src32++, dst++);
230 }
231 
232 static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
233 				   void __iomem *page_addr, const u8 *key,
234 				   size_t key_len, const u8 *iv, size_t iv_len)
235 {
236 	void __iomem *key_ptr = page_addr + ctx->key_offs;
237 	void __iomem *iv_ptr = page_addr + ctx->iv_offs;
238 
239 	memcpy_toio32(key_ptr, key, key_len / 4);
240 	memcpy_toio32(iv_ptr, iv, iv_len / 4);
241 }
242 
243 /*
244  * Load a context into the engine's context memory.
245  *
246  * Returns the index of the context page where the context was loaded.
247  */
248 static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
249 			       const u8 *ciph_key, size_t ciph_len,
250 			       const u8 *iv, size_t ivlen, const u8 *hash_key,
251 			       size_t hash_len)
252 {
253 	unsigned indx = ctx->engine->next_ctx++;
254 	void __iomem *ciph_page_addr, *hash_page_addr;
255 
256 	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
257 	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
258 
259 	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
260 	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
261 			       ivlen);
262 	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
263 	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
264 	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
265 
266 	if (hash_key) {
267 		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
268 		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
269 		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
270 	}
271 
272 	return indx;
273 }
274 
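/* Fill in a single DDT entry with the DMA address and length of a buffer. */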
275 static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
276 {
277 	ddt->p = phys;
278 	ddt->len = len;
279 }
280 
281 /*
282  * Take a crypto request and scatterlists for the data and turn them into DDTs
283  * for passing to the crypto engines. This also DMA maps the data so that the
284  * crypto engines can DMA to/from them.
285  */
286 static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
287 					 struct scatterlist *payload,
288 					 unsigned nbytes,
289 					 enum dma_data_direction dir,
290 					 dma_addr_t *ddt_phys)
291 {
292 	unsigned mapped_ents;
293 	struct scatterlist *cur;
294 	struct spacc_ddt *ddt;
295 	int i;
296 	int nents;
297 
298 	nents = sg_nents_for_len(payload, nbytes);
299 	if (nents < 0) {
300 		dev_err(engine->dev, "Invalid number of SG.\n");
301 		return NULL;
302 	}
303 	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
304 
305 	if (mapped_ents + 1 > MAX_DDT_LEN)
306 		goto out;
307 
308 	ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
309 	if (!ddt)
310 		goto out;
311 
312 	for_each_sg(payload, cur, mapped_ents, i)
313 		ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
314 	ddt_set(&ddt[mapped_ents], 0, 0);
315 
316 	return ddt;
317 
318 out:
319 	dma_unmap_sg(engine->dev, payload, nents, dir);
320 	return NULL;
321 }
322 
323 static int spacc_aead_make_ddts(struct aead_request *areq)
324 {
325 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
326 	struct spacc_req *req = aead_request_ctx(areq);
327 	struct spacc_engine *engine = req->engine;
328 	struct spacc_ddt *src_ddt, *dst_ddt;
329 	unsigned total;
330 	int src_nents, dst_nents;
331 	struct scatterlist *cur;
332 	int i, dst_ents, src_ents;
333 
334 	total = areq->assoclen + areq->cryptlen;
335 	if (req->is_encrypt)
336 		total += crypto_aead_authsize(aead);
337 
338 	src_nents = sg_nents_for_len(areq->src, total);
339 	if (src_nents < 0) {
340 		dev_err(engine->dev, "Invalid number of src SG.\n");
341 		return src_nents;
342 	}
343 	if (src_nents + 1 > MAX_DDT_LEN)
344 		return -E2BIG;
345 
346 	dst_nents = 0;
347 	if (areq->src != areq->dst) {
348 		dst_nents = sg_nents_for_len(areq->dst, total);
349 		if (dst_nents < 0) {
350 			dev_err(engine->dev, "Invalid number of dst SG.\n");
351 			return dst_nents;
352 		}
353 		if (dst_nents + 1 > MAX_DDT_LEN)
354 			return -E2BIG;
355 	}
356 
357 	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
358 	if (!src_ddt)
359 		goto err;
360 
361 	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
362 	if (!dst_ddt)
363 		goto err_free_src;
364 
365 	req->src_ddt = src_ddt;
366 	req->dst_ddt = dst_ddt;
367 
368 	if (dst_nents) {
369 		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
370 				      DMA_TO_DEVICE);
371 		if (!src_ents)
372 			goto err_free_dst;
373 
374 		dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
375 				      DMA_FROM_DEVICE);
376 
377 		if (!dst_ents) {
378 			dma_unmap_sg(engine->dev, areq->src, src_nents,
379 				     DMA_TO_DEVICE);
380 			goto err_free_dst;
381 		}
382 	} else {
383 		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
384 				      DMA_BIDIRECTIONAL);
385 		if (!src_ents)
386 			goto err_free_dst;
387 		dst_ents = src_ents;
388 	}
389 
390 	/*
391 	 * Now fill in the DDT entries for the source and destination and
392 	 * terminate each list with a NULL entry.
393 	 */
394 	for_each_sg(areq->src, cur, src_ents, i)
395 		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
396 
397 	/* For decryption we need to skip the associated data. */
398 	total = req->is_encrypt ? 0 : areq->assoclen;
399 	for_each_sg(areq->dst, cur, dst_ents, i) {
400 		unsigned len = sg_dma_len(cur);
401 
402 		if (len <= total) {
403 			total -= len;
404 			continue;
405 		}
406 
407 		ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
408 	}
409 
410 	ddt_set(src_ddt, 0, 0);
411 	ddt_set(dst_ddt, 0, 0);
412 
413 	return 0;
414 
415 err_free_dst:
416 	dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
417 err_free_src:
418 	dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
419 err:
420 	return -ENOMEM;
421 }
422 
423 static void spacc_aead_free_ddts(struct spacc_req *req)
424 {
425 	struct aead_request *areq = container_of(req->req, struct aead_request,
426 						 base);
427 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
428 	unsigned total = areq->assoclen + areq->cryptlen +
429 			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
430 	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
431 	struct spacc_engine *engine = aead_ctx->generic.engine;
432 	int nents = sg_nents_for_len(areq->src, total);
433 
434 	/* sg_nents_for_len should not fail since it worked when the sg was mapped */
435 	if (unlikely(nents < 0)) {
436 		dev_err(engine->dev, "Invalid number of src SG.\n");
437 		return;
438 	}
439 
440 	if (areq->src != areq->dst) {
441 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
442 		nents = sg_nents_for_len(areq->dst, total);
443 		if (unlikely(nents < 0)) {
444 			dev_err(engine->dev, "Invalid number of dst SG.\n");
445 			return;
446 		}
447 		dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
448 	} else
449 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
450 
451 	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
452 	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
453 }
454 
455 static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
456 			   dma_addr_t ddt_addr, struct scatterlist *payload,
457 			   unsigned nbytes, enum dma_data_direction dir)
458 {
459 	int nents = sg_nents_for_len(payload, nbytes);
460 
461 	if (nents < 0) {
462 		dev_err(req->engine->dev, "Invalid number of SG.\n");
463 		return;
464 	}
465 
466 	dma_unmap_sg(req->engine->dev, payload, nents, dir);
467 	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
468 }
469 
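/*
 * Set the key for an AEAD transform. The key is an authenc() blob containing
 * both the cipher and hash keys, so key the software fallback first, then
 * split the blob and stash both halves for loading into the context pages
 * when the request is submitted.
 */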
470 static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
471 			     unsigned int keylen)
472 {
473 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
474 	struct crypto_authenc_keys keys;
475 	int err;
476 
477 	crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
478 	crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
479 					      CRYPTO_TFM_REQ_MASK);
480 	err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
481 	crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
482 	crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
483 				   CRYPTO_TFM_RES_MASK);
484 	if (err)
485 		return err;
486 
487 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
488 		goto badkey;
489 
490 	if (keys.enckeylen > AES_MAX_KEY_SIZE)
491 		goto badkey;
492 
493 	if (keys.authkeylen > sizeof(ctx->hash_ctx))
494 		goto badkey;
495 
496 	memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
497 	ctx->cipher_key_len = keys.enckeylen;
498 
499 	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
500 	ctx->hash_key_len = keys.authkeylen;
501 
502 	memzero_explicit(&keys, sizeof(keys));
503 	return 0;
504 
505 badkey:
506 	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
507 	memzero_explicit(&keys, sizeof(keys));
508 	return -EINVAL;
509 }
510 
511 static int spacc_aead_setauthsize(struct crypto_aead *tfm,
512 				  unsigned int authsize)
513 {
514 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
515 
516 	return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
517 }
518 
519 /*
520  * Check if an AEAD request requires a fallback operation. Some requests can't
521  * be completed in hardware because the hardware may not support certain key
522  * sizes. In these cases we need to complete the request in software.
523  */
524 static int spacc_aead_need_fallback(struct aead_request *aead_req)
525 {
526 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
527 	struct aead_alg *alg = crypto_aead_alg(aead);
528 	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
529 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
530 
531 	/*
532 	 * If we have a non-supported key-length, then we need to do a
533 	 * software fallback.
534 	 */
535 	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
536 	    SPA_CTRL_CIPH_ALG_AES &&
537 	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
538 	    ctx->cipher_key_len != AES_KEYSIZE_256)
539 		return 1;
540 
541 	return 0;
542 }
543 
544 static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
545 				  bool is_encrypt)
546 {
547 	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
548 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
549 	struct aead_request *subreq = aead_request_ctx(req);
550 
551 	aead_request_set_tfm(subreq, ctx->sw_cipher);
552 	aead_request_set_callback(subreq, req->base.flags,
553 				  req->base.complete, req->base.data);
554 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
555 			       req->iv);
556 	aead_request_set_ad(subreq, req->assoclen);
557 
558 	return is_encrypt ? crypto_aead_encrypt(subreq) :
559 			    crypto_aead_decrypt(subreq);
560 }
561 
562 static void spacc_aead_complete(struct spacc_req *req)
563 {
564 	spacc_aead_free_ddts(req);
565 	req->req->complete(req->req, req->result);
566 }
567 
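/*
 * Submit an AEAD request to the engine: load the keys and IV into a context
 * page, program the source/destination DDT pointers and the processing, AAD
 * and ICV lengths, then write the control register to start processing.
 * Returns -EINPROGRESS.
 */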
568 static int spacc_aead_submit(struct spacc_req *req)
569 {
570 	struct aead_request *aead_req =
571 		container_of(req->req, struct aead_request, base);
572 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
573 	unsigned int authsize = crypto_aead_authsize(aead);
574 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
575 	struct aead_alg *alg = crypto_aead_alg(aead);
576 	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
577 	struct spacc_engine *engine = ctx->generic.engine;
578 	u32 ctrl, proc_len, assoc_len;
579 
580 	req->result = -EINPROGRESS;
581 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
582 		ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
583 		ctx->hash_ctx, ctx->hash_key_len);
584 
585 	/* Set the source and destination DDT pointers. */
586 	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
587 	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
588 	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
589 
590 	assoc_len = aead_req->assoclen;
591 	proc_len = aead_req->cryptlen + assoc_len;
592 
593 	/*
594 	 * If we are decrypting, we need to take the length of the ICV out of
595 	 * the processing length.
596 	 */
597 	if (!req->is_encrypt)
598 		proc_len -= authsize;
599 
600 	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
601 	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
602 	writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
603 	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
604 	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
605 
606 	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
607 		(1 << SPA_CTRL_ICV_APPEND);
608 	if (req->is_encrypt)
609 		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
610 	else
611 		ctrl |= (1 << SPA_CTRL_KEY_EXP);
612 
613 	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
614 
615 	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
616 
617 	return -EINPROGRESS;
618 }
619 
620 static int spacc_req_submit(struct spacc_req *req);
621 
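/*
 * Move requests from the pending list into the engine for as long as there
 * is room in the command FIFO (tracked via in_flight). The caller must hold
 * the engine's hw_lock.
 */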
622 static void spacc_push(struct spacc_engine *engine)
623 {
624 	struct spacc_req *req;
625 
626 	while (!list_empty(&engine->pending) &&
627 	       engine->in_flight + 1 <= engine->fifo_sz) {
628 
629 		++engine->in_flight;
630 		req = list_first_entry(&engine->pending, struct spacc_req,
631 				       list);
632 		list_move_tail(&req->list, &engine->in_progress);
633 
634 		req->result = spacc_req_submit(req);
635 	}
636 }
637 
638 /*
639  * Set up an AEAD request for processing. This will configure the engine, load
640  * the context and then start the packet processing.
641  */
642 static int spacc_aead_setup(struct aead_request *req,
643 			    unsigned alg_type, bool is_encrypt)
644 {
645 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
646 	struct aead_alg *alg = crypto_aead_alg(aead);
647 	struct spacc_engine *engine = to_spacc_aead(alg)->engine;
648 	struct spacc_req *dev_req = aead_request_ctx(req);
649 	int err;
650 	unsigned long flags;
651 
652 	dev_req->req		= &req->base;
653 	dev_req->is_encrypt	= is_encrypt;
654 	dev_req->result		= -EBUSY;
655 	dev_req->engine		= engine;
656 	dev_req->complete	= spacc_aead_complete;
657 
658 	if (unlikely(spacc_aead_need_fallback(req) ||
659 		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
660 		return spacc_aead_do_fallback(req, alg_type, is_encrypt);
661 
662 	if (err)
663 		goto out;
664 
665 	err = -EINPROGRESS;
666 	spin_lock_irqsave(&engine->hw_lock, flags);
667 	if (unlikely(spacc_fifo_cmd_full(engine)) ||
668 	    engine->in_flight + 1 > engine->fifo_sz) {
669 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
670 			err = -EBUSY;
671 			spin_unlock_irqrestore(&engine->hw_lock, flags);
672 			goto out_free_ddts;
673 		}
674 		list_add_tail(&dev_req->list, &engine->pending);
675 	} else {
676 		list_add_tail(&dev_req->list, &engine->pending);
677 		spacc_push(engine);
678 	}
679 	spin_unlock_irqrestore(&engine->hw_lock, flags);
680 
681 	goto out;
682 
683 out_free_ddts:
684 	spacc_aead_free_ddts(dev_req);
685 out:
686 	return err;
687 }
688 
689 static int spacc_aead_encrypt(struct aead_request *req)
690 {
691 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
692 	struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
693 
694 	return spacc_aead_setup(req, alg->type, 1);
695 }
696 
697 static int spacc_aead_decrypt(struct aead_request *req)
698 {
699 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
700 	struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));
701 
702 	return spacc_aead_setup(req, alg->type, 0);
703 }
704 
705 /*
706  * Initialise a new AEAD context. This is responsible for allocating the
707  * fallback cipher and initialising the context.
708  */
709 static int spacc_aead_cra_init(struct crypto_aead *tfm)
710 {
711 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
712 	struct aead_alg *alg = crypto_aead_alg(tfm);
713 	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
714 	struct spacc_engine *engine = spacc_alg->engine;
715 
716 	ctx->generic.flags = spacc_alg->type;
717 	ctx->generic.engine = engine;
718 	ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
719 					   CRYPTO_ALG_NEED_FALLBACK);
720 	if (IS_ERR(ctx->sw_cipher))
721 		return PTR_ERR(ctx->sw_cipher);
722 	ctx->generic.key_offs = spacc_alg->key_offs;
723 	ctx->generic.iv_offs = spacc_alg->iv_offs;
724 
725 	crypto_aead_set_reqsize(
726 		tfm,
727 		max(sizeof(struct spacc_req),
728 		    sizeof(struct aead_request) +
729 		    crypto_aead_reqsize(ctx->sw_cipher)));
730 
731 	return 0;
732 }
733 
734 /*
735  * Destructor for an AEAD context. This is called when the transform is freed
736  * and must free the fallback cipher.
737  */
738 static void spacc_aead_cra_exit(struct crypto_aead *tfm)
739 {
740 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
741 
742 	crypto_free_aead(ctx->sw_cipher);
743 }
744 
745 /*
746  * Set the DES key for a block cipher transform. This also performs weak key
747  * checking if the transform has requested it.
748  */
749 static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
750 			    unsigned int len)
751 {
752 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
753 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
754 	u32 tmp[DES_EXPKEY_WORDS];
755 
756 	if (len > DES3_EDE_KEY_SIZE) {
757 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
758 		return -EINVAL;
759 	}
760 
761 	if (unlikely(!des_ekey(tmp, key)) &&
762 	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
763 		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
764 		return -EINVAL;
765 	}
766 
767 	memcpy(ctx->key, key, len);
768 	ctx->key_len = len;
769 
770 	return 0;
771 }
772 
773 /*
774  * Set the key for an AES block cipher. Some key lengths are not supported in
775  * hardware so this must also check whether a fallback is needed.
776  */
777 static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
778 			    unsigned int len)
779 {
780 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
781 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
782 	int err = 0;
783 
784 	if (len > AES_MAX_KEY_SIZE) {
785 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
786 		return -EINVAL;
787 	}
788 
789 	/*
790 	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
791 	 * request for any other size (192 bits) then we need to do a software
792 	 * fallback.
793 	 */
794 	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
795 		if (!ctx->sw_cipher)
796 			return -EINVAL;
797 
798 		/*
799 		 * Set the fallback transform to use the same request flags as
800 		 * the hardware transform.
801 		 */
802 		crypto_skcipher_clear_flags(ctx->sw_cipher,
803 					    CRYPTO_TFM_REQ_MASK);
804 		crypto_skcipher_set_flags(ctx->sw_cipher,
805 					  cipher->base.crt_flags &
806 					  CRYPTO_TFM_REQ_MASK);
807 
808 		err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
809 
810 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
811 		tfm->crt_flags |=
812 			crypto_skcipher_get_flags(ctx->sw_cipher) &
813 			CRYPTO_TFM_RES_MASK;
814 
815 		if (err)
816 			goto sw_setkey_failed;
817 	}
818 
819 	memcpy(ctx->key, key, len);
820 	ctx->key_len = len;
821 
822 sw_setkey_failed:
823 	return err;
824 }
825 
826 static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
827 				  const u8 *key, unsigned int len)
828 {
829 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
830 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
831 	int err = 0;
832 
833 	if (len > AES_MAX_KEY_SIZE) {
834 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
835 		err = -EINVAL;
836 		goto out;
837 	}
838 
839 	memcpy(ctx->key, key, len);
840 	ctx->key_len = len;
841 
842 out:
843 	return err;
844 }
845 
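/*
 * Return non-zero if a block cipher request must be handled by the software
 * fallback, i.e. AES with a key length the engine does not support.
 */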
846 static int spacc_ablk_need_fallback(struct spacc_req *req)
847 {
848 	struct spacc_ablk_ctx *ctx;
849 	struct crypto_tfm *tfm = req->req->tfm;
850 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
851 	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
852 
853 	ctx = crypto_tfm_ctx(tfm);
854 
855 	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
856 			SPA_CTRL_CIPH_ALG_AES &&
857 			ctx->key_len != AES_KEYSIZE_128 &&
858 			ctx->key_len != AES_KEYSIZE_256;
859 }
860 
861 static void spacc_ablk_complete(struct spacc_req *req)
862 {
863 	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
864 
865 	if (ablk_req->src != ablk_req->dst) {
866 		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
867 			       ablk_req->nbytes, DMA_TO_DEVICE);
868 		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
869 			       ablk_req->nbytes, DMA_FROM_DEVICE);
870 	} else
871 		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
872 			       ablk_req->nbytes, DMA_BIDIRECTIONAL);
873 
874 	req->req->complete(req->req, req->result);
875 }
876 
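/*
 * Submit a block cipher request to the engine: load the key and IV into a
 * context page, program the DDT pointers and processing length, then write
 * the control register to start the operation. Returns -EINPROGRESS.
 */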
877 static int spacc_ablk_submit(struct spacc_req *req)
878 {
879 	struct crypto_tfm *tfm = req->req->tfm;
880 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
881 	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
882 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
883 	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
884 	struct spacc_engine *engine = ctx->generic.engine;
885 	u32 ctrl;
886 
887 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
888 		ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
889 		NULL, 0);
890 
891 	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
892 	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
893 	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
894 
895 	writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
896 	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
897 	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
898 	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
899 
900 	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
901 		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
902 		 (1 << SPA_CTRL_KEY_EXP));
903 
904 	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
905 
906 	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
907 
908 	return -EINPROGRESS;
909 }
910 
911 static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
912 				  unsigned alg_type, bool is_encrypt)
913 {
914 	struct crypto_tfm *old_tfm =
915 	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
916 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
917 	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
918 	int err;
919 
920 	/*
921 	 * Change the request to use the software fallback transform, and once
922 	 * the ciphering has completed, put the old transform back into the
923 	 * request.
924 	 */
925 	skcipher_request_set_tfm(subreq, ctx->sw_cipher);
926 	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
927 	skcipher_request_set_crypt(subreq, req->src, req->dst,
928 				   req->nbytes, req->info);
929 	err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
930 			   crypto_skcipher_decrypt(subreq);
931 	skcipher_request_zero(subreq);
932 
933 	return err;
934 }
935 
936 static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
937 			    bool is_encrypt)
938 {
939 	struct crypto_alg *alg = req->base.tfm->__crt_alg;
940 	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
941 	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
942 	unsigned long flags;
943 	int err = -ENOMEM;
944 
945 	dev_req->req		= &req->base;
946 	dev_req->is_encrypt	= is_encrypt;
947 	dev_req->engine		= engine;
948 	dev_req->complete	= spacc_ablk_complete;
949 	dev_req->result		= -EINPROGRESS;
950 
951 	if (unlikely(spacc_ablk_need_fallback(dev_req)))
952 		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
953 
954 	/*
955 	 * Create the DDTs for the engine. If the source and destination are the
956 	 * same then we can optimize by reusing the DDTs.
957 	 */
958 	if (req->src != req->dst) {
959 		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
960 			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
961 		if (!dev_req->src_ddt)
962 			goto out;
963 
964 		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
965 			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
966 		if (!dev_req->dst_ddt)
967 			goto out_free_src;
968 	} else {
969 		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
970 			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
971 		if (!dev_req->dst_ddt)
972 			goto out;
973 
974 		dev_req->src_ddt = NULL;
975 		dev_req->src_addr = dev_req->dst_addr;
976 	}
977 
978 	err = -EINPROGRESS;
979 	spin_lock_irqsave(&engine->hw_lock, flags);
980 	/*
981 	 * Check if the engine will accept the operation now. If it won't then
982 	 * we either stick it on the end of the pending list if we can backlog,
983 	 * or bail out with an error if not.
984 	 */
985 	if (unlikely(spacc_fifo_cmd_full(engine)) ||
986 	    engine->in_flight + 1 > engine->fifo_sz) {
987 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
988 			err = -EBUSY;
989 			spin_unlock_irqrestore(&engine->hw_lock, flags);
990 			goto out_free_ddts;
991 		}
992 		list_add_tail(&dev_req->list, &engine->pending);
993 	} else {
994 		list_add_tail(&dev_req->list, &engine->pending);
995 		spacc_push(engine);
996 	}
997 	spin_unlock_irqrestore(&engine->hw_lock, flags);
998 
999 	goto out;
1000 
1001 out_free_ddts:
1002 	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
1003 		       req->nbytes, req->src == req->dst ?
1004 		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
1005 out_free_src:
1006 	if (req->src != req->dst)
1007 		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
1008 			       req->src, req->nbytes, DMA_TO_DEVICE);
1009 out:
1010 	return err;
1011 }
1012 
1013 static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
1014 {
1015 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
1016 	struct crypto_alg *alg = tfm->__crt_alg;
1017 	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
1018 	struct spacc_engine *engine = spacc_alg->engine;
1019 
1020 	ctx->generic.flags = spacc_alg->type;
1021 	ctx->generic.engine = engine;
1022 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
1023 		ctx->sw_cipher = crypto_alloc_skcipher(
1024 			alg->cra_name, 0, CRYPTO_ALG_ASYNC |
1025 					  CRYPTO_ALG_NEED_FALLBACK);
1026 		if (IS_ERR(ctx->sw_cipher)) {
1027 			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
1028 				 alg->cra_name);
1029 			return PTR_ERR(ctx->sw_cipher);
1030 		}
1031 	}
1032 	ctx->generic.key_offs = spacc_alg->key_offs;
1033 	ctx->generic.iv_offs = spacc_alg->iv_offs;
1034 
1035 	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
1036 
1037 	return 0;
1038 }
1039 
1040 static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
1041 {
1042 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
1043 
1044 	crypto_free_skcipher(ctx->sw_cipher);
1045 }
1046 
1047 static int spacc_ablk_encrypt(struct ablkcipher_request *req)
1048 {
1049 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
1050 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
1051 	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
1052 
1053 	return spacc_ablk_setup(req, alg->type, 1);
1054 }
1055 
1056 static int spacc_ablk_decrypt(struct ablkcipher_request *req)
1057 {
1058 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
1059 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
1060 	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
1061 
1062 	return spacc_ablk_setup(req, alg->type, 0);
1063 }
1064 
1065 static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
1066 {
1067 	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
1068 		SPA_FIFO_STAT_EMPTY;
1069 }
1070 
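/*
 * Drain the status FIFO of completed requests, translate the hardware result
 * codes into errnos and move the requests onto the completed list for the
 * tasklet to finish off. Called from the interrupt handler and from the
 * packet timeout timer.
 */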
1071 static void spacc_process_done(struct spacc_engine *engine)
1072 {
1073 	struct spacc_req *req;
1074 	unsigned long flags;
1075 
1076 	spin_lock_irqsave(&engine->hw_lock, flags);
1077 
1078 	while (!spacc_fifo_stat_empty(engine)) {
1079 		req = list_first_entry(&engine->in_progress, struct spacc_req,
1080 				       list);
1081 		list_move_tail(&req->list, &engine->completed);
1082 		--engine->in_flight;
1083 
1084 		/* POP the status register. */
1085 		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
1086 		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
1087 		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
1088 
1089 		/*
1090 		 * Convert the SPAcc error status into the standard POSIX error
1091 		 * codes.
1092 		 */
1093 		if (unlikely(req->result)) {
1094 			switch (req->result) {
1095 			case SPA_STATUS_ICV_FAIL:
1096 				req->result = -EBADMSG;
1097 				break;
1098 
1099 			case SPA_STATUS_MEMORY_ERROR:
1100 				dev_warn(engine->dev,
1101 					 "memory error triggered\n");
1102 				req->result = -EFAULT;
1103 				break;
1104 
1105 			case SPA_STATUS_BLOCK_ERROR:
1106 				dev_warn(engine->dev,
1107 					 "block error triggered\n");
1108 				req->result = -EIO;
1109 				break;
1110 			}
1111 		}
1112 	}
1113 
1114 	tasklet_schedule(&engine->complete);
1115 
1116 	spin_unlock_irqrestore(&engine->hw_lock, flags);
1117 }
1118 
1119 static irqreturn_t spacc_spacc_irq(int irq, void *dev)
1120 {
1121 	struct spacc_engine *engine = (struct spacc_engine *)dev;
1122 	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1123 
1124 	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1125 	spacc_process_done(engine);
1126 
1127 	return IRQ_HANDLED;
1128 }
1129 
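/* Timer callback: reap any packets the engine has finished processing. */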
1130 static void spacc_packet_timeout(struct timer_list *t)
1131 {
1132 	struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
1133 
1134 	spacc_process_done(engine);
1135 }
1136 
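/* Dispatch a queued request to the AEAD or block cipher submission path. */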
1137 static int spacc_req_submit(struct spacc_req *req)
1138 {
1139 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
1140 
1141 	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
1142 		return spacc_aead_submit(req);
1143 	else
1144 		return spacc_ablk_submit(req);
1145 }
1146 
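/*
 * Completion tasklet. Takes the completed requests off the engine's list,
 * refills the command FIFO from the pending list, re-arms the packet timer
 * if work remains, and then invokes each request's completion callback
 * outside of the hardware lock.
 */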
1147 static void spacc_spacc_complete(unsigned long data)
1148 {
1149 	struct spacc_engine *engine = (struct spacc_engine *)data;
1150 	struct spacc_req *req, *tmp;
1151 	unsigned long flags;
1152 	LIST_HEAD(completed);
1153 
1154 	spin_lock_irqsave(&engine->hw_lock, flags);
1155 
1156 	list_splice_init(&engine->completed, &completed);
1157 	spacc_push(engine);
1158 	if (engine->in_flight)
1159 		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
1160 
1161 	spin_unlock_irqrestore(&engine->hw_lock, flags);
1162 
1163 	list_for_each_entry_safe(req, tmp, &completed, list) {
1164 		list_del(&req->list);
1165 		req->complete(req);
1166 	}
1167 }
1168 
1169 #ifdef CONFIG_PM
1170 static int spacc_suspend(struct device *dev)
1171 {
1172 	struct spacc_engine *engine = dev_get_drvdata(dev);
1173 
1174 	/*
1175 	 * We only support standby mode. All we have to do is gate the clock to
1176 	 * the spacc. The hardware will preserve state until we turn it back
1177 	 * on again.
1178 	 */
1179 	clk_disable(engine->clk);
1180 
1181 	return 0;
1182 }
1183 
1184 static int spacc_resume(struct device *dev)
1185 {
1186 	struct spacc_engine *engine = dev_get_drvdata(dev);
1187 
1188 	return clk_enable(engine->clk);
1189 }
1190 
1191 static const struct dev_pm_ops spacc_pm_ops = {
1192 	.suspend	= spacc_suspend,
1193 	.resume		= spacc_resume,
1194 };
1195 #endif /* CONFIG_PM */
1196 
1197 static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
1198 {
1199 	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
1200 }
1201 
1202 static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
1203 					  struct device_attribute *attr,
1204 					  char *buf)
1205 {
1206 	struct spacc_engine *engine = spacc_dev_to_engine(dev);
1207 
1208 	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
1209 }
1210 
1211 static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
1212 					   struct device_attribute *attr,
1213 					   const char *buf, size_t len)
1214 {
1215 	struct spacc_engine *engine = spacc_dev_to_engine(dev);
1216 	unsigned long thresh;
1217 
1218 	if (kstrtoul(buf, 0, &thresh))
1219 		return -EINVAL;
1220 
1221 	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
1222 
1223 	engine->stat_irq_thresh = thresh;
1224 	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1225 	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1226 
1227 	return len;
1228 }
1229 static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
1230 		   spacc_stat_irq_thresh_store);
1231 
1232 static struct spacc_alg ipsec_engine_algs[] = {
1233 	{
1234 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
1235 		.key_offs = 0,
1236 		.iv_offs = AES_MAX_KEY_SIZE,
1237 		.alg = {
1238 			.cra_name = "cbc(aes)",
1239 			.cra_driver_name = "cbc-aes-picoxcell",
1240 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1241 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1242 				     CRYPTO_ALG_KERN_DRIVER_ONLY |
1243 				     CRYPTO_ALG_ASYNC |
1244 				     CRYPTO_ALG_NEED_FALLBACK,
1245 			.cra_blocksize = AES_BLOCK_SIZE,
1246 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1247 			.cra_type = &crypto_ablkcipher_type,
1248 			.cra_module = THIS_MODULE,
1249 			.cra_ablkcipher = {
1250 				.setkey = spacc_aes_setkey,
1251 				.encrypt = spacc_ablk_encrypt,
1252 				.decrypt = spacc_ablk_decrypt,
1253 				.min_keysize = AES_MIN_KEY_SIZE,
1254 				.max_keysize = AES_MAX_KEY_SIZE,
1255 				.ivsize = AES_BLOCK_SIZE,
1256 			},
1257 			.cra_init = spacc_ablk_cra_init,
1258 			.cra_exit = spacc_ablk_cra_exit,
1259 		},
1260 	},
1261 	{
1262 		.key_offs = 0,
1263 		.iv_offs = AES_MAX_KEY_SIZE,
1264 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
1265 		.alg = {
1266 			.cra_name = "ecb(aes)",
1267 			.cra_driver_name = "ecb-aes-picoxcell",
1268 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1269 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1270 				CRYPTO_ALG_KERN_DRIVER_ONLY |
1271 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1272 			.cra_blocksize = AES_BLOCK_SIZE,
1273 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1274 			.cra_type = &crypto_ablkcipher_type,
1275 			.cra_module = THIS_MODULE,
1276 			.cra_ablkcipher = {
1277 				.setkey = spacc_aes_setkey,
1278 				.encrypt = spacc_ablk_encrypt,
1279 				.decrypt = spacc_ablk_decrypt,
1280 				.min_keysize = AES_MIN_KEY_SIZE,
1281 				.max_keysize = AES_MAX_KEY_SIZE,
1282 			},
1283 			.cra_init = spacc_ablk_cra_init,
1284 			.cra_exit = spacc_ablk_cra_exit,
1285 		},
1286 	},
1287 	{
1288 		.key_offs = DES_BLOCK_SIZE,
1289 		.iv_offs = 0,
1290 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1291 		.alg = {
1292 			.cra_name = "cbc(des)",
1293 			.cra_driver_name = "cbc-des-picoxcell",
1294 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1295 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1296 					CRYPTO_ALG_ASYNC |
1297 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1298 			.cra_blocksize = DES_BLOCK_SIZE,
1299 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1300 			.cra_type = &crypto_ablkcipher_type,
1301 			.cra_module = THIS_MODULE,
1302 			.cra_ablkcipher = {
1303 				.setkey = spacc_des_setkey,
1304 				.encrypt = spacc_ablk_encrypt,
1305 				.decrypt = spacc_ablk_decrypt,
1306 				.min_keysize = DES_KEY_SIZE,
1307 				.max_keysize = DES_KEY_SIZE,
1308 				.ivsize = DES_BLOCK_SIZE,
1309 			},
1310 			.cra_init = spacc_ablk_cra_init,
1311 			.cra_exit = spacc_ablk_cra_exit,
1312 		},
1313 	},
1314 	{
1315 		.key_offs = DES_BLOCK_SIZE,
1316 		.iv_offs = 0,
1317 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1318 		.alg = {
1319 			.cra_name = "ecb(des)",
1320 			.cra_driver_name = "ecb-des-picoxcell",
1321 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1322 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1323 					CRYPTO_ALG_ASYNC |
1324 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1325 			.cra_blocksize = DES_BLOCK_SIZE,
1326 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1327 			.cra_type = &crypto_ablkcipher_type,
1328 			.cra_module = THIS_MODULE,
1329 			.cra_ablkcipher = {
1330 				.setkey = spacc_des_setkey,
1331 				.encrypt = spacc_ablk_encrypt,
1332 				.decrypt = spacc_ablk_decrypt,
1333 				.min_keysize = DES_KEY_SIZE,
1334 				.max_keysize = DES_KEY_SIZE,
1335 			},
1336 			.cra_init = spacc_ablk_cra_init,
1337 			.cra_exit = spacc_ablk_cra_exit,
1338 		},
1339 	},
1340 	{
1341 		.key_offs = DES_BLOCK_SIZE,
1342 		.iv_offs = 0,
1343 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1344 		.alg = {
1345 			.cra_name = "cbc(des3_ede)",
1346 			.cra_driver_name = "cbc-des3-ede-picoxcell",
1347 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1348 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1349 					CRYPTO_ALG_ASYNC |
1350 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1351 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1352 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1353 			.cra_type = &crypto_ablkcipher_type,
1354 			.cra_module = THIS_MODULE,
1355 			.cra_ablkcipher = {
1356 				.setkey = spacc_des_setkey,
1357 				.encrypt = spacc_ablk_encrypt,
1358 				.decrypt = spacc_ablk_decrypt,
1359 				.min_keysize = DES3_EDE_KEY_SIZE,
1360 				.max_keysize = DES3_EDE_KEY_SIZE,
1361 				.ivsize = DES3_EDE_BLOCK_SIZE,
1362 			},
1363 			.cra_init = spacc_ablk_cra_init,
1364 			.cra_exit = spacc_ablk_cra_exit,
1365 		},
1366 	},
1367 	{
1368 		.key_offs = DES_BLOCK_SIZE,
1369 		.iv_offs = 0,
1370 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1371 		.alg = {
1372 			.cra_name = "ecb(des3_ede)",
1373 			.cra_driver_name = "ecb-des3-ede-picoxcell",
1374 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1375 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1376 					CRYPTO_ALG_ASYNC |
1377 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1378 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1379 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1380 			.cra_type = &crypto_ablkcipher_type,
1381 			.cra_module = THIS_MODULE,
1382 			.cra_ablkcipher = {
1383 				.setkey = spacc_des_setkey,
1384 				.encrypt = spacc_ablk_encrypt,
1385 				.decrypt = spacc_ablk_decrypt,
1386 				.min_keysize = DES3_EDE_KEY_SIZE,
1387 				.max_keysize = DES3_EDE_KEY_SIZE,
1388 			},
1389 			.cra_init = spacc_ablk_cra_init,
1390 			.cra_exit = spacc_ablk_cra_exit,
1391 		},
1392 	},
1393 };
1394 
1395 static struct spacc_aead ipsec_engine_aeads[] = {
1396 	{
1397 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1398 				SPA_CTRL_CIPH_MODE_CBC |
1399 				SPA_CTRL_HASH_ALG_SHA |
1400 				SPA_CTRL_HASH_MODE_HMAC,
1401 		.key_offs = 0,
1402 		.iv_offs = AES_MAX_KEY_SIZE,
1403 		.alg = {
1404 			.base = {
1405 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1406 				.cra_driver_name = "authenc-hmac-sha1-"
1407 						   "cbc-aes-picoxcell",
1408 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1409 				.cra_flags = CRYPTO_ALG_ASYNC |
1410 					     CRYPTO_ALG_NEED_FALLBACK |
1411 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1412 				.cra_blocksize = AES_BLOCK_SIZE,
1413 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1414 				.cra_module = THIS_MODULE,
1415 			},
1416 			.setkey = spacc_aead_setkey,
1417 			.setauthsize = spacc_aead_setauthsize,
1418 			.encrypt = spacc_aead_encrypt,
1419 			.decrypt = spacc_aead_decrypt,
1420 			.ivsize = AES_BLOCK_SIZE,
1421 			.maxauthsize = SHA1_DIGEST_SIZE,
1422 			.init = spacc_aead_cra_init,
1423 			.exit = spacc_aead_cra_exit,
1424 		},
1425 	},
1426 	{
1427 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1428 				SPA_CTRL_CIPH_MODE_CBC |
1429 				SPA_CTRL_HASH_ALG_SHA256 |
1430 				SPA_CTRL_HASH_MODE_HMAC,
1431 		.key_offs = 0,
1432 		.iv_offs = AES_MAX_KEY_SIZE,
1433 		.alg = {
1434 			.base = {
1435 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1436 				.cra_driver_name = "authenc-hmac-sha256-"
1437 						   "cbc-aes-picoxcell",
1438 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1439 				.cra_flags = CRYPTO_ALG_ASYNC |
1440 					     CRYPTO_ALG_NEED_FALLBACK |
1441 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1442 				.cra_blocksize = AES_BLOCK_SIZE,
1443 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1444 				.cra_module = THIS_MODULE,
1445 			},
1446 			.setkey = spacc_aead_setkey,
1447 			.setauthsize = spacc_aead_setauthsize,
1448 			.encrypt = spacc_aead_encrypt,
1449 			.decrypt = spacc_aead_decrypt,
1450 			.ivsize = AES_BLOCK_SIZE,
1451 			.maxauthsize = SHA256_DIGEST_SIZE,
1452 			.init = spacc_aead_cra_init,
1453 			.exit = spacc_aead_cra_exit,
1454 		},
1455 	},
1456 	{
1457 		.key_offs = 0,
1458 		.iv_offs = AES_MAX_KEY_SIZE,
1459 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1460 				SPA_CTRL_CIPH_MODE_CBC |
1461 				SPA_CTRL_HASH_ALG_MD5 |
1462 				SPA_CTRL_HASH_MODE_HMAC,
1463 		.alg = {
1464 			.base = {
1465 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1466 				.cra_driver_name = "authenc-hmac-md5-"
1467 						   "cbc-aes-picoxcell",
1468 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1469 				.cra_flags = CRYPTO_ALG_ASYNC |
1470 					     CRYPTO_ALG_NEED_FALLBACK |
1471 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1472 				.cra_blocksize = AES_BLOCK_SIZE,
1473 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1474 				.cra_module = THIS_MODULE,
1475 			},
1476 			.setkey = spacc_aead_setkey,
1477 			.setauthsize = spacc_aead_setauthsize,
1478 			.encrypt = spacc_aead_encrypt,
1479 			.decrypt = spacc_aead_decrypt,
1480 			.ivsize = AES_BLOCK_SIZE,
1481 			.maxauthsize = MD5_DIGEST_SIZE,
1482 			.init = spacc_aead_cra_init,
1483 			.exit = spacc_aead_cra_exit,
1484 		},
1485 	},
1486 	{
1487 		.key_offs = DES_BLOCK_SIZE,
1488 		.iv_offs = 0,
1489 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
1490 				SPA_CTRL_CIPH_MODE_CBC |
1491 				SPA_CTRL_HASH_ALG_SHA |
1492 				SPA_CTRL_HASH_MODE_HMAC,
1493 		.alg = {
1494 			.base = {
1495 				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1496 				.cra_driver_name = "authenc-hmac-sha1-"
1497 						   "cbc-3des-picoxcell",
1498 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1499 				.cra_flags = CRYPTO_ALG_ASYNC |
1500 					     CRYPTO_ALG_NEED_FALLBACK |
1501 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1502 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1503 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1504 				.cra_module = THIS_MODULE,
1505 			},
1506 			.setkey = spacc_aead_setkey,
1507 			.setauthsize = spacc_aead_setauthsize,
1508 			.encrypt = spacc_aead_encrypt,
1509 			.decrypt = spacc_aead_decrypt,
1510 			.ivsize = DES3_EDE_BLOCK_SIZE,
1511 			.maxauthsize = SHA1_DIGEST_SIZE,
1512 			.init = spacc_aead_cra_init,
1513 			.exit = spacc_aead_cra_exit,
1514 		},
1515 	},
1516 	{
1517 		.key_offs = DES_BLOCK_SIZE,
1518 		.iv_offs = 0,
1519 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1520 				SPA_CTRL_CIPH_MODE_CBC |
1521 				SPA_CTRL_HASH_ALG_SHA256 |
1522 				SPA_CTRL_HASH_MODE_HMAC,
1523 		.alg = {
1524 			.base = {
1525 				.cra_name = "authenc(hmac(sha256),"
1526 					    "cbc(des3_ede))",
1527 				.cra_driver_name = "authenc-hmac-sha256-"
1528 						   "cbc-3des-picoxcell",
1529 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1530 				.cra_flags = CRYPTO_ALG_ASYNC |
1531 					     CRYPTO_ALG_NEED_FALLBACK |
1532 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1533 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1534 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1535 				.cra_module = THIS_MODULE,
1536 			},
1537 			.setkey = spacc_aead_setkey,
1538 			.setauthsize = spacc_aead_setauthsize,
1539 			.encrypt = spacc_aead_encrypt,
1540 			.decrypt = spacc_aead_decrypt,
1541 			.ivsize = DES3_EDE_BLOCK_SIZE,
1542 			.maxauthsize = SHA256_DIGEST_SIZE,
1543 			.init = spacc_aead_cra_init,
1544 			.exit = spacc_aead_cra_exit,
1545 		},
1546 	},
1547 	{
1548 		.key_offs = DES_BLOCK_SIZE,
1549 		.iv_offs = 0,
1550 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
1551 				SPA_CTRL_CIPH_MODE_CBC |
1552 				SPA_CTRL_HASH_ALG_MD5 |
1553 				SPA_CTRL_HASH_MODE_HMAC,
1554 		.alg = {
1555 			.base = {
1556 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1557 				.cra_driver_name = "authenc-hmac-md5-"
1558 						   "cbc-3des-picoxcell",
1559 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1560 				.cra_flags = CRYPTO_ALG_ASYNC |
1561 					     CRYPTO_ALG_NEED_FALLBACK |
1562 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1563 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1564 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1565 				.cra_module = THIS_MODULE,
1566 			},
1567 			.setkey = spacc_aead_setkey,
1568 			.setauthsize = spacc_aead_setauthsize,
1569 			.encrypt = spacc_aead_encrypt,
1570 			.decrypt = spacc_aead_decrypt,
1571 			.ivsize = DES3_EDE_BLOCK_SIZE,
1572 			.maxauthsize = MD5_DIGEST_SIZE,
1573 			.init = spacc_aead_cra_init,
1574 			.exit = spacc_aead_cra_exit,
1575 		},
1576 	},
1577 };
1578 
1579 static struct spacc_alg l2_engine_algs[] = {
1580 	{
1581 		.key_offs = 0,
1582 		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
1583 		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
1584 				SPA_CTRL_CIPH_MODE_F8,
1585 		.alg = {
1586 			.cra_name = "f8(kasumi)",
1587 			.cra_driver_name = "f8-kasumi-picoxcell",
1588 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1589 			.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
1590 					CRYPTO_ALG_ASYNC |
1591 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1592 			.cra_blocksize = 8,
1593 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1594 			.cra_type = &crypto_ablkcipher_type,
1595 			.cra_module = THIS_MODULE,
1596 			.cra_ablkcipher = {
1597 				.setkey = spacc_kasumi_f8_setkey,
1598 				.encrypt = spacc_ablk_encrypt,
1599 				.decrypt = spacc_ablk_decrypt,
1600 				.min_keysize = 16,
1601 				.max_keysize = 16,
1602 				.ivsize = 8,
1603 			},
1604 			.cra_init = spacc_ablk_cra_init,
1605 			.cra_exit = spacc_ablk_cra_exit,
1606 		},
1607 	},
1608 };
1609 
1610 #ifdef CONFIG_OF
1611 static const struct of_device_id spacc_of_id_table[] = {
1612 	{ .compatible = "picochip,spacc-ipsec" },
1613 	{ .compatible = "picochip,spacc-l2" },
1614 	{}
1615 };
1616 MODULE_DEVICE_TABLE(of, spacc_of_id_table);
1617 #endif /* CONFIG_OF */
1618 
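/*
 * Probe the SPAcc engine: select the IPSEC or L2 configuration from the DT
 * compatible string, map the registers, request the IRQ and clock, set up
 * the DDT DMA pool and interrupt thresholds, then register the supported
 * block ciphers and AEADs with the crypto API.
 */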
1619 static int spacc_probe(struct platform_device *pdev)
1620 {
1621 	int i, err, ret;
1622 	struct resource *mem, *irq;
1623 	struct device_node *np = pdev->dev.of_node;
1624 	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
1625 						   GFP_KERNEL);
1626 	if (!engine)
1627 		return -ENOMEM;
1628 
1629 	if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
1630 		engine->max_ctxs	= SPACC_CRYPTO_IPSEC_MAX_CTXS;
1631 		engine->cipher_pg_sz	= SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
1632 		engine->hash_pg_sz	= SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
1633 		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ;
1634 		engine->algs		= ipsec_engine_algs;
1635 		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs);
1636 		engine->aeads		= ipsec_engine_aeads;
1637 		engine->num_aeads	= ARRAY_SIZE(ipsec_engine_aeads);
1638 	} else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
1639 		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS;
1640 		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ;
1641 		engine->hash_pg_sz	= SPACC_CRYPTO_L2_HASH_PG_SZ;
1642 		engine->fifo_sz		= SPACC_CRYPTO_L2_FIFO_SZ;
1643 		engine->algs		= l2_engine_algs;
1644 		engine->num_algs	= ARRAY_SIZE(l2_engine_algs);
1645 	} else {
1646 		return -EINVAL;
1647 	}
1648 
1649 	engine->name = dev_name(&pdev->dev);
1650 
1651 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1652 	engine->regs = devm_ioremap_resource(&pdev->dev, mem);
1653 	if (IS_ERR(engine->regs))
1654 		return PTR_ERR(engine->regs);
1655 
1656 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1657 	if (!irq) {
1658 		dev_err(&pdev->dev, "no memory/irq resource for engine\n");
1659 		return -ENXIO;
1660 	}
1661 
1662 	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
1663 			     engine->name, engine)) {
1664 		dev_err(engine->dev, "failed to request IRQ\n");
1665 		return -EBUSY;
1666 	}
1667 
1668 	engine->dev		= &pdev->dev;
1669 	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
1670 	engine->hash_key_base	= engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
1671 
1672 	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
1673 		MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
1674 	if (!engine->req_pool)
1675 		return -ENOMEM;
1676 
1677 	spin_lock_init(&engine->hw_lock);
1678 
1679 	engine->clk = clk_get(&pdev->dev, "ref");
1680 	if (IS_ERR(engine->clk)) {
1681 		dev_info(&pdev->dev, "clk unavailable\n");
1682 		return PTR_ERR(engine->clk);
1683 	}
1684 
1685 	if (clk_prepare_enable(engine->clk)) {
1686 		dev_info(&pdev->dev, "unable to prepare/enable clk\n");
1687 		ret = -EIO;
1688 		goto err_clk_put;
1689 	}
1690 
1691 	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1692 	if (ret)
1693 		goto err_clk_disable;
1694 
1695 
1696 	/*
1697 	 * Use an IRQ threshold of 50% as a default. This seems to be a
1698 	 * reasonable trade off of latency against throughput but can be
1699 	 * changed at runtime.
1700 	 */
1701 	engine->stat_irq_thresh = (engine->fifo_sz / 2);
1702 
1703 	/*
1704 	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
1705 	 * only submit a new packet for processing when we complete another in
1706 	 * the queue. This minimizes time spent in the interrupt handler.
1707 	 */
1708 	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1709 	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1710 	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
1711 	       engine->regs + SPA_IRQ_EN_REG_OFFSET);
1712 
1713 	timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
1714 
1715 	INIT_LIST_HEAD(&engine->pending);
1716 	INIT_LIST_HEAD(&engine->completed);
1717 	INIT_LIST_HEAD(&engine->in_progress);
1718 	engine->in_flight = 0;
1719 	tasklet_init(&engine->complete, spacc_spacc_complete,
1720 		     (unsigned long)engine);
1721 
1722 	platform_set_drvdata(pdev, engine);
1723 
1724 	ret = -EINVAL;
1725 	INIT_LIST_HEAD(&engine->registered_algs);
1726 	for (i = 0; i < engine->num_algs; ++i) {
1727 		engine->algs[i].engine = engine;
1728 		err = crypto_register_alg(&engine->algs[i].alg);
1729 		if (!err) {
1730 			list_add_tail(&engine->algs[i].entry,
1731 				      &engine->registered_algs);
1732 			ret = 0;
1733 		}
1734 		if (err)
1735 			dev_err(engine->dev, "failed to register alg \"%s\"\n",
1736 				engine->algs[i].alg.cra_name);
1737 		else
1738 			dev_dbg(engine->dev, "registered alg \"%s\"\n",
1739 				engine->algs[i].alg.cra_name);
1740 	}
1741 
1742 	INIT_LIST_HEAD(&engine->registered_aeads);
1743 	for (i = 0; i < engine->num_aeads; ++i) {
1744 		engine->aeads[i].engine = engine;
1745 		err = crypto_register_aead(&engine->aeads[i].alg);
1746 		if (!err) {
1747 			list_add_tail(&engine->aeads[i].entry,
1748 				      &engine->registered_aeads);
1749 			ret = 0;
1750 		}
1751 		if (err)
1752 			dev_err(engine->dev, "failed to register alg \"%s\"\n",
1753 				engine->aeads[i].alg.base.cra_name);
1754 		else
1755 			dev_dbg(engine->dev, "registered alg \"%s\"\n",
1756 				engine->aeads[i].alg.base.cra_name);
1757 	}
1758 
1759 	if (!ret)
1760 		return 0;
1761 
1762 	del_timer_sync(&engine->packet_timeout);
1763 	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1764 err_clk_disable:
1765 	clk_disable_unprepare(engine->clk);
1766 err_clk_put:
1767 	clk_put(engine->clk);
1768 
1769 	return ret;
1770 }
1771 
1772 static int spacc_remove(struct platform_device *pdev)
1773 {
1774 	struct spacc_aead *aead, *an;
1775 	struct spacc_alg *alg, *next;
1776 	struct spacc_engine *engine = platform_get_drvdata(pdev);
1777 
1778 	del_timer_sync(&engine->packet_timeout);
1779 	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1780 
1781 	list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
1782 		list_del(&aead->entry);
1783 		crypto_unregister_aead(&aead->alg);
1784 	}
1785 
1786 	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
1787 		list_del(&alg->entry);
1788 		crypto_unregister_alg(&alg->alg);
1789 	}
1790 
1791 	clk_disable_unprepare(engine->clk);
1792 	clk_put(engine->clk);
1793 
1794 	return 0;
1795 }
1796 
1797 static struct platform_driver spacc_driver = {
1798 	.probe		= spacc_probe,
1799 	.remove		= spacc_remove,
1800 	.driver		= {
1801 		.name	= "picochip,spacc",
1802 #ifdef CONFIG_PM
1803 		.pm	= &spacc_pm_ops,
1804 #endif /* CONFIG_PM */
1805 		.of_match_table	= of_match_ptr(spacc_of_id_table),
1806 	},
1807 };
1808 
1809 module_platform_driver(spacc_driver);
1810 
1811 MODULE_LICENSE("GPL");
1812 MODULE_AUTHOR("Jamie Iles");
1813