/*
 * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
 *
 * The driver is based on information gathered from
 * drivers/mxc/security/mxc_scc.c which can be found in
 * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <crypto/algapi.h>
#include <crypto/des.h>

/* Secure Memory (SCM) registers */
#define SCC_SCM_RED_START		0x0000
#define SCC_SCM_BLACK_START		0x0004
#define SCC_SCM_LENGTH			0x0008
#define SCC_SCM_CTRL			0x000C
#define SCC_SCM_STATUS			0x0010
#define SCC_SCM_ERROR_STATUS		0x0014
#define SCC_SCM_INTR_CTRL		0x0018
#define SCC_SCM_CFG			0x001C
#define SCC_SCM_INIT_VECTOR_0		0x0020
#define SCC_SCM_INIT_VECTOR_1		0x0024
#define SCC_SCM_RED_MEMORY		0x0400
#define SCC_SCM_BLACK_MEMORY		0x0800

/* Security Monitor (SMN) Registers */
#define SCC_SMN_STATUS			0x1000
#define SCC_SMN_COMMAND			0x1004
#define SCC_SMN_SEQ_START		0x1008
#define SCC_SMN_SEQ_END			0x100C
#define SCC_SMN_SEQ_CHECK		0x1010
#define SCC_SMN_BIT_COUNT		0x1014
#define SCC_SMN_BITBANK_INC_SIZE	0x1018
#define SCC_SMN_BITBANK_DECREMENT	0x101C
#define SCC_SMN_COMPARE_SIZE		0x1020
#define SCC_SMN_PLAINTEXT_CHECK		0x1024
#define SCC_SMN_CIPHERTEXT_CHECK	0x1028
#define SCC_SMN_TIMER_IV		0x102C
#define SCC_SMN_TIMER_CONTROL		0x1030
#define SCC_SMN_DEBUG_DETECT_STAT	0x1034
#define SCC_SMN_TIMER			0x1038

#define SCC_SCM_CTRL_START_CIPHER	BIT(2)
#define SCC_SCM_CTRL_CBC_MODE		BIT(1)
#define SCC_SCM_CTRL_DECRYPT_MODE	BIT(0)

#define SCC_SCM_STATUS_LEN_ERR		BIT(12)
#define SCC_SCM_STATUS_SMN_UNBLOCKED	BIT(11)
#define SCC_SCM_STATUS_CIPHERING_DONE	BIT(10)
#define SCC_SCM_STATUS_ZEROIZING_DONE	BIT(9)
#define SCC_SCM_STATUS_INTR_STATUS	BIT(8)
#define SCC_SCM_STATUS_SEC_KEY		BIT(7)
#define SCC_SCM_STATUS_INTERNAL_ERR	BIT(6)
#define SCC_SCM_STATUS_BAD_SEC_KEY	BIT(5)
#define SCC_SCM_STATUS_ZEROIZE_FAIL	BIT(4)
#define SCC_SCM_STATUS_SMN_BLOCKED	BIT(3)
#define SCC_SCM_STATUS_CIPHERING	BIT(2)
#define SCC_SCM_STATUS_ZEROIZING	BIT(1)
#define SCC_SCM_STATUS_BUSY		BIT(0)

#define SCC_SMN_STATUS_STATE_MASK	0x0000001F
#define SCC_SMN_STATE_START		0x0
/* The SMN is zeroizing its RAM during reset */
#define SCC_SMN_STATE_ZEROIZE_RAM	0x5
/* SMN has passed internal checks */
#define SCC_SMN_STATE_HEALTH_CHECK	0x6
/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
#define SCC_SMN_STATE_FAIL		0x9
/* SCC is in secure state. SCM is using secret key. */
#define SCC_SMN_STATE_SECURE		0xA
/* SCC is not secure. SCM is using default key. */
#define SCC_SMN_STATE_NON_SECURE	0xC

#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM	BIT(2)
#define SCC_SCM_INTR_CTRL_CLR_INTR	BIT(1)
#define SCC_SCM_INTR_CTRL_MASK_INTR	BIT(0)

/* Size, in blocks, of Black memory. */
#define SCC_SCM_CFG_BLACK_SIZE_MASK	0x07fe0000
#define SCC_SCM_CFG_BLACK_SIZE_SHIFT	17
/* Size, in blocks, of Red memory. */
#define SCC_SCM_CFG_RED_SIZE_MASK	0x0001ff80
#define SCC_SCM_CFG_RED_SIZE_SHIFT	7
/* Number of bytes per block. */
#define SCC_SCM_CFG_BLOCK_SIZE_MASK	0x0000007f

#define SCC_SMN_COMMAND_TAMPER_LOCK	BIT(4)
#define SCC_SMN_COMMAND_CLR_INTR	BIT(3)
#define SCC_SMN_COMMAND_CLR_BIT_BANK	BIT(2)
#define SCC_SMN_COMMAND_EN_INTR		BIT(1)
#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM  BIT(0)

#define SCC_KEY_SLOTS			20
#define SCC_MAX_KEY_SIZE		32
#define SCC_KEY_SLOT_SIZE		32

#define SCC_CRC_CCITT_START		0xFFFF

/*
 * Offset into each RAM of the base of the area which is not
 * used for Stored Keys.
 */
#define SCC_NON_RESERVED_OFFSET	(SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)

/* Fixed padding for appending to plaintext to fill out a block */
static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };

enum mxc_scc_state {
	SCC_STATE_OK,
	SCC_STATE_UNIMPLEMENTED,
	SCC_STATE_FAILED
};

struct mxc_scc {
	struct device		*dev;
	void __iomem		*base;
	struct clk		*clk;
	bool			hw_busy;
	spinlock_t		lock;
	struct crypto_queue	queue;
	struct crypto_async_request *req;
	int			block_size_bytes;
	int			black_ram_size_blocks;
	int			memory_size_bytes;
	int			bytes_remaining;

	void __iomem		*red_memory;
	void __iomem		*black_memory;
};

struct mxc_scc_ctx {
	struct mxc_scc		*scc;
	struct scatterlist	*sg_src;
	size_t			src_nents;
	struct scatterlist	*sg_dst;
	size_t			dst_nents;
	unsigned int		offset;
	unsigned int		size;
	unsigned int		ctrl;
};

struct mxc_scc_crypto_tmpl {
	struct mxc_scc *scc;
	struct crypto_alg alg;
};

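/*
 * Copy processed data back into the request's destination scatterlist,
 * reading from red memory when decrypting and from black memory when
 * encrypting.
 */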
static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
			    struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mxc_scc *scc = ctx->scc;
	size_t len;
	void __iomem *from;

	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
		from = scc->red_memory;
	else
		from = scc->black_memory;

	dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
		ctx->dst_nents * 8);
	len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
				   from, ctx->size, ctx->offset);
	if (!len) {
		dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
		return -EINVAL;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "red memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->red_memory, ctx->size, 1);
	print_hex_dump(KERN_ERR,
		       "black memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->black_memory, ctx->size, 1);
#endif

	ctx->offset += len;

	if (ctx->offset < ablkreq->nbytes)
		return -EINPROGRESS;

	return 0;
}

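/*
 * Validate and count the scatterlist entries of the request's source and
 * destination buffers and reset the per-request transfer state.
 */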
static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mxc_scc_ctx *ctx)
{
	struct mxc_scc *scc = ctx->scc;
	int nents;

	nents = sg_nents_for_len(req->src, req->nbytes);
	if (nents < 0) {
		dev_err(scc->dev, "Invalid number of src SG\n");
		return nents;
	}
	ctx->src_nents = nents;

	nents = sg_nents_for_len(req->dst, req->nbytes);
	if (nents < 0) {
		dev_err(scc->dev, "Invalid number of dst SG\n");
		return nents;
	}
	ctx->dst_nents = nents;

	ctx->size = 0;
	ctx->offset = 0;

	return 0;
}

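/*
 * Complete a request: release the hardware, hand back the updated IV in
 * CBC mode and invoke the crypto API completion callback.
 */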
static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
					   struct mxc_scc_ctx *ctx,
					   int result)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mxc_scc *scc = ctx->scc;

	scc->req = NULL;
	scc->bytes_remaining = scc->memory_size_bytes;

	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
		memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
		       scc->block_size_bytes);

	req->complete(req, result);
	scc->hw_busy = false;

	return 0;
}

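/*
 * Copy the next chunk of input into the SCM, writing to black memory when
 * decrypting and to red memory when encrypting, set up the IV for CBC mode
 * and pad a trailing partial block with the fixed padding pattern.
 */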
static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
			    struct ablkcipher_request *req)
{
	u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
	size_t len = min_t(size_t, req->nbytes - ctx->offset,
			   ctx->scc->bytes_remaining);
	unsigned int padding_byte_count = 0;
	struct mxc_scc *scc = ctx->scc;
	void __iomem *to;

	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
		to = scc->black_memory;
	else
		to = scc->red_memory;

	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
		memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
		       scc->block_size_bytes);

	len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
				 to, len, ctx->offset);
	if (!len) {
		dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
		return -EINVAL;
	}

	ctx->size = len;

#ifdef DEBUG
	dev_dbg(scc->dev, "copied %zu bytes to 0x%p\n", len, to);
	print_hex_dump(KERN_ERR,
		       "init vector0@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
		       1);
	print_hex_dump(KERN_ERR,
		       "red memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->red_memory, ctx->size, 1);
	print_hex_dump(KERN_ERR,
		       "black memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->black_memory, ctx->size, 1);
#endif

	scc->bytes_remaining -= len;

	padding_byte_count = len % scc->block_size_bytes;

	if (padding_byte_count) {
		memcpy(padding_buffer, scc_block_padding, padding_byte_count);
		memcpy(to + len, padding_buffer, padding_byte_count);
		ctx->size += padding_byte_count;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "data to encrypt@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       to, ctx->size, 1);
#endif

	return 0;
}

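/*
 * Load the next chunk of the request into the SCM, program the block count
 * and cipher mode and start the operation; completion is reported by
 * interrupt.
 */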
static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
				    struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mxc_scc *scc = ctx->scc;
	int err;

	dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		ablkreq->nbytes, ablkreq->src, ablkreq->dst);

	writel(0, scc->base + SCC_SCM_ERROR_STATUS);

	err = mxc_scc_put_data(ctx, ablkreq);
	if (err) {
		mxc_scc_ablkcipher_req_complete(req, ctx, err);
		return;
	}

	dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
		(void *)readl(scc->base + SCC_SCM_RED_START),
		(void *)readl(scc->base + SCC_SCM_BLACK_START));

	/* clear interrupt control registers */
	writel(SCC_SCM_INTR_CTRL_CLR_INTR,
	       scc->base + SCC_SCM_INTR_CTRL);

	writel((ctx->size / ctx->scc->block_size_bytes) - 1,
	       scc->base + SCC_SCM_LENGTH);

	dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
		ctx->size / ctx->scc->block_size_bytes,
		(ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
		scc->red_memory);

	writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
}

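/*
 * Interrupt handler: acknowledge the SCM interrupt, copy the result back
 * to the caller and either complete the request or start the next chunk.
 */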
static irqreturn_t mxc_scc_int(int irq, void *priv)
{
	struct crypto_async_request *req;
	struct mxc_scc_ctx *ctx;
	struct mxc_scc *scc = priv;
	int status;
	int ret;

	status = readl(scc->base + SCC_SCM_STATUS);

	/* clear interrupt control registers */
	writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);

	if (status & SCC_SCM_STATUS_BUSY)
		return IRQ_NONE;

	req = scc->req;
	if (req) {
		ctx = crypto_tfm_ctx(req->tfm);
		ret = mxc_scc_get_data(ctx, req);
		if (ret != -EINPROGRESS)
			mxc_scc_ablkcipher_req_complete(req, ctx, ret);
		else
			mxc_scc_ablkcipher_next(ctx, req);
	}

	return IRQ_HANDLED;
}

static int mxc_scc_cra_init(struct crypto_tfm *tfm)
{
	struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct mxc_scc_crypto_tmpl *algt;

	algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);

	ctx->scc = algt->scc;
	return 0;
}

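/*
 * If the hardware is idle, pull the next request from the software queue
 * and dispatch it.
 */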
static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
{
	struct crypto_async_request *req, *backlog;

	if (ctx->scc->hw_busy)
		return;

	spin_lock_bh(&ctx->scc->lock);
	backlog = crypto_get_backlog(&ctx->scc->queue);
	req = crypto_dequeue_request(&ctx->scc->queue);
	ctx->scc->req = req;
	ctx->scc->hw_busy = true;
	spin_unlock_bh(&ctx->scc->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	mxc_scc_ablkcipher_next(ctx, req);
}

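/* Enqueue a request on the software queue and kick the dispatcher. */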
static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
			     struct crypto_async_request *req)
{
	int ret;

	spin_lock_bh(&ctx->scc->lock);
	ret = crypto_enqueue_request(&ctx->scc->queue, req);
	spin_unlock_bh(&ctx->scc->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mxc_scc_dequeue_req_unlocked(ctx);

	return -EINPROGRESS;
}

static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
			   struct ablkcipher_request *req)
{
	int err;

	err = mxc_scc_ablkcipher_req_init(req, ctx);
	if (err)
		return err;

	return mxc_scc_queue_req(ctx, &req->base);
}

static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;

	return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;

	return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;

	return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;

	return mxc_scc_des3_op(ctx, req);
}

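/*
 * Program the red/black start registers to skip the reserved key slots
 * and remember the corresponding memory windows.
 */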
static void mxc_scc_hw_init(struct mxc_scc *scc)
{
	int offset;

	offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;

	/* Fill the RED_START register */
	writel(offset, scc->base + SCC_SCM_RED_START);

	/* Fill the BLACK_START register */
	writel(offset, scc->base + SCC_SCM_BLACK_START);

	scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
			  SCC_NON_RESERVED_OFFSET;

	scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
			    SCC_NON_RESERVED_OFFSET;

	scc->bytes_remaining = scc->memory_size_bytes;
}

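/* Read block size and memory geometry from the SCM configuration register. */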
static int mxc_scc_get_config(struct mxc_scc *scc)
{
	int config;

	config = readl(scc->base + SCC_SCM_CFG);

	scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;

	scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;

	scc->memory_size_bytes = (scc->block_size_bytes *
				  scc->black_ram_size_blocks) -
				  SCC_NON_RESERVED_OFFSET;

	return 0;
}

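/*
 * Read the SMN state. If the monitor is still in its health check, write a
 * minimal sequence to the Algorithm Sequence Checker to bring it up, then
 * map the resulting state to the driver's view of the SCC.
 */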
static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
{
	enum mxc_scc_state state;
	int status;

	status = readl(scc->base + SCC_SMN_STATUS) &
		       SCC_SMN_STATUS_STATE_MASK;

	/* If in Health Check, try to bringup to secure state */
	if (status & SCC_SMN_STATE_HEALTH_CHECK) {
		/*
		 * Write a simple algorithm to the Algorithm Sequence
		 * Checker (ASC)
		 */
		writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
		writel(0x5555, scc->base + SCC_SMN_SEQ_END);
		writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);

		status = readl(scc->base + SCC_SMN_STATUS) &
			       SCC_SMN_STATUS_STATE_MASK;
	}

	switch (status) {
	case SCC_SMN_STATE_NON_SECURE:
	case SCC_SMN_STATE_SECURE:
		state = SCC_STATE_OK;
		break;
	case SCC_SMN_STATE_FAIL:
		state = SCC_STATE_FAILED;
		break;
	default:
		state = SCC_STATE_UNIMPLEMENTED;
		break;
	}

	return state;
}

static struct mxc_scc_crypto_tmpl scc_ecb_des = {
	.alg = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "ecb-des3-scc",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = mxc_scc_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.encrypt = mxc_scc_ecb_des_encrypt,
			.decrypt = mxc_scc_ecb_des_decrypt,
		}
	}
};

static struct mxc_scc_crypto_tmpl scc_cbc_des = {
	.alg = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-des3-scc",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = mxc_scc_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.encrypt = mxc_scc_cbc_des_encrypt,
			.decrypt = mxc_scc_cbc_des_decrypt,
		}
	}
};

static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
	&scc_ecb_des,
	&scc_cbc_des,
};

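/*
 * Register the ECB and CBC des3_ede algorithms, unregistering any already
 * registered ones on failure.
 */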
static int mxc_scc_crypto_register(struct mxc_scc *scc)
{
	int i;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
		scc_crypto_algs[i]->scc = scc;
		err = crypto_register_alg(&scc_crypto_algs[i]->alg);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (--i >= 0)
		crypto_unregister_alg(&scc_crypto_algs[i]->alg);

	return err;
}

static void mxc_scc_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
}

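/*
 * Probe: map the registers, enable the ipg clock, check that the SCC is in
 * a usable state, set up both interrupts and register the algorithms.
 */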
static int mxc_scc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct mxc_scc *scc;
	enum mxc_scc_state state;
	int irq;
	int ret;
	int i;

	scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
	if (!scc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	scc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scc->base))
		return PTR_ERR(scc->base);

	scc->clk = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(scc->clk)) {
		dev_err(dev, "Could not get ipg clock\n");
		return PTR_ERR(scc->clk);
	}

	ret = clk_prepare_enable(scc->clk);
	if (ret)
		return ret;

	/* clear error status register */
	writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);

	/* clear interrupt control registers */
	writel(SCC_SCM_INTR_CTRL_CLR_INTR |
	       SCC_SCM_INTR_CTRL_MASK_INTR,
	       scc->base + SCC_SCM_INTR_CTRL);

	writel(SCC_SMN_COMMAND_CLR_INTR |
	       SCC_SMN_COMMAND_EN_INTR,
	       scc->base + SCC_SMN_COMMAND);

	scc->dev = dev;
	platform_set_drvdata(pdev, scc);

	ret = mxc_scc_get_config(scc);
	if (ret)
		goto err_out;

	state = mxc_scc_get_state(scc);

	if (state != SCC_STATE_OK) {
		dev_err(dev, "SCC in unusable state %d\n", state);
		ret = -EINVAL;
		goto err_out;
	}

	mxc_scc_hw_init(scc);

	spin_lock_init(&scc->lock);
	/* FIXME: calculate queue from RAM slots */
	crypto_init_queue(&scc->queue, 50);

	for (i = 0; i < 2; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			dev_err(dev, "failed to get irq resource: %d\n", irq);
			ret = irq;
			goto err_out;
		}

		ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
						IRQF_ONESHOT, dev_name(dev), scc);
		if (ret)
			goto err_out;
	}

	ret = mxc_scc_crypto_register(scc);
	if (ret) {
		dev_err(dev, "could not register algorithms\n");
		goto err_out;
	}

	dev_info(dev, "registered successfully.\n");

	return 0;

err_out:
	clk_disable_unprepare(scc->clk);

	return ret;
}

static int mxc_scc_remove(struct platform_device *pdev)
{
	struct mxc_scc *scc = platform_get_drvdata(pdev);

	mxc_scc_crypto_unregister();

	clk_disable_unprepare(scc->clk);

	return 0;
}

static const struct of_device_id mxc_scc_dt_ids[] = {
	{ .compatible = "fsl,imx25-scc", .data = NULL, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);

static struct platform_driver mxc_scc_driver = {
	.probe	= mxc_scc_probe,
	.remove	= mxc_scc_remove,
	.driver	= {
		.name		= "mxc-scc",
		.of_match_table	= mxc_scc_dt_ids,
	},
};

module_platform_driver(mxc_scc_driver);
MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
MODULE_LICENSE("GPL v2");