/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};
/* Human-readable error strings */
static char *ccp_error_codes[] = {
	"",
	"ERR 01: ILLEGAL_ENGINE",
	"ERR 02: ILLEGAL_KEY_ID",
	"ERR 03: ILLEGAL_FUNCTION_TYPE",
	"ERR 04: ILLEGAL_FUNCTION_MODE",
	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
	"ERR 06: ILLEGAL_FUNCTION_SIZE",
	"ERR 07: Zlib_MISSING_INIT_EOM",
	"ERR 08: ILLEGAL_FUNCTION_RSVD",
	"ERR 09: ILLEGAL_BUFFER_LENGTH",
	"ERR 10: VLSB_FAULT",
	"ERR 11: ILLEGAL_MEM_ADDR",
	"ERR 12: ILLEGAL_MEM_SEL",
	"ERR 13: ILLEGAL_CONTEXT_ID",
	"ERR 14: ILLEGAL_KEY_ADDR",
	"ERR 15: 0xF Reserved",
	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
	"ERR 18: CMD_TIMEOUT",
	"ERR 19: IDMA0_AXI_SLVERR",
	"ERR 20: IDMA0_AXI_DECERR",
	"ERR 21: 0x15 Reserved",
	"ERR 22: IDMA1_AXI_SLVERR",
	"ERR 23: IDMA1_AXI_DECERR",
	"ERR 24: 0x18 Reserved",
	"ERR 25: ZLIBVHB_AXI_SLVERR",
	"ERR 26: ZLIBVHB_AXI_DECERR",
	"ERR 27: 0x1B Reserved",
	"ERR 28: ZLIB_UNEXPECTED_EOM",
	"ERR 29: ZLIB_EXTRA_DATA",
	"ERR 30: ZLIB_BTYPE",
	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
	"ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
	"ERR 36: ZLIB_LIMIT_REACHED",
	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
	"ERR 38: ODMA0_AXI_SLVERR",
	"ERR 39: ODMA0_AXI_DECERR",
	"ERR 40: 0x28 Reserved",
	"ERR 41: ODMA1_AXI_SLVERR",
	"ERR 42: ODMA1_AXI_DECERR",
	"ERR 43: LSB_PARITY_ERR",
};
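
/* Log a device error: look up the human-readable string for the
 * engine-reported error code. There is no bounds check here, so
 * callers are expected to pass a code within the table above
 * (0 through ERR 43).
 */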
void ccp_log_error(struct ccp_device *d, int e)
{
	dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
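
/*
 * Lock-ordering sketch (illustration only; this mirrors what
 * ccp_get_device() below actually does). Readers nest the RR lock
 * inside the unit lock, never the other way around:
 *
 *	read_lock_irqsave(&ccp_unit_lock, flags);
 *	spin_lock(&ccp_rr_lock);
 *	... examine the list and advance ccp_rr ...
 *	spin_unlock(&ccp_rr_lock);
 *	read_unlock_irqrestore(&ccp_unit_lock, flags);
 */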

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list,
 * or zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
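
/*
 * Consumer-side sketch (hypothetical caller, for illustration only;
 * CCP_VERSION() is assumed to be the version-encoding macro from
 * <linux/ccp.h>):
 *
 *	if (ccp_present() != 0)
 *		return -ENODEV;		no CCP, fall back to software
 *	if (ccp_version() < CCP_VERSION(5, 0))
 *		... restrict ourselves to the v3 feature set ...
 */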

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * accepted only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case it is backlogged and -EBUSY is returned; otherwise -ENOSPC
 * is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

	/* Some commands might need to be sent to a specific device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
			ret = -EBUSY;
			list_add_tail(&cmd->entry, &ccp->backlog);
		} else {
			ret = -ENOSPC;
		}
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
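
/*
 * Usage sketch for ccp_enqueue_cmd() (hypothetical caller, for
 * illustration only; my_ctx and my_callback are made-up names):
 *
 *	static void my_callback(void *data, int err)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		if (err == -EINPROGRESS)
 *			return;		the cmd just left the backlog
 *		ctx->result = err;	err is the operation result
 *		complete(&ctx->done);
 *	}
 *
 *	cmd->flags = CCP_CMD_MAY_BACKLOG;
 *	cmd->callback = my_callback;
 *	cmd->data = ctx;
 *	ret = ccp_enqueue_cmd(cmd);
 *	if (ret != -EINPROGRESS && ret != -EBUSY)
 *		the cmd was never queued; handle ret as an error
 */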
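
/* Promote one backlogged command: notify its owner that it has
 * advanced out of the backlog (err == -EINPROGRESS), then place it
 * on the cmd list and kick an idle queue to run it.
 */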
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}
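
/* Called from a queue kthread: clear this queue's active flag and,
 * if the device is suspending, mark the queue suspended and return
 * NULL. Otherwise claim the next command (if any) and hand one
 * backlogged command to the workqueue so its owner learns it has
 * advanced.
 */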
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}
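
/* Tasklet handler: run the command's completion callback in softirq
 * context, then signal the queue kthread waiting on tdata->completion.
 */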
static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);

	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;
	ccp->sp = sp;
	ccp->axcache = sp->axcache;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	/* Initialize the wait queues */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

	return ccp;
}
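
/* hwrng read callback: pull up to one 32-bit word from the TRNG
 * output register. A zero read means no entropy was available; more
 * than TRNG_RETRIES consecutive zero reads are treated as a hardware
 * fault and reported as -EIO.
 */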
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

#ifdef CONFIG_PM
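
/* Suspend handshake: ccp_dev_suspend() sets ccp->suspending and wakes
 * every queue kthread; each kthread notices the flag in
 * ccp_dequeue_cmd(), marks itself suspended, and this helper reports
 * when all of them have done so.
 */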
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}

int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

int ccp_dev_resume(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

int ccp_dev_init(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(sp);
	if (!ccp)
		goto e_err;
	sp->ccp_data = ccp;

	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ccp->use_tasklet = sp->use_tasklet;

	ccp->io_regs = sp->io_map + ccp->vdata->offset;
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_err;

	dev_notice(dev, "ccp enabled\n");

	return 0;

e_err:
	sp->ccp_data = NULL;

	dev_notice(dev, "ccp initialization failed\n");

	return ret;
}

void ccp_dev_destroy(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);
}