/*
 * AMD Secure Processor driver
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "sp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1.0");
MODULE_DESCRIPTION("AMD Secure Processor driver");

/* List of SPs, SP count, read-write access lock, and access functions
 *
 * Lock structure: get sp_unit_lock for reading whenever we need to
 * examine the SP list.
 */
static DEFINE_RWLOCK(sp_unit_lock);
static LIST_HEAD(sp_units);

/* Ever-increasing value to produce unique unit numbers */
static atomic_t sp_ordinal;

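/* Add an SP device to the global unit list under the write lock */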
static void sp_add_device(struct sp_device *sp)
{
        unsigned long flags;

        write_lock_irqsave(&sp_unit_lock, flags);

        list_add_tail(&sp->entry, &sp_units);

        write_unlock_irqrestore(&sp_unit_lock, flags);
}

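/* Remove an SP device from the global unit list under the write lock */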
static void sp_del_device(struct sp_device *sp)
{
        unsigned long flags;

        write_lock_irqsave(&sp_unit_lock, flags);

        list_del(&sp->entry);

        write_unlock_irqrestore(&sp_unit_lock, flags);
}

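/*
 * Shared interrupt handler used when the CCP and PSP sub-devices sit
 * behind a single IRQ: dispatch to whichever sub-device handlers have
 * been registered.
 */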
static irqreturn_t sp_irq_handler(int irq, void *data)
{
        struct sp_device *sp = data;

        if (sp->ccp_irq_handler)
                sp->ccp_irq_handler(irq, sp->ccp_irq_data);

        if (sp->psp_irq_handler)
                sp->psp_irq_handler(irq, sp->psp_irq_data);

        return IRQ_HANDLED;
}

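/**
 * sp_request_ccp_irq - register an interrupt handler for the CCP sub-device
 *
 * @sp: sp_device struct of the SP
 * @handler: interrupt handler to register
 * @name: name to associate with the interrupt
 * @data: context data passed to the handler
 *
 * If the CCP shares its IRQ with the PSP, the handler is multiplexed
 * through sp_irq_handler(); otherwise it is registered directly.
 */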
int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
                       const char *name, void *data)
{
        int ret;

        if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
                /* Need a common routine to manage all interrupts */
                sp->ccp_irq_data = data;
                sp->ccp_irq_handler = handler;

                if (!sp->irq_registered) {
                        ret = request_irq(sp->ccp_irq, sp_irq_handler, 0,
                                          sp->name, sp);
                        if (ret)
                                return ret;

                        sp->irq_registered = true;
                }
        } else {
                /* Each sub-device can manage its own interrupt */
                ret = request_irq(sp->ccp_irq, handler, 0, name, data);
                if (ret)
                        return ret;
        }

        return 0;
}

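/**
 * sp_request_psp_irq - register an interrupt handler for the PSP sub-device
 *
 * @sp: sp_device struct of the SP
 * @handler: interrupt handler to register
 * @name: name to associate with the interrupt
 * @data: context data passed to the handler
 *
 * If the PSP shares its IRQ with the CCP, the handler is multiplexed
 * through sp_irq_handler(); otherwise it is registered directly.
 */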
int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
                       const char *name, void *data)
{
        int ret;

        if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
                /* Need a common routine to manage all interrupts */
                sp->psp_irq_data = data;
                sp->psp_irq_handler = handler;

                if (!sp->irq_registered) {
                        ret = request_irq(sp->psp_irq, sp_irq_handler, 0,
                                          sp->name, sp);
                        if (ret)
                                return ret;

                        sp->irq_registered = true;
                }
        } else {
                /* Each sub-device can manage its own interrupt */
                ret = request_irq(sp->psp_irq, handler, 0, name, data);
                if (ret)
                        return ret;
        }

        return 0;
}

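/**
 * sp_free_ccp_irq - release the CCP sub-device interrupt handler
 *
 * @sp: sp_device struct of the SP
 * @data: context data passed when the handler was registered
 *
 * When the IRQ is shared with the PSP, the IRQ line itself is only freed
 * once no other sub-device handler remains registered.
 */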
void sp_free_ccp_irq(struct sp_device *sp, void *data)
{
        if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
                /* Using common routine to manage all interrupts */
                if (!sp->psp_irq_handler) {
                        /* Nothing else using it, so free it */
                        free_irq(sp->ccp_irq, sp);

                        sp->irq_registered = false;
                }

                sp->ccp_irq_handler = NULL;
                sp->ccp_irq_data = NULL;
        } else {
                /* Each sub-device can manage its own interrupt */
                free_irq(sp->ccp_irq, data);
        }
}

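/**
 * sp_free_psp_irq - release the PSP sub-device interrupt handler
 *
 * @sp: sp_device struct of the SP
 * @data: context data passed when the handler was registered
 *
 * When the IRQ is shared with the CCP, the IRQ line itself is only freed
 * once no other sub-device handler remains registered.
 */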
void sp_free_psp_irq(struct sp_device *sp, void *data)
{
        if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
                /* Using common routine to manage all interrupts */
                if (!sp->ccp_irq_handler) {
                        /* Nothing else using it, so free it */
                        free_irq(sp->psp_irq, sp);

                        sp->irq_registered = false;
                }

                sp->psp_irq_handler = NULL;
                sp->psp_irq_data = NULL;
        } else {
                /* Each sub-device can manage its own interrupt */
                free_irq(sp->psp_irq, data);
        }
}

/**
 * sp_alloc_struct - allocate and initialize the sp_device struct
 *
 * @dev: device struct of the SP
 */
struct sp_device *sp_alloc_struct(struct device *dev)
{
        struct sp_device *sp;

        sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;

        sp->dev = dev;
        sp->ord = atomic_inc_return(&sp_ordinal);
        snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord);

        return sp;
}

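/**
 * sp_init - register an SP device and initialize its sub-devices
 *
 * @sp: sp_device struct of the SP
 *
 * Adds the device to the unit list, then initializes whichever of the
 * CCP and PSP sub-devices the version data says are present.
 */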
int sp_init(struct sp_device *sp)
{
        sp_add_device(sp);

        if (sp->dev_vdata->ccp_vdata)
                ccp_dev_init(sp);

        if (sp->dev_vdata->psp_vdata)
                psp_dev_init(sp);

        return 0;
}

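/**
 * sp_destroy - tear down an SP device's sub-devices and unregister it
 *
 * @sp: sp_device struct of the SP
 */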
void sp_destroy(struct sp_device *sp)
{
        if (sp->dev_vdata->ccp_vdata)
                ccp_dev_destroy(sp);

        if (sp->dev_vdata->psp_vdata)
                psp_dev_destroy(sp);

        sp_del_device(sp);
}

#ifdef CONFIG_PM
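/**
 * sp_suspend - suspend the SP device for a power-management transition
 *
 * @sp: sp_device struct of the SP
 * @state: power-management message describing the transition
 *
 * Only the CCP sub-device currently has suspend work to do.
 */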
int sp_suspend(struct sp_device *sp, pm_message_t state)
{
        int ret;

        if (sp->dev_vdata->ccp_vdata) {
                ret = ccp_dev_suspend(sp, state);
                if (ret)
                        return ret;
        }

        return 0;
}

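/**
 * sp_resume - resume the SP device after a power-management transition
 *
 * @sp: sp_device struct of the SP
 */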
int sp_resume(struct sp_device *sp)
{
        int ret;

        if (sp->dev_vdata->ccp_vdata) {
                ret = ccp_dev_resume(sp);
                if (ret)
                        return ret;
        }

        return 0;
}
#endif

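/**
 * sp_get_psp_master_device - return the SP device acting as PSP master
 *
 * Walks the unit list and asks the first PSP-capable device for the
 * master device; returns NULL if none is found.
 */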
struct sp_device *sp_get_psp_master_device(void)
{
        struct sp_device *i, *ret = NULL;
        unsigned long flags;

        /* Only examining the list, so the read lock is sufficient */
        read_lock_irqsave(&sp_unit_lock, flags);
        if (list_empty(&sp_units))
                goto unlock;

        list_for_each_entry(i, &sp_units, entry) {
                if (i->psp_data && i->get_psp_master_device) {
                        ret = i->get_psp_master_device();
                        break;
                }
        }

unlock:
        read_unlock_irqrestore(&sp_unit_lock, flags);
        return ret;
}

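/*
 * Module init/exit: register with the appropriate bus for the platform,
 * PCI on x86 and the platform bus on arm64.
 */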
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
        int ret;

        ret = sp_pci_init();
        if (ret)
                return ret;

#ifdef CONFIG_CRYPTO_DEV_SP_PSP
        psp_pci_init();
#endif

        return 0;
#endif

#ifdef CONFIG_ARM64
        int ret;

        ret = sp_platform_init();
        if (ret)
                return ret;

        return 0;
#endif

        return -ENODEV;
}

static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86

#ifdef CONFIG_CRYPTO_DEV_SP_PSP
        psp_pci_exit();
#endif

        sp_pci_exit();
#endif

#ifdef CONFIG_ARM64
        sp_platform_exit();
#endif
}

module_init(sp_mod_init);
module_exit(sp_mod_exit);