// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Component Authentication Protocol (CAP) Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 */

#include "greybus.h"

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#include "greybus_authentication.h"
#include "firmware.h"

#define CAP_TIMEOUT_MS		1000

/*
 * Number of minor devices this driver supports.
 * There will be exactly one required per Interface.
 */
#define NUM_MINORS		U8_MAX

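/*
 * Per-connection state for the CAP protocol: one instance is allocated for
 * each authentication connection (and hence for each Interface), and is
 * exposed to userspace through a character device.
 */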
struct gb_cap {
	struct device		*parent;
	struct gb_connection	*connection;
	struct kref		kref;
	struct list_head	node;
	bool			disabled; /* connection getting disabled */

	struct mutex		mutex;
	struct cdev		cdev;
	struct device		*class_device;
	dev_t			dev_num;
};

static struct class *cap_class;
static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
static DEFINE_MUTEX(list_mutex);

static void cap_kref_release(struct kref *kref)
{
	struct gb_cap *cap = container_of(kref, struct gb_cap, kref);

	kfree(cap);
}

/*
 * All users of cap take a reference (from within the list_mutex lock) before
 * they get a pointer to play with. The structure is freed only after the
 * last user has dropped its reference to it.
 */
static void put_cap(struct gb_cap *cap)
{
	kref_put(&cap->kref, cap_kref_release);
}

/* Caller must call put_cap() after using struct gb_cap */
static struct gb_cap *get_cap(struct cdev *cdev)
{
	struct gb_cap *cap;

	mutex_lock(&list_mutex);

	list_for_each_entry(cap, &cap_list, node) {
		if (&cap->cdev == cdev) {
			kref_get(&cap->kref);
			goto unlock;
		}
	}

	cap = NULL;

unlock:
	mutex_unlock(&list_mutex);

	return cap;
}

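/*
 * Fetch the Endpoint Unique ID with a synchronous Greybus operation. The
 * request carries no payload; the response holds the UID, which is copied
 * out to the caller's buffer.
 */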
static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_endpoint_uid_response response;
	int ret;

	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
		return ret;
	}

	memcpy(euid, response.uid, sizeof(response.uid));

	return 0;
}

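/*
 * Read an IMS certificate of the given class and id. The certificate length
 * isn't known in advance, so the operation is created with the maximum
 * payload size and GB_OPERATION_FLAG_SHORT_RESPONSE to allow a shorter
 * response; the actual size is derived from the received payload.
 */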
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
				   u8 *certificate, u32 *size, u8 *result)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_ims_certificate_request *request;
	struct gb_cap_get_ims_certificate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection,
				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->certificate_class = cpu_to_le32(class);
	request->certificate_id = cpu_to_le32(id);

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*size = op->response->payload_size - sizeof(*response);
	memcpy(certificate, response->certificate, *size);

done:
	gb_operation_put(op);
	return ret;
}

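/*
 * Run an authentication exchange: send the UID and challenge to the Interface
 * and collect the result code, challenge response and signature. As with the
 * certificate request, the signature length is variable and is computed from
 * the actual response payload size.
 */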
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
			    u8 *challenge, u8 *result, u8 *auth_response,
			    u32 *signature_size, u8 *signature)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_authenticate_request *request;
	struct gb_cap_authenticate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->auth_type = cpu_to_le32(auth_type);
	memcpy(request->uid, uid, sizeof(request->uid));
	memcpy(request->challenge, challenge, sizeof(request->challenge));

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*signature_size = op->response->payload_size - sizeof(*response);
	memcpy(auth_response, response->response, sizeof(response->response));
	memcpy(signature, response->signature, *signature_size);

done:
	gb_operation_put(op);
	return ret;
}

/* Char device fops */

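/*
 * A minimal userspace sketch of how this char device is meant to be driven,
 * going by the ioctls handled below and the structures declared in
 * greybus_authentication.h. The device path assumes the "gb-authenticate-%d"
 * node created further down; error handling is omitted.
 *
 *	int fd = open("/dev/gb-authenticate-0", O_RDWR);
 *	struct cap_ioc_get_endpoint_uid euid;
 *
 *	if (ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &euid) == 0)
 *		printf("endpoint uid byte 0: %02x\n", euid.uid[0]);
 *	close(fd);
 */
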
static int cap_open(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = get_cap(inode->i_cdev);

	/* cap structure can't get freed until file descriptor is closed */
	if (cap) {
		file->private_data = cap;
		return 0;
	}

	return -ENODEV;
}

static int cap_release(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = file->private_data;

	put_cap(cap);
	return 0;
}

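/*
 * ioctl dispatcher. The certificate and authenticate requests use large
 * userspace structures, so they are duplicated into kernel memory with
 * memdup_user(), updated in place, and copied back on success.
 */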
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
		     void __user *buf)
{
	struct cap_ioc_get_endpoint_uid endpoint_uid;
	struct cap_ioc_get_ims_certificate *ims_cert;
	struct cap_ioc_authenticate *authenticate;
	size_t size;
	int ret;

	switch (cmd) {
	case CAP_IOC_GET_ENDPOINT_UID:
		ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
		if (ret)
			return ret;

		if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
			return -EFAULT;

		return 0;
	case CAP_IOC_GET_IMS_CERTIFICATE:
		size = sizeof(*ims_cert);
		ims_cert = memdup_user(buf, size);
		if (IS_ERR(ims_cert))
			return PTR_ERR(ims_cert);

		ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
					      ims_cert->certificate_id,
					      ims_cert->certificate,
					      &ims_cert->cert_size,
					      &ims_cert->result_code);
		if (!ret && copy_to_user(buf, ims_cert, size))
			ret = -EFAULT;
		kfree(ims_cert);

		return ret;
	case CAP_IOC_AUTHENTICATE:
		size = sizeof(*authenticate);
		authenticate = memdup_user(buf, size);
		if (IS_ERR(authenticate))
			return PTR_ERR(authenticate);

		ret = cap_authenticate(cap, authenticate->auth_type,
				       authenticate->uid,
				       authenticate->challenge,
				       &authenticate->result_code,
				       authenticate->response,
				       &authenticate->signature_size,
				       authenticate->signature);
		if (!ret && copy_to_user(buf, authenticate, size))
			ret = -EFAULT;
		kfree(authenticate);

		return ret;
	default:
		return -ENOTTY;
	}
}

static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct gb_cap *cap = file->private_data;
	struct gb_bundle *bundle = cap->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do multiple authentication operations in
	 * parallel.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&cap->mutex);
	if (!cap->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = cap_ioctl(cap, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&cap->mutex);

	return ret;
}

static const struct file_operations cap_fops = {
	.owner		= THIS_MODULE,
	.open		= cap_open,
	.release	= cap_release,
	.unlocked_ioctl	= cap_ioctl_unlocked,
};

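/*
 * Set up a CAP connection: allocate the per-connection state, add it to the
 * global list, enable the Greybus connection, and then register the character
 * device (minor number, cdev and class device) through which userspace talks
 * to it. The error path unwinds these steps in reverse order.
 */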
int gb_cap_connection_init(struct gb_connection *connection)
{
	struct gb_cap *cap;
	int ret, minor;

	if (!connection)
		return 0;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return -ENOMEM;

	cap->parent = &connection->bundle->dev;
	cap->connection = connection;
	mutex_init(&cap->mutex);
	gb_connection_set_data(connection, cap);
	kref_init(&cap->kref);

	mutex_lock(&list_mutex);
	list_add(&cap->node, &cap_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with cap */
	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
	cdev_init(&cap->cdev, &cap_fops);

	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
					  NULL, "gb-authenticate-%d", minor);
	if (IS_ERR(cap->class_device)) {
		ret = PTR_ERR(cap->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&cap->cdev);
err_remove_ida:
	ida_simple_remove(&cap_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	put_cap(cap);

	return ret;
}

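/*
 * Tear down a CAP connection: remove the character device first so userspace
 * can't open it any more, block new ioctls via ->disabled, disable the
 * connection, unlink the state from the global list and finally drop the
 * initial reference.
 */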
void gb_cap_connection_exit(struct gb_connection *connection)
{
	struct gb_cap *cap;

	if (!connection)
		return;

	cap = gb_connection_get_data(connection);

	device_destroy(cap_class, cap->dev_num);
	cdev_del(&cap->cdev);
	ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&cap->mutex);
	cap->disabled = true;
	mutex_unlock(&cap->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(cap->connection);

	/* Disallow new users to get access to the cap structure */
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of cap have already taken a reference to it. We
	 * can drop our reference now; the structure will be freed once the
	 * last user drops theirs.
	 */
	put_cap(cap);
}

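/*
 * Module-level setup: create the "gb_authenticate" class and reserve a char
 * device region with NUM_MINORS minors. cap_exit() below releases both and
 * destroys the minor-number IDA.
 */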
int cap_init(void)
{
	int ret;

	cap_class = class_create(THIS_MODULE, "gb_authenticate");
	if (IS_ERR(cap_class))
		return PTR_ERR(cap_class);

	ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
				  "gb_authenticate");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_destroy(cap_class);
	return ret;
}

void cap_exit(void)
{
	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
	class_destroy(cap_class);
	ida_destroy(&cap_minors_map);
}