// SPDX-License-Identifier: GPL-2.0-only
/*
 * This module provides an interface to trigger and test firmware loading.
 *
 * It is designed to be used for basic evaluation of the firmware loading
 * subsystem (for example when validating firmware verification). It lacks
 * any extra dependencies, and will not normally be loaded by the system
 * unless explicitly requested by name.
 */
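
/*
 * Illustrative usage sketch (not part of the driver): once this module is
 * loaded, its device attributes typically appear under
 * /sys/devices/virtual/misc/test_firmware/ and the misc character device
 * under /dev/test_firmware. The exact paths are assumptions here; the
 * scripts in tools/testing/selftests/firmware/ are the authoritative users
 * of this interface. A minimal synchronous request might look like:
 *
 *	echo -n "test-firmware.bin" > trigger_request
 *	cat /dev/test_firmware > /tmp/loaded-firmware.bin
 */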

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#define TEST_FIRMWARE_NAME	"test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS	4
#define TEST_FIRMWARE_BUF_SIZE	SZ_1K

static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;

struct test_batched_req {
	u8 idx;
	int rc;
	bool sent;
	const struct firmware *fw;
	const char *name;
	struct completion completion;
	struct task_struct *task;
	struct device *dev;
};
/**
 * test_config - represents configuration for the test for different triggers
 *
 * @name: the name of the firmware file to look for
 * @into_buf: if true, batched sync requests will use
 *	request_firmware_into_buf() instead of the @req_firmware call.
 * @sync_direct: if true, sync requests will use request_firmware_direct()
 *	instead of request_firmware().
 * @send_uevent: whether or not to send a uevent for async requests
 * @num_requests: number of requests to try per test case. This is trigger
 *	specific.
 * @reqs: stores all requests information
 * @read_fw_idx: index of the request whose firmware results we want to read
 *	through the read_fw trigger.
 * @test_result: a test may use this to collect the result from the call
 *	of the request_firmware*() calls used in their tests. In order of
 *	priority we always keep first any setup error. If no setup errors were
 *	found then we move on to the first error encountered while running the
 *	API. Note that for async calls this typically will be a successful
 *	result (0) unless of course you've used bogus parameters, or the system
 *	is out of memory.  In the async case the callback is expected to do a
 *	bit more homework to figure out what happened; unfortunately the only
 *	information passed today on error is the fact that no firmware was
 *	found, so we can only assume -ENOENT on async calls if the firmware is
 *	NULL.
 *
 *	Errors you can expect:
 *
 *	API specific:
 *
 *	0:		success for sync, for async it means request was sent
 *	-EINVAL:	invalid parameters or request
 *	-ENOENT:	file not found
 *
 *	System environment:
 *
 *	-ENOMEM:	memory pressure on system
 *	-ENODEV:	no more devices left to test
 *	-EINVAL:	an unexpected error has occurred
 * @req_firmware: if @sync_direct is true this is set to
 *	request_firmware_direct(), otherwise request_firmware()
 */
struct test_config {
	char *name;
	bool into_buf;
	bool sync_direct;
	bool send_uevent;
	u8 num_requests;
	u8 read_fw_idx;

	/*
	 * These below don't belong here, but we'll move them once we create
	 * a struct fw_test_device and stuff the misc_dev under it later.
	 */
	struct test_batched_req *reqs;
	int test_result;
	int (*req_firmware)(const struct firmware **fw, const char *name,
			    struct device *device);
};

static struct test_config *test_fw_config;

static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
				 size_t size, loff_t *offset)
{
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);
	if (test_firmware)
		rc = simple_read_from_buffer(buf, size, offset,
					     test_firmware->data,
					     test_firmware->size);
	mutex_unlock(&test_fw_mutex);
	return rc;
}

static const struct file_operations test_fw_fops = {
	.owner          = THIS_MODULE,
	.read           = test_fw_misc_read,
};

static void __test_release_all_firmware(void)
{
	struct test_batched_req *req;
	u8 i;

	if (!test_fw_config->reqs)
		return;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->fw)
			release_firmware(req->fw);
	}

	vfree(test_fw_config->reqs);
	test_fw_config->reqs = NULL;
}

static void test_release_all_firmware(void)
{
	mutex_lock(&test_fw_mutex);
	__test_release_all_firmware();
	mutex_unlock(&test_fw_mutex);
}


static void __test_firmware_config_free(void)
{
	__test_release_all_firmware();
	kfree_const(test_fw_config->name);
	test_fw_config->name = NULL;
}

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
	*dst = kstrndup(name, count, gfp);
	if (!*dst)
		return -ENOSPC;
	return count;
}

static int __test_firmware_config_init(void)
{
	int ret;

	ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
			 strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
	if (ret < 0)
		goto out;

	test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
	test_fw_config->send_uevent = true;
	test_fw_config->into_buf = false;
	test_fw_config->sync_direct = false;
	test_fw_config->req_firmware = request_firmware;
	test_fw_config->test_result = 0;
	test_fw_config->reqs = NULL;

	return 0;

out:
	__test_firmware_config_free();
	return ret;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);

	__test_firmware_config_free();

	ret = __test_firmware_config_init();
	if (ret < 0) {
		ret = -ENOMEM;
		pr_err("could not alloc settings for config trigger: %d\n",
		       ret);
		goto out;
	}

	pr_info("reset\n");
	ret = count;

out:
	mutex_unlock(&test_fw_mutex);

	return ret;
}
static DEVICE_ATTR_WO(reset);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int len = 0;

	mutex_lock(&test_fw_mutex);

	len += scnprintf(buf, PAGE_SIZE - len,
			"Custom trigger configuration for: %s\n",
			dev_name(dev));

	if (test_fw_config->name)
		len += scnprintf(buf+len, PAGE_SIZE - len,
				"name:\t%s\n",
				test_fw_config->name);
	else
		len += scnprintf(buf+len, PAGE_SIZE - len,
				"name:\tEMPTY\n");

	len += scnprintf(buf+len, PAGE_SIZE - len,
			"num_requests:\t%u\n", test_fw_config->num_requests);

	len += scnprintf(buf+len, PAGE_SIZE - len,
			"send_uevent:\t\t%s\n",
			test_fw_config->send_uevent ?
			"FW_ACTION_HOTPLUG" :
			"FW_ACTION_NOHOTPLUG");
	len += scnprintf(buf+len, PAGE_SIZE - len,
			"into_buf:\t\t%s\n",
			test_fw_config->into_buf ? "true" : "false");
	len += scnprintf(buf+len, PAGE_SIZE - len,
			"sync_direct:\t\t%s\n",
			test_fw_config->sync_direct ? "true" : "false");
	len += scnprintf(buf+len, PAGE_SIZE - len,
			"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);

	mutex_unlock(&test_fw_mutex);

	return len;
}
static DEVICE_ATTR_RO(config);

static ssize_t config_name_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	kfree_const(test_fw_config->name);
	ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
	mutex_unlock(&test_fw_mutex);

	return ret;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(char *dst,
				    char *src)
{
	int len;

	mutex_lock(&test_fw_mutex);
	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
	mutex_unlock(&test_fw_mutex);

	return len;
}

static int test_dev_config_update_bool(const char *buf, size_t size,
				       bool *cfg)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	if (strtobool(buf, cfg) < 0)
		ret = -EINVAL;
	else
		ret = size;
	mutex_unlock(&test_fw_mutex);

	return ret;
}

static ssize_t
test_dev_config_show_bool(char *buf,
			  bool config)
{
	bool val;

	mutex_lock(&test_fw_mutex);
	val = config;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t test_dev_config_show_int(char *buf, int cfg)
{
	int val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
	int ret;
	long new;

	ret = kstrtol(buf, 10, &new);
	if (ret)
		return ret;

	if (new > U8_MAX)
		return -EINVAL;

	mutex_lock(&test_fw_mutex);
	*(u8 *)cfg = new;
	mutex_unlock(&test_fw_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}

static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
{
	u8 val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t config_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR_RW(config_name);

static ssize_t config_num_requests_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int rc;

	mutex_lock(&test_fw_mutex);
	if (test_fw_config->reqs) {
		pr_err("Must call release_all_firmware prior to changing config\n");
		rc = -EINVAL;
		mutex_unlock(&test_fw_mutex);
		goto out;
	}
	mutex_unlock(&test_fw_mutex);

	rc = test_dev_config_update_u8(buf, count,
				       &test_fw_config->num_requests);

out:
	return rc;
}

static ssize_t config_num_requests_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR_RW(config_num_requests);

static ssize_t config_into_buf_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return test_dev_config_update_bool(buf,
					   count,
					   &test_fw_config->into_buf);
}

static ssize_t config_into_buf_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->into_buf);
}
static DEVICE_ATTR_RW(config_into_buf);

static ssize_t config_sync_direct_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int rc = test_dev_config_update_bool(buf, count,
					     &test_fw_config->sync_direct);

	if (rc == count)
		test_fw_config->req_firmware = test_fw_config->sync_direct ?
				       request_firmware_direct :
				       request_firmware;
	return rc;
}

static ssize_t config_sync_direct_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR_RW(config_sync_direct);

static ssize_t config_send_uevent_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_bool(buf, count,
					   &test_fw_config->send_uevent);
}

static ssize_t config_send_uevent_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR_RW(config_send_uevent);

static ssize_t config_read_fw_idx_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_u8(buf, count,
					 &test_fw_config->read_fw_idx);
}

static ssize_t config_read_fw_idx_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR_RW(config_read_fw_idx);

static ssize_t trigger_request_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware(&test_firmware, name, dev);
	if (rc) {
		pr_info("load of '%s' failed: %d\n", name, rc);
		goto out;
	}
	pr_info("loaded: %zu\n", test_firmware->size);
	rc = count;

out:
	mutex_unlock(&test_fw_mutex);

	kfree(name);

	return rc;
}
static DEVICE_ATTR_WO(trigger_request);

static DECLARE_COMPLETION(async_fw_done);

static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
	test_firmware = fw;
	complete(&async_fw_done);
}

static ssize_t trigger_async_request_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
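	/*
	 * The second argument (1) is the uevent flag (FW_ACTION_HOTPLUG in
	 * this kernel's firmware API): a uevent is emitted so the usermode
	 * fallback mechanism may service the request if direct filesystem
	 * loading does not.
	 */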
	rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
				     NULL, trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENOMEM;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_async_request);

static ssize_t trigger_custom_fallback_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s' using custom fallback mechanism\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
				     dev, GFP_KERNEL, NULL,
				     trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENODEV;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_custom_fallback);

static int test_fw_run_batch_request(void *data)
{
	struct test_batched_req *req = data;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return -EINVAL;
	}

	if (test_fw_config->into_buf) {
		void *test_buf;

		test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
		if (!test_buf)
			return -ENOSPC;

		req->rc = request_firmware_into_buf(&req->fw,
						    req->name,
						    req->dev,
						    test_buf,
						    TEST_FIRMWARE_BUF_SIZE);
		if (!req->fw)
			kfree(test_buf);
	} else {
		req->rc = test_fw_config->req_firmware(&req->fw,
						       req->name,
						       req->dev);
	}

	if (req->rc) {
		pr_info("#%u: batched sync load failed: %d\n",
			req->idx, req->rc);
		if (!test_fw_config->test_result)
			test_fw_config->test_result = req->rc;
	} else if (req->fw) {
		req->sent = true;
		pr_info("#%u: batched sync loaded %zu\n",
			req->idx, req->fw->size);
	}
	complete(&req->completion);

	req->task = NULL;

	return 0;
}
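
/*
 * Illustrative userspace sketch for the batched sync trigger below (sysfs
 * paths are assumptions, see the usage note at the top of this file):
 *
 *	echo 1 > reset
 *	echo -n "test-firmware.bin" > config_name
 *	echo 4 > config_num_requests
 *	echo 1 > trigger_batched_requests
 *	cat test_result
 *	echo 1 > release_all_firmware
 */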

/*
 * We use a kthread as otherwise the kernel serializes all our sync requests
 * and we would not be able to mimic batched requests on a sync call. Batched
 * requests on a sync call can for instance happen in a device driver when
 * multiple cards are used and firmware loading happens outside of probe.
 */
static ssize_t trigger_batched_requests_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct test_batched_req *req;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs =
		vzalloc(array3_size(sizeof(struct test_batched_req),
				    test_fw_config->num_requests, 2));
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	pr_info("batched sync firmware loading '%s' %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		req->fw = NULL;
		req->idx = i;
		req->name = test_fw_config->name;
		req->dev = dev;
		init_completion(&req->completion);
		req->task = kthread_run(test_fw_run_batch_request, req,
					     "%s-%u", KBUILD_MODNAME, req->idx);
		if (!req->task || IS_ERR(req->task)) {
			pr_err("Setting up thread %u failed\n", req->idx);
			req->task = NULL;
			rc = -ENOMEM;
			goto out_bail;
		}
	}

	rc = count;

	/*
	 * We require an explicit release to enable more time and delay of
	 * calling release_firmware() to improve our chances of forcing a
	 * batched request. If we instead called release_firmware() right away
	 * then we might miss the opportunity for a successful firmware
	 * request to become a batched request.
	 */

out_bail:
	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->task || req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out_unlock:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests);

/*
 * We wait for each callback to return with the lock held, no need to lock here
 */
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
	struct test_batched_req *req = context;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return;
	}

	/* forces *some* batched requests to queue up */
	if (!req->idx)
		ssleep(2);

	req->fw = fw;

	/*
	 * Unfortunately the firmware API gives us nothing other than a null FW
	 * if the firmware was not found on async requests.  Best we can do is
	 * just assume -ENOENT. A better API would pass the actual return
	 * value to the callback.
	 */
	if (!fw && !test_fw_config->test_result)
		test_fw_config->test_result = -ENOENT;

	complete(&req->completion);
}

static
ssize_t trigger_batched_requests_async_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct test_batched_req *req;
	bool send_uevent;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs =
		vzalloc(array3_size(sizeof(struct test_batched_req),
				    test_fw_config->num_requests, 2));
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out;
	}

	pr_info("batched loading '%s' custom fallback mechanism %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
		FW_ACTION_NOHOTPLUG;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		req->name = test_fw_config->name;
		req->fw = NULL;
		req->idx = i;
		init_completion(&req->completion);
		rc = request_firmware_nowait(THIS_MODULE, send_uevent,
					     req->name,
					     dev, GFP_KERNEL, req,
					     trigger_batched_cb);
		if (rc) {
			pr_info("#%u: batched async load failed setup: %d\n",
				i, rc);
			req->rc = rc;
			goto out_bail;
		} else
			req->sent = true;
	}

	rc = count;

out_bail:

	/*
	 * We require an explicit release to enable more time and delay of
	 * calling release_firmware() to improve our chances of forcing a
	 * batched request. If we instead called release_firmware() right away
	 * then we might miss the opportunity for a successful firmware
	 * request to become a batched request.
	 */

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);

static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);

static ssize_t release_all_firmware_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	test_release_all_firmware();
	return count;
}
static DEVICE_ATTR_WO(release_all_firmware);

static ssize_t read_firmware_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct test_batched_req *req;
	u8 idx;
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);

	idx = test_fw_config->read_fw_idx;
	if (idx >= test_fw_config->num_requests) {
		rc = -ERANGE;
		goto out;
	}

	if (!test_fw_config->reqs) {
		rc = -EINVAL;
		goto out;
	}

	req = &test_fw_config->reqs[idx];
	if (!req->fw) {
		pr_err("#%u: failed to async load firmware\n", idx);
		rc = -ENOENT;
		goto out;
	}

	pr_info("#%u: loaded %zu\n", idx, req->fw->size);

	if (req->fw->size > PAGE_SIZE) {
		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
		rc = -EINVAL;
		goto out;
	}
	memcpy(buf, req->fw->data, req->fw->size);

	rc = req->fw->size;
out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_RO(read_firmware);
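
/*
 * Illustrative follow-up to one of the batched triggers above (sysfs paths
 * are assumptions, see the usage note at the top of this file): pick which
 * request's firmware to inspect, then read it back:
 *
 *	echo 2 > config_read_fw_idx
 *	cat read_firmware > /tmp/firmware-copy.bin
 */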

#define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
	TEST_FW_DEV_ATTR(reset),

	TEST_FW_DEV_ATTR(config),
	TEST_FW_DEV_ATTR(config_name),
	TEST_FW_DEV_ATTR(config_num_requests),
	TEST_FW_DEV_ATTR(config_into_buf),
	TEST_FW_DEV_ATTR(config_sync_direct),
	TEST_FW_DEV_ATTR(config_send_uevent),
	TEST_FW_DEV_ATTR(config_read_fw_idx),

	/* These don't use the config at all - they could be ported! */
	TEST_FW_DEV_ATTR(trigger_request),
	TEST_FW_DEV_ATTR(trigger_async_request),
	TEST_FW_DEV_ATTR(trigger_custom_fallback),

	/* These use the config and can use the test_result */
	TEST_FW_DEV_ATTR(trigger_batched_requests),
	TEST_FW_DEV_ATTR(trigger_batched_requests_async),

	TEST_FW_DEV_ATTR(release_all_firmware),
	TEST_FW_DEV_ATTR(test_result),
	TEST_FW_DEV_ATTR(read_firmware),
	NULL,
};

ATTRIBUTE_GROUPS(test_dev);

static struct miscdevice test_fw_misc_device = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "test_firmware",
	.fops           = &test_fw_fops,
	.groups		= test_dev_groups,
};

static int __init test_firmware_init(void)
{
	int rc;

	test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
	if (!test_fw_config)
		return -ENOMEM;

	rc = __test_firmware_config_init();
	if (rc) {
		kfree(test_fw_config);
		pr_err("could not init firmware test config: %d\n", rc);
		return rc;
	}

	rc = misc_register(&test_fw_misc_device);
	if (rc) {
		kfree(test_fw_config);
		pr_err("could not register misc device: %d\n", rc);
		return rc;
	}

	pr_warn("interface ready\n");

	return 0;
}

module_init(test_firmware_init);

static void __exit test_firmware_exit(void)
{
	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	misc_deregister(&test_fw_misc_device);
	__test_firmware_config_free();
	kfree(test_fw_config);
	mutex_unlock(&test_fw_mutex);

	pr_warn("removed interface\n");
}

module_exit(test_firmware_exit);

MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");