/*
 * This module provides an interface to trigger and test firmware loading.
 *
 * It is designed to be used for basic evaluation of the firmware loading
 * subsystem (for example when validating firmware verification). It lacks
 * any extra dependencies, and will not normally be loaded by the system
 * unless explicitly requested by name.
 */
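
/*
 * Illustrative usage sketch (not an interface contract): assuming this
 * module is loaded and the firmware loader can find the firmware file,
 * the sysfs triggers below are typically driven from user space roughly
 * like this:
 *
 *   DIR=/sys/devices/virtual/misc/test_firmware
 *   echo -n "test-firmware.bin" > $DIR/trigger_request
 *   cat /dev/test_firmware
 *
 * The scripts under tools/testing/selftests/firmware/ exercise this
 * module in full.
 */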

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#define TEST_FIRMWARE_NAME	"test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS	4

static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;

struct test_batched_req {
	u8 idx;
	int rc;
	bool sent;
	const struct firmware *fw;
	const char *name;
	struct completion completion;
	struct task_struct *task;
	struct device *dev;
};

/**
 * test_config - represents configuration for the test for different triggers
 *
 * @name: the name of the firmware file to look for
 * @sync_direct: if true, the sync trigger will use request_firmware_direct()
 *	instead of request_firmware()
 * @send_uevent: whether or not to send a uevent for async requests
 * @num_requests: number of requests to try per test case. This is trigger
 *	specific.
 * @reqs: stores information for all requests
 * @read_fw_idx: index of the request whose firmware results we want to read
 *	through the read_firmware trigger.
 * @test_result: a test may use this to collect the result of the
 *	request_firmware*() calls used in its tests. In order of priority we
 *	always keep any setup error first. If no setup errors were found, we
 *	move on to the first error encountered while running the API. Note
 *	that for async calls this will typically be a successful result (0)
 *	unless of course you've used bogus parameters, or the system is out
 *	of memory. In the async case the callback is expected to do a bit
 *	more homework to figure out what happened; unfortunately, the only
 *	information passed today on error is the fact that no firmware was
 *	found, so we can only assume -ENOENT on async calls if the firmware
 *	is NULL.
 *
 *	Errors you can expect:
 *
 *	API specific:
 *
 *	0:	 success for sync, for async it means request was sent
 *	-EINVAL: invalid parameters or request
 *	-ENOENT: file not found
 *
 *	System environment:
 *
 *	-ENOMEM: memory pressure on system
 *	-ENODEV: ran out of devices to test
 *	-EINVAL: an unexpected error has occurred
 * @req_firmware: if @sync_direct is true this is set to
 *	request_firmware_direct(), otherwise request_firmware()
 */
struct test_config {
	char *name;
	bool sync_direct;
	bool send_uevent;
	u8 num_requests;
	u8 read_fw_idx;

	/*
	 * These below don't belong here but we'll move them once we create
	 * a struct fw_test_device and stuff the misc_dev under there later.
	 */
	struct test_batched_req *reqs;
	int test_result;
	int (*req_firmware)(const struct firmware **fw, const char *name,
			    struct device *device);
};
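
/*
 * Illustrative sketch of how the configuration above is typically driven
 * from user space (paths assume the default misc device name and are not
 * guaranteed by this file; see the firmware selftests for the real flow):
 *
 *   DIR=/sys/devices/virtual/misc/test_firmware
 *   echo 1 > $DIR/reset
 *   echo -n "test-firmware.bin" > $DIR/config_name
 *   echo 4 > $DIR/config_num_requests
 *   echo 1 > $DIR/trigger_batched_requests
 *   cat $DIR/test_result
 *   echo 1 > $DIR/release_all_firmware
 */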

static struct test_config *test_fw_config;

static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
				 size_t size, loff_t *offset)
{
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);
	if (test_firmware)
		rc = simple_read_from_buffer(buf, size, offset,
					     test_firmware->data,
					     test_firmware->size);
	mutex_unlock(&test_fw_mutex);
	return rc;
}

static const struct file_operations test_fw_fops = {
	.owner = THIS_MODULE,
	.read = test_fw_misc_read,
};

static void __test_release_all_firmware(void)
{
	struct test_batched_req *req;
	u8 i;

	if (!test_fw_config->reqs)
		return;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->fw)
			release_firmware(req->fw);
	}

	vfree(test_fw_config->reqs);
	test_fw_config->reqs = NULL;
}

static void test_release_all_firmware(void)
{
	mutex_lock(&test_fw_mutex);
	__test_release_all_firmware();
	mutex_unlock(&test_fw_mutex);
}


static void __test_firmware_config_free(void)
{
	__test_release_all_firmware();
	kfree_const(test_fw_config->name);
	test_fw_config->name = NULL;
}

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
	*dst = kstrndup(name, count, gfp);
	if (!*dst)
		return -ENOSPC;
	return count;
}

static int __test_firmware_config_init(void)
{
	int ret;

	ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
			 strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
	if (ret < 0)
		goto out;

	test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
	test_fw_config->send_uevent = true;
	test_fw_config->sync_direct = false;
	test_fw_config->req_firmware = request_firmware;
	test_fw_config->test_result = 0;
	test_fw_config->reqs = NULL;

	return 0;

out:
	__test_firmware_config_free();
	return ret;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);

	__test_firmware_config_free();

	ret = __test_firmware_config_init();
	if (ret < 0) {
		ret = -ENOMEM;
		pr_err("could not alloc settings for config trigger: %d\n",
		       ret);
		goto out;
	}

	pr_info("reset\n");
	ret = count;

out:
	mutex_unlock(&test_fw_mutex);

	return ret;
}
static DEVICE_ATTR_WO(reset);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int len = 0;

	mutex_lock(&test_fw_mutex);

	len += scnprintf(buf, PAGE_SIZE,
			 "Custom trigger configuration for: %s\n",
			 dev_name(dev));

	if (test_fw_config->name)
		len += scnprintf(buf + len, PAGE_SIZE - len,
				 "name:\t%s\n",
				 test_fw_config->name);
	else
		len += scnprintf(buf + len, PAGE_SIZE - len,
				 "name:\tEMPTY\n");

	len += scnprintf(buf + len, PAGE_SIZE - len,
			 "num_requests:\t%u\n", test_fw_config->num_requests);

	len += scnprintf(buf + len, PAGE_SIZE - len,
			 "send_uevent:\t\t%s\n",
			 test_fw_config->send_uevent ?
			 "FW_ACTION_HOTPLUG" :
			 "FW_ACTION_NOHOTPLUG");
	len += scnprintf(buf + len, PAGE_SIZE - len,
			 "sync_direct:\t\t%s\n",
			 test_fw_config->sync_direct ? "true" : "false");
	len += scnprintf(buf + len, PAGE_SIZE - len,
			 "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);

	mutex_unlock(&test_fw_mutex);

	return len;
}
static DEVICE_ATTR_RO(config);

static ssize_t config_name_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	kfree_const(test_fw_config->name);
	ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
	mutex_unlock(&test_fw_mutex);

	return ret;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(char *dst,
				    char *src)
{
	int len;

	mutex_lock(&test_fw_mutex);
	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
	mutex_unlock(&test_fw_mutex);

	return len;
}

static int test_dev_config_update_bool(const char *buf, size_t size,
				       bool *cfg)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	if (strtobool(buf, cfg) < 0)
		ret = -EINVAL;
	else
		ret = size;
	mutex_unlock(&test_fw_mutex);

	return ret;
}

static ssize_t
test_dev_config_show_bool(char *buf,
			  bool config)
{
	bool val;

	mutex_lock(&test_fw_mutex);
	val = config;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t test_dev_config_show_int(char *buf, int cfg)
{
	int val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
	int ret;
	long new;

	ret = kstrtol(buf, 10, &new);
	if (ret)
		return ret;

	if (new > U8_MAX)
		return -EINVAL;

	mutex_lock(&test_fw_mutex);
	*(u8 *)cfg = new;
	mutex_unlock(&test_fw_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}

static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
{
	u8 val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t config_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR_RW(config_name);

static ssize_t config_num_requests_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int rc;

	mutex_lock(&test_fw_mutex);
	if (test_fw_config->reqs) {
		pr_err("Must call release_all_firmware prior to changing config\n");
		rc = -EINVAL;
		mutex_unlock(&test_fw_mutex);
		goto out;
	}
	mutex_unlock(&test_fw_mutex);

	rc = test_dev_config_update_u8(buf, count,
				       &test_fw_config->num_requests);

out:
	return rc;
}

static ssize_t config_num_requests_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR_RW(config_num_requests);

static ssize_t config_sync_direct_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int rc = test_dev_config_update_bool(buf, count,
					     &test_fw_config->sync_direct);

	if (rc == count)
		test_fw_config->req_firmware = test_fw_config->sync_direct ?
				       request_firmware_direct :
				       request_firmware;
	return rc;
}

static ssize_t config_sync_direct_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR_RW(config_sync_direct);

static ssize_t config_send_uevent_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_bool(buf, count,
					   &test_fw_config->send_uevent);
}

static ssize_t config_send_uevent_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR_RW(config_send_uevent);

static ssize_t config_read_fw_idx_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_u8(buf, count,
					 &test_fw_config->read_fw_idx);
}

static ssize_t config_read_fw_idx_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR_RW(config_read_fw_idx);


static ssize_t trigger_request_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware(&test_firmware, name, dev);
	if (rc) {
		pr_info("load of '%s' failed: %d\n", name, rc);
		goto out;
	}
	pr_info("loaded: %zu\n", test_firmware->size);
	rc = count;

out:
	mutex_unlock(&test_fw_mutex);

	kfree(name);

	return rc;
}
static DEVICE_ATTR_WO(trigger_request);

static DECLARE_COMPLETION(async_fw_done);

static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
	test_firmware = fw;
	complete(&async_fw_done);
}

static ssize_t trigger_async_request_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
				     NULL, trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENODEV;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_async_request);

static ssize_t trigger_custom_fallback_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s' using custom fallback mechanism\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
				     dev, GFP_KERNEL, NULL,
				     trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENODEV;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_custom_fallback);

static int test_fw_run_batch_request(void *data)
{
	struct test_batched_req *req = data;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return -EINVAL;
	}

	req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
	if (req->rc) {
		pr_info("#%u: batched sync load failed: %d\n",
			req->idx, req->rc);
		if (!test_fw_config->test_result)
			test_fw_config->test_result = req->rc;
	} else if (req->fw) {
		req->sent = true;
		pr_info("#%u: batched sync loaded %zu\n",
			req->idx, req->fw->size);
	}
	complete(&req->completion);

	req->task = NULL;

	return 0;
}

/*
 * We use a kthread as otherwise the kernel serializes all our sync requests
 * and we would not be able to mimic batched requests on a sync call. Batched
 * requests on a sync call can for instance happen on a device driver when
 * multiple cards are used and firmware loading happens outside of probe.
 */
static ssize_t trigger_batched_requests_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct test_batched_req *req;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs =
		vzalloc(array3_size(sizeof(struct test_batched_req),
				    test_fw_config->num_requests, 2));
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	pr_info("batched sync firmware loading '%s' %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (!req) {
			WARN_ON(1);
			rc = -ENOMEM;
			goto out_bail;
		}
		req->fw = NULL;
		req->idx = i;
		req->name = test_fw_config->name;
		req->dev = dev;
		init_completion(&req->completion);
		req->task = kthread_run(test_fw_run_batch_request, req,
					"%s-%u", KBUILD_MODNAME, req->idx);
		if (!req->task || IS_ERR(req->task)) {
			pr_err("Setting up thread %u failed\n", req->idx);
			req->task = NULL;
			rc = -ENOMEM;
			goto out_bail;
		}
	}

	rc = count;

	/*
	 * We require an explicit release to enable more time and delay of
	 * calling release_firmware() to improve our chances of forcing a
	 * batched request. If we instead called release_firmware() right
	 * away then a successful firmware request would miss the
	 * opportunity to become a batched request.
	 */

out_bail:
	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->task || req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out_unlock:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests);

/*
 * We wait for each callback to return with the lock held, no need to lock here
 */
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
	struct test_batched_req *req = context;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return;
	}

	/* forces *some* batched requests to queue up */
	if (!req->idx)
		ssleep(2);

	req->fw = fw;

	/*
	 * Unfortunately the firmware API gives us nothing other than a null FW
	 * if the firmware was not found on async requests. Best we can do is
	 * just assume -ENOENT. A better API would pass the actual return
	 * value to the callback.
	 */
	if (!fw && !test_fw_config->test_result)
		test_fw_config->test_result = -ENOENT;

	complete(&req->completion);
}

static
ssize_t trigger_batched_requests_async_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct test_batched_req *req;
	bool send_uevent;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs =
		vzalloc(array3_size(sizeof(struct test_batched_req),
				    test_fw_config->num_requests, 2));
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out;
	}

	pr_info("batched loading '%s' custom fallback mechanism %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
		FW_ACTION_NOHOTPLUG;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (!req) {
			WARN_ON(1);
			rc = -ENOMEM;
			goto out_bail;
		}
		req->name = test_fw_config->name;
		req->fw = NULL;
		req->idx = i;
		init_completion(&req->completion);
		rc = request_firmware_nowait(THIS_MODULE, send_uevent,
					     req->name,
					     dev, GFP_KERNEL, req,
					     trigger_batched_cb);
		if (rc) {
			pr_info("#%u: batched async load failed setup: %d\n",
				i, rc);
			req->rc = rc;
			goto out_bail;
		} else
			req->sent = true;
	}

	rc = count;

out_bail:

	/*
	 * We require an explicit release to enable more time and delay of
	 * calling release_firmware() to improve our chances of forcing a
	 * batched request. If we instead called release_firmware() right
	 * away then a successful firmware request would miss the
	 * opportunity to become a batched request.
	 */

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);

static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);

static ssize_t release_all_firmware_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	test_release_all_firmware();
	return count;
}
static DEVICE_ATTR_WO(release_all_firmware);

static ssize_t read_firmware_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct test_batched_req *req;
	u8 idx;
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);

	idx = test_fw_config->read_fw_idx;
	if (idx >= test_fw_config->num_requests) {
		rc = -ERANGE;
		goto out;
	}

	if (!test_fw_config->reqs) {
		rc = -EINVAL;
		goto out;
	}

	req = &test_fw_config->reqs[idx];
	if (!req->fw) {
		pr_err("#%u: failed to async load firmware\n", idx);
		rc = -ENOENT;
		goto out;
	}

	pr_info("#%u: loaded %zu\n", idx, req->fw->size);

	if (req->fw->size > PAGE_SIZE) {
		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
		rc = -EINVAL;
		goto out;
	}
	memcpy(buf, req->fw->data, req->fw->size);

	rc = req->fw->size;
out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_RO(read_firmware);

#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
	TEST_FW_DEV_ATTR(reset),

	TEST_FW_DEV_ATTR(config),
	TEST_FW_DEV_ATTR(config_name),
	TEST_FW_DEV_ATTR(config_num_requests),
	TEST_FW_DEV_ATTR(config_sync_direct),
	TEST_FW_DEV_ATTR(config_send_uevent),
	TEST_FW_DEV_ATTR(config_read_fw_idx),

	/* These don't use the config at all - they could be ported! */
	TEST_FW_DEV_ATTR(trigger_request),
	TEST_FW_DEV_ATTR(trigger_async_request),
	TEST_FW_DEV_ATTR(trigger_custom_fallback),

	/* These use the config and can use the test_result */
	TEST_FW_DEV_ATTR(trigger_batched_requests),
	TEST_FW_DEV_ATTR(trigger_batched_requests_async),

	TEST_FW_DEV_ATTR(release_all_firmware),
	TEST_FW_DEV_ATTR(test_result),
	TEST_FW_DEV_ATTR(read_firmware),
	NULL,
};

ATTRIBUTE_GROUPS(test_dev);

static struct miscdevice test_fw_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "test_firmware",
	.fops = &test_fw_fops,
	.groups = test_dev_groups,
};

static int __init test_firmware_init(void)
{
	int rc;

	test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
	if (!test_fw_config)
		return -ENOMEM;

	rc = __test_firmware_config_init();
	if (rc) {
		kfree(test_fw_config);
		return rc;
	}

	rc = misc_register(&test_fw_misc_device);
	if (rc) {
		__test_firmware_config_free();
		kfree(test_fw_config);
		pr_err("could not register misc device: %d\n", rc);
		return rc;
	}

	pr_warn("interface ready\n");

	return 0;
}

module_init(test_firmware_init);

static void __exit test_firmware_exit(void)
{
	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	misc_deregister(&test_fw_misc_device);
	__test_firmware_config_free();
	kfree(test_fw_config);
	mutex_unlock(&test_fw_mutex);

	pr_warn("removed interface\n");
}

module_exit(test_firmware_exit);

MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");