lib/test_firmware.c
/*
 * This module provides an interface to trigger and test firmware loading.
 *
 * It is designed to be used for basic evaluation of the firmware loading
 * subsystem (for example when validating firmware verification). It lacks
 * any extra dependencies, and will not normally be loaded by the system
 * unless explicitly requested by name.
 */
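
/*
 * Basic usage sketch (the sysfs and device node paths are assumed to follow
 * the usual misc device layout; the selftests under
 * tools/testing/selftests/firmware are the canonical users of this
 * interface):
 *
 *   modprobe test_firmware
 *   echo -n "test-firmware.bin" > /sys/devices/virtual/misc/test_firmware/trigger_request
 *   cat /dev/test_firmware
 */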

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kthread.h>

#define TEST_FIRMWARE_NAME      "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS  4

static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;

struct test_batched_req {
        u8 idx;
        int rc;
        bool sent;
        const struct firmware *fw;
        const char *name;
        struct completion completion;
        struct task_struct *task;
        struct device *dev;
};

/**
 * struct test_config - represents configuration for the test for different triggers
 *
 * @name: the name of the firmware file to look for
 * @sync_direct: if true, the sync trigger will use request_firmware_direct()
 *      instead of request_firmware().
 * @send_uevent: whether or not to send a uevent for async requests
 * @num_requests: number of requests to try per test case. This is trigger
 *      specific.
 * @reqs: stores information about all requests
 * @read_fw_idx: index of the request whose firmware results we read back
 *      through the read_firmware trigger.
 * @test_result: a test may use this to collect the result of the
 *      request_firmware*() calls used in its tests. In order of priority we
 *      always keep the first setup error, if any. If no setup errors were
 *      found then we move on to the first error encountered while running the
 *      API. Note that for async calls this will typically be a successful
 *      result (0) unless bogus parameters were used or the system is out of
 *      memory. In the async case the callback has to do a bit more homework
 *      to figure out what happened; unfortunately the only information passed
 *      on error today is the fact that no firmware was found, so we can only
 *      assume -ENOENT on async calls if the firmware is NULL.
 *
 *      Errors you can expect:
 *
 *      API specific:
 *
 *      0:              success for sync, for async it means the request was sent
 *      -EINVAL:        invalid parameters or request
 *      -ENOENT:        file not found
 *
 *      System environment:
 *
 *      -ENOMEM:        memory pressure on the system
 *      -ENODEV:        out of devices to test with
 *      -EINVAL:        an unexpected error has occurred
 * @req_firmware: if @sync_direct is true this is set to
 *      request_firmware_direct(), otherwise request_firmware()
 */
struct test_config {
        char *name;
        bool sync_direct;
        bool send_uevent;
        u8 num_requests;
        u8 read_fw_idx;

        /*
         * These below don't belong here, but we'll move them once we create
         * a struct fw_test_device and stuff the misc_dev under there later.
         */
        struct test_batched_req *reqs;
        int test_result;
        int (*req_firmware)(const struct firmware **fw, const char *name,
                            struct device *device);
};
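
/*
 * Example of driving this configuration from userspace (a sketch; the sysfs
 * path is assumed to follow the usual misc device layout):
 *
 *   cfg=/sys/devices/virtual/misc/test_firmware
 *   echo 1 > $cfg/reset
 *   echo -n "test-firmware.bin" > $cfg/config_name
 *   echo 4 > $cfg/config_num_requests
 *   echo 1 > $cfg/trigger_batched_requests
 *   cat $cfg/test_result
 *   echo 1 > $cfg/release_all_firmware
 */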

static struct test_config *test_fw_config;

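/*
 * Reading the test_firmware misc device returns the contents of whatever
 * firmware was most recently loaded through one of the trigger_* interfaces
 * below.
 */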
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
                                 size_t size, loff_t *offset)
{
        ssize_t rc = 0;

        mutex_lock(&test_fw_mutex);
        if (test_firmware)
                rc = simple_read_from_buffer(buf, size, offset,
                                             test_firmware->data,
                                             test_firmware->size);
        mutex_unlock(&test_fw_mutex);
        return rc;
}

static const struct file_operations test_fw_fops = {
        .owner          = THIS_MODULE,
        .read           = test_fw_misc_read,
};

static void __test_release_all_firmware(void)
{
        struct test_batched_req *req;
        u8 i;

        if (!test_fw_config->reqs)
                return;

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (req->fw)
                        release_firmware(req->fw);
        }

        vfree(test_fw_config->reqs);
        test_fw_config->reqs = NULL;
}

static void test_release_all_firmware(void)
{
        mutex_lock(&test_fw_mutex);
        __test_release_all_firmware();
        mutex_unlock(&test_fw_mutex);
}

static void __test_firmware_config_free(void)
{
        __test_release_all_firmware();
        kfree_const(test_fw_config->name);
        test_fw_config->name = NULL;
}

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
        *dst = kstrndup(name, count, gfp);
        if (!*dst)
                return -ENOSPC;
        return count;
}

static int __test_firmware_config_init(void)
{
        int ret;

        ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
                         strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
        if (ret < 0)
                goto out;

        test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
        test_fw_config->send_uevent = true;
        test_fw_config->sync_direct = false;
        test_fw_config->req_firmware = request_firmware;
        test_fw_config->test_result = 0;
        test_fw_config->reqs = NULL;

        return 0;

out:
        __test_firmware_config_free();
        return ret;
}

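/*
 * Writing to the reset attribute releases any firmware held by previous
 * batched runs and restores the default test configuration.
 */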
static ssize_t reset_store(struct device *dev,
                           struct device_attribute *attr,
                           const char *buf, size_t count)
{
        int ret;

        mutex_lock(&test_fw_mutex);

        __test_firmware_config_free();

        ret = __test_firmware_config_init();
        if (ret < 0) {
                ret = -ENOMEM;
                pr_err("could not alloc settings for config trigger: %d\n",
                       ret);
                goto out;
        }

        pr_info("reset\n");
        ret = count;

out:
        mutex_unlock(&test_fw_mutex);

        return ret;
}
static DEVICE_ATTR_WO(reset);

static ssize_t config_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        int len = 0;

        mutex_lock(&test_fw_mutex);

        len += scnprintf(buf, PAGE_SIZE,
                         "Custom trigger configuration for: %s\n",
                         dev_name(dev));

        if (test_fw_config->name)
                len += scnprintf(buf + len, PAGE_SIZE - len,
                                 "name:\t%s\n",
                                 test_fw_config->name);
        else
                len += scnprintf(buf + len, PAGE_SIZE - len,
                                 "name:\tEMPTY\n");

        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "num_requests:\t%u\n", test_fw_config->num_requests);

        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "send_uevent:\t\t%s\n",
                         test_fw_config->send_uevent ?
                         "FW_ACTION_HOTPLUG" :
                         "FW_ACTION_NOHOTPLUG");
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "sync_direct:\t\t%s\n",
                         test_fw_config->sync_direct ? "true" : "false");
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);

        mutex_unlock(&test_fw_mutex);

        return len;
}
static DEVICE_ATTR_RO(config);

static ssize_t config_name_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        int ret;

        mutex_lock(&test_fw_mutex);
        kfree_const(test_fw_config->name);
        ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
        mutex_unlock(&test_fw_mutex);

        return ret;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(char *dst,
                                    char *src)
{
        int len;

        mutex_lock(&test_fw_mutex);
        len = snprintf(dst, PAGE_SIZE, "%s\n", src);
        mutex_unlock(&test_fw_mutex);

        return len;
}

static int test_dev_config_update_bool(const char *buf, size_t size,
                                       bool *cfg)
{
        int ret;

        mutex_lock(&test_fw_mutex);
        if (strtobool(buf, cfg) < 0)
                ret = -EINVAL;
        else
                ret = size;
        mutex_unlock(&test_fw_mutex);

        return ret;
}

static ssize_t
test_dev_config_show_bool(char *buf,
                          bool config)
{
        bool val;

        mutex_lock(&test_fw_mutex);
        val = config;
        mutex_unlock(&test_fw_mutex);

        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t test_dev_config_show_int(char *buf, int cfg)
{
        int val;

        mutex_lock(&test_fw_mutex);
        val = cfg;
        mutex_unlock(&test_fw_mutex);

        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
        int ret;
        long new;

        ret = kstrtol(buf, 10, &new);
        if (ret)
                return ret;

        if (new < 0 || new > U8_MAX)
                return -EINVAL;

        mutex_lock(&test_fw_mutex);
        *cfg = new;
        mutex_unlock(&test_fw_mutex);

        /* Always return full write size even if we didn't consume all */
        return size;
}

static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
{
        u8 val;

        mutex_lock(&test_fw_mutex);
        val = cfg;
        mutex_unlock(&test_fw_mutex);

        return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t config_name_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR_RW(config_name);

static ssize_t config_num_requests_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int rc;

        mutex_lock(&test_fw_mutex);
        if (test_fw_config->reqs) {
                pr_err("Must call release_all_firmware prior to changing config\n");
                rc = -EINVAL;
                mutex_unlock(&test_fw_mutex);
                goto out;
        }
        mutex_unlock(&test_fw_mutex);

        rc = test_dev_config_update_u8(buf, count,
                                       &test_fw_config->num_requests);

out:
        return rc;
}

static ssize_t config_num_requests_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR_RW(config_num_requests);

static ssize_t config_sync_direct_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        int rc = test_dev_config_update_bool(buf, count,
                                             &test_fw_config->sync_direct);

        if (rc == count)
                test_fw_config->req_firmware = test_fw_config->sync_direct ?
                                       request_firmware_direct :
                                       request_firmware;
        return rc;
}

static ssize_t config_sync_direct_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR_RW(config_sync_direct);

static ssize_t config_send_uevent_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        return test_dev_config_update_bool(buf, count,
                                           &test_fw_config->send_uevent);
}

static ssize_t config_send_uevent_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR_RW(config_send_uevent);

static ssize_t config_read_fw_idx_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        return test_dev_config_update_u8(buf, count,
                                         &test_fw_config->read_fw_idx);
}

static ssize_t config_read_fw_idx_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR_RW(config_read_fw_idx);

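/*
 * Writing a firmware name to this attribute performs a synchronous
 * request_firmware() call for it; the resulting blob can then be read back
 * through the misc device.
 */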
static ssize_t trigger_request_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        int rc;
        char *name;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOSPC;

        pr_info("loading '%s'\n", name);

        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        test_firmware = NULL;
        rc = request_firmware(&test_firmware, name, dev);
        if (rc) {
                pr_info("load of '%s' failed: %d\n", name, rc);
                goto out;
        }
        pr_info("loaded: %zu\n", test_firmware->size);
        rc = count;

out:
        mutex_unlock(&test_fw_mutex);

        kfree(name);

        return rc;
}
static DEVICE_ATTR_WO(trigger_request);

static DECLARE_COMPLETION(async_fw_done);

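/*
 * Completion callback for the async triggers below: stash the firmware
 * (which may be NULL on failure) and wake up the waiting store handler.
 */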
static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
        test_firmware = fw;
        complete(&async_fw_done);
}

static ssize_t trigger_async_request_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
        int rc;
        char *name;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOSPC;

        pr_info("loading '%s'\n", name);

        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        test_firmware = NULL;
        rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, name,
                                     dev, GFP_KERNEL, NULL,
                                     trigger_async_request_cb);
        if (rc) {
                pr_info("async load of '%s' failed: %d\n", name, rc);
                kfree(name);
                goto out;
        }
        /* Free 'name' ASAP, to test for race conditions */
        kfree(name);

        wait_for_completion(&async_fw_done);

        if (test_firmware) {
                pr_info("loaded: %zu\n", test_firmware->size);
                rc = count;
        } else {
                pr_err("failed to async load firmware\n");
                rc = -ENODEV;
        }

out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_async_request);

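/*
 * Same as the async trigger, but FW_ACTION_NOHOTPLUG means no uevent is
 * sent, so userspace is expected to drive the sysfs fallback loading
 * interface by hand.
 */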
static ssize_t trigger_custom_fallback_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count)
{
        int rc;
        char *name;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOSPC;

        pr_info("loading '%s' using custom fallback mechanism\n", name);

        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        test_firmware = NULL;
        rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
                                     dev, GFP_KERNEL, NULL,
                                     trigger_async_request_cb);
        if (rc) {
                pr_info("async load of '%s' failed: %d\n", name, rc);
                kfree(name);
                goto out;
        }
        /* Free 'name' ASAP, to test for race conditions */
        kfree(name);

        wait_for_completion(&async_fw_done);

        if (test_firmware) {
                pr_info("loaded: %zu\n", test_firmware->size);
                rc = count;
        } else {
                pr_err("failed to async load firmware\n");
                rc = -ENODEV;
        }

out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_custom_fallback);

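/*
 * Worker run in its own kthread for each batched sync request: issue the
 * configured request_firmware*() call, record the first error seen in
 * test_result, and signal completion.
 */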
static int test_fw_run_batch_request(void *data)
{
        struct test_batched_req *req = data;

        if (!req) {
                test_fw_config->test_result = -EINVAL;
                return -EINVAL;
        }

        req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
        if (req->rc) {
                pr_info("#%u: batched sync load failed: %d\n",
                        req->idx, req->rc);
                if (!test_fw_config->test_result)
                        test_fw_config->test_result = req->rc;
        } else if (req->fw) {
                req->sent = true;
                pr_info("#%u: batched sync loaded %zu\n",
                        req->idx, req->fw->size);
        }
        complete(&req->completion);

        req->task = NULL;

        return 0;
}

/*
 * We use a kthread as otherwise the kernel serializes all our sync requests
 * and we would not be able to mimic batched requests on a sync call. Batched
 * requests on a sync call can for instance happen in a device driver when
 * multiple cards are used and firmware loading happens outside of probe.
 */
static ssize_t trigger_batched_requests_store(struct device *dev,
                                              struct device_attribute *attr,
                                              const char *buf, size_t count)
{
        struct test_batched_req *req;
        int rc;
        u8 i;

        mutex_lock(&test_fw_mutex);

        test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
                                       test_fw_config->num_requests * 2);
        if (!test_fw_config->reqs) {
                rc = -ENOMEM;
                goto out_unlock;
        }

        pr_info("batched sync firmware loading '%s' %u times\n",
                test_fw_config->name, test_fw_config->num_requests);

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (!req) {
                        WARN_ON(1);
                        rc = -ENOMEM;
                        goto out_bail;
                }
                req->fw = NULL;
                req->idx = i;
                req->name = test_fw_config->name;
                req->dev = dev;
                init_completion(&req->completion);
                req->task = kthread_run(test_fw_run_batch_request, req,
                                        "%s-%u", KBUILD_MODNAME, req->idx);
                if (!req->task || IS_ERR(req->task)) {
                        pr_err("Setting up thread %u failed\n", req->idx);
                        req->task = NULL;
                        rc = -ENOMEM;
                        goto out_bail;
                }
        }

        rc = count;

        /*
         * We require an explicit release to give more time and delay the
         * release_firmware() calls; this improves our chances of forcing a
         * batched request. If we instead called release_firmware() right away
         * then a successful firmware request might miss the opportunity to
         * become a batched request.
         */

out_bail:
        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (req->task || req->sent)
                        wait_for_completion(&req->completion);
        }

        /* Override any worker error if we had a general setup error */
        if (rc < 0)
                test_fw_config->test_result = rc;

out_unlock:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests);

/*
 * We wait for each callback to return with the lock held, no need to lock here
 */
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
        struct test_batched_req *req = context;

        if (!req) {
                test_fw_config->test_result = -EINVAL;
                return;
        }

        /* forces *some* batched requests to queue up */
        if (!req->idx)
                ssleep(2);

        req->fw = fw;

        /*
         * Unfortunately the firmware API gives us nothing other than a null FW
         * if the firmware was not found on async requests.  Best we can do is
         * just assume -ENOENT. A better API would pass the actual return
         * value to the callback.
         */
        if (!fw && !test_fw_config->test_result)
                test_fw_config->test_result = -ENOENT;

        complete(&req->completion);
}

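/*
 * Async counterpart of the batched sync trigger above: fire num_requests
 * request_firmware_nowait() calls and wait for all of their callbacks to
 * complete before returning.
 */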
static
ssize_t trigger_batched_requests_async_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count)
{
        struct test_batched_req *req;
        bool send_uevent;
        int rc;
        u8 i;

        mutex_lock(&test_fw_mutex);

        test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
                                       test_fw_config->num_requests * 2);
        if (!test_fw_config->reqs) {
                rc = -ENOMEM;
                goto out;
        }

        pr_info("batched loading '%s' custom fallback mechanism %u times\n",
                test_fw_config->name, test_fw_config->num_requests);

        send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
                FW_ACTION_NOHOTPLUG;

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (!req) {
                        WARN_ON(1);
                        rc = -ENOMEM;
                        goto out_bail;
                }
                req->name = test_fw_config->name;
                req->fw = NULL;
                req->idx = i;
                init_completion(&req->completion);
                rc = request_firmware_nowait(THIS_MODULE, send_uevent,
                                             req->name,
                                             dev, GFP_KERNEL, req,
                                             trigger_batched_cb);
                if (rc) {
                        pr_info("#%u: batched async load failed setup: %d\n",
                                i, rc);
                        req->rc = rc;
                        goto out_bail;
                } else
                        req->sent = true;
        }

        rc = count;

out_bail:

        /*
         * We require an explicit release to give more time and delay the
         * release_firmware() calls; this improves our chances of forcing a
         * batched request. If we instead called release_firmware() right away
         * then a successful firmware request might miss the opportunity to
         * become a batched request.
         */

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (req->sent)
                        wait_for_completion(&req->completion);
        }

        /* Override any worker error if we had a general setup error */
        if (rc < 0)
                test_fw_config->test_result = rc;

out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);

static ssize_t test_result_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);

static ssize_t release_all_firmware_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        test_release_all_firmware();
        return count;
}
static DEVICE_ATTR_WO(release_all_firmware);

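/*
 * Returns the contents of the firmware loaded by the batched request at
 * index read_fw_idx so tests can verify what was actually loaded.
 */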
static ssize_t read_firmware_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct test_batched_req *req;
        u8 idx;
        ssize_t rc = 0;

        mutex_lock(&test_fw_mutex);

        idx = test_fw_config->read_fw_idx;
        if (idx >= test_fw_config->num_requests) {
                rc = -ERANGE;
                goto out;
        }

        if (!test_fw_config->reqs) {
                rc = -EINVAL;
                goto out;
        }

        req = &test_fw_config->reqs[idx];
        if (!req->fw) {
                pr_err("#%u: failed to async load firmware\n", idx);
                rc = -ENOENT;
                goto out;
        }

        pr_info("#%u: loaded %zu\n", idx, req->fw->size);

        if (req->fw->size > PAGE_SIZE) {
                pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
                rc = -EINVAL;
                goto out;
        }
        memcpy(buf, req->fw->data, req->fw->size);

        rc = req->fw->size;
out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_RO(read_firmware);

#define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
        TEST_FW_DEV_ATTR(reset),

        TEST_FW_DEV_ATTR(config),
        TEST_FW_DEV_ATTR(config_name),
        TEST_FW_DEV_ATTR(config_num_requests),
        TEST_FW_DEV_ATTR(config_sync_direct),
        TEST_FW_DEV_ATTR(config_send_uevent),
        TEST_FW_DEV_ATTR(config_read_fw_idx),

        /* These don't use the config at all - they could be ported! */
        TEST_FW_DEV_ATTR(trigger_request),
        TEST_FW_DEV_ATTR(trigger_async_request),
        TEST_FW_DEV_ATTR(trigger_custom_fallback),

        /* These use the config and can use the test_result */
        TEST_FW_DEV_ATTR(trigger_batched_requests),
        TEST_FW_DEV_ATTR(trigger_batched_requests_async),

        TEST_FW_DEV_ATTR(release_all_firmware),
        TEST_FW_DEV_ATTR(test_result),
        TEST_FW_DEV_ATTR(read_firmware),
        NULL,
};

ATTRIBUTE_GROUPS(test_dev);

static struct miscdevice test_fw_misc_device = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "test_firmware",
        .fops           = &test_fw_fops,
        .groups         = test_dev_groups,
};

static int __init test_firmware_init(void)
{
        int rc;

        test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
        if (!test_fw_config)
                return -ENOMEM;

        rc = __test_firmware_config_init();
        if (rc) {
                kfree(test_fw_config);
                pr_err("could not init firmware test config: %d\n", rc);
                return rc;
        }

        rc = misc_register(&test_fw_misc_device);
        if (rc) {
                __test_firmware_config_free();
                kfree(test_fw_config);
                pr_err("could not register misc device: %d\n", rc);
                return rc;
        }

        pr_warn("interface ready\n");

        return 0;
}

module_init(test_firmware_init);

static void __exit test_firmware_exit(void)
{
        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        misc_deregister(&test_fw_misc_device);
        __test_firmware_config_free();
        kfree(test_fw_config);
        mutex_unlock(&test_fw_mutex);

        pr_warn("removed interface\n");
}

module_exit(test_firmware_exit);

MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");