/*
 * firmware_class.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/firmware_class/ for more information.
 *
 */

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>

#include <generated/utsrelease.h>

#include "base.h"

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");

/* Builtin firmware support */

#ifdef CONFIG_FW_LOADER

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
				    void *buf, size_t size)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;

			if (buf && fw->size <= size)
				memcpy(buf, fw->data, fw->size);
			return true;
		}
	}

	return false;
}

static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}

#else /* Module case - no builtin firmware support */

static inline bool fw_get_builtin_firmware(struct firmware *fw,
					   const char *name, void *buf,
					   size_t size)
{
	return false;
}

static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif

enum fw_status {
	FW_STATUS_UNKNOWN,
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORTED,
};

static int loading_timeout = 60;	/* In seconds */

static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}

/*
 * Concurrent request_firmware() calls for the same firmware need to be
 * serialized. struct fw_state is a simple state machine which holds the
 * state of the firmware loading.
 */
struct fw_state {
	struct completion completion;
	enum fw_status status;
};

static void fw_state_init(struct fw_state *fw_st)
{
	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}

static inline bool __fw_state_is_done(enum fw_status status)
{
	return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
}

static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
{
	long ret;

	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
		return -ENOENT;
	if (!ret)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}

static void __fw_state_set(struct fw_state *fw_st,
			   enum fw_status status)
{
	WRITE_ONCE(fw_st->status, status);

	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
		complete_all(&fw_st->completion);
}

#define fw_state_start(fw_st)					\
	__fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st)					\
	__fw_state_set(fw_st, FW_STATUS_DONE)
#define fw_state_aborted(fw_st)					\
	__fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st)					\
	__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)

static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
{
	return fw_st->status == status;
}

#define fw_state_is_aborted(fw_st)				\
	__fw_state_check(fw_st, FW_STATUS_ABORTED)

#ifdef CONFIG_FW_LOADER_USER_HELPER

#define fw_state_aborted(fw_st)					\
	__fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_is_done(fw_st)					\
	__fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st)				\
	__fw_state_check(fw_st, FW_STATUS_LOADING)
#define fw_state_wait_timeout(fw_st, timeout)			\
	__fw_state_wait_common(fw_st, timeout)

#endif /* CONFIG_FW_LOADER_USER_HELPER */
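
/*
 * Typical life cycle of one struct fw_state (an illustrative summary of the
 * helpers above, not additional API): the loader marks the buffer with
 * fw_state_start() when loading begins; concurrent requesters for the same
 * image sleep in fw_state_wait() or fw_state_wait_timeout(); the loader then
 * finishes with fw_state_done() or fw_state_aborted(), either of which
 * completes the completion and wakes every waiter at once.
 */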

/* firmware behavior options */
#define FW_OPT_UEVENT	(1U << 0)
#define FW_OPT_NOWAIT	(1U << 1)
#ifdef CONFIG_FW_LOADER_USER_HELPER
#define FW_OPT_USERHELPER	(1U << 2)
#else
#define FW_OPT_USERHELPER	0
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
#define FW_OPT_FALLBACK		FW_OPT_USERHELPER
#else
#define FW_OPT_FALLBACK		0
#endif
#define FW_OPT_NO_WARN	(1U << 3)
#define FW_OPT_NOCACHE	(1U << 4)

struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

struct firmware_buf {
	struct kref ref;
	struct list_head list;
	struct firmware_cache *fwc;
	struct fw_state fw_st;
	void *data;
	size_t size;
	size_t allocated_size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;
	bool need_uevent;
	struct page **pages;
	int nr_pages;
	int page_array_size;
	struct list_head pending_list;
#endif
	const char *fw_id;
};

struct fw_cache_entry {
	struct list_head list;
	const char *name;
};

struct fw_name_devm {
	unsigned long magic;
	const char *name;
};

#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)

#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

static int fw_cache_piggyback_on_request(const char *name);

/* fw_lock could be moved to 'struct firmware_priv' but since it is just
 * guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

static bool __enable_firmware = false;

static void enable_firmware(void)
{
	mutex_lock(&fw_lock);
	__enable_firmware = true;
	mutex_unlock(&fw_lock);
}

static void disable_firmware(void)
{
	mutex_lock(&fw_lock);
	__enable_firmware = false;
	mutex_unlock(&fw_lock);
}

/*
 * When disabled only the built-in firmware and the firmware cache will be
 * used to look for firmware.
 */
static bool firmware_enabled(void)
{
	bool enabled = false;

	mutex_lock(&fw_lock);
	if (__enable_firmware)
		enabled = true;
	mutex_unlock(&fw_lock);

	return enabled;
}

static struct firmware_cache fw_cache;

static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
					      struct firmware_cache *fwc,
					      void *dbuf, size_t size)
{
	struct firmware_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
	if (!buf)
		return NULL;

	buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!buf->fw_id) {
		kfree(buf);
		return NULL;
	}

	kref_init(&buf->ref);
	buf->fwc = fwc;
	buf->data = dbuf;
	buf->allocated_size = size;
	fw_state_init(&buf->fw_st);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&buf->pending_list);
#endif

	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);

	return buf;
}

static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_id, fw_name))
			return tmp;
	return NULL;
}

static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf, void *dbuf,
				      size_t size)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*buf = tmp;
		return 1;
	}
	tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ? 0 : -ENOMEM;
}

static void __fw_free_buf(struct kref *ref)
	__releases(&fwc->lock)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		vfree(buf->pages);
	} else
#endif
	if (!buf->allocated_size)
		vfree(buf->data);
	kfree_const(buf->fw_id);
	kfree(buf);
}

static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}

/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
 * kernel command line, because firmware_class is generally built into the
 * kernel instead of being a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
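
/*
 * Example (illustrative only): booting with
 *
 *	firmware_class.path=/lib/firmware/custom
 *
 * makes that directory the first entry tried by fw_get_filesystem_firmware()
 * below, ahead of the default paths in fw_path[]. Because the parameter is
 * declared with mode 0644, the same value can also be written at run time to
 * /sys/module/firmware_class/parameters/path.
 */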

static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
	loff_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	enum kernel_read_file_id id = READING_FIRMWARE;
	size_t msize = INT_MAX;

	/* Already populated data member means we're loading into a buffer */
	if (buf->data) {
		id = READING_FIRMWARE_PREALLOC_BUFFER;
		msize = buf->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s",
			       fw_path[i], buf->fw_id);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		buf->size = 0;
		rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
						id);
		if (rc) {
			if (rc == -ENOENT)
				dev_dbg(device, "loading %s failed with error %d\n",
					path, rc);
			else
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			continue;
		}
		dev_dbg(device, "direct-loading %s\n", buf->fw_id);
		buf->size = size;
		fw_state_done(&buf->fw_st);
		break;
	}
	__putname(path);

	return rc;
}

/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	fw_free_buf(fw->priv);
}

/* store the pages buffer info into firmware from buf */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
	fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = buf->pages;
#endif
	fw->size = buf->size;
	fw->data = buf->data;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);
}

#ifdef CONFIG_PM_SLEEP
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
	kfree_const(fwn->name);
}

static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}

static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}

/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);

	return 0;
}
#else
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif

static int assign_firmware_buf(struct firmware *fw, struct device *device,
			       unsigned int opt_flags)
{
	struct firmware_buf *buf = fw->priv;

	mutex_lock(&fw_lock);
	if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (opt_flags & FW_OPT_UEVENT) &&
	    !(opt_flags & FW_OPT_NOCACHE))
		fw_add_devm_name(device, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(opt_flags & FW_OPT_NOCACHE) &&
	    buf->fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw);
	mutex_unlock(&fw_lock);
	return 0;
}

/*
 * user-mode helper code
 */
#ifdef CONFIG_FW_LOADER_USER_HELPER
struct firmware_priv {
	bool nowait;
	struct device dev;
	struct firmware_buf *buf;
	struct firmware *fw;
};

static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}

static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (fw_state_is_done(&buf->fw_st))
		return;

	list_del_init(&buf->pending_list);
	fw_state_aborted(&buf->fw_st);
}

static void fw_load_abort(struct firmware_priv *fw_priv)
{
	struct firmware_buf *buf = fw_priv->buf;

	__fw_load_abort(buf);
}

static LIST_HEAD(pending_fw_head);

static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
	struct firmware_buf *buf;
	struct firmware_buf *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
		if (!buf->need_uevent || !only_kill_custom)
			__fw_load_abort(buf);
	}
	mutex_unlock(&fw_lock);
}

static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}

/**
 * firmware_timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware.  Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	loading_timeout = simple_strtol(buf, NULL, 10);
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}
static CLASS_ATTR_RW(timeout);

static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(firmware_class);

static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);
}

static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
		return -ENOMEM;

	return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		err = do_firmware_uevent(fw_priv, env);
	mutex_unlock(&fw_lock);
	return err;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		loading = fw_state_is_loading(&fw_priv->buf->fw_st);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}

/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif

/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	if (!buf->is_paged_buf)
		return 0;

	vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}

/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_buf = fw_priv->buf;
	if (fw_state_is_aborted(&fw_buf->fw_st))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_state_is_done(&fw_buf->fw_st)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			vfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			fw_state_start(&fw_buf->fw_st);
		}
		break;
	case 0:
		if (fw_state_is_loading(&fw_buf->fw_st)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 * */
			rc = fw_map_pages_buf(fw_buf);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_post_read_file(NULL,
						fw_buf->data, fw_buf->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_buf->pending_list);
			if (rc) {
				fw_state_aborted(&fw_buf->fw_st);
				written = rc;
			} else {
				fw_state_done(&fw_buf->fw_st);
			}
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);

static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
			    loff_t offset, size_t count, bool read)
{
	if (read)
		memcpy(buffer, buf->data + offset, count);
	else
		memcpy(buf->data + offset, buffer, count);
}

static void firmware_rw(struct firmware_buf *buf, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || fw_state_is_done(&buf->fw_st)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	if (buf->data)
		firmware_rw_buf(buf, buffer, offset, count, true);
	else
		firmware_rw(buf, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}

static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (buf->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = vmalloc(new_array_size * sizeof(void *));
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		vfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}

	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}

/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || fw_state_is_done(&buf->fw_st)) {
		retval = -ENODEV;
		goto out;
	}

	if (buf->data) {
		if (offset + count > buf->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_buf(buf, buffer, offset, count, false);
		retval = count;
	} else {
		retval = fw_realloc_buffer(fw_priv, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(buf, buffer, offset, count, false);
	}

	buf->size = max_t(size_t, offset + count, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}

static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};

static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, unsigned int opt_flags)
{
	struct firmware_priv *fw_priv;
	struct device *f_dev;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
	if (!fw_priv) {
		fw_priv = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_priv->fw = firmware;
	f_dev = &fw_priv->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_priv;
}

/* load a firmware via user helper */
static int _request_firmware_load(struct firmware_priv *fw_priv,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;

	/* fall back on userspace loading */
	if (!buf->data)
		buf->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&buf->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		buf->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_state_wait_timeout(&buf->fw_st, timeout);
	if (retval < 0) {
		mutex_lock(&fw_lock);
		fw_load_abort(fw_priv);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(&buf->fw_st))
		retval = -EAGAIN;
	else if (buf->is_paged_buf && !buf->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}

static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags)
{
	struct firmware_priv *fw_priv;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_priv)) {
		ret = PTR_ERR(fw_priv);
		goto out_unlock;
	}

	fw_priv->buf = firmware->priv;
	ret = _request_firmware_load(fw_priv, opt_flags, timeout);

	if (!ret)
		ret = assign_firmware_buf(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}

#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
			 struct device *device, unsigned int opt_flags)
{
	return -ENOENT;
}

static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }

#endif /* CONFIG_FW_LOADER_USER_HELPER */

/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size)
{
	struct firmware *firmware;
	struct firmware_buf *buf;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);

	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = buf;

	if (ret > 0) {
		ret = fw_state_wait(&buf->fw_st);
		if (!ret) {
			fw_set_page_data(buf, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}

/*
 * Batched requests need only one wake, we need to do this step last due to the
 * fallback mechanism. The buf is protected with kref_get(), and it won't be
 * released until the last user calls release_firmware().
 *
 * Failed batched requests are possible as well, in such cases we just share
 * the struct firmware_buf and won't release it until all requests are woken
 * and have gone through this same path.
 */
static void fw_abort_batch_reqs(struct firmware *fw)
{
	struct firmware_buf *buf;

	/* Loaded directly? */
	if (!fw || !fw->priv)
		return;

	buf = fw->priv;
	if (!fw_state_is_aborted(&buf->fw_st))
		fw_state_aborted(&buf->fw_st);
}

/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  unsigned int opt_flags)
{
	struct firmware *fw = NULL;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size);
	if (ret <= 0) /* error or already assigned */
		goto out;

	if (!firmware_enabled()) {
		WARN(1, "firmware request while host is not available\n");
		ret = -EHOSTDOWN;
		goto out;
	}

	ret = fw_get_filesystem_firmware(device, fw->priv);
	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		if (opt_flags & FW_OPT_USERHELPER) {
			dev_warn(device, "Falling back to user helper\n");
			ret = fw_load_from_user_helper(fw, name, device,
						       opt_flags);
		}
	} else
		ret = assign_firmware_buf(fw, device, opt_flags);

 out:
	if (ret < 0) {
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}

/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * @firmware_p will be used to return a firmware image by the name
 * of @name for device @device.
 *
 * Should be called from user context where sleeping is allowed.
 *
 * @name will be used as $FIRMWARE in the uevent environment and
 * should be distinctive enough not to be confused with any other
 * firmware image for this or any other device.
 *
 * Caller must hold the reference count of @device.
 *
 * The function can be called safely inside device's suspend and
 * resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_FALLBACK);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);

/**
 * request_firmware_direct: - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but this doesn't
 * fall back to usermode helper even if the firmware couldn't be loaded
 * directly from fs.  Hence it's useful for loading optional firmwares, which
 * aren't always present, without extra long timeouts of udev.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);

/**
 * request_firmware_into_buf - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data. Instead, the firmware
 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 */
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
			  struct device *device, void *buf, size_t size)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size,
				FW_OPT_UEVENT | FW_OPT_FALLBACK |
				FW_OPT_NOCACHE);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
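
/*
 * Illustrative use with a caller-owned buffer (hypothetical size, device and
 * helper names): the image is read straight into the supplied area, nothing
 * is cached, and the struct firmware must still be released afterwards.
 *
 *	void *area = kzalloc(SZ_1M, GFP_KERNEL);
 *	const struct firmware *fw;
 *	int err;
 *
 *	err = request_firmware_into_buf(&fw, "example/blob.bin", &pdev->dev,
 *					area, SZ_1M);
 *	if (!err) {
 *		example_hw_dma_start(priv, area, fw->size);
 *		release_firmware(fw);
 *	}
 *	kfree(area);
 */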

/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		if (!fw_is_builtin_firmware(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}
EXPORT_SYMBOL(release_firmware);

/* Async support */
struct firmware_work {
	struct work_struct work;
	struct module *module;
	const char *name;
	struct device *device;
	void *context;
	void (*cont)(const struct firmware *fw, void *context);
	unsigned int opt_flags;
};

static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
			  fw_work->opt_flags);
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}

/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 * Caller must hold the reference count of @device.
 *
 * Asynchronous variant of request_firmware() for user contexts:
 *	- sleep for as small periods as possible since it may
 *	  increase kernel boot time of built-in device drivers
 *	  requesting firmware in their ->probe() methods, if
 *	  @gfp is GFP_KERNEL.
 *
 *	- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = kstrdup_const(name, gfp);
	if (!fw_work->name) {
		kfree(fw_work);
		return -ENOMEM;
	}
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);

	if (!try_module_get(module)) {
		kfree_const(fw_work->name);
		kfree(fw_work);
		return -EFAULT;
	}

	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);

#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);

/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}

static struct firmware_buf *fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	spin_unlock(&fwc->lock);

	return tmp;
}

/**
 * uncache_firmware - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
static int uncache_firmware(const char *fw_name)
{
	struct firmware_buf *buf;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
		return 0;

	buf = fw_lookup_buf(fw_name);
	if (buf) {
		fw_free_buf(buf);
		return 0;
	}

	return -EINVAL;
}

static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
	if (!fce)
		goto exit;

	fce->name = kstrdup_const(name, GFP_ATOMIC);
	if (!fce->name) {
		kfree(fce);
		fce = NULL;
		goto exit;
	}
exit:
	return fce;
}

static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}

static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}

static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}

static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}

/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}

static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;
	return (fwn->magic == (unsigned long)match_data);
}

static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}

static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}

/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmwares for the device,
 * then the device driver can load its firmwares easily at
 * time when system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * use small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, also system is ready for
	 * completing firmware loading now. The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}

/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}

/**
 * device_uncache_fw_images_delay - uncache devices firmwares
 * @delay: number of milliseconds to delay uncache device firmwares
 *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}

/**
 * fw_pm_notify - notifier for suspend/resume
 * @notify_block: unused
 * @mode: mode we are switching to
 * @unused: unused
 *
 * Used to modify the firmware_class state as we move in between states.
 * The firmware_class implements a firmware cache to enable device driver
 * to fetch firmware upon resume before the root filesystem is ready. We
 * disable API calls which do not use the built-in firmware or the firmware
 * cache when we know these calls will not work.
 *
 * The inner logic behind all this is a bit complex so it is worth summarizing
 * the kernel's own suspend/resume process with context and focus on how this
 * can impact the firmware API.
 *
 * First a review on how we go to suspend::
 *
 *	pm_suspend() --> enter_state() -->
 *	sys_sync()
 *	suspend_prepare() -->
 *		__pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
 *		suspend_freeze_processes() -->
 *			freeze_processes() -->
 *				__usermodehelper_set_disable_depth(UMH_DISABLED);
 *				freeze all tasks ...
 *			freeze_kernel_threads()
 *	suspend_devices_and_enter() -->
 *		dpm_suspend_start() -->
 *			dpm_prepare()
 *			dpm_suspend()
 *		suspend_enter() -->
 *			platform_suspend_prepare()
 *			dpm_suspend_late()
 *			freeze_enter()
 *			syscore_suspend()
 *
 * When we resume we bail out of a loop from suspend_devices_and_enter() and
 * unwind back out to the caller enter_state() where we were before as follows::
 *
 *	enter_state() -->
 *	suspend_devices_and_enter() --> (bail from loop)
 *		dpm_resume_end() -->
 *			dpm_resume()
 *			dpm_complete()
 *	suspend_finish() -->
 *		suspend_thaw_processes() -->
 *			thaw_processes() -->
 *				__usermodehelper_set_disable_depth(UMH_FREEZING);
 *				thaw_workqueues();
 *				thaw all processes ...
 *				usermodehelper_enable();
 *		pm_notifier_call_chain(PM_POST_SUSPEND);
 *
 * fw_pm_notify() works through pm_notifier_call_chain().
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		disable_firmware();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);
		enable_firmware();

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}

/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
#else
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
#endif

static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}

static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	disable_firmware();
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}

static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};

static int __init firmware_class_init(void)
{
	enable_firmware();
	fw_cache_init();
	register_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	return class_register(&firmware_class);
#else
	return 0;
#endif
}

static void __exit firmware_class_exit(void)
{
	disable_firmware();
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
	unregister_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	class_unregister(&firmware_class);
#endif
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);