Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * main.c - Multi purpose firmware loading support
4 : *
5 : * Copyright (c) 2003 Manuel Estrada Sainz
6 : *
7 : * Please see Documentation/driver-api/firmware/ for more information.
8 : *
9 : */
10 :
11 : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 :
13 : #include <linux/capability.h>
14 : #include <linux/device.h>
15 : #include <linux/kernel_read_file.h>
16 : #include <linux/module.h>
17 : #include <linux/init.h>
18 : #include <linux/initrd.h>
19 : #include <linux/timer.h>
20 : #include <linux/vmalloc.h>
21 : #include <linux/interrupt.h>
22 : #include <linux/bitops.h>
23 : #include <linux/mutex.h>
24 : #include <linux/workqueue.h>
25 : #include <linux/highmem.h>
26 : #include <linux/firmware.h>
27 : #include <linux/slab.h>
28 : #include <linux/sched.h>
29 : #include <linux/file.h>
30 : #include <linux/list.h>
31 : #include <linux/fs.h>
32 : #include <linux/async.h>
33 : #include <linux/pm.h>
34 : #include <linux/suspend.h>
35 : #include <linux/syscore_ops.h>
36 : #include <linux/reboot.h>
37 : #include <linux/security.h>
38 : #include <linux/xz.h>
39 :
40 : #include <generated/utsrelease.h>
41 :
42 : #include "../base.h"
43 : #include "firmware.h"
44 : #include "fallback.h"
45 :
46 : MODULE_AUTHOR("Manuel Estrada Sainz");
47 : MODULE_DESCRIPTION("Multi purpose firmware loading support");
48 : MODULE_LICENSE("GPL");
49 :
/*
 * Global registry of in-flight and cached firmware images.  All
 * struct fw_priv instances hang off @head under @lock.
 */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;		/* protects @head and @state */
	struct list_head head;		/* list of struct fw_priv */
	int state;			/* FW_LOADER_NO_CACHE or FW_LOADER_START_CACHE */

#ifdef CONFIG_FW_CACHE
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;		/* protects @fw_names */
	struct list_head fw_names;	/* list of cached image names */

	struct delayed_work work;	/* deferred uncache work */

	struct notifier_block pm_notify;	/* suspend/resume notifier */
#endif
};
71 :
/* One cached firmware image name, linked on firmware_cache.fw_names. */
struct fw_cache_entry {
	struct list_head list;
	const char *name;	/* firmware image name */
};

/* devres payload tying a firmware name to a device for auto-caching. */
struct fw_name_devm {
	unsigned long magic;	/* set to &fw_cache to mark the devres as ours */
	const char *name;	/* firmware image name (kstrdup_const'd) */
};
81 :
/* Map a kref embedded in struct fw_priv back to its container. */
static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}
86 :
/* firmware_cache.state values: whether new requests should be cached */
#define FW_LOADER_NO_CACHE	0
#define FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);

/* the single global firmware cache instance */
static struct firmware_cache fw_cache;
95 :
/* Reset the load-state tracking of a freshly allocated fw_priv. */
static void fw_state_init(struct fw_priv *fw_priv)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}
103 :
/* Block until the firmware load finishes or is aborted (no timeout). */
static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
108 :
109 : static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
110 :
/*
 * Allocate and initialize a new fw_priv for @fw_name.
 *
 * @dbuf/@size: optional caller-preallocated destination buffer.
 * @offset: starting file offset, valid only with FW_OPT_PARTIAL.
 *
 * Runs under fwc->lock (see alloc_lookup_fw_priv()), hence the
 * GFP_ATOMIC allocations.  Returns NULL on an invalid argument
 * combination or allocation failure.
 */
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
					  struct firmware_cache *fwc,
					  void *dbuf,
					  size_t size,
					  size_t offset,
					  u32 opt_flags)
{
	struct fw_priv *fw_priv;

	/* For a partial read, the buffer must be preallocated. */
	if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
		return NULL;

	/* Only partial reads are allowed to use an offset. */
	if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
		return NULL;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
	if (!fw_priv)
		return NULL;

	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!fw_priv->fw_name) {
		kfree(fw_priv);
		return NULL;
	}

	kref_init(&fw_priv->ref);
	fw_priv->fwc = fwc;
	fw_priv->data = dbuf;
	fw_priv->allocated_size = size;
	fw_priv->offset = offset;
	fw_priv->opt_flags = opt_flags;
	fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&fw_priv->pending_list);
#endif

	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);

	return fw_priv;
}
153 :
154 0 : static struct fw_priv *__lookup_fw_priv(const char *fw_name)
155 : {
156 : struct fw_priv *tmp;
157 0 : struct firmware_cache *fwc = &fw_cache;
158 :
159 0 : list_for_each_entry(tmp, &fwc->head, list)
160 0 : if (!strcmp(tmp->fw_name, fw_name))
161 : return tmp;
162 : return NULL;
163 : }
164 :
/*
 * Find or create the fw_priv for @fw_name under fwc->lock.
 * Returns 1 for batching firmware requests with the same name (an
 * existing request was joined and its refcount taken), 0 when a new
 * fw_priv was allocated, or -ENOMEM.
 */
static int alloc_lookup_fw_priv(const char *fw_name,
				struct firmware_cache *fwc,
				struct fw_priv **fw_priv,
				void *dbuf,
				size_t size,
				size_t offset,
				u32 opt_flags)
{
	struct fw_priv *tmp;

	spin_lock(&fwc->lock);
	/*
	 * Do not merge requests that are marked to be non-cached or
	 * are performing partial reads.
	 */
	if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
		tmp = __lookup_fw_priv(fw_name);
		if (tmp) {
			/* share the in-flight buffer via its refcount */
			kref_get(&tmp->ref);
			spin_unlock(&fwc->lock);
			*fw_priv = tmp;
			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
			return 1;
		}
	}

	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
	if (tmp) {
		INIT_LIST_HEAD(&tmp->list);
		/* non-cached buffers stay off the lookup list */
		if (!(opt_flags & FW_OPT_NOCACHE))
			list_add(&tmp->list, &fwc->head);
	}
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

	return tmp ? 0 : -ENOMEM;
}
204 :
/*
 * kref release callback: unlink the buffer and free everything it owns.
 *
 * Entered with fwc->lock held (taken in free_fw_priv()); drops the lock
 * before freeing, as the __releases() annotation documents.
 */
static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

	/*
	 * Paged buffers own their pages; a non-zero allocated_size means
	 * the data buffer was supplied by the caller and is not ours to
	 * free.
	 */
	if (fw_is_paged_buf(fw_priv))
		fw_free_paged_buf(fw_priv);
	else if (!fw_priv->allocated_size)
		vfree(fw_priv->data);

	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}
226 :
/* Drop one reference; the final put unlinks and frees under fwc->lock. */
static void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;

	spin_lock(&fwc->lock);
	/* __free_fw_priv() releases the lock itself on the final put */
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}
234 :
235 : #ifdef CONFIG_FW_LOADER_PAGED_BUF
/* True when fw_priv->data is backed by the page array (vmap'd pages). */
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
	return fw_priv->is_paged_buf;
}
240 :
241 : void fw_free_paged_buf(struct fw_priv *fw_priv)
242 : {
243 : int i;
244 :
245 : if (!fw_priv->pages)
246 : return;
247 :
248 : vunmap(fw_priv->data);
249 :
250 : for (i = 0; i < fw_priv->nr_pages; i++)
251 : __free_page(fw_priv->pages[i]);
252 : kvfree(fw_priv->pages);
253 : fw_priv->pages = NULL;
254 : fw_priv->page_array_size = 0;
255 : fw_priv->nr_pages = 0;
256 : }
257 :
258 : int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
259 : {
260 : /* If the array of pages is too small, grow it */
261 : if (fw_priv->page_array_size < pages_needed) {
262 : int new_array_size = max(pages_needed,
263 : fw_priv->page_array_size * 2);
264 : struct page **new_pages;
265 :
266 : new_pages = kvmalloc_array(new_array_size, sizeof(void *),
267 : GFP_KERNEL);
268 : if (!new_pages)
269 : return -ENOMEM;
270 : memcpy(new_pages, fw_priv->pages,
271 : fw_priv->page_array_size * sizeof(void *));
272 : memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
273 : (new_array_size - fw_priv->page_array_size));
274 : kvfree(fw_priv->pages);
275 : fw_priv->pages = new_pages;
276 : fw_priv->page_array_size = new_array_size;
277 : }
278 :
279 : while (fw_priv->nr_pages < pages_needed) {
280 : fw_priv->pages[fw_priv->nr_pages] =
281 : alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
282 :
283 : if (!fw_priv->pages[fw_priv->nr_pages])
284 : return -ENOMEM;
285 : fw_priv->nr_pages++;
286 : }
287 :
288 : return 0;
289 : }
290 :
/*
 * Map the accumulated pages into one contiguous read-only kernel
 * mapping and publish it through fw_priv->data.
 * Returns 0 on success (or when there is no paged buffer), -ENOMEM
 * if vmap() fails.
 */
int fw_map_paged_buf(struct fw_priv *fw_priv)
{
	/* one pages buffer should be mapped/unmapped only once */
	if (!fw_priv->pages)
		return 0;

	/* drop any previous mapping before creating the final RO one */
	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;

	return 0;
}
305 : #endif
306 :
307 : /*
308 : * XZ-compressed firmware support
309 : */
310 : #ifdef CONFIG_FW_LOADER_COMPRESS
311 : /* show an error and return the standard error code */
312 : static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
313 : {
314 : if (xz_ret != XZ_STREAM_END) {
315 : dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
316 : return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
317 : }
318 : return 0;
319 : }
320 :
/* single-shot decompression onto the pre-allocated buffer */
static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
				   size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;

	/* XZ_SINGLE mode: the whole input and output are available at once */
	xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;
	xz_buf.out_size = fw_priv->allocated_size;
	xz_buf.out = fw_priv->data;
	xz_buf.out_pos = 0;

	xz_ret = xz_dec_run(xz_dec, &xz_buf);
	xz_dec_end(xz_dec);

	/* record how much output was produced, even on failure */
	fw_priv->size = xz_buf.out_pos;
	return fw_decompress_xz_error(dev, xz_ret);
}
346 :
/* decompression on paged buffer and map it */
static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
				  size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;
	struct page *page;
	int err = 0;

	/* XZ_DYNALLOC mode: the output size is not known up front */
	xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;

	fw_priv->is_paged_buf = true;
	fw_priv->size = 0;
	do {
		/* grow the paged buffer by one page per iteration */
		if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
			err = -ENOMEM;
			goto out;
		}

		/* decompress onto the new allocated page */
		page = fw_priv->pages[fw_priv->nr_pages - 1];
		xz_buf.out = kmap(page);
		xz_buf.out_pos = 0;
		xz_buf.out_size = PAGE_SIZE;
		xz_ret = xz_dec_run(xz_dec, &xz_buf);
		kunmap(page);
		fw_priv->size += xz_buf.out_pos;
		/* partial decompression means either end or error */
		if (xz_buf.out_pos != PAGE_SIZE)
			break;
	} while (xz_ret == XZ_OK);

	err = fw_decompress_xz_error(dev, xz_ret);
	if (!err)
		err = fw_map_paged_buf(fw_priv);

 out:
	xz_dec_end(xz_dec);
	return err;
}
394 :
395 : static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
396 : size_t in_size, const void *in_buffer)
397 : {
398 : /* if the buffer is pre-allocated, we can perform in single-shot mode */
399 : if (fw_priv->data)
400 : return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
401 : else
402 : return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
403 : }
404 : #endif /* CONFIG_FW_LOADER_COMPRESS */
405 :
/* direct firmware loading support */
static char fw_path_para[256];	/* filled in by the 'path' module parameter */
/* search order: the customized path first, then the standard locations */
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};
415 :
416 : /*
417 : * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
418 : * from kernel command line because firmware_class is generally built in
419 : * kernel instead of module.
420 : */
421 : module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
422 : MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
423 :
/*
 * Try each entry of fw_path[] in priority order and read the firmware
 * file (name + @suffix) into @fw_priv, optionally through @decompress.
 * Returns 0 on success, -ENOENT when no path had the file, or another
 * negative errno from path construction or the file read.
 */
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
			   const char *suffix,
			   int (*decompress)(struct device *dev,
					     struct fw_priv *fw_priv,
					     size_t in_size,
					     const void *in_buffer))
{
	size_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	size_t msize = INT_MAX;
	void *buffer = NULL;

	/* Already populated data member means we're loading into a buffer */
	if (!decompress && fw_priv->data) {
		buffer = fw_priv->data;
		msize = fw_priv->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	wait_for_initramfs();
	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		size_t file_size = 0;
		size_t *file_size_ptr = NULL;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s%s",
			       fw_path[i], fw_priv->fw_name, suffix);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		fw_priv->size = 0;

		/*
		 * The total file size is only examined when doing a partial
		 * read; the "full read" case needs to fail if the whole
		 * firmware was not completely loaded.
		 */
		if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
			file_size_ptr = &file_size;

		/* load firmware files from the mount namespace of init */
		rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
						       &buffer, msize,
						       file_size_ptr,
						       READING_FIRMWARE);
		if (rc < 0) {
			/* -ENOENT just means "try the next path" */
			if (rc != -ENOENT)
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			else
				dev_dbg(device, "loading %s failed for no such file or directory.\n",
					path);
			continue;
		}
		size = rc;	/* positive return is the number of bytes read */
		rc = 0;

		dev_dbg(device, "Loading firmware from %s\n", path);
		if (decompress) {
			dev_dbg(device, "f/w decompressing %s\n",
				fw_priv->fw_name);
			rc = decompress(device, fw_priv, size, buffer);
			/* discard the superfluous original content */
			vfree(buffer);
			buffer = NULL;
			if (rc) {
				fw_free_paged_buf(fw_priv);
				continue;
			}
		} else {
			dev_dbg(device, "direct-loading %s\n",
				fw_priv->fw_name);
			if (!fw_priv->data)
				fw_priv->data = buffer;
			fw_priv->size = size;
		}
		/* mark the load complete so batched waiters wake up */
		fw_state_done(fw_priv);
		break;
	}
	__putname(path);

	return rc;
}
518 :
519 : /* firmware holds the ownership of pages */
520 : static void firmware_free_data(const struct firmware *fw)
521 : {
522 : /* Loaded directly? */
523 0 : if (!fw->priv) {
524 0 : vfree(fw->data);
525 : return;
526 : }
527 0 : free_fw_priv(fw->priv);
528 : }
529 :
/* store the pages buffer info firmware from buf */
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
	/* expose the loaded image through the caller-visible struct firmware */
	fw->priv = fw_priv;
	fw->size = fw_priv->size;
	fw->data = fw_priv->data;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);
}
541 :
542 : #ifdef CONFIG_FW_CACHE
/* devres release callback: free the duplicated firmware name. */
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
	kfree_const(fwn->name);
}
552 :
/* devres match callback: is @res our entry for the name in @match_data? */
static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}
561 :
562 : static struct fw_name_devm *fw_find_devm_name(struct device *dev,
563 : const char *name)
564 : {
565 : struct fw_name_devm *fwn;
566 :
567 0 : fwn = devres_find(dev, fw_name_devm_release,
568 : fw_devm_match, (void *)name);
569 : return fwn;
570 : }
571 :
572 : static bool fw_cache_is_setup(struct device *dev, const char *name)
573 : {
574 : struct fw_name_devm *fwn;
575 :
576 0 : fwn = fw_find_devm_name(dev, name);
577 0 : if (fwn)
578 : return true;
579 :
580 : return false;
581 : }
582 :
/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	/* nothing to do if this name is already tracked for the device */
	if (fw_cache_is_setup(dev, name))
		return 0;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	/* the magic lets fw_devm_match() recognize this devres as ours */
	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);

	return 0;
}
606 : #else
/* CONFIG_FW_CACHE disabled: devres-based caching helpers are no-ops. */
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	return false;
}

static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
616 : #endif
617 :
/*
 * Publish a successfully loaded fw_priv into @fw for the caller,
 * arranging devres-based caching where applicable.
 * Returns 0 on success, -ENOENT if the load produced no data or was
 * aborted, or an error from fw_add_devm_name().
 */
int assign_fw(struct firmware *fw, struct device *device)
{
	struct fw_priv *fw_priv = fw->priv;
	int ret;

	mutex_lock(&fw_lock);
	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * the device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
	    !(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
		ret = fw_add_devm_name(device, fw_priv->fw_name);
		if (ret) {
			mutex_unlock(&fw_lock);
			return ret;
		}
	}

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
	    fw_priv->fwc->state == FW_LOADER_START_CACHE)
		fw_cache_piggyback_on_request(fw_priv);

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(fw_priv, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
659 :
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size,
			  size_t offset, u32 opt_flags)
{
	struct firmware *firmware;
	struct fw_priv *fw_priv;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	/* a built-in image satisfies the request without any file I/O */
	if (firmware_request_builtin_buf(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
				   offset, opt_flags);

	/*
	 * bind with 'priv' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = fw_priv;

	if (ret > 0) {
		/* joined a batched request: wait for the first one to finish */
		ret = fw_state_wait(fw_priv);
		if (!ret) {
			fw_set_page_data(fw_priv, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
706 :
/*
 * Batched requests need only one wake, we need to do this step last due to the
 * fallback mechanism. The buf is protected with kref_get(), and it won't be
 * released until the last user calls release_firmware().
 *
 * Failed batched requests are possible as well, in such cases we just share
 * the struct fw_priv and won't release it until all requests are woken
 * and have gone through this same path.
 */
static void fw_abort_batch_reqs(struct firmware *fw)
{
	struct fw_priv *fw_priv;

	/* Loaded directly? */
	if (!fw || !fw->priv)
		return;

	fw_priv = fw->priv;
	mutex_lock(&fw_lock);
	/* abort at most once so waiters observe a single state transition */
	if (!fw_state_is_aborted(fw_priv))
		fw_state_aborted(fw_priv);
	mutex_unlock(&fw_lock);
}
730 :
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  size_t offset, u32 opt_flags)
{
	struct firmware *fw = NULL;
	struct cred *kern_cred = NULL;
	const struct cred *old_cred;
	bool nondirect = false;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size,
					offset, opt_flags);
	if (ret <= 0) /* error or already assigned */
		goto out;

	/*
	 * We are about to try to access the firmware file. Because we may have been
	 * called by a driver when serving an unrelated request from userland, we use
	 * the kernel credentials to read the file.
	 */
	kern_cred = prepare_kernel_cred(NULL);
	if (!kern_cred) {
		ret = -ENOMEM;
		goto out;
	}
	old_cred = override_creds(kern_cred);

	/* first attempt: the uncompressed file, loaded directly */
	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);

	/* Only full reads can support decompression, platform, and sysfs. */
	if (!(opt_flags & FW_OPT_PARTIAL))
		nondirect = true;

#ifdef CONFIG_FW_LOADER_COMPRESS
	if (ret == -ENOENT && nondirect)
		ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
						 fw_decompress_xz);
#endif
	if (ret == -ENOENT && nondirect)
		ret = firmware_fallback_platform(fw->priv);

	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		/* last resort: hand the request to the usermode helper */
		if (nondirect)
			ret = firmware_fallback_sysfs(fw, name, device,
						      opt_flags, ret);
	} else
		ret = assign_fw(fw, device);

	revert_creds(old_cred);
	put_cred(kern_cred);

out:
	if (ret < 0) {
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
806 :
/**
 * request_firmware() - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * @firmware_p will be used to return a firmware image by the name
 * of @name for device @device.
 *
 * Should be called from user context where sleeping is allowed.
 *
 * @name will be used as $FIRMWARE in the uevent environment and
 * should be distinctive enough not to be confused with any other
 * firmware image for this or any other device.
 *
 * Caller must hold the reference count of @device.
 *
 * The function can be called safely inside device's suspend and
 * resume callback.
 *
 * Return: 0 on success, or a negative error code on failure.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
				FW_OPT_UEVENT);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
841 :
/**
 * firmware_request_nowarn() - request for an optional fw module
 * @firmware: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function is similar in behaviour to request_firmware(), except it
 * doesn't produce warning messages when the file is not found. The sysfs
 * fallback mechanism is enabled if direct filesystem lookup fails. However,
 * failures to find the firmware file with it are still suppressed. It is
 * therefore up to the driver to check for the return value of this call and to
 * decide when to inform the users of errors.
 *
 * Return: 0 on success, or a negative error code on failure.
 **/
int firmware_request_nowarn(const struct firmware **firmware, const char *name,
			    struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_nowarn);
868 :
/**
 * request_firmware_direct() - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but this doesn't
 * fall back to usermode helper even if the firmware couldn't be loaded
 * directly from fs. Hence it's useful for loading optional firmwares, which
 * aren't always present, without extra long timeouts of udev.
 *
 * Return: 0 on success, or a negative error code on failure.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN |
				FW_OPT_NOFALLBACK_SYSFS);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
893 :
/**
 * firmware_request_platform() - request firmware with platform-fw fallback
 * @firmware: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function is similar in behaviour to request_firmware, except that if
 * direct filesystem lookup fails, it will fallback to looking for a copy of the
 * requested firmware embedded in the platform's main (e.g. UEFI) firmware.
 *
 * Return: 0 on success, or a negative error code on failure.
 **/
int firmware_request_platform(const struct firmware **firmware,
			      const char *name, struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_platform);
917 :
/**
 * firmware_request_cache() - cache firmware for suspend so resume can use it
 * @device: device for which the firmware should be cached
 * @name: name of firmware file
 *
 * There are some devices with an optimization that enables the device to not
 * require loading firmware on system reboot. This optimization may still
 * require the firmware present on resume from suspend. This routine can be
 * used to ensure the firmware is present on resume from suspend in these
 * situations. This helper is not compatible with drivers which use
 * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
 *
 * Return: 0 on success, or a negative error code on failure.
 **/
int firmware_request_cache(struct device *device, const char *name)
{
	int ret;

	/* serialize with the caching machinery */
	mutex_lock(&fw_lock);
	ret = fw_add_devm_name(device, name);
	mutex_unlock(&fw_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_cache);
941 :
/**
 * request_firmware_into_buf() - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data. Instead, the firmware
 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
			  struct device *device, void *buf, size_t size)
{
	int ret;

	/* caching would outlive the caller-owned buffer; refuse the mix */
	if (fw_cache_is_setup(device, name))
		return -EOPNOTSUPP;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size, 0,
				FW_OPT_UEVENT | FW_OPT_NOCACHE);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
973 :
/**
 * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 * @offset: offset into file to read
 *
 * This function works pretty much like request_firmware_into_buf except
 * it allows a partial read of the file.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int
request_partial_firmware_into_buf(const struct firmware **firmware_p,
				  const char *name, struct device *device,
				  void *buf, size_t size, size_t offset)
{
	int ret;

	/* caching would outlive the caller-owned buffer; refuse the mix */
	if (fw_cache_is_setup(device, name))
		return -EOPNOTSUPP;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size, offset,
				FW_OPT_UEVENT | FW_OPT_NOCACHE |
				FW_OPT_PARTIAL);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_partial_firmware_into_buf);
1004 :
/**
 * release_firmware() - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (!fw)
		return;

	/* built-in images carry no separately allocated data */
	if (!firmware_is_builtin(fw))
		firmware_free_data(fw);

	kfree(fw);
}
EXPORT_SYMBOL(release_firmware);
1018 :
/* Async support */
/* State carried across the workqueue for request_firmware_nowait(). */
struct firmware_work {
	struct work_struct work;
	struct module *module;	/* requesting module, put when the work ends */
	const char *name;	/* firmware name, freed with kfree_const() */
	struct device *device;	/* reference dropped in the work function */
	void *context;		/* opaque cookie handed back to @cont */
	void (*cont)(const struct firmware *fw, void *context);
	u32 opt_flags;		/* FW_OPT_* flags for _request_firmware() */
};
1029 :
/* Workqueue callback: run the firmware request and invoke the continuation. */
static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
			  fw_work->opt_flags);
	/* fw is NULL on failure; the continuation must handle that */
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}
1046 :
1047 : /**
1048 : * request_firmware_nowait() - asynchronous version of request_firmware
1049 : * @module: module requesting the firmware
1050 : * @uevent: sends uevent to copy the firmware image if this flag
1051 : * is non-zero else the firmware copy must be done manually.
1052 : * @name: name of firmware file
1053 : * @device: device for which firmware is being loaded
1054 : * @gfp: allocation flags
1055 : * @context: will be passed over to @cont, and
1056 : * @fw may be %NULL if firmware request fails.
1057 : * @cont: function will be called asynchronously when the firmware
1058 : * request is over.
1059 : *
1060 : * Caller must hold the reference count of @device.
1061 : *
1062 : * Asynchronous variant of request_firmware() for user contexts:
1063 : * - sleep for as small periods as possible since it may
1064 : * increase kernel boot time of built-in device drivers
1065 : * requesting firmware in their ->probe() methods, if
1066 : * @gfp is GFP_KERNEL.
1067 : *
1068 : * - can't sleep at all if @gfp is GFP_ATOMIC.
1069 : **/
1070 : int
1071 0 : request_firmware_nowait(
1072 : struct module *module, bool uevent,
1073 : const char *name, struct device *device, gfp_t gfp, void *context,
1074 : void (*cont)(const struct firmware *fw, void *context))
1075 : {
1076 : struct firmware_work *fw_work;
1077 :
1078 0 : fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1079 0 : if (!fw_work)
1080 : return -ENOMEM;
1081 :
1082 0 : fw_work->module = module;
1083 0 : fw_work->name = kstrdup_const(name, gfp);
1084 0 : if (!fw_work->name) {
1085 0 : kfree(fw_work);
1086 0 : return -ENOMEM;
1087 : }
1088 0 : fw_work->device = device;
1089 0 : fw_work->context = context;
1090 0 : fw_work->cont = cont;
1091 0 : fw_work->opt_flags = FW_OPT_NOWAIT |
1092 : (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1093 :
1094 0 : if (!uevent && fw_cache_is_setup(device, name)) {
1095 0 : kfree_const(fw_work->name);
1096 0 : kfree(fw_work);
1097 0 : return -EOPNOTSUPP;
1098 : }
1099 :
1100 0 : if (!try_module_get(module)) {
1101 : kfree_const(fw_work->name);
1102 : kfree(fw_work);
1103 : return -EFAULT;
1104 : }
1105 :
1106 0 : get_device(fw_work->device);
1107 0 : INIT_WORK(&fw_work->work, request_firmware_work_func);
1108 0 : schedule_work(&fw_work->work);
1109 0 : return 0;
1110 : }
1111 : EXPORT_SYMBOL(request_firmware_nowait);
1112 :
1113 : #ifdef CONFIG_FW_CACHE
1114 : static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1115 :
1116 : /**
1117 : * cache_firmware() - cache one firmware image in kernel memory space
1118 : * @fw_name: the firmware image name
1119 : *
1120 : * Cache firmware in kernel memory so that drivers can use it when
1121 : * system isn't ready for them to request firmware image from userspace.
1122 : * Once it returns successfully, driver can use request_firmware or its
1123 : * nowait version to get the cached firmware without any interacting
1124 : * with userspace
1125 : *
1126 : * Return 0 if the firmware image has been cached successfully
1127 : * Return !0 otherwise
1128 : *
1129 : */
1130 0 : static int cache_firmware(const char *fw_name)
1131 : {
1132 : int ret;
1133 : const struct firmware *fw;
1134 :
1135 0 : pr_debug("%s: %s\n", __func__, fw_name);
1136 :
1137 0 : ret = request_firmware(&fw, fw_name, NULL);
1138 0 : if (!ret)
1139 0 : kfree(fw);
1140 :
1141 : pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
1142 :
1143 0 : return ret;
1144 : }
1145 :
1146 : static struct fw_priv *lookup_fw_priv(const char *fw_name)
1147 : {
1148 : struct fw_priv *tmp;
1149 0 : struct firmware_cache *fwc = &fw_cache;
1150 :
1151 0 : spin_lock(&fwc->lock);
1152 0 : tmp = __lookup_fw_priv(fw_name);
1153 0 : spin_unlock(&fwc->lock);
1154 :
1155 : return tmp;
1156 : }
1157 :
1158 : /**
1159 : * uncache_firmware() - remove one cached firmware image
1160 : * @fw_name: the firmware image name
1161 : *
1162 : * Uncache one firmware image which has been cached successfully
1163 : * before.
1164 : *
1165 : * Return 0 if the firmware cache has been removed successfully
1166 : * Return !0 otherwise
1167 : *
1168 : */
1169 0 : static int uncache_firmware(const char *fw_name)
1170 : {
1171 : struct fw_priv *fw_priv;
1172 : struct firmware fw;
1173 :
1174 : pr_debug("%s: %s\n", __func__, fw_name);
1175 :
1176 0 : if (firmware_request_builtin(&fw, fw_name))
1177 : return 0;
1178 :
1179 0 : fw_priv = lookup_fw_priv(fw_name);
1180 0 : if (fw_priv) {
1181 0 : free_fw_priv(fw_priv);
1182 0 : return 0;
1183 : }
1184 :
1185 : return -EINVAL;
1186 : }
1187 :
1188 0 : static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1189 : {
1190 : struct fw_cache_entry *fce;
1191 :
1192 0 : fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1193 0 : if (!fce)
1194 : goto exit;
1195 :
1196 0 : fce->name = kstrdup_const(name, GFP_ATOMIC);
1197 0 : if (!fce->name) {
1198 0 : kfree(fce);
1199 0 : fce = NULL;
1200 0 : goto exit;
1201 : }
1202 : exit:
1203 0 : return fce;
1204 : }
1205 :
1206 0 : static int __fw_entry_found(const char *name)
1207 : {
1208 0 : struct firmware_cache *fwc = &fw_cache;
1209 : struct fw_cache_entry *fce;
1210 :
1211 0 : list_for_each_entry(fce, &fwc->fw_names, list) {
1212 0 : if (!strcmp(fce->name, name))
1213 : return 1;
1214 : }
1215 : return 0;
1216 : }
1217 :
/*
 * Record @fw_priv's name on the cache list the first time it is requested,
 * taking an extra kref on the image so its data outlives the request.
 */
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
	const char *name = fw_priv->fw_name;
	struct firmware_cache *fwc = fw_priv->fwc;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	/* Only one cache entry per image name. */
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		list_add(&fce->list, &fwc->fw_names);
		/* Hold the image alive for as long as the entry exists. */
		kref_get(&fw_priv->ref);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
}
1237 :
/* Free a cache entry together with the const-duplicated name it owns. */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
1243 :
/*
 * Async worker: cache one image by name. On failure the entry is unlinked
 * and freed so the later uncache pass never sees an image that was not
 * actually cached.
 */
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}
1260 :
/*
 * devres iterator callback: append a cache entry for one recorded firmware
 * name to the caller-supplied list.
 *
 * called with dev->devres_lock held
 */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	/* Allocation failure is tolerated: that image simply isn't cached. */
	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}
1274 :
1275 0 : static int devm_name_match(struct device *dev, void *res,
1276 : void *match_data)
1277 : {
1278 0 : struct fw_name_devm *fwn = res;
1279 0 : return (fwn->magic == (unsigned long)match_data);
1280 : }
1281 :
/*
 * Collect the firmware names recorded in @dev's devres list and schedule an
 * async caching job for each name that is not already on the global list.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* Gather this device's recorded firmware names onto a private list. */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;	/* duplicate name: nothing to schedule */
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
1312 :
/*
 * Uncache and free every entry on fw_cache.fw_names. Each entry is detached
 * under name_lock, but the lock is dropped around uncache_firmware() and
 * free_fw_cache_entry() so those calls run without the spinlock held.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
1332 :
1333 : /**
1334 : * device_cache_fw_images() - cache devices' firmware
1335 : *
1336 : * If one device called request_firmware or its nowait version
1337 : * successfully before, the firmware names are recored into the
1338 : * device's devres link list, so device_cache_fw_images can call
1339 : * cache_firmware() to cache these firmwares for the device,
1340 : * then the device driver can load its firmwares easily at
1341 : * time when system is not ready to complete loading firmware.
1342 : */
1343 0 : static void device_cache_fw_images(void)
1344 : {
1345 0 : struct firmware_cache *fwc = &fw_cache;
1346 0 : DEFINE_WAIT(wait);
1347 :
1348 : pr_debug("%s\n", __func__);
1349 :
1350 : /* cancel uncache work */
1351 0 : cancel_delayed_work_sync(&fwc->work);
1352 :
1353 : fw_fallback_set_cache_timeout();
1354 :
1355 0 : mutex_lock(&fw_lock);
1356 0 : fwc->state = FW_LOADER_START_CACHE;
1357 0 : dpm_for_each_dev(NULL, dev_cache_fw_image);
1358 0 : mutex_unlock(&fw_lock);
1359 :
1360 : /* wait for completion of caching firmware for all devices */
1361 0 : async_synchronize_full_domain(&fw_cache_domain);
1362 :
1363 : fw_fallback_set_default_timeout();
1364 0 : }
1365 :
1366 : /**
1367 : * device_uncache_fw_images() - uncache devices' firmware
1368 : *
1369 : * uncache all firmwares which have been cached successfully
1370 : * by device_uncache_fw_images earlier
1371 : */
1372 : static void device_uncache_fw_images(void)
1373 : {
1374 : pr_debug("%s\n", __func__);
1375 0 : __device_uncache_fw_images();
1376 : }
1377 :
/* Delayed-work wrapper around device_uncache_fw_images(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
1382 :
1383 : /**
1384 : * device_uncache_fw_images_delay() - uncache devices firmwares
1385 : * @delay: number of milliseconds to delay uncache device firmwares
1386 : *
1387 : * uncache all devices's firmwares which has been cached successfully
1388 : * by device_cache_fw_images after @delay milliseconds.
1389 : */
1390 0 : static void device_uncache_fw_images_delay(unsigned long delay)
1391 : {
1392 0 : queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
1393 : msecs_to_jiffies(delay));
1394 0 : }
1395 :
/*
 * PM notifier: cache device firmware before suspend/hibernate, and schedule
 * delayed uncaching once the system is back up.
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		/* drop the cached images after a 10 second grace period */
		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
1428 :
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;	/* never blocks suspend */
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
1439 :
/* Set up cache-name bookkeeping, the PM notifier and the syscore hook. */
static int __init register_fw_pm_ops(void)
{
	int ret;

	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	ret = register_pm_notifier(&fw_cache.pm_notify);
	if (ret)
		return ret;

	register_syscore_ops(&fw_syscore_ops);

	return ret;	/* ret is 0 here */
}
1459 :
/* Undo register_fw_pm_ops(): drop the syscore hook and the PM notifier. */
static inline void unregister_fw_pm_ops(void)
{
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
}
1465 : #else
/* !CONFIG_FW_CACHE: firmware caching disabled, stub out the hooks. */
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
}
static inline int register_fw_pm_ops(void)
{
	return 0;
}
static inline void unregister_fw_pm_ops(void)
{
}
1476 : #endif
1477 :
/* Initialize the global firmware cache bookkeeping; caching starts disabled. */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;
}
1484 :
/* Reboot notifier: abort outstanding fallback requests before shutdown. */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}

static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
1500 :
1501 1 : static int __init firmware_class_init(void)
1502 : {
1503 : int ret;
1504 :
1505 : /* No need to unfold these on exit */
1506 1 : fw_cache_init();
1507 :
1508 1 : ret = register_fw_pm_ops();
1509 1 : if (ret)
1510 : return ret;
1511 :
1512 1 : ret = register_reboot_notifier(&fw_shutdown_nb);
1513 1 : if (ret)
1514 : goto out;
1515 :
1516 : return register_sysfs_loader();
1517 :
1518 : out:
1519 : unregister_fw_pm_ops();
1520 0 : return ret;
1521 : }
1522 :
/* Module exit: unregister everything firmware_class_init() set up. */
static void __exit firmware_class_exit(void)
{
	unregister_fw_pm_ops();
	unregister_reboot_notifier(&fw_shutdown_nb);
	unregister_sysfs_loader();
}
1529 :
1530 : fs_initcall(firmware_class_init);
1531 : module_exit(firmware_class_exit);
|