.. | .. |
---|
9 | 9 | #include <linux/umh.h> |
---|
10 | 10 | #include <linux/sysctl.h> |
---|
11 | 11 | #include <linux/vmalloc.h> |
---|
| 12 | +#include <linux/module.h> |
---|
12 | 13 | |
---|
13 | 14 | #include "fallback.h" |
---|
14 | 15 | #include "firmware.h" |
---|
.. | .. |
---|
/*
 * firmware fallback mechanism
 */

/*
 * The private helpers used by this file (fw_priv handling, paged buffers,
 * etc.) are exported in the FIRMWARE_LOADER_PRIVATE symbol namespace;
 * import it so this translation unit can link against them when modular.
 */
MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);

/*
 * Fallback tunables (e.g. force_sysfs_fallback, the loading timeout);
 * defined once in the firmware loader core.
 */
extern struct firmware_fallback_config fw_fallback_config;
---|
.. | .. |
---|
103 | 106 | |
---|
/* Firmware requests currently waiting for completion via the sysfs fallback
 * interface; entries are fw_priv objects linked through ->pending_list and
 * protected by fw_lock. */
static LIST_HEAD(pending_fw_head);
---|
105 | 108 | |
---|
/*
 * kill_pending_fw_fallback_reqs() - abort firmware requests pending on the
 * sysfs fallback interface.
 * @kill_all: when true, abort every pending request and latch
 *	fw_load_abort_all so requests arriving afterwards are refused too
 *	(fw_load_sysfs_fallback() checks this flag under fw_lock); when
 *	false, abort only requests that did not ask for a uevent.
 */
void kill_pending_fw_fallback_reqs(bool kill_all)
{
	struct fw_priv *fw_priv;
	struct fw_priv *next;

	mutex_lock(&fw_lock);
	/* _safe variant: __fw_load_abort() removes entries from the list */
	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
				 pending_list) {
		/* uevent-less (custom) requests are always killed; uevent
		 * based ones only when the caller wants everything gone */
		if (kill_all || !fw_priv->need_uevent)
			__fw_load_abort(fw_priv);
	}

	/* set under fw_lock so new requests observe it consistently */
	if (kill_all)
		fw_load_abort_all = true;

	mutex_unlock(&fw_lock);
}
---|
119 | 126 | |
---|
/*
 * Read handler for the firmware class "timeout" sysfs attribute: emit the
 * current fallback loading timeout as a decimal integer.
 */
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
}
---|
125 | 132 | |
---|
126 | 133 | /** |
---|
.. | .. |
---|
215 | 222 | loading = fw_sysfs_loading(fw_sysfs->fw_priv); |
---|
216 | 223 | mutex_unlock(&fw_lock); |
---|
217 | 224 | |
---|
218 | | - return sprintf(buf, "%d\n", loading); |
---|
219 | | -} |
---|
220 | | - |
---|
221 | | -/* one pages buffer should be mapped/unmapped only once */ |
---|
222 | | -static int map_fw_priv_pages(struct fw_priv *fw_priv) |
---|
223 | | -{ |
---|
224 | | - if (!fw_priv->is_paged_buf) |
---|
225 | | - return 0; |
---|
226 | | - |
---|
227 | | - vunmap(fw_priv->data); |
---|
228 | | - fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0, |
---|
229 | | - PAGE_KERNEL_RO); |
---|
230 | | - if (!fw_priv->data) |
---|
231 | | - return -ENOMEM; |
---|
232 | | - return 0; |
---|
| 225 | + return sysfs_emit(buf, "%d\n", loading); |
---|
233 | 226 | } |
---|
234 | 227 | |
---|
235 | 228 | /** |
---|
.. | .. |
---|
253 | 246 | struct fw_priv *fw_priv; |
---|
254 | 247 | ssize_t written = count; |
---|
255 | 248 | int loading = simple_strtol(buf, NULL, 10); |
---|
256 | | - int i; |
---|
257 | 249 | |
---|
258 | 250 | mutex_lock(&fw_lock); |
---|
259 | 251 | fw_priv = fw_sysfs->fw_priv; |
---|
.. | .. |
---|
264 | 256 | case 1: |
---|
265 | 257 | /* discarding any previous partial load */ |
---|
266 | 258 | if (!fw_sysfs_done(fw_priv)) { |
---|
267 | | - for (i = 0; i < fw_priv->nr_pages; i++) |
---|
268 | | - __free_page(fw_priv->pages[i]); |
---|
269 | | - vfree(fw_priv->pages); |
---|
270 | | - fw_priv->pages = NULL; |
---|
271 | | - fw_priv->page_array_size = 0; |
---|
272 | | - fw_priv->nr_pages = 0; |
---|
| 259 | + fw_free_paged_buf(fw_priv); |
---|
273 | 260 | fw_state_start(fw_priv); |
---|
274 | 261 | } |
---|
275 | 262 | break; |
---|
.. | .. |
---|
283 | 270 | * see the mapped 'buf->data' once the loading |
---|
284 | 271 | * is completed. |
---|
285 | 272 | * */ |
---|
286 | | - rc = map_fw_priv_pages(fw_priv); |
---|
| 273 | + rc = fw_map_paged_buf(fw_priv); |
---|
287 | 274 | if (rc) |
---|
288 | 275 | dev_err(dev, "%s: map pages failed\n", |
---|
289 | 276 | __func__); |
---|
290 | 277 | else |
---|
291 | | - rc = security_kernel_post_read_file(NULL, |
---|
292 | | - fw_priv->data, fw_priv->size, |
---|
293 | | - READING_FIRMWARE); |
---|
| 278 | + rc = security_kernel_post_load_data(fw_priv->data, |
---|
| 279 | + fw_priv->size, |
---|
| 280 | + LOADING_FIRMWARE, "blob"); |
---|
294 | 281 | |
---|
295 | 282 | /* |
---|
296 | 283 | * Same logic as fw_load_abort, only the DONE bit |
---|
.. | .. |
---|
304 | 291 | } |
---|
305 | 292 | break; |
---|
306 | 293 | } |
---|
307 | | - /* fallthrough */ |
---|
| 294 | + fallthrough; |
---|
308 | 295 | default: |
---|
309 | 296 | dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); |
---|
310 | | - /* fallthrough */ |
---|
| 297 | + fallthrough; |
---|
311 | 298 | case -1: |
---|
312 | 299 | fw_load_abort(fw_sysfs); |
---|
313 | 300 | break; |
---|
.. | .. |
---|
387 | 374 | |
---|
388 | 375 | static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size) |
---|
389 | 376 | { |
---|
390 | | - struct fw_priv *fw_priv= fw_sysfs->fw_priv; |
---|
391 | | - int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT; |
---|
| 377 | + int err; |
---|
392 | 378 | |
---|
393 | | - /* If the array of pages is too small, grow it... */ |
---|
394 | | - if (fw_priv->page_array_size < pages_needed) { |
---|
395 | | - int new_array_size = max(pages_needed, |
---|
396 | | - fw_priv->page_array_size * 2); |
---|
397 | | - struct page **new_pages; |
---|
398 | | - |
---|
399 | | - new_pages = vmalloc(array_size(new_array_size, sizeof(void *))); |
---|
400 | | - if (!new_pages) { |
---|
401 | | - fw_load_abort(fw_sysfs); |
---|
402 | | - return -ENOMEM; |
---|
403 | | - } |
---|
404 | | - memcpy(new_pages, fw_priv->pages, |
---|
405 | | - fw_priv->page_array_size * sizeof(void *)); |
---|
406 | | - memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * |
---|
407 | | - (new_array_size - fw_priv->page_array_size)); |
---|
408 | | - vfree(fw_priv->pages); |
---|
409 | | - fw_priv->pages = new_pages; |
---|
410 | | - fw_priv->page_array_size = new_array_size; |
---|
411 | | - } |
---|
412 | | - |
---|
413 | | - while (fw_priv->nr_pages < pages_needed) { |
---|
414 | | - fw_priv->pages[fw_priv->nr_pages] = |
---|
415 | | - alloc_page(GFP_KERNEL | __GFP_HIGHMEM); |
---|
416 | | - |
---|
417 | | - if (!fw_priv->pages[fw_priv->nr_pages]) { |
---|
418 | | - fw_load_abort(fw_sysfs); |
---|
419 | | - return -ENOMEM; |
---|
420 | | - } |
---|
421 | | - fw_priv->nr_pages++; |
---|
422 | | - } |
---|
423 | | - return 0; |
---|
| 379 | + err = fw_grow_paged_buf(fw_sysfs->fw_priv, |
---|
| 380 | + PAGE_ALIGN(min_size) >> PAGE_SHIFT); |
---|
| 381 | + if (err) |
---|
| 382 | + fw_load_abort(fw_sysfs); |
---|
| 383 | + return err; |
---|
424 | 384 | } |
---|
425 | 385 | |
---|
426 | 386 | /** |
---|
.. | .. |
---|
505 | 465 | |
---|
506 | 466 | static struct fw_sysfs * |
---|
507 | 467 | fw_create_instance(struct firmware *firmware, const char *fw_name, |
---|
508 | | - struct device *device, enum fw_opt opt_flags) |
---|
| 468 | + struct device *device, u32 opt_flags) |
---|
509 | 469 | { |
---|
510 | 470 | struct fw_sysfs *fw_sysfs; |
---|
511 | 471 | struct device *f_dev; |
---|
.. | .. |
---|
532 | 492 | /** |
---|
533 | 493 | * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism |
---|
534 | 494 | * @fw_sysfs: firmware sysfs information for the firmware to load |
---|
535 | | - * @opt_flags: flags of options, FW_OPT_* |
---|
536 | 495 | * @timeout: timeout to wait for the load |
---|
537 | 496 | * |
---|
538 | 497 | * In charge of constructing a sysfs fallback interface for firmware loading. |
---|
539 | 498 | **/ |
---|
540 | | -static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, |
---|
541 | | - enum fw_opt opt_flags, long timeout) |
---|
| 499 | +static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout) |
---|
542 | 500 | { |
---|
543 | 501 | int retval = 0; |
---|
544 | 502 | struct device *f_dev = &fw_sysfs->dev; |
---|
.. | .. |
---|
557 | 515 | } |
---|
558 | 516 | |
---|
559 | 517 | mutex_lock(&fw_lock); |
---|
560 | | - if (fw_state_is_aborted(fw_priv)) { |
---|
| 518 | + if (fw_load_abort_all || fw_state_is_aborted(fw_priv)) { |
---|
561 | 519 | mutex_unlock(&fw_lock); |
---|
562 | 520 | retval = -EINTR; |
---|
563 | 521 | goto out; |
---|
.. | .. |
---|
565 | 523 | list_add(&fw_priv->pending_list, &pending_fw_head); |
---|
566 | 524 | mutex_unlock(&fw_lock); |
---|
567 | 525 | |
---|
568 | | - if (opt_flags & FW_OPT_UEVENT) { |
---|
| 526 | + if (fw_priv->opt_flags & FW_OPT_UEVENT) { |
---|
569 | 527 | fw_priv->need_uevent = true; |
---|
570 | 528 | dev_set_uevent_suppress(f_dev, false); |
---|
571 | 529 | dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name); |
---|
.. | .. |
---|
596 | 554 | |
---|
597 | 555 | static int fw_load_from_user_helper(struct firmware *firmware, |
---|
598 | 556 | const char *name, struct device *device, |
---|
599 | | - enum fw_opt opt_flags) |
---|
| 557 | + u32 opt_flags) |
---|
600 | 558 | { |
---|
601 | 559 | struct fw_sysfs *fw_sysfs; |
---|
602 | 560 | long timeout; |
---|
.. | .. |
---|
626 | 584 | } |
---|
627 | 585 | |
---|
628 | 586 | fw_sysfs->fw_priv = firmware->priv; |
---|
629 | | - ret = fw_load_sysfs_fallback(fw_sysfs, opt_flags, timeout); |
---|
| 587 | + ret = fw_load_sysfs_fallback(fw_sysfs, timeout); |
---|
630 | 588 | |
---|
631 | 589 | if (!ret) |
---|
632 | | - ret = assign_fw(firmware, device, opt_flags); |
---|
| 590 | + ret = assign_fw(firmware, device); |
---|
633 | 591 | |
---|
634 | 592 | out_unlock: |
---|
635 | 593 | usermodehelper_read_unlock(); |
---|
.. | .. |
---|
637 | 595 | return ret; |
---|
638 | 596 | } |
---|
639 | 597 | |
---|
640 | | -static bool fw_force_sysfs_fallback(enum fw_opt opt_flags) |
---|
| 598 | +static bool fw_force_sysfs_fallback(u32 opt_flags) |
---|
641 | 599 | { |
---|
642 | 600 | if (fw_fallback_config.force_sysfs_fallback) |
---|
643 | 601 | return true; |
---|
.. | .. |
---|
646 | 604 | return true; |
---|
647 | 605 | } |
---|
648 | 606 | |
---|
649 | | -static bool fw_run_sysfs_fallback(enum fw_opt opt_flags) |
---|
| 607 | +static bool fw_run_sysfs_fallback(u32 opt_flags) |
---|
650 | 608 | { |
---|
651 | 609 | int ret; |
---|
652 | 610 | |
---|
.. | .. |
---|
655 | 613 | return false; |
---|
656 | 614 | } |
---|
657 | 615 | |
---|
658 | | - if ((opt_flags & FW_OPT_NOFALLBACK)) |
---|
| 616 | + if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS)) |
---|
659 | 617 | return false; |
---|
660 | 618 | |
---|
661 | 619 | /* Also permit LSMs and IMA to fail firmware sysfs fallback */ |
---|
662 | | - ret = security_kernel_load_data(LOADING_FIRMWARE); |
---|
| 620 | + ret = security_kernel_load_data(LOADING_FIRMWARE, true); |
---|
663 | 621 | if (ret < 0) |
---|
664 | 622 | return false; |
---|
665 | 623 | |
---|
.. | .. |
---|
671 | 629 | * @fw: pointer to firmware image |
---|
672 | 630 | * @name: name of firmware file to look for |
---|
673 | 631 | * @device: device for which firmware is being loaded |
---|
674 | | - * @opt_flags: options to control firmware loading behaviour |
---|
| 632 | + * @opt_flags: options to control firmware loading behaviour, as defined by |
---|
| 633 | + * &enum fw_opt |
---|
675 | 634 | * @ret: return value from direct lookup which triggered the fallback mechanism |
---|
676 | 635 | * |
---|
677 | 636 | * This function is called if direct lookup for the firmware failed, it enables |
---|
678 | 637 | * a fallback mechanism through userspace by exposing a sysfs loading |
---|
679 | | - * interface. Userspace is in charge of loading the firmware through the syfs |
---|
680 | | - * loading interface. This syfs fallback mechanism may be disabled completely |
---|
| 638 | + * interface. Userspace is in charge of loading the firmware through the sysfs |
---|
| 639 | + * loading interface. This sysfs fallback mechanism may be disabled completely |
---|
681 | 640 | * on a system by setting the proc sysctl value ignore_sysfs_fallback to true. |
---|
682 | | - * If this false we check if the internal API caller set the @FW_OPT_NOFALLBACK |
---|
683 | | - * flag, if so it would also disable the fallback mechanism. A system may want |
---|
684 | | - * to enfoce the sysfs fallback mechanism at all times, it can do this by |
---|
685 | | - * setting ignore_sysfs_fallback to false and force_sysfs_fallback to true. |
---|
| 641 | + * If this is false we check if the internal API caller set the |
---|
| 642 | + * @FW_OPT_NOFALLBACK_SYSFS flag, if so it would also disable the fallback |
---|
| 643 | + * mechanism. A system may want to enforce the sysfs fallback mechanism at all |
---|
| 644 | + * times, it can do this by setting ignore_sysfs_fallback to false and |
---|
| 645 | + * force_sysfs_fallback to true. |
---|
686 | 646 | * Enabling force_sysfs_fallback is functionally equivalent to build a kernel |
---|
687 | 647 | * with CONFIG_FW_LOADER_USER_HELPER_FALLBACK. |
---|
688 | 648 | **/ |
---|
689 | 649 | int firmware_fallback_sysfs(struct firmware *fw, const char *name, |
---|
690 | 650 | struct device *device, |
---|
691 | | - enum fw_opt opt_flags, |
---|
| 651 | + u32 opt_flags, |
---|
692 | 652 | int ret) |
---|
693 | 653 | { |
---|
694 | 654 | if (!fw_run_sysfs_fallback(opt_flags)) |
---|
695 | 655 | return ret; |
---|
696 | 656 | |
---|
697 | 657 | if (!(opt_flags & FW_OPT_NO_WARN)) |
---|
698 | | - dev_warn(device, "Falling back to syfs fallback for: %s\n", |
---|
| 658 | + dev_warn(device, "Falling back to sysfs fallback for: %s\n", |
---|
699 | 659 | name); |
---|
700 | 660 | else |
---|
701 | 661 | dev_dbg(device, "Falling back to sysfs fallback for: %s\n", |
---|