@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * CPU Microcode Update Driver for Linux
  *
@@ -12,11 +13,6 @@
  * (C) 2015 Borislav Petkov <bp@alien8.de>
  *
  * This driver allows to upgrade microcode on x86 processors.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */

 #define pr_fmt(fmt) "microcode: " fmt
@@ -66,11 +62,6 @@
  * updated at any particular moment of time.
  */
 static DEFINE_MUTEX(microcode_mutex);
-
-/*
- * Serialize late loading so that CPUs get updated one-by-one.
- */
-static DEFINE_RAW_SPINLOCK(update_lock);

 struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

@@ -154,7 +145,6 @@

 bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 {
-#ifdef CONFIG_FW_LOADER
        struct builtin_fw *b_fw;

        for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
@@ -164,7 +154,6 @@
                        return true;
                }
        }
-#endif
        return false;
 }

@@ -428,16 +417,17 @@

 static int microcode_open(struct inode *inode, struct file *file)
 {
-       return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
+       return capable(CAP_SYS_RAWIO) ? stream_open(inode, file) : -EPERM;
 }

 static ssize_t microcode_write(struct file *file, const char __user *buf,
                               size_t len, loff_t *ppos)
 {
        ssize_t ret = -EINVAL;
+       unsigned long nr_pages = totalram_pages();

-       if ((len >> PAGE_SHIFT) > totalram_pages) {
-               pr_err("too much data (max %ld pages)\n", totalram_pages);
+       if ((len >> PAGE_SHIFT) > nr_pages) {
+               pr_err("too much data (max %ld pages)\n", nr_pages);
                return ret;
        }

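The microcode_write() hunk above combines two small changes: the character device is opened with stream_open() instead of nonseekable_open(), and the amount of installed RAM is read once through the totalram_pages() accessor instead of the old global variable. The guard itself only converts the requested write length into pages and rejects anything larger than physical memory. Below is a minimal standalone sketch of that arithmetic; PAGE_SHIFT, the pretend RAM size and the helper name are all assumptions made for illustration, not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12                  /* assumed: 4 KiB pages, as on x86 */

/* Mirrors the sense of the guard in microcode_write(): true means the
 * request would pass the "too much data" check. Illustrative only. */
static bool request_fits_in_ram(size_t len, unsigned long nr_pages)
{
        return (len >> PAGE_SHIFT) <= nr_pages;
}

int main(void)
{
        unsigned long nr_pages = 4UL * 1024 * 1024;     /* pretend 16 GiB of RAM */

        printf("%d\n", request_fits_in_ram(1UL << 20, nr_pages));  /* 1: 1 MiB fits */
        printf("%d\n", request_fits_in_ram((size_t)-1, nr_pages)); /* 0: rejected   */
        return 0;
}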
@@ -553,8 +543,7 @@
 /*
  * Returns:
  * < 0 - on error
- *   0 - no update done
- *   1 - microcode was updated
+ *   0 - success (no update done or microcode was updated)
  */
 static int __reload_late(void *info)
 {
@@ -569,26 +558,37 @@
        if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
                return -1;

-       raw_spin_lock(&update_lock);
-       apply_microcode_local(&err);
-       raw_spin_unlock(&update_lock);
+       /*
+        * On an SMT system, it suffices to load the microcode on one sibling of
+        * the core because the microcode engine is shared between the threads.
+        * Synchronization still needs to take place so that no concurrent
+        * loading attempts happen on multiple threads of an SMT core. See
+        * below.
+        */
+       if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
+               apply_microcode_local(&err);
+       else
+               goto wait_for_siblings;

-       /* siblings return UCODE_OK because their engine got updated already */
-       if (err > UCODE_NFOUND) {
-               pr_warn("Error reloading microcode on CPU %d\n", cpu);
+       if (err >= UCODE_NFOUND) {
+               if (err == UCODE_ERROR)
+                       pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
                ret = -1;
-       } else if (err == UCODE_UPDATED || err == UCODE_OK) {
-               ret = 1;
        }

-       /*
-        * Increase the wait timeout to a safe value here since we're
-        * serializing the microcode update and that could take a while on a
-        * large number of CPUs. And that is fine as the *actual* timeout will
-        * be determined by the last CPU finished updating and thus cut short.
-        */
-       if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+wait_for_siblings:
+       if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
                panic("Timeout during microcode update!\n");
+
+       /*
+        * At least one thread has completed update on each core.
+        * For others, simply call the update to make sure the
+        * per-cpu cpuinfo can be updated with right microcode
+        * revision.
+        */
+       if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
+               apply_microcode_local(&err);

        return ret;
 }
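The __reload_late() rework above replaces the old update_lock serialization (the raw spinlock removed in an earlier hunk) with a per-core split: only the first sibling of each core actually feeds the microcode engine, while the remaining threads jump to wait_for_siblings and later call apply_microcode_local() purely to refresh their per-CPU revision bookkeeping. The sketch below is a small userspace model of that "first sibling of the core" test; the 4-core/8-thread topology table stands in for topology_sibling_cpumask() and everything in it is an assumption for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Made-up topology: core_id[cpu] names the core each CPU thread sits on. */
static const int core_id[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
#define NR_DEMO_CPUS ((int)(sizeof(core_id) / sizeof(core_id[0])))

/*
 * Models cpumask_first(topology_sibling_cpumask(cpu)) == cpu: a CPU is the
 * "primary" thread of its core if no lower-numbered CPU shares that core.
 */
static bool is_primary_thread(int cpu)
{
        for (int other = 0; other < cpu; other++)
                if (core_id[other] == core_id[cpu])
                        return false;
        return true;
}

int main(void)
{
        for (int cpu = 0; cpu < NR_DEMO_CPUS; cpu++)
                printf("cpu%d: %s\n", cpu,
                       is_primary_thread(cpu) ? "loads microcode"
                                              : "waits, then refreshes its revision");
        return 0;
}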
@@ -605,8 +605,10 @@
        atomic_set(&late_cpus_out, 0);

        ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
-       if (ret > 0)
+       if (ret == 0)
                microcode_check();
+
+       pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);

        return ret;
 }
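Together with the Returns: comment change in __reload_late(), the hunk above switches the late-loading path to a plain 0-on-success convention: stop_machine_cpuslocked() hands back the callback's return value, microcode_check() now runs only when that value is exactly 0, and (as the next hunk shows) a successful sysfs write in reload_store() then reports the full write size. A toy model of that calling convention, with all fake_* names invented for illustration:

#include <stdio.h>

/* Stands in for __reload_late(): 0 on success, negative on error. */
static int fake_reload_late(void)
{
        return 0;       /* pretend the update (or the no-op case) succeeded */
}

static long fake_reload_store(long size)
{
        int ret = fake_reload_late();   /* stand-in for stop_machine_cpuslocked() */

        if (ret == 0)
                printf("microcode_check() would run here\n");

        /* mirrors reload_store(): a successful write echoes back its size */
        return ret == 0 ? size : ret;
}

int main(void)
{
        printf("reload_store -> %ld\n", fake_reload_store(2));
        return 0;
}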
@@ -644,7 +646,7 @@
 put:
        put_online_cpus();

-       if (ret >= 0)
+       if (ret == 0)
                ret = size;

        return ret;
@@ -667,8 +669,8 @@
 }

 static DEVICE_ATTR_WO(reload);
-static DEVICE_ATTR(version, 0400, version_show, NULL);
-static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
+static DEVICE_ATTR(version, 0444, version_show, NULL);
+static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

 static struct attribute *mc_default_attrs[] = {
        &dev_attr_version.attr,
@@ -773,9 +775,9 @@
 };

 /**
- * mc_bp_resume - Update boot CPU microcode during resume.
+ * microcode_bsp_resume - Update boot CPU microcode during resume.
  */
-static void mc_bp_resume(void)
+void microcode_bsp_resume(void)
 {
        int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -787,7 +789,7 @@
 }

 static struct syscore_ops mc_syscore_ops = {
-       .resume = mc_bp_resume,
+       .resume = microcode_bsp_resume,
 };

 static int mc_cpu_starting(unsigned int cpu)