+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
  * because MTRRs can span up to 40 bits (36bits on most modern x86)
...
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>

 #include "mtrr.h"

...
 	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
 	cr0 = read_cr0() | X86_CR0_CD;
 	write_cr0(cr0);
-	wbinvd();
+
+	/*
+	 * Cache flushing is the most time-consuming step when programming
+	 * the MTRRs. Fortunately, as per the Intel Software Development
+	 * Manual, we can skip it if the processor supports cache self-
+	 * snooping.
+	 */
+	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+		wbinvd();

 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
 	if (boot_cpu_has(X86_FEATURE_PGE)) {
...

 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
+	flush_tlb_local();

 	/* Save MTRR state */
 	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

 	/* Disable MTRRs, and set the default type to uncached */
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
-	wbinvd();
+
+	/* Again, only flush caches if we have to. */
+	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+		wbinvd();
 }

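The X86_FEATURE_SELFSNOOP gate added above corresponds to the SS bit the CPU reports in CPUID leaf 1 (EDX bit 27), which also appears as the "ss" flag in /proc/cpuinfo. On CPUs that set it, the caches snoop their own writebacks, so the expensive WBINVD before and after reprogramming the MTRRs can be skipped, which is the point of this change. As a standalone illustration (userspace C, not the kernel's detection code), the bit can be inspected like this:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1; __get_cpuid() is the GCC/Clang helper from cpuid.h. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* EDX bit 27 is "SS" (self snoop) in the Intel SDM. */
	printf("self-snoop (SS): %s\n",
	       (edx & (1u << 27)) ? "supported" : "not supported");
	return 0;
}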
 static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
+	flush_tlb_local();

 	/* Intel (P6) standard MTRRs */
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
...
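For readers decoding the literal 0xcff in prepare_set() above: the low word of MSR_MTRRdefType packs the default memory type in its low byte (architecturally only bits 2:0 are defined, the rest of the byte is reserved), the fixed-range enable (FE) in bit 10, and the global MTRR enable (E) in bit 11. Clearing all of these with & ~0xcff disables the MTRRs and sets the default type to 0, i.e. uncacheable, which post_set() then undoes by restoring the saved value. A minimal sketch of that arithmetic, with constant names invented here for exposition:

#include <assert.h>

/* Hypothetical names; the kernel writes the literal 0xcff. */
#define MTRR_DEF_TYPE_FIELD	0x0ffu		/* low byte - default memory type */
#define MTRR_DEF_FE		(1u << 10)	/* fixed-range MTRRs enable */
#define MTRR_DEF_E		(1u << 11)	/* MTRRs globally enabled */

int main(void)
{
	/* The three fields together are exactly the 0xcff mask above. */
	assert((MTRR_DEF_TYPE_FIELD | MTRR_DEF_FE | MTRR_DEF_E) == 0xcff);
	return 0;
}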
 	local_irq_restore(flags);

 	/* Use the atomic bitops to update the global mask */
-	for (count = 0; count < sizeof mask * 8; ++count) {
+	for (count = 0; count < sizeof(mask) * 8; ++count) {
 		if (mask & 0x01)
 			set_bit(count, &smp_changes_mask);
 		mask >>= 1;
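One note on this final hunk: sizeof is a unary operator and binds tighter than multiplication, so sizeof mask * 8 already parsed as (sizeof mask) * 8. Adding the parentheses is a readability cleanup, not a behavior change, as this quick standalone check shows:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0;

	/* Both spellings yield the bit width of 'mask' (64 on LP64). */
	printf("%zu %zu\n", sizeof mask * 8, sizeof(mask) * 8);
	return 0;
}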