.. | .. |
---|
17 | 17 | #include <linux/pci.h> |
---|
18 | 18 | |
---|
19 | 19 | #include <asm/cpufeature.h> |
---|
| 20 | +#include <asm/cacheinfo.h> |
---|
20 | 21 | #include <asm/amd_nb.h> |
---|
21 | 22 | #include <asm/smp.h> |
---|
22 | 23 | |
---|
.. | .. |
---|
247 | 248 | switch (leaf) { |
---|
248 | 249 | case 1: |
---|
249 | 250 | l1 = &l1i; |
---|
| 251 | + fallthrough; |
---|
250 | 252 | case 0: |
---|
251 | 253 | if (!l1->val) |
---|
252 | 254 | return; |
---|
.. | .. |
---|
602 | 604 | else |
---|
603 | 605 | amd_cpuid4(index, &eax, &ebx, &ecx); |
---|
604 | 606 | amd_init_l3_cache(this_leaf, index); |
---|
| 607 | + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { |
---|
| 608 | + cpuid_count(0x8000001d, index, &eax.full, |
---|
| 609 | + &ebx.full, &ecx.full, &edx); |
---|
| 610 | + amd_init_l3_cache(this_leaf, index); |
---|
605 | 611 | } else { |
---|
606 | 612 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); |
---|
607 | 613 | } |
---|
.. | .. |
---|
625 | 631 | union _cpuid4_leaf_eax cache_eax; |
---|
626 | 632 | int i = -1; |
---|
627 | 633 | |
---|
628 | | - if (c->x86_vendor == X86_VENDOR_AMD) |
---|
| 634 | + if (c->x86_vendor == X86_VENDOR_AMD || |
---|
| 635 | + c->x86_vendor == X86_VENDOR_HYGON) |
---|
629 | 636 | op = 0x8000001d; |
---|
630 | 637 | else |
---|
631 | 638 | op = 4; |
---|
.. | .. |
---|
639 | 646 | return i; |
---|
640 | 647 | } |
---|
641 | 648 | |
---|
642 | | -void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) |
---|
| 649 | +void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu) |
---|
643 | 650 | { |
---|
644 | 651 | /* |
---|
645 | 652 | * We may have multiple LLCs if L3 caches exist, so check if we |
---|
.. | .. |
---|
650 | 657 | |
---|
651 | 658 | if (c->x86 < 0x17) { |
---|
652 | 659 | /* LLC is at the node level. */ |
---|
653 | | - per_cpu(cpu_llc_id, cpu) = node_id; |
---|
| 660 | + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; |
---|
654 | 661 | } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) { |
---|
655 | 662 | /* |
---|
656 | 663 | * LLC is at the core complex level. |
---|
.. | .. |
---|
677 | 684 | } |
---|
678 | 685 | } |
---|
679 | 686 | |
---|
| 687 | +void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) |
---|
| 688 | +{ |
---|
| 689 | + /* |
---|
| 690 | + * We may have multiple LLCs if L3 caches exist, so check if we |
---|
| 691 | + * have an L3 cache by looking at the L3 cache CPUID leaf. |
---|
| 692 | + */ |
---|
| 693 | + if (!cpuid_edx(0x80000006)) |
---|
| 694 | + return; |
---|
| 695 | + |
---|
| 696 | + /* |
---|
| 697 | + * LLC is at the core complex level. |
---|
| 698 | + * Core complex ID is ApicId[3] for these processors. |
---|
| 699 | + */ |
---|
| 700 | + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; |
---|
| 701 | +} |
---|
| 702 | + |
---|
680 | 703 | void init_amd_cacheinfo(struct cpuinfo_x86 *c) |
---|
681 | 704 | { |
---|
682 | 705 | |
---|
.. | .. |
---|
688 | 711 | else |
---|
689 | 712 | num_cache_leaves = 3; |
---|
690 | 713 | } |
---|
| 714 | +} |
---|
| 715 | + |
---|
/*
 * Initialize cache topology bookkeeping for Hygon CPUs: count the cache
 * leaves by probing the CPUID cache-properties leaf (find_num_cache_leaves()
 * uses leaf 0x8000001d for Hygon, matching the AMD path) and record the
 * result in the file-global num_cache_leaves.
 */
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{
	num_cache_leaves = find_num_cache_leaves(c);
}
---|
692 | 720 | |
---|
693 | 721 | void init_intel_cacheinfo(struct cpuinfo_x86 *c) |
---|
.. | .. |
---|
912 | 940 | int index_msb, i; |
---|
913 | 941 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
---|
914 | 942 | |
---|
915 | | - if (c->x86_vendor == X86_VENDOR_AMD) { |
---|
| 943 | + if (c->x86_vendor == X86_VENDOR_AMD || |
---|
| 944 | + c->x86_vendor == X86_VENDOR_HYGON) { |
---|
916 | 945 | if (__cache_amd_cpumap_setup(cpu, index, base)) |
---|
917 | 946 | return; |
---|
918 | 947 | } |
---|