@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/module.h>
 
 #include <asm/cpu_device_id.h>
@@ -7,14 +8,17 @@
 static struct intel_uncore_type *empty_uncore[] = { NULL, };
 struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
 struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
+struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;
 
 static bool pcidrv_registered;
 struct pci_driver *uncore_pci_driver;
+/* The PCI driver for the device which the uncore doesn't own. */
+struct pci_driver *uncore_pci_sub_driver;
 /* pci bus to socket mapping */
 DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
 struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
 struct pci_extra_dev *uncore_extra_pci_dev;
-static int max_packages;
+int __uncore_max_dies;
 
 /* mask of cpus that collect uncore events */
 static cpumask_t uncore_cpu_mask;
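
Note: max_packages is gone; everything below sizes its per-die arrays through uncore_max_dies(). The accessor itself lives in uncore.h next to the extern declaration; for reference, its upstream shape is just a macro over the new global:

	/* from arch/x86/events/intel/uncore.h (counterpart of this hunk) */
	extern int __uncore_max_dies;
	#define uncore_max_dies()	(__uncore_max_dies)
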
---|
@@ -27,7 +31,7 @@
 
 MODULE_LICENSE("GPL");
 
-static int uncore_pcibus_to_physid(struct pci_bus *bus)
+int uncore_pcibus_to_physid(struct pci_bus *bus)
 {
 	struct pci2phy_map *map;
 	int phys_id = -1;
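
For context, uncore_pcibus_to_physid() (now exported to other compilation units, hence the dropped static) walks pci2phy_map_head under pci2phy_map_lock. The entry type, as declared in uncore.h of this era, maps every bus number of one PCI segment to a physical socket id:

	struct pci2phy_map {
		struct list_head list;
		int segment;			/* PCI segment (domain) */
		int pbus_to_physid[256];	/* bus number -> physical socket id */
	};
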
---|
@@ -100,13 +104,13 @@
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	unsigned int pkgid = topology_logical_package_id(cpu);
+	unsigned int dieid = topology_logical_die_id(cpu);
 
 	/*
 	 * The unsigned check also catches the '-1' return value for non
 	 * existent mappings in the topology map.
 	 */
-	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
+	return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
 }
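
The retained comment deserves one line of unpacking: topology_logical_die_id() returns -1 when the CPU has no mapping, and storing that in an unsigned int wraps it to UINT_MAX, so the single compare rejects the error value and out-of-range ids alike. In isolation:

	unsigned int dieid = (unsigned int)-1;	/* '-1' from a missing mapping */

	if (dieid < uncore_max_dies())		/* false: UINT_MAX fails the bound */
		;				/* reached only for a valid die id */
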
---|
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -116,6 +120,24 @@
 	rdmsrl(event->hw.event_base, count);
 
 	return count;
+}
+
+void uncore_mmio_exit_box(struct intel_uncore_box *box)
+{
+	if (box->io_addr)
+		iounmap(box->io_addr);
+}
+
+u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
+			     struct perf_event *event)
+{
+	if (!box->io_addr)
+		return 0;
+
+	if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
+		return 0;
+
+	return readq(box->io_addr + event->hw.event_base);
 }
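
uncore_mmio_exit_box() above is the teardown half of a pair: each MMIO type supplies an init_box() callback that ioremaps its register block and stashes the mapping in box->io_addr, which the reader and the exit path then test. The callback itself is platform code outside this patch; a hypothetical sketch of its shape, where the base-address lookup, map size, and control value are all placeholders:

	static void hypothetical_uncore_mmio_init_box(struct intel_uncore_box *box)
	{
		resource_size_t addr;

		addr = hypothetical_read_mmio_base(box);	/* placeholder helper */
		box->io_addr = ioremap(addr, HYPOTHETICAL_MMIO_MAP_SIZE);
		if (!box->io_addr)
			return;

		writel(HYPOTHETICAL_BOX_CTL_INIT, box->io_addr);	/* unfreeze counters */
	}
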
---|
 
 /*
@@ -311,7 +333,7 @@
 	uncore_pmu_init_hrtimer(box);
 	box->cpu = -1;
 	box->pci_phys_id = -1;
-	box->pkgid = -1;
+	box->dieid = -1;
 
 	/* set default hrtimer timeout */
 	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
@@ -691,14 +713,6 @@
 	if (pmu->func_id < 0)
 		return -ENOENT;
 
-	/*
-	 * Uncore PMU does measure at all privilege level all the time.
-	 * So it doesn't make sense to specify any exclude bits.
-	 */
-	if (event->attr.exclude_user || event->attr.exclude_kernel ||
-	    event->attr.exclude_hv || event->attr.exclude_idle)
-		return -EINVAL;
-
 	/* Sampling not supported yet */
 	if (hwc->sample_period)
 		return -EINVAL;
@@ -833,10 +847,13 @@
 			.stop		= uncore_pmu_event_stop,
 			.read		= uncore_pmu_event_read,
 			.module		= THIS_MODULE,
+			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
+			.attr_update	= pmu->type->attr_update,
 		};
 	} else {
 		pmu->pmu = *pmu->type->pmu;
 		pmu->pmu.attr_groups = pmu->type->attr_groups;
+		pmu->pmu.attr_update = pmu->type->attr_update;
 	}
 
 	if (pmu->type->num_boxes == 1) {
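
The exclude_* rejection deleted from the event-init path in the hunk above is not lost: PERF_PMU_CAP_NO_EXCLUDE moves the check into the perf core, where perf_try_init_event() fails any event carrying exclude bits for a PMU with this capability. The core-side helper it relies on (from include/linux/perf_event.h) is:

	static inline bool event_has_any_exclude_flag(struct perf_event *event)
	{
		struct perf_event_attr *attr = &event->attr;

		return attr->exclude_idle || attr->exclude_user ||
		       attr->exclude_kernel || attr->exclude_hv ||
		       attr->exclude_guest || attr->exclude_host;
	}

Note this is slightly stricter than the old open-coded test, which never looked at exclude_guest or exclude_host.
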
---|
@@ -865,9 +882,9 @@
 
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
-	int pkg;
+	int die;
 
-	for (pkg = 0; pkg < max_packages; pkg++)
-		kfree(pmu->boxes[pkg]);
+	for (die = 0; die < uncore_max_dies(); die++)
+		kfree(pmu->boxes[die]);
 	kfree(pmu->boxes);
 }
 
@@ -876,6 +893,9 @@
 {
 	struct intel_uncore_pmu *pmu = type->pmus;
 	int i;
+
+	if (type->cleanup_mapping)
+		type->cleanup_mapping(type);
 
 	if (pmu) {
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
@@ -905,7 +925,7 @@
 	if (!pmus)
 		return -ENOMEM;
 
-	size = max_packages * sizeof(struct intel_uncore_box *);
+	size = uncore_max_dies() * sizeof(struct intel_uncore_box *);
 
 	for (i = 0; i < type->num_boxes; i++) {
 		pmus[i].func_id = setid ? i : -1;
@@ -944,6 +964,9 @@
 
 	type->pmu_group = &uncore_pmu_attr_group;
 
+	if (type->set_mapping)
+		type->set_mapping(type);
+
 	return 0;
 
 err:
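
cleanup_mapping() pairs with the set_mapping() hook invoked at the end of uncore_type_init() above: a type may build topology-mapping sysfs attributes (published through the new attr_update field) and must tear them down on exit. A hypothetical type wiring, purely to show the shape; the names are illustrative and not from this patch:

	static void hypothetical_set_mapping(struct intel_uncore_type *type)
	{
		/* e.g. allocate and expose die-to-topology attributes */
		type->attr_update = hypothetical_mapping_attr_update;	/* assumption */
	}

	static void hypothetical_cleanup_mapping(struct intel_uncore_type *type)
	{
		/* free whatever set_mapping() allocated */
	}

	static struct intel_uncore_type hypothetical_type = {
		.name		 = "hypothetical_unit",
		.set_mapping	 = hypothetical_set_mapping,
		.cleanup_mapping = hypothetical_cleanup_mapping,
	};
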
---|
@@ -968,66 +991,73 @@
 }
 
 /*
- * add a pci uncore device
+ * Get the die information of a PCI device.
+ * @pdev: The PCI device.
+ * @phys_id: The physical socket id which the device maps to.
+ * @die: The die id which the device maps to.
  */
-static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int uncore_pci_get_dev_die_info(struct pci_dev *pdev,
+				       int *phys_id, int *die)
 {
-	struct intel_uncore_type *type;
-	struct intel_uncore_pmu *pmu = NULL;
-	struct intel_uncore_box *box;
-	int phys_id, pkg, ret;
-
-	phys_id = uncore_pcibus_to_physid(pdev->bus);
-	if (phys_id < 0)
+	*phys_id = uncore_pcibus_to_physid(pdev->bus);
+	if (*phys_id < 0)
 		return -ENODEV;
 
-	pkg = topology_phys_to_logical_pkg(phys_id);
-	if (pkg < 0)
+	*die = (topology_max_die_per_package() > 1) ? *phys_id :
+			topology_phys_to_logical_pkg(*phys_id);
+	if (*die < 0)
 		return -EINVAL;
 
-	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
-		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
+	return 0;
+}
 
-		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
-		pci_set_drvdata(pdev, NULL);
-		return 0;
-	}
+/*
+ * Find the PMU of a PCI device.
+ * @pdev: The PCI device.
+ * @ids: The ID table of the available PCI devices with a PMU.
+ */
+static struct intel_uncore_pmu *
+uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
+{
+	struct intel_uncore_pmu *pmu = NULL;
+	struct intel_uncore_type *type;
+	kernel_ulong_t data;
+	unsigned int devfn;
 
-	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-
-	/*
-	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
-	 * for multiple instances of an uncore PMU device type. We should check
-	 * PCI slot and func to indicate the uncore box.
-	 */
-	if (id->driver_data & ~0xffff) {
-		struct pci_driver *pci_drv = pdev->driver;
-		const struct pci_device_id *ids = pci_drv->id_table;
-		unsigned int devfn;
-
-		while (ids && ids->vendor) {
-			if ((ids->vendor == pdev->vendor) &&
-			    (ids->device == pdev->device)) {
-				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
-						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
-				if (devfn == pdev->devfn) {
-					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
-					break;
-				}
+	while (ids && ids->vendor) {
+		if ((ids->vendor == pdev->vendor) &&
+		    (ids->device == pdev->device)) {
+			data = ids->driver_data;
+			devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data),
+					  UNCORE_PCI_DEV_FUNC(data));
+			if (devfn == pdev->devfn) {
+				type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)];
+				pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)];
+				break;
 			}
-			ids++;
 		}
-		if (pmu == NULL)
-			return -ENODEV;
-	} else {
-		/*
-		 * for performance monitoring unit with multiple boxes,
-		 * each box has a different function id.
-		 */
-		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
+		ids++;
 	}
+	return pmu;
+}
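
uncore_pci_find_dev_pmu() decodes driver_data with the UNCORE_PCI_DEV_* accessors. For reference, the packed layout from uncore.h; the dev/func fields are only populated by id tables that use the full encoding, which is what the `id->driver_data & ~0xffff` test in the probe path detects:

	#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
			((dev << 24) | (func << 16) | (type << 8) | idx)
	#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
	#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
	#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
	#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
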
---|
 
-	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
+/*
+ * Register the PMU for a PCI device
+ * @pdev: The PCI device.
+ * @type: The corresponding PMU type of the device.
+ * @pmu: The corresponding PMU of the device.
+ * @phys_id: The physical socket id which the device maps to.
+ * @die: The die id which the device maps to.
+ */
+static int uncore_pci_pmu_register(struct pci_dev *pdev,
+				   struct intel_uncore_type *type,
+				   struct intel_uncore_pmu *pmu,
+				   int phys_id, int die)
+{
+	struct intel_uncore_box *box;
+	int ret;
+
+	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
 		return -EINVAL;
 
 	box = uncore_alloc_box(type, NUMA_NO_NODE);
@@ -1041,41 +1071,109 @@
 
 	atomic_inc(&box->refcnt);
 	box->pci_phys_id = phys_id;
-	box->pkgid = pkg;
+	box->dieid = die;
 	box->pci_dev = pdev;
 	box->pmu = pmu;
 	uncore_box_init(box);
-	pci_set_drvdata(pdev, box);
 
-	pmu->boxes[pkg] = box;
+	pmu->boxes[die] = box;
 	if (atomic_inc_return(&pmu->activeboxes) > 1)
 		return 0;
 
 	/* First active box registers the pmu */
 	ret = uncore_pmu_register(pmu);
 	if (ret) {
-		pci_set_drvdata(pdev, NULL);
-		pmu->boxes[pkg] = NULL;
+		pmu->boxes[die] = NULL;
 		uncore_box_exit(box);
 		kfree(box);
 	}
 	return ret;
 }
 
+/*
+ * add a pci uncore device
+ */
+static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu = NULL;
+	int phys_id, die, ret;
+
+	ret = uncore_pci_get_dev_die_info(pdev, &phys_id, &die);
+	if (ret)
+		return ret;
+
+	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
+		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
+
+		uncore_extra_pci_dev[die].dev[idx] = pdev;
+		pci_set_drvdata(pdev, NULL);
+		return 0;
+	}
+
+	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
+
+	/*
+	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
+	 * for multiple instances of an uncore PMU device type. We should check
+	 * PCI slot and func to indicate the uncore box.
+	 */
+	if (id->driver_data & ~0xffff) {
+		struct pci_driver *pci_drv = pdev->driver;
+
+		pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table);
+		if (pmu == NULL)
+			return -ENODEV;
+	} else {
+		/*
+		 * for performance monitoring unit with multiple boxes,
+		 * each box has a different function id.
+		 */
+		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
+	}
+
+	ret = uncore_pci_pmu_register(pdev, type, pmu, phys_id, die);
+
+	pci_set_drvdata(pdev, pmu->boxes[die]);
+
+	return ret;
+}
+
+/*
+ * Unregister the PMU of a PCI device
+ * @pmu: The corresponding PMU is unregistered.
+ * @phys_id: The physical socket id which the device maps to.
+ * @die: The die id which the device maps to.
+ */
+static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu,
+				      int phys_id, int die)
+{
+	struct intel_uncore_box *box = pmu->boxes[die];
+
+	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
+		return;
+
+	pmu->boxes[die] = NULL;
+	if (atomic_dec_return(&pmu->activeboxes) == 0)
+		uncore_pmu_unregister(pmu);
+	uncore_box_exit(box);
+	kfree(box);
+}
+
 static void uncore_pci_remove(struct pci_dev *pdev)
 {
 	struct intel_uncore_box *box;
 	struct intel_uncore_pmu *pmu;
-	int i, phys_id, pkg;
+	int i, phys_id, die;
 
-	phys_id = uncore_pcibus_to_physid(pdev->bus);
+	if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die))
+		return;
 
 	box = pci_get_drvdata(pdev);
 	if (!box) {
-		pkg = topology_phys_to_logical_pkg(phys_id);
 		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
-			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
-				uncore_extra_pci_dev[pkg].dev[i] = NULL;
+			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
+				uncore_extra_pci_dev[die].dev[i] = NULL;
 				break;
 			}
 		}
@@ -1084,15 +1182,84 @@
 	}
 
 	pmu = box->pmu;
-	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
-		return;
 
 	pci_set_drvdata(pdev, NULL);
-	pmu->boxes[box->pkgid] = NULL;
-	if (atomic_dec_return(&pmu->activeboxes) == 0)
-		uncore_pmu_unregister(pmu);
-	uncore_box_exit(box);
-	kfree(box);
+
+	uncore_pci_pmu_unregister(pmu, phys_id, die);
+}
+
+static int uncore_bus_notify(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct intel_uncore_pmu *pmu;
+	int phys_id, die;
+
+	/* Unregister the PMU when the device is going to be deleted. */
+	if (action != BUS_NOTIFY_DEL_DEVICE)
+		return NOTIFY_DONE;
+
+	pmu = uncore_pci_find_dev_pmu(pdev, uncore_pci_sub_driver->id_table);
+	if (!pmu)
+		return NOTIFY_DONE;
+
+	if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die))
+		return NOTIFY_DONE;
+
+	uncore_pci_pmu_unregister(pmu, phys_id, die);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block uncore_notifier = {
+	.notifier_call = uncore_bus_notify,
+};
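
The notifier exists because a sub-driver device is owned by some other PCI driver: uncore never binds to it, so it gets no remove() callback of its own and must watch pci_bus_type for BUS_NOTIFY_DEL_DEVICE instead. A hypothetical id table for such a sub-driver, reusing the full driver_data encoding shown earlier (the device id and geometry are placeholders, not from this patch):

	static const struct pci_device_id hypothetical_sub_ids[] = {
		{ /* a counter unit at device 4, function 0, owned elsewhere */
			PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xabcd),	/* placeholder id */
			.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, 0, 0),
		},
		{ /* end: all zeroes */ }
	};

	static struct pci_driver hypothetical_sub_driver = {
		.name		= "hypothetical_sub",
		.id_table	= hypothetical_sub_ids,
		/* no probe/remove: this driver is never registered with the bus */
	};
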
---|
+
+static void uncore_pci_sub_driver_init(void)
+{
+	const struct pci_device_id *ids = uncore_pci_sub_driver->id_table;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	struct pci_dev *pci_sub_dev;
+	bool notify = false;
+	unsigned int devfn;
+	int phys_id, die;
+
+	while (ids && ids->vendor) {
+		pci_sub_dev = NULL;
+		type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)];
+		/*
+		 * Search the available device, and register the
+		 * corresponding PMU.
+		 */
+		while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+						     ids->device, pci_sub_dev))) {
+			devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
+					  UNCORE_PCI_DEV_FUNC(ids->driver_data));
+			if (devfn != pci_sub_dev->devfn)
+				continue;
+
+			pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
+			if (!pmu)
+				continue;
+
+			if (uncore_pci_get_dev_die_info(pci_sub_dev,
+							&phys_id, &die))
+				continue;
+
+			if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
+						     phys_id, die))
+				notify = true;
+		}
+		ids++;
+	}
+
+	if (notify && bus_register_notifier(&pci_bus_type, &uncore_notifier))
+		notify = false;
+
+	if (!notify)
+		uncore_pci_sub_driver = NULL;
 }
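
One detail worth calling out in the loop above: pci_get_device() is a refcounted iterator. It drops the reference held on the device passed in as the third argument and takes one on the device it returns, so passing the previous result back in needs no explicit pci_dev_put() inside the loop, and the terminating NULL leaves nothing held. The idiom in isolation (device_id is a stand-in):

	struct pci_dev *pdev = NULL;

	/* visits every Intel device with 'device_id'; references are
	 * exchanged on each step */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, pdev))) {
		/* ... inspect pdev ... */
	}
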
---|
 
 static int __init uncore_pci_init(void)
@@ -1100,7 +1267,7 @@
 	size_t size;
 	int ret;
 
-	size = max_packages * sizeof(struct pci_extra_dev);
+	size = uncore_max_dies() * sizeof(struct pci_extra_dev);
 	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
 	if (!uncore_extra_pci_dev) {
 		ret = -ENOMEM;
@@ -1117,6 +1284,9 @@
 	ret = pci_register_driver(uncore_pci_driver);
 	if (ret)
 		goto errtype;
+
+	if (uncore_pci_sub_driver)
+		uncore_pci_sub_driver_init();
 
 	pcidrv_registered = true;
 	return 0;
@@ -1135,6 +1305,8 @@
 {
 	if (pcidrv_registered) {
 		pcidrv_registered = false;
+		if (uncore_pci_sub_driver)
+			bus_unregister_notifier(&pci_bus_type, &uncore_notifier);
 		pci_unregister_driver(uncore_pci_driver);
 		uncore_types_exit(uncore_pci_uncores);
 		kfree(uncore_extra_pci_dev);
@@ -1147,11 +1319,11 @@
 {
 	struct intel_uncore_pmu *pmu = type->pmus;
 	struct intel_uncore_box *box;
-	int i, pkg;
+	int i, die;
 
-	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
+	die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
 	for (i = 0; i < type->num_boxes; i++, pmu++) {
-		box = pmu->boxes[pkg];
+		box = pmu->boxes[die];
 		if (!box)
 			continue;
 
@@ -1179,18 +1351,33 @@
 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
 }
 
-static int uncore_event_cpu_offline(unsigned int cpu)
+static void uncore_box_unref(struct intel_uncore_type **types, int id)
 {
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, pkg, target;
+	int i;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[id];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+	int die, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
 		goto unref;
 	/* Find a new cpu to collect uncore events */
-	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
 
 	/* Migrate uncore events to the new target */
 	if (target < nr_cpu_ids)
@@ -1199,25 +1386,19 @@
 		target = -1;
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
+	uncore_change_context(uncore_mmio_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
 
 unref:
 	/* Clear the references */
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
+	die = topology_logical_die_id(cpu);
+	uncore_box_unref(uncore_msr_uncores, die);
+	uncore_box_unref(uncore_mmio_uncores, die);
 	return 0;
 }
 
 static int allocate_boxes(struct intel_uncore_type **types,
-			  unsigned int pkg, unsigned int cpu)
+			  unsigned int die, unsigned int cpu)
 {
 	struct intel_uncore_box *box, *tmp;
 	struct intel_uncore_type *type;
@@ -1230,20 +1411,20 @@
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
+			if (pmu->boxes[die])
 				continue;
 			box = uncore_alloc_box(type, cpu_to_node(cpu));
 			if (!box)
 				goto cleanup;
 			box->pmu = pmu;
-			box->pkgid = pkg;
+			box->dieid = die;
 			list_add(&box->active_list, &allocated);
 		}
 	}
 	/* Install them in the pmus */
 	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
 		list_del_init(&box->active_list);
-		box->pmu->boxes[pkg] = box;
+		box->pmu->boxes[die] = box;
 	}
 	return 0;
 
@@ -1255,15 +1436,15 @@
 	return -ENOMEM;
 }
 
-static int uncore_event_cpu_online(unsigned int cpu)
+static int uncore_box_ref(struct intel_uncore_type **types,
+			  int id, unsigned int cpu)
 {
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, ret, pkg, target;
+	int i, ret;
 
-	pkg = topology_logical_package_id(cpu);
-	ret = allocate_boxes(types, pkg, cpu);
+	ret = allocate_boxes(types, id, cpu);
 	if (ret)
 		return ret;
 
@@ -1271,23 +1452,38 @@
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
+			box = pmu->boxes[id];
 			if (box && atomic_inc_return(&box->refcnt) == 1)
 				uncore_box_init(box);
 		}
 	}
+	return 0;
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+	int die, target, msr_ret, mmio_ret;
+
+	die = topology_logical_die_id(cpu);
+	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
+	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
+	if (msr_ret && mmio_ret)
+		return -ENOMEM;
 
 	/*
 	 * Check if there is an online cpu in the package
 	 * which collects uncore events already.
 	 */
-	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
+	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
 	if (target < nr_cpu_ids)
 		return 0;
 
 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
 
-	uncore_change_context(uncore_msr_uncores, -1, cpu);
+	if (!msr_ret)
+		uncore_change_context(uncore_msr_uncores, -1, cpu);
+	if (!mmio_ret)
+		uncore_change_context(uncore_mmio_uncores, -1, cpu);
 	uncore_change_context(uncore_pci_uncores, -1, cpu);
 	return 0;
 }
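
uncore_event_cpu_online()/_offline() are wired up through CPU hotplug states in intel_uncore_init(), outside these hunks. For orientation, the registration in the upstream tree of this era looks roughly like this (the first CPU to come online in each die becomes its event collector; going offline migrates that role):

	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
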
---|
@@ -1335,12 +1531,31 @@
 	return ret;
 }
 
-#define X86_UNCORE_MODEL_MATCH(model, init)	\
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
+static int __init uncore_mmio_init(void)
+{
+	struct intel_uncore_type **types = uncore_mmio_uncores;
+	int ret;
+
+	ret = uncore_types_init(types, true);
+	if (ret)
+		goto err;
+
+	for (; *types; types++) {
+		ret = type_pmu_register(*types);
+		if (ret)
+			goto err;
+	}
+	return 0;
+err:
+	uncore_types_exit(uncore_mmio_uncores);
+	uncore_mmio_uncores = empty_uncore;
+	return ret;
+}
 
 struct intel_uncore_init_fun {
 	void	(*cpu_init)(void);
 	int	(*pci_init)(void);
+	void	(*mmio_init)(void);
 };
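
An mmio_init() hook only has to point the global uncore_mmio_uncores list at the platform's MMIO type array before uncore_mmio_init() above walks it. The Tiger Lake hook referenced later in this patch plausibly reduces to something like this (the array name is an assumption):

	void tgl_uncore_mmio_init(void)
	{
		uncore_mmio_uncores = tgl_mmio_uncores;	/* platform's MMIO types */
	}
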
---|
 
 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
@@ -1406,42 +1621,78 @@
 	.pci_init = skx_uncore_pci_init,
 };
 
-static const struct x86_cpu_id intel_uncore_match[] __initconst = {
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	  snb_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	  ivb_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,  bdw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,  bdw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,   snbep_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,	  nhmex_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,	  nhmex_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	  ivbep_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	  hswep_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  bdx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,	  knl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	  skx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
-	{},
+static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
+	.cpu_init = icl_uncore_cpu_init,
+	.pci_init = skl_uncore_pci_init,
 };
 
+static const struct intel_uncore_init_fun tgl_uncore_init __initconst = {
+	.cpu_init = tgl_uncore_cpu_init,
+	.mmio_init = tgl_uncore_mmio_init,
+};
+
+static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
+	.cpu_init = tgl_uncore_cpu_init,
+	.mmio_init = tgl_l_uncore_mmio_init,
+};
+
+static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
+	.cpu_init = icx_uncore_cpu_init,
+	.pci_init = icx_uncore_pci_init,
+	.mmio_init = icx_uncore_mmio_init,
+};
+
+static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
+	.cpu_init = snr_uncore_cpu_init,
+	.pci_init = snr_uncore_pci_init,
+	.mmio_init = snr_uncore_mmio_init,
+};
+
+static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&ivb_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&hsw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hsw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&hsw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&bdw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&bdw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snbep_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhmex_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhmex_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&ivbep_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&hswep_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&bdx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&bdx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&skx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI,	&icl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&icx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&tgl_l_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&tgl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&snr_uncore_init),
+	{},
+};
 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
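
X86_MATCH_INTEL_FAM6_MODEL() replaces the local X86_UNCORE_MODEL_MATCH() macro deleted earlier; it comes from <asm/cpu_device_id.h> (included at the top of this file) and expands to the same vendor/family/model triple:

	/* upstream definition in arch/x86/include/asm/cpu_device_id.h */
	#define X86_MATCH_INTEL_FAM6_MODEL(model, data)			\
		X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)

Note the table entries now pass a pointer (&nhm_uncore_init) where the old macro took the structure name and added the '&' itself.
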
---|
 
 static int __init intel_uncore_init(void)
 {
 	const struct x86_cpu_id *id;
 	struct intel_uncore_init_fun *uncore_init;
-	int pret = 0, cret = 0, ret;
+	int pret = 0, cret = 0, mret = 0, ret;
 
 	id = x86_match_cpu(intel_uncore_match);
 	if (!id)
@@ -1450,7 +1701,8 @@
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;
 
-	max_packages = topology_max_packages();
+	__uncore_max_dies =
+		topology_max_packages() * topology_max_die_per_package();
 
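
A worked example of the new sizing: on a two-package system with two dies per package, arrays indexed by logical die id now get four slots where max_packages would have provided two:

	/*
	 * e.g. two packages, two dies each (Cascade Lake-AP style):
	 *   topology_max_packages()        == 2
	 *   topology_max_die_per_package() == 2
	 *   __uncore_max_dies              == 2 * 2 == 4
	 * so every pmu->boxes[] array gets four entries.
	 */
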
---|
 	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
 	if (uncore_init->pci_init) {
@@ -1464,7 +1716,12 @@
 		cret = uncore_cpu_init();
 	}
 
-	if (cret && pret)
+	if (uncore_init->mmio_init) {
+		uncore_init->mmio_init();
+		mret = uncore_mmio_init();
+	}
+
+	if (cret && pret && mret)
 		return -ENODEV;
 
 	/* Install hotplug callbacks to setup the targets for each package */
@@ -1478,6 +1735,7 @@
 
 err:
 	uncore_types_exit(uncore_msr_uncores);
+	uncore_types_exit(uncore_mmio_uncores);
 	uncore_pci_exit();
 	return ret;
 }
@@ -1487,6 +1745,7 @@
 {
 	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
+	uncore_types_exit(uncore_mmio_uncores);
 	uncore_pci_exit();
 }
 module_exit(intel_uncore_exit);
---|