hc
2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/arch/x86/events/intel/uncore_snbep.c
....@@ -273,6 +273,30 @@
273273 #define SKX_CPUNODEID 0xc0
274274 #define SKX_GIDNIDMAP 0xd4
275275
276
+/*
277
+ * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
278
+ * that BIOS programmed. MSR has package scope.
279
+ * | Bit | Default | Description
280
+ * | [63] | 00h | VALID - When set, indicates the CPU bus
281
+ * numbers have been initialized. (RO)
282
+ * |[62:48]| --- | Reserved
283
+ * |[47:40]| 00h | BUS_NUM_5 — Return the bus number BIOS assigned
284
+ * CPUBUSNO(5). (RO)
285
+ * |[39:32]| 00h | BUS_NUM_4 — Return the bus number BIOS assigned
286
+ * CPUBUSNO(4). (RO)
287
+ * |[31:24]| 00h | BUS_NUM_3 — Return the bus number BIOS assigned
288
+ * CPUBUSNO(3). (RO)
289
+ * |[23:16]| 00h | BUS_NUM_2 — Return the bus number BIOS assigned
290
+ * CPUBUSNO(2). (RO)
291
+ * |[15:8] | 00h | BUS_NUM_1 — Return the bus number BIOS assigned
292
+ * CPUBUSNO(1). (RO)
293
+ * | [7:0] | 00h | BUS_NUM_0 — Return the bus number BIOS assigned
294
+ * CPUBUSNO(0). (RO)
295
+ */
296
+#define SKX_MSR_CPU_BUS_NUMBER 0x300
297
+#define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
298
+#define BUS_NUM_STRIDE 8
299
+
276300 /* SKX CHA */
277301 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
278302 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
....@@ -324,12 +348,114 @@
324348 #define SKX_M2M_PCI_PMON_CTR0 0x200
325349 #define SKX_M2M_PCI_PMON_BOX_CTL 0x258
326350
351
+/* SNR Ubox */
352
+#define SNR_U_MSR_PMON_CTR0 0x1f98
353
+#define SNR_U_MSR_PMON_CTL0 0x1f91
354
+#define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
355
+#define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
356
+
357
+/* SNR CHA */
358
+#define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
359
+#define SNR_CHA_MSR_PMON_CTL0 0x1c01
360
+#define SNR_CHA_MSR_PMON_CTR0 0x1c08
361
+#define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
362
+#define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
363
+
364
+
365
+/* SNR IIO */
366
+#define SNR_IIO_MSR_PMON_CTL0 0x1e08
367
+#define SNR_IIO_MSR_PMON_CTR0 0x1e01
368
+#define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
369
+#define SNR_IIO_MSR_OFFSET 0x10
370
+#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
371
+
372
+/* SNR IRP */
373
+#define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
374
+#define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
375
+#define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
376
+#define SNR_IRP_MSR_OFFSET 0x10
377
+
378
+/* SNR M2PCIE */
379
+#define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
380
+#define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
381
+#define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
382
+#define SNR_M2PCIE_MSR_OFFSET 0x10
383
+
384
+/* SNR PCU */
385
+#define SNR_PCU_MSR_PMON_CTL0 0x1ef1
386
+#define SNR_PCU_MSR_PMON_CTR0 0x1ef8
387
+#define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
388
+#define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
389
+
390
+/* SNR M2M */
391
+#define SNR_M2M_PCI_PMON_CTL0 0x468
392
+#define SNR_M2M_PCI_PMON_CTR0 0x440
393
+#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
394
+#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
395
+
396
+/* SNR PCIE3 */
397
+#define SNR_PCIE3_PCI_PMON_CTL0 0x508
398
+#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
399
+#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0
400
+
401
+/* SNR IMC */
402
+#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
403
+#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
404
+#define SNR_IMC_MMIO_PMON_CTL0 0x40
405
+#define SNR_IMC_MMIO_PMON_CTR0 0x8
406
+#define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
407
+#define SNR_IMC_MMIO_OFFSET 0x4000
408
+#define SNR_IMC_MMIO_SIZE 0x4000
409
+#define SNR_IMC_MMIO_BASE_OFFSET 0xd0
410
+#define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
411
+#define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
412
+#define SNR_IMC_MMIO_MEM0_MASK 0x7FF
413
+
414
+/* ICX CHA */
415
+#define ICX_C34_MSR_PMON_CTR0 0xb68
416
+#define ICX_C34_MSR_PMON_CTL0 0xb61
417
+#define ICX_C34_MSR_PMON_BOX_CTL 0xb60
418
+#define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65
419
+
420
+/* ICX IIO */
421
+#define ICX_IIO_MSR_PMON_CTL0 0xa58
422
+#define ICX_IIO_MSR_PMON_CTR0 0xa51
423
+#define ICX_IIO_MSR_PMON_BOX_CTL 0xa50
424
+
425
+/* ICX IRP */
426
+#define ICX_IRP0_MSR_PMON_CTL0 0xa4d
427
+#define ICX_IRP0_MSR_PMON_CTR0 0xa4b
428
+#define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a
429
+
430
+/* ICX M2PCIE */
431
+#define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
432
+#define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
433
+#define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40
434
+
435
+/* ICX UPI */
436
+#define ICX_UPI_PCI_PMON_CTL0 0x350
437
+#define ICX_UPI_PCI_PMON_CTR0 0x320
438
+#define ICX_UPI_PCI_PMON_BOX_CTL 0x318
439
+#define ICX_UPI_CTL_UMASK_EXT 0xffffff
440
+
441
+/* ICX M3UPI*/
442
+#define ICX_M3UPI_PCI_PMON_CTL0 0xd8
443
+#define ICX_M3UPI_PCI_PMON_CTR0 0xa8
444
+#define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0
445
+
446
+/* ICX IMC */
447
+#define ICX_NUMBER_IMC_CHN 3
448
+#define ICX_IMC_MEM_STRIDE 0x4
449
+
327450 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
328451 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
329452 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
330453 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
331454 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
332455 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
456
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
457
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
458
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
333459 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
334460 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
335461 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
....@@ -343,11 +469,14 @@
343469 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
344470 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
345471 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
472
+DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
346473 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
474
+DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
347475 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
348476 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
349477 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
350478 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
479
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
351480 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
352481 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
353482 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
....@@ -1057,8 +1186,8 @@
10571186
10581187 if (reg1->idx != EXTRA_REG_NONE) {
10591188 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1060
- int pkg = box->pkgid;
1061
- struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
1189
+ int die = box->dieid;
1190
+ struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
10621191
10631192 if (filter_pdev) {
10641193 pci_write_config_dword(filter_pdev, reg1->reg,
....@@ -2699,6 +2828,7 @@
26992828 return false;
27002829
27012830 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2831
+ pci_dev_put(dev);
27022832 if (!hswep_get_chop(capid4))
27032833 return true;
27042834
....@@ -3507,6 +3637,203 @@
35073637 .read_counter = uncore_msr_read_counter,
35083638 };
35093639
3640
+static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3641
+{
3642
+ return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
3643
+}
3644
+
3645
+static umode_t
3646
+pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3647
+ int die, int zero_bus_pmu)
3648
+{
3649
+ struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3650
+
3651
+ return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3652
+}
3653
+
3654
+static umode_t
3655
+skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3656
+{
3657
+ /* Root bus 0x00 is valid only for pmu_idx = 0. */
3658
+ return pmu_iio_mapping_visible(kobj, attr, die, 0);
3659
+}
3660
+
3661
+static ssize_t skx_iio_mapping_show(struct device *dev,
3662
+ struct device_attribute *attr, char *buf)
3663
+{
3664
+ struct pci_bus *bus = pci_find_next_bus(NULL);
3665
+ struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
3666
+ struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3667
+ long die = (long)ea->var;
3668
+
3669
+ /*
3670
+ * Current implementation is for single segment configuration hence it's
3671
+ * safe to take the segment value from the first available root bus.
3672
+ */
3673
+ return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
3674
+ skx_iio_stack(uncore_pmu, die));
3675
+}
3676
+
3677
+static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3678
+{
3679
+ u64 msr_value;
3680
+
3681
+ if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3682
+ !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3683
+ return -ENXIO;
3684
+
3685
+ *topology = msr_value;
3686
+
3687
+ return 0;
3688
+}
3689
+
3690
+static int die_to_cpu(int die)
3691
+{
3692
+ int res = 0, cpu, current_die;
3693
+ /*
3694
+ * Using cpus_read_lock() to ensure cpu is not going down between
3695
+ * looking at cpu_online_mask.
3696
+ */
3697
+ cpus_read_lock();
3698
+ for_each_online_cpu(cpu) {
3699
+ current_die = topology_logical_die_id(cpu);
3700
+ if (current_die == die) {
3701
+ res = cpu;
3702
+ break;
3703
+ }
3704
+ }
3705
+ cpus_read_unlock();
3706
+ return res;
3707
+}
3708
+
3709
+static int skx_iio_get_topology(struct intel_uncore_type *type)
3710
+{
3711
+ int i, ret;
3712
+ struct pci_bus *bus = NULL;
3713
+
3714
+ /*
3715
+ * Verified single-segment environments only; disabled for multiple
3716
+ * segment topologies for now except VMD domains.
3717
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
3718
+ */
3719
+ while ((bus = pci_find_next_bus(bus))
3720
+ && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
3721
+ ;
3722
+ if (bus)
3723
+ return -EPERM;
3724
+
3725
+ type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
3726
+ if (!type->topology)
3727
+ return -ENOMEM;
3728
+
3729
+ for (i = 0; i < uncore_max_dies(); i++) {
3730
+ ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
3731
+ if (ret) {
3732
+ kfree(type->topology);
3733
+ type->topology = NULL;
3734
+ return ret;
3735
+ }
3736
+ }
3737
+
3738
+ return 0;
3739
+}
3740
+
3741
+static struct attribute_group skx_iio_mapping_group = {
3742
+ .is_visible = skx_iio_mapping_visible,
3743
+};
3744
+
3745
+static const struct attribute_group *skx_iio_attr_update[] = {
3746
+ &skx_iio_mapping_group,
3747
+ NULL,
3748
+};
3749
+
3750
+static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3751
+ struct attribute_group *ag)
3752
+{
3753
+ int i;
3754
+
3755
+ for (i = 0; groups[i]; i++) {
3756
+ if (groups[i] == ag) {
3757
+ for (i++; groups[i]; i++)
3758
+ groups[i - 1] = groups[i];
3759
+ groups[i - 1] = NULL;
3760
+ break;
3761
+ }
3762
+ }
3763
+}
3764
+
3765
+static int
3766
+pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3767
+{
3768
+ char buf[64];
3769
+ int ret;
3770
+ long die = -1;
3771
+ struct attribute **attrs = NULL;
3772
+ struct dev_ext_attribute *eas = NULL;
3773
+
3774
+ ret = type->get_topology(type);
3775
+ if (ret < 0)
3776
+ goto clear_attr_update;
3777
+
3778
+ ret = -ENOMEM;
3779
+
3780
+ /* One more for NULL. */
3781
+ attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3782
+ if (!attrs)
3783
+ goto clear_topology;
3784
+
3785
+ eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3786
+ if (!eas)
3787
+ goto clear_attrs;
3788
+
3789
+ for (die = 0; die < uncore_max_dies(); die++) {
3790
+ sprintf(buf, "die%ld", die);
3791
+ sysfs_attr_init(&eas[die].attr.attr);
3792
+ eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3793
+ if (!eas[die].attr.attr.name)
3794
+ goto err;
3795
+ eas[die].attr.attr.mode = 0444;
3796
+ eas[die].attr.show = skx_iio_mapping_show;
3797
+ eas[die].attr.store = NULL;
3798
+ eas[die].var = (void *)die;
3799
+ attrs[die] = &eas[die].attr.attr;
3800
+ }
3801
+ ag->attrs = attrs;
3802
+
3803
+ return 0;
3804
+err:
3805
+ for (; die >= 0; die--)
3806
+ kfree(eas[die].attr.attr.name);
3807
+ kfree(eas);
3808
+clear_attrs:
3809
+ kfree(attrs);
3810
+clear_topology:
3811
+ kfree(type->topology);
3812
+clear_attr_update:
3813
+ pmu_clear_mapping_attr(type->attr_update, ag);
3814
+ return ret;
3815
+}
3816
+
3817
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
3818
+{
3819
+ return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
3820
+}
3821
+
3822
+static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3823
+{
3824
+ struct attribute **attr = skx_iio_mapping_group.attrs;
3825
+
3826
+ if (!attr)
3827
+ return;
3828
+
3829
+ for (; *attr; attr++)
3830
+ kfree((*attr)->name);
3831
+ kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3832
+ kfree(skx_iio_mapping_group.attrs);
3833
+ skx_iio_mapping_group.attrs = NULL;
3834
+ kfree(type->topology);
3835
+}
3836
+
35103837 static struct intel_uncore_type skx_uncore_iio = {
35113838 .name = "iio",
35123839 .num_counters = 4,
....@@ -3521,6 +3848,10 @@
35213848 .constraints = skx_uncore_iio_constraints,
35223849 .ops = &skx_uncore_iio_ops,
35233850 .format_group = &skx_uncore_iio_format_group,
3851
+ .attr_update = skx_iio_attr_update,
3852
+ .get_topology = skx_iio_get_topology,
3853
+ .set_mapping = skx_iio_set_mapping,
3854
+ .cleanup_mapping = skx_iio_cleanup_mapping,
35243855 };
35253856
35263857 enum perf_uncore_iio_freerunning_type_id {
....@@ -3963,3 +4294,1063 @@
39634294 }
39644295
39654296 /* end of SKX uncore support */
4297
+
4298
+/* SNR uncore support */
4299
+
4300
+static struct intel_uncore_type snr_uncore_ubox = {
4301
+ .name = "ubox",
4302
+ .num_counters = 2,
4303
+ .num_boxes = 1,
4304
+ .perf_ctr_bits = 48,
4305
+ .fixed_ctr_bits = 48,
4306
+ .perf_ctr = SNR_U_MSR_PMON_CTR0,
4307
+ .event_ctl = SNR_U_MSR_PMON_CTL0,
4308
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4309
+ .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4310
+ .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4311
+ .ops = &ivbep_uncore_msr_ops,
4312
+ .format_group = &ivbep_uncore_format_group,
4313
+};
4314
+
4315
+static struct attribute *snr_uncore_cha_formats_attr[] = {
4316
+ &format_attr_event.attr,
4317
+ &format_attr_umask_ext2.attr,
4318
+ &format_attr_edge.attr,
4319
+ &format_attr_tid_en.attr,
4320
+ &format_attr_inv.attr,
4321
+ &format_attr_thresh8.attr,
4322
+ &format_attr_filter_tid5.attr,
4323
+ NULL,
4324
+};
4325
+static const struct attribute_group snr_uncore_chabox_format_group = {
4326
+ .name = "format",
4327
+ .attrs = snr_uncore_cha_formats_attr,
4328
+};
4329
+
4330
+static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4331
+{
4332
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4333
+
4334
+ reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4335
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
4336
+ reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4337
+ reg1->idx = 0;
4338
+
4339
+ return 0;
4340
+}
4341
+
4342
+static void snr_cha_enable_event(struct intel_uncore_box *box,
4343
+ struct perf_event *event)
4344
+{
4345
+ struct hw_perf_event *hwc = &event->hw;
4346
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4347
+
4348
+ if (reg1->idx != EXTRA_REG_NONE)
4349
+ wrmsrl(reg1->reg, reg1->config);
4350
+
4351
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4352
+}
4353
+
4354
+static struct intel_uncore_ops snr_uncore_chabox_ops = {
4355
+ .init_box = ivbep_uncore_msr_init_box,
4356
+ .disable_box = snbep_uncore_msr_disable_box,
4357
+ .enable_box = snbep_uncore_msr_enable_box,
4358
+ .disable_event = snbep_uncore_msr_disable_event,
4359
+ .enable_event = snr_cha_enable_event,
4360
+ .read_counter = uncore_msr_read_counter,
4361
+ .hw_config = snr_cha_hw_config,
4362
+};
4363
+
4364
+static struct intel_uncore_type snr_uncore_chabox = {
4365
+ .name = "cha",
4366
+ .num_counters = 4,
4367
+ .num_boxes = 6,
4368
+ .perf_ctr_bits = 48,
4369
+ .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4370
+ .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4371
+ .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4372
+ .msr_offset = HSWEP_CBO_MSR_OFFSET,
4373
+ .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4374
+ .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4375
+ .ops = &snr_uncore_chabox_ops,
4376
+ .format_group = &snr_uncore_chabox_format_group,
4377
+};
4378
+
4379
+static struct attribute *snr_uncore_iio_formats_attr[] = {
4380
+ &format_attr_event.attr,
4381
+ &format_attr_umask.attr,
4382
+ &format_attr_edge.attr,
4383
+ &format_attr_inv.attr,
4384
+ &format_attr_thresh9.attr,
4385
+ &format_attr_ch_mask2.attr,
4386
+ &format_attr_fc_mask2.attr,
4387
+ NULL,
4388
+};
4389
+
4390
+static const struct attribute_group snr_uncore_iio_format_group = {
4391
+ .name = "format",
4392
+ .attrs = snr_uncore_iio_formats_attr,
4393
+};
4394
+
4395
+static struct intel_uncore_type snr_uncore_iio = {
4396
+ .name = "iio",
4397
+ .num_counters = 4,
4398
+ .num_boxes = 5,
4399
+ .perf_ctr_bits = 48,
4400
+ .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4401
+ .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4402
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4403
+ .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4404
+ .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4405
+ .msr_offset = SNR_IIO_MSR_OFFSET,
4406
+ .ops = &ivbep_uncore_msr_ops,
4407
+ .format_group = &snr_uncore_iio_format_group,
4408
+};
4409
+
4410
+static struct intel_uncore_type snr_uncore_irp = {
4411
+ .name = "irp",
4412
+ .num_counters = 2,
4413
+ .num_boxes = 5,
4414
+ .perf_ctr_bits = 48,
4415
+ .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4416
+ .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4417
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4418
+ .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4419
+ .msr_offset = SNR_IRP_MSR_OFFSET,
4420
+ .ops = &ivbep_uncore_msr_ops,
4421
+ .format_group = &ivbep_uncore_format_group,
4422
+};
4423
+
4424
+static struct intel_uncore_type snr_uncore_m2pcie = {
4425
+ .name = "m2pcie",
4426
+ .num_counters = 4,
4427
+ .num_boxes = 5,
4428
+ .perf_ctr_bits = 48,
4429
+ .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4430
+ .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4431
+ .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4432
+ .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4433
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4434
+ .ops = &ivbep_uncore_msr_ops,
4435
+ .format_group = &ivbep_uncore_format_group,
4436
+};
4437
+
4438
+static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4439
+{
4440
+ struct hw_perf_event *hwc = &event->hw;
4441
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4442
+ int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4443
+
4444
+ if (ev_sel >= 0xb && ev_sel <= 0xe) {
4445
+ reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4446
+ reg1->idx = ev_sel - 0xb;
4447
+ reg1->config = event->attr.config1 & (0xff << reg1->idx);
4448
+ }
4449
+ return 0;
4450
+}
4451
+
4452
+static struct intel_uncore_ops snr_uncore_pcu_ops = {
4453
+ IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4454
+ .hw_config = snr_pcu_hw_config,
4455
+ .get_constraint = snbep_pcu_get_constraint,
4456
+ .put_constraint = snbep_pcu_put_constraint,
4457
+};
4458
+
4459
+static struct intel_uncore_type snr_uncore_pcu = {
4460
+ .name = "pcu",
4461
+ .num_counters = 4,
4462
+ .num_boxes = 1,
4463
+ .perf_ctr_bits = 48,
4464
+ .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4465
+ .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4466
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4467
+ .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4468
+ .num_shared_regs = 1,
4469
+ .ops = &snr_uncore_pcu_ops,
4470
+ .format_group = &skx_uncore_pcu_format_group,
4471
+};
4472
+
4473
+enum perf_uncore_snr_iio_freerunning_type_id {
4474
+ SNR_IIO_MSR_IOCLK,
4475
+ SNR_IIO_MSR_BW_IN,
4476
+
4477
+ SNR_IIO_FREERUNNING_TYPE_MAX,
4478
+};
4479
+
4480
+static struct freerunning_counters snr_iio_freerunning[] = {
4481
+ [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4482
+ [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4483
+};
4484
+
4485
+static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4486
+ /* Free-Running IIO CLOCKS Counter */
4487
+ INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4488
+ /* Free-Running IIO BANDWIDTH IN Counters */
4489
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4490
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4491
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4492
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4493
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4494
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4495
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4496
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4497
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4498
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4499
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4500
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4501
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4502
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4503
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4504
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4505
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4506
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4507
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4508
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4509
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4510
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4511
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4512
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4513
+ { /* end: all zeroes */ },
4514
+};
4515
+
4516
+static struct intel_uncore_type snr_uncore_iio_free_running = {
4517
+ .name = "iio_free_running",
4518
+ .num_counters = 9,
4519
+ .num_boxes = 5,
4520
+ .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4521
+ .freerunning = snr_iio_freerunning,
4522
+ .ops = &skx_uncore_iio_freerunning_ops,
4523
+ .event_descs = snr_uncore_iio_freerunning_events,
4524
+ .format_group = &skx_uncore_iio_freerunning_format_group,
4525
+};
4526
+
4527
+static struct intel_uncore_type *snr_msr_uncores[] = {
4528
+ &snr_uncore_ubox,
4529
+ &snr_uncore_chabox,
4530
+ &snr_uncore_iio,
4531
+ &snr_uncore_irp,
4532
+ &snr_uncore_m2pcie,
4533
+ &snr_uncore_pcu,
4534
+ &snr_uncore_iio_free_running,
4535
+ NULL,
4536
+};
4537
+
4538
+void snr_uncore_cpu_init(void)
4539
+{
4540
+ uncore_msr_uncores = snr_msr_uncores;
4541
+}
4542
+
4543
+static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4544
+{
4545
+ struct pci_dev *pdev = box->pci_dev;
4546
+ int box_ctl = uncore_pci_box_ctl(box);
4547
+
4548
+ __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4549
+ pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4550
+}
4551
+
4552
+static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4553
+ .init_box = snr_m2m_uncore_pci_init_box,
4554
+ .disable_box = snbep_uncore_pci_disable_box,
4555
+ .enable_box = snbep_uncore_pci_enable_box,
4556
+ .disable_event = snbep_uncore_pci_disable_event,
4557
+ .enable_event = snbep_uncore_pci_enable_event,
4558
+ .read_counter = snbep_uncore_pci_read_counter,
4559
+};
4560
+
4561
+static struct attribute *snr_m2m_uncore_formats_attr[] = {
4562
+ &format_attr_event.attr,
4563
+ &format_attr_umask_ext3.attr,
4564
+ &format_attr_edge.attr,
4565
+ &format_attr_inv.attr,
4566
+ &format_attr_thresh8.attr,
4567
+ NULL,
4568
+};
4569
+
4570
+static const struct attribute_group snr_m2m_uncore_format_group = {
4571
+ .name = "format",
4572
+ .attrs = snr_m2m_uncore_formats_attr,
4573
+};
4574
+
4575
+static struct intel_uncore_type snr_uncore_m2m = {
4576
+ .name = "m2m",
4577
+ .num_counters = 4,
4578
+ .num_boxes = 1,
4579
+ .perf_ctr_bits = 48,
4580
+ .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4581
+ .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4582
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4583
+ .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4584
+ .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4585
+ .ops = &snr_m2m_uncore_pci_ops,
4586
+ .format_group = &snr_m2m_uncore_format_group,
4587
+};
4588
+
4589
+static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4590
+{
4591
+ struct pci_dev *pdev = box->pci_dev;
4592
+ struct hw_perf_event *hwc = &event->hw;
4593
+
4594
+ pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4595
+ pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4596
+}
4597
+
4598
+static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4599
+ .init_box = snr_m2m_uncore_pci_init_box,
4600
+ .disable_box = snbep_uncore_pci_disable_box,
4601
+ .enable_box = snbep_uncore_pci_enable_box,
4602
+ .disable_event = snbep_uncore_pci_disable_event,
4603
+ .enable_event = snr_uncore_pci_enable_event,
4604
+ .read_counter = snbep_uncore_pci_read_counter,
4605
+};
4606
+
4607
+static struct intel_uncore_type snr_uncore_pcie3 = {
4608
+ .name = "pcie3",
4609
+ .num_counters = 4,
4610
+ .num_boxes = 1,
4611
+ .perf_ctr_bits = 48,
4612
+ .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
4613
+ .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
4614
+ .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4615
+ .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4616
+ .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
4617
+ .ops = &snr_pcie3_uncore_pci_ops,
4618
+ .format_group = &skx_uncore_iio_format_group,
4619
+};
4620
+
4621
+enum {
4622
+ SNR_PCI_UNCORE_M2M,
4623
+ SNR_PCI_UNCORE_PCIE3,
4624
+};
4625
+
4626
+static struct intel_uncore_type *snr_pci_uncores[] = {
4627
+ [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4628
+ [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
4629
+ NULL,
4630
+};
4631
+
4632
+static const struct pci_device_id snr_uncore_pci_ids[] = {
4633
+ { /* M2M */
4634
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4635
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4636
+ },
4637
+ { /* end: all zeroes */ }
4638
+};
4639
+
4640
+static struct pci_driver snr_uncore_pci_driver = {
4641
+ .name = "snr_uncore",
4642
+ .id_table = snr_uncore_pci_ids,
4643
+};
4644
+
4645
+static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4646
+ { /* PCIe3 RP */
4647
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4648
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4649
+ },
4650
+ { /* end: all zeroes */ }
4651
+};
4652
+
4653
+static struct pci_driver snr_uncore_pci_sub_driver = {
4654
+ .name = "snr_uncore_sub",
4655
+ .id_table = snr_uncore_pci_sub_ids,
4656
+};
4657
+
4658
+int snr_uncore_pci_init(void)
4659
+{
4660
+ /* SNR UBOX DID */
4661
+ int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4662
+ SKX_GIDNIDMAP, true);
4663
+
4664
+ if (ret)
4665
+ return ret;
4666
+
4667
+ uncore_pci_uncores = snr_pci_uncores;
4668
+ uncore_pci_driver = &snr_uncore_pci_driver;
4669
+ uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4670
+ return 0;
4671
+}
4672
+
4673
+static struct pci_dev *snr_uncore_get_mc_dev(int id)
4674
+{
4675
+ struct pci_dev *mc_dev = NULL;
4676
+ int phys_id, pkg;
4677
+
4678
+ while (1) {
4679
+ mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4680
+ if (!mc_dev)
4681
+ break;
4682
+ phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4683
+ if (phys_id < 0)
4684
+ continue;
4685
+ pkg = topology_phys_to_logical_pkg(phys_id);
4686
+ if (pkg < 0)
4687
+ continue;
4688
+ else if (pkg == id)
4689
+ break;
4690
+ }
4691
+ return mc_dev;
4692
+}
4693
+
4694
+static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4695
+ unsigned int box_ctl, int mem_offset)
4696
+{
4697
+ struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4698
+ struct intel_uncore_type *type = box->pmu->type;
4699
+ resource_size_t addr;
4700
+ u32 pci_dword;
4701
+
4702
+ if (!pdev)
4703
+ return;
4704
+
4705
+ pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4706
+ addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4707
+
4708
+ pci_read_config_dword(pdev, mem_offset, &pci_dword);
4709
+ addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4710
+
4711
+ addr += box_ctl;
4712
+
4713
+ pci_dev_put(pdev);
4714
+
4715
+ box->io_addr = ioremap(addr, type->mmio_map_size);
4716
+ if (!box->io_addr) {
4717
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4718
+ return;
4719
+ }
4720
+
4721
+ writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4722
+}
4723
+
4724
+static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4725
+{
4726
+ __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4727
+ SNR_IMC_MMIO_MEM0_OFFSET);
4728
+}
4729
+
4730
+static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4731
+{
4732
+ u32 config;
4733
+
4734
+ if (!box->io_addr)
4735
+ return;
4736
+
4737
+ config = readl(box->io_addr);
4738
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
4739
+ writel(config, box->io_addr);
4740
+}
4741
+
4742
+static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4743
+{
4744
+ u32 config;
4745
+
4746
+ if (!box->io_addr)
4747
+ return;
4748
+
4749
+ config = readl(box->io_addr);
4750
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4751
+ writel(config, box->io_addr);
4752
+}
4753
+
4754
+static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4755
+ struct perf_event *event)
4756
+{
4757
+ struct hw_perf_event *hwc = &event->hw;
4758
+
4759
+ if (!box->io_addr)
4760
+ return;
4761
+
4762
+ if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4763
+ return;
4764
+
4765
+ writel(hwc->config | SNBEP_PMON_CTL_EN,
4766
+ box->io_addr + hwc->config_base);
4767
+}
4768
+
4769
+static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4770
+ struct perf_event *event)
4771
+{
4772
+ struct hw_perf_event *hwc = &event->hw;
4773
+
4774
+ if (!box->io_addr)
4775
+ return;
4776
+
4777
+ if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4778
+ return;
4779
+
4780
+ writel(hwc->config, box->io_addr + hwc->config_base);
4781
+}
4782
+
4783
/* Callbacks for SNR PMON boxes that are programmed through MMIO (the iMC). */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/*
 * SNR iMC events exported to sysfs.
 * scale 6.103515625e-5 == 64 / 2^20, i.e. each count is presumably one
 * 64-byte CAS transfer, reported in MiB -- confirm against the uncore
 * performance monitoring reference manual.
 */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,		"event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,		"event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write,	"event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* SNR integrated memory controller: 2 boxes, 4 GP + 1 fixed 48-bit counters. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4822
+
4823
/* Free-running counter groups exposed by the SNR iMC. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

/*
 * Per-group layout: { counter base, counter offset, box offset,
 * number of counters, counter width } -- field order per
 * struct freerunning_counters; confirm against its definition.
 */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

/*
 * Pseudo event encodings for the free-running counters.
 * scale 6.103515625e-5 == 64 / 2^20: raw counts converted to MiB.
 */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* Free-running counters cannot be started/stopped: no {en,dis}able hooks. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4865
+
4866
/* All SNR MMIO-based uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

/* Hand the SNR MMIO uncore types to the generic uncore core. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4876
+
4877
+/* end of SNR uncore support */
4878
+
4879
+/* ICX uncore support */
4880
+
4881
/*
 * Per-CHA MSR offset, indexed by pmu_idx.  Note the offsets are not
 * monotonic: the last six entries (0x0 .. 0x46) wrap to a lower MSR
 * range, so only this table -- not arithmetic -- maps box to MSRs.
 */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0,  0x2ae,  0x2bc,  0x2ca,  0x2d8,  0x2e6,  0x2f4,  0x302,  0x310,
	0x31e,  0x32c,  0x33a,  0x348,  0x356,  0x364,  0x372,  0x380,  0x38e,
	0x3aa,  0x3b8,  0x3c6,  0x3d4,  0x3e2,  0x3f0,  0x3fe,  0x40c,  0x41a,
	0x428,  0x436,  0x444,  0x452,  0x460,  0x46e,  0x47c,  0x0,    0xe,
	0x1c,   0x2a,   0x38,   0x46,
};
4888
+
4889
+static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4890
+{
4891
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4892
+ bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4893
+
4894
+ if (tie_en) {
4895
+ reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4896
+ icx_cha_msr_offsets[box->pmu->pmu_idx];
4897
+ reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4898
+ reg1->idx = 0;
4899
+ }
4900
+
4901
+ return 0;
4902
+}
4903
+
4904
/* ICX CHA boxes: mostly generic MSR handling, CHA-specific filter setup. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box	= ivbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= snr_cha_enable_event,
	.read_counter	= uncore_msr_read_counter,
	.hw_config	= icx_cha_hw_config,
};

/*
 * ICX CHA PMON.  .num_boxes is deliberately left unset here; it is
 * filled in at runtime by icx_uncore_cpu_init() from the CAPID bits.
 */
static struct intel_uncore_type icx_uncore_chabox = {
	.name		= "cha",
	.num_counters	= 4,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr	= ICX_C34_MSR_PMON_CTR0,
	.box_ctl	= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_cha_msr_offsets,
	.event_mask	= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints	= skx_uncore_chabox_constraints,
	.ops		= &icx_uncore_chabox_ops,
	.format_group	= &snr_uncore_chabox_format_group,
};
4928
+
4929
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe types below. */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Events restricted to a subset of the four IIO counters (mask = ctr bitmap). */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};

/* ICX IIO (PCIe/integrated I/O) PMON: 6 boxes, 4 counters each. */
static struct intel_uncore_type icx_uncore_iio = {
	.name		= "iio",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr	= ICX_IIO_MSR_PMON_CTR0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_iio_constraints,
	.ops		= &skx_uncore_iio_ops,
	.format_group	= &snr_uncore_iio_format_group,
};
4959
+
4960
/* ICX IRP (IIO ring port) PMON: one box per IIO stack, 2 counters each. */
static struct intel_uncore_type icx_uncore_irp = {
	.name		= "irp",
	.num_counters	= 2,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr	= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4973
+
4974
/* M2PCIe events limited to counters 0-1 (constraint mask 0x3). */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* ICX M2PCIe (mesh-to-PCIe) PMON, one box per IIO stack. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4995
+
4996
/* Free-running counter groups exposed by the ICX IIO stacks. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

/* Per-box MSR offsets for the IOCLK free-running counters. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Per-box MSR offsets for the BW_IN free-running counters. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

/*
 * { counter base, counter offset, box offset, number of counters, width,
 *   per-box offset table } -- field order per struct freerunning_counters.
 */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

/*
 * Pseudo events for the free-running counters.
 * scale 3.814697266e-6 == 4 / 2^20, i.e. each count presumably covers
 * 4 bytes, reported in MiB -- confirm against the uncore reference manual.
 */
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* 9 counters per box: 1 IOCLK + 8 BW_IN. */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5057
+
5058
/*
 * All ICX MSR-based uncore PMU types, NULL-terminated.  Ubox and PCU
 * reuse the SKX definitions unchanged.
 */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5068
+
5069
/*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
 * registers, which are located at Device 30, Function 3.
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

/*
 * Count the CHA boxes present on this part: each set bit in the 64-bit
 * CAPID6/CAPID7 pair corresponds to one enabled CHA.  Returns 0 when the
 * device (DID 0x345b) is not found.
 */
static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	/*
	 * Fill the low dword from CAPID6 and the high dword from CAPID7
	 * through u32 views of caps -- fine on little-endian x86.  Config
	 * read failures are ignored; the affected dword just stays 0.
	 */
	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	/* pci_dev_put(NULL) is a no-op, so this is safe on both paths. */
	pci_dev_put(dev);
	return hweight64(caps);
}
5091
+
5092
/*
 * Register the ICX MSR-based uncore types.  The CHA count is discovered
 * at runtime from the CAPID bits; bail out (leaving uncore_msr_uncores
 * unset) if it exceeds the per-CHA offset table, since indexing past
 * icx_cha_msr_offsets[] would be out of bounds.
 */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}
5101
+
5102
/* ICX M2M (mesh-to-memory) PMON, PCI-based; register layout shared with SNR. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
5115
+
5116
/* sysfs format attributes for ICX UPI; uses the wider umask_ext4 field. */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

/* ICX UPI link-layer PMON, PCI-based, one box per UPI link (3 links). */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};
5143
+
5144
/*
 * M3UPI counter constraints: events 0x1c-0x1f are tied to counter 0,
 * the 0x40/0x4e-0x50 group to counters 0-2.
 */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

/* ICX M3UPI (mesh-to-UPI) PMON, PCI-based, one box per UPI link. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5169
+
5170
/* Indices into icx_pci_uncores[], matched by the .driver_data entries below. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};
5182
+
5183
/*
 * PCI IDs of the ICX uncore PMON devices.  driver_data encodes
 * (device, function, uncore type index, box index); the same DID is
 * listed once per devfn since each M2M/UPI/M3UPI instance sits at a
 * distinct device number.
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};
5231
+
5232
/*
 * Register the ICX PCI-based uncore types.  First build the PCI-bus to
 * physical-node mapping using the ICX Ubox device ID (0x3450) and the
 * SKX-compatible CPUNODEID/GIDNIDMAP registers.  Returns 0 on success
 * or the negative error from snbep_pci2phy_map_init().
 */
int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}
5245
+
5246
/*
 * Map one ICX iMC box.  pmu_idx is decomposed into (memory controller,
 * channel): the channel (idx % ICX_NUMBER_IMC_CHN) selects the register
 * block within a controller, while the controller (idx / ICX_NUMBER_IMC_CHN)
 * selects which MEM region of the BAR to map.
 */
static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
}
5255
+
5256
/*
 * ICX iMC MMIO callbacks: same as SNR except for the box mapping, which
 * must account for multiple memory controllers per socket.
 */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* ICX integrated memory controller: 12 channel boxes, SNR register layout. */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5284
+
5285
/* Free-running counter groups of the ICX iMC (adds DDRT over SNR). */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

/*
 * { counter base, counter offset, box offset, number of counters, width }
 * -- field order per struct freerunning_counters.
 */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

/*
 * Pseudo events for the free-running counters.
 * scale 6.103515625e-5 == 64 / 2^20: raw counts converted to MiB.
 */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5317
+
5318
/*
 * Map one ICX iMC free-running box.  Unlike icx_uncore_imc_init_box(),
 * pmu_idx here indexes memory controllers directly (one free-running
 * box per controller), so it scales ICX_IMC_MEM_STRIDE without the
 * channel decomposition.
 */
static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
}

/* Free-running counters cannot be started/stopped: no {en,dis}able hooks. */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* 5 counters per box: DCLK + DDR read/write + DDRT read/write. */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5344
+
5345
/* All ICX MMIO-based uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

/* Hand the ICX MMIO uncore types to the generic uncore core. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5355
+
5356
+/* end of ICX uncore support */