forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/events/intel/uncore_snbep.c
....@@ -273,6 +273,30 @@
273273 #define SKX_CPUNODEID 0xc0
274274 #define SKX_GIDNIDMAP 0xd4
275275
276
+/*
277
+ * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
278
+ * that BIOS programmed. MSR has package scope.
279
+ * | Bit | Default | Description
280
+ * | [63] | 00h | VALID - When set, indicates the CPU bus
281
+ * numbers have been initialized. (RO)
282
+ * |[62:48]| --- | Reserved
283
+ * |[47:40]| 00h | BUS_NUM_5 — Return the bus number BIOS assigned
284
+ * CPUBUSNO(5). (RO)
285
+ * |[39:32]| 00h | BUS_NUM_4 — Return the bus number BIOS assigned
286
+ * CPUBUSNO(4). (RO)
287
+ * |[31:24]| 00h | BUS_NUM_3 — Return the bus number BIOS assigned
288
+ * CPUBUSNO(3). (RO)
289
+ * |[23:16]| 00h | BUS_NUM_2 — Return the bus number BIOS assigned
290
+ * CPUBUSNO(2). (RO)
291
+ * |[15:8] | 00h | BUS_NUM_1 — Return the bus number BIOS assigned
292
+ * CPUBUSNO(1). (RO)
293
+ * | [7:0] | 00h | BUS_NUM_0 — Return the bus number BIOS assigned
294
+ * CPUBUSNO(0). (RO)
295
+ */
296
+#define SKX_MSR_CPU_BUS_NUMBER 0x300
297
+#define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
298
+#define BUS_NUM_STRIDE 8
299
+
276300 /* SKX CHA */
277301 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
278302 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
....@@ -324,12 +348,114 @@
324348 #define SKX_M2M_PCI_PMON_CTR0 0x200
325349 #define SKX_M2M_PCI_PMON_BOX_CTL 0x258
326350
351
+/* SNR Ubox */
352
+#define SNR_U_MSR_PMON_CTR0 0x1f98
353
+#define SNR_U_MSR_PMON_CTL0 0x1f91
354
+#define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
355
+#define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
356
+
357
+/* SNR CHA */
358
+#define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
359
+#define SNR_CHA_MSR_PMON_CTL0 0x1c01
360
+#define SNR_CHA_MSR_PMON_CTR0 0x1c08
361
+#define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
362
+#define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
363
+
364
+
365
+/* SNR IIO */
366
+#define SNR_IIO_MSR_PMON_CTL0 0x1e08
367
+#define SNR_IIO_MSR_PMON_CTR0 0x1e01
368
+#define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
369
+#define SNR_IIO_MSR_OFFSET 0x10
370
+#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
371
+
372
+/* SNR IRP */
373
+#define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
374
+#define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
375
+#define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
376
+#define SNR_IRP_MSR_OFFSET 0x10
377
+
378
+/* SNR M2PCIE */
379
+#define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
380
+#define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
381
+#define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
382
+#define SNR_M2PCIE_MSR_OFFSET 0x10
383
+
384
+/* SNR PCU */
385
+#define SNR_PCU_MSR_PMON_CTL0 0x1ef1
386
+#define SNR_PCU_MSR_PMON_CTR0 0x1ef8
387
+#define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
388
+#define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
389
+
390
+/* SNR M2M */
391
+#define SNR_M2M_PCI_PMON_CTL0 0x468
392
+#define SNR_M2M_PCI_PMON_CTR0 0x440
393
+#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
394
+#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
395
+
396
+/* SNR PCIE3 */
397
+#define SNR_PCIE3_PCI_PMON_CTL0 0x508
398
+#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
399
+#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0
400
+
401
+/* SNR IMC */
402
+#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
403
+#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
404
+#define SNR_IMC_MMIO_PMON_CTL0 0x40
405
+#define SNR_IMC_MMIO_PMON_CTR0 0x8
406
+#define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
407
+#define SNR_IMC_MMIO_OFFSET 0x4000
408
+#define SNR_IMC_MMIO_SIZE 0x4000
409
+#define SNR_IMC_MMIO_BASE_OFFSET 0xd0
410
+#define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
411
+#define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
412
+#define SNR_IMC_MMIO_MEM0_MASK 0x7FF
413
+
414
+/* ICX CHA */
415
+#define ICX_C34_MSR_PMON_CTR0 0xb68
416
+#define ICX_C34_MSR_PMON_CTL0 0xb61
417
+#define ICX_C34_MSR_PMON_BOX_CTL 0xb60
418
+#define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65
419
+
420
+/* ICX IIO */
421
+#define ICX_IIO_MSR_PMON_CTL0 0xa58
422
+#define ICX_IIO_MSR_PMON_CTR0 0xa51
423
+#define ICX_IIO_MSR_PMON_BOX_CTL 0xa50
424
+
425
+/* ICX IRP */
426
+#define ICX_IRP0_MSR_PMON_CTL0 0xa4d
427
+#define ICX_IRP0_MSR_PMON_CTR0 0xa4b
428
+#define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a
429
+
430
+/* ICX M2PCIE */
431
+#define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
432
+#define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
433
+#define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40
434
+
435
+/* ICX UPI */
436
+#define ICX_UPI_PCI_PMON_CTL0 0x350
437
+#define ICX_UPI_PCI_PMON_CTR0 0x320
438
+#define ICX_UPI_PCI_PMON_BOX_CTL 0x318
439
+#define ICX_UPI_CTL_UMASK_EXT 0xffffff
440
+
441
+/* ICX M3UPI*/
442
+#define ICX_M3UPI_PCI_PMON_CTL0 0xd8
443
+#define ICX_M3UPI_PCI_PMON_CTR0 0xa8
444
+#define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0
445
+
446
+/* ICX IMC */
447
+#define ICX_NUMBER_IMC_CHN 3
448
+#define ICX_IMC_MEM_STRIDE 0x4
449
+
327450 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
328451 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
329452 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
330453 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
331454 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
332455 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
456
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
457
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
458
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
333459 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
334460 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
335461 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
....@@ -343,11 +469,14 @@
343469 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
344470 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
345471 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
472
+DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
346473 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
474
+DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
347475 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
348476 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
349477 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
350478 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
479
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
351480 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
352481 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
353482 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
....@@ -1057,8 +1186,8 @@
10571186
10581187 if (reg1->idx != EXTRA_REG_NONE) {
10591188 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1060
- int pkg = box->pkgid;
1061
- struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
1189
+ int die = box->dieid;
1190
+ struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
10621191
10631192 if (filter_pdev) {
10641193 pci_write_config_dword(filter_pdev, reg1->reg,
....@@ -3507,6 +3636,175 @@
35073636 .read_counter = uncore_msr_read_counter,
35083637 };
35093638
3639
+static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3640
+{
3641
+ return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
3642
+}
3643
+
3644
+static umode_t
3645
+skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3646
+{
3647
+ struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3648
+
3649
+ /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
3650
+ return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
3651
+}
3652
+
3653
+static ssize_t skx_iio_mapping_show(struct device *dev,
3654
+ struct device_attribute *attr, char *buf)
3655
+{
3656
+ struct pci_bus *bus = pci_find_next_bus(NULL);
3657
+ struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
3658
+ struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3659
+ long die = (long)ea->var;
3660
+
3661
+ /*
3662
+ * Current implementation is for single segment configuration hence it's
3663
+ * safe to take the segment value from the first available root bus.
3664
+ */
3665
+ return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
3666
+ skx_iio_stack(uncore_pmu, die));
3667
+}
3668
+
3669
+static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3670
+{
3671
+ u64 msr_value;
3672
+
3673
+ if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3674
+ !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3675
+ return -ENXIO;
3676
+
3677
+ *topology = msr_value;
3678
+
3679
+ return 0;
3680
+}
3681
+
3682
+static int die_to_cpu(int die)
3683
+{
3684
+ int res = 0, cpu, current_die;
3685
+ /*
3686
+ * Using cpus_read_lock() to ensure cpu is not going down between
3687
+ * looking at cpu_online_mask.
3688
+ */
3689
+ cpus_read_lock();
3690
+ for_each_online_cpu(cpu) {
3691
+ current_die = topology_logical_die_id(cpu);
3692
+ if (current_die == die) {
3693
+ res = cpu;
3694
+ break;
3695
+ }
3696
+ }
3697
+ cpus_read_unlock();
3698
+ return res;
3699
+}
3700
+
3701
+static int skx_iio_get_topology(struct intel_uncore_type *type)
3702
+{
3703
+ int i, ret;
3704
+ struct pci_bus *bus = NULL;
3705
+
3706
+ /*
3707
+ * Verified single-segment environments only; disabled for multiple
3708
+ * segment topologies for now except VMD domains.
3709
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
3710
+ */
3711
+ while ((bus = pci_find_next_bus(bus))
3712
+ && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
3713
+ ;
3714
+ if (bus)
3715
+ return -EPERM;
3716
+
3717
+ type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
3718
+ if (!type->topology)
3719
+ return -ENOMEM;
3720
+
3721
+ for (i = 0; i < uncore_max_dies(); i++) {
3722
+ ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
3723
+ if (ret) {
3724
+ kfree(type->topology);
3725
+ type->topology = NULL;
3726
+ return ret;
3727
+ }
3728
+ }
3729
+
3730
+ return 0;
3731
+}
3732
+
3733
+static struct attribute_group skx_iio_mapping_group = {
3734
+ .is_visible = skx_iio_mapping_visible,
3735
+};
3736
+
3737
+static const struct attribute_group *skx_iio_attr_update[] = {
3738
+ &skx_iio_mapping_group,
3739
+ NULL,
3740
+};
3741
+
3742
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
3743
+{
3744
+ char buf[64];
3745
+ int ret;
3746
+ long die = -1;
3747
+ struct attribute **attrs = NULL;
3748
+ struct dev_ext_attribute *eas = NULL;
3749
+
3750
+ ret = skx_iio_get_topology(type);
3751
+ if (ret)
3752
+ goto clear_attr_update;
3753
+
3754
+ ret = -ENOMEM;
3755
+
3756
+ /* One more for NULL. */
3757
+ attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3758
+ if (!attrs)
3759
+ goto clear_topology;
3760
+
3761
+ eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3762
+ if (!eas)
3763
+ goto clear_attrs;
3764
+
3765
+ for (die = 0; die < uncore_max_dies(); die++) {
3766
+ sprintf(buf, "die%ld", die);
3767
+ sysfs_attr_init(&eas[die].attr.attr);
3768
+ eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3769
+ if (!eas[die].attr.attr.name)
3770
+ goto err;
3771
+ eas[die].attr.attr.mode = 0444;
3772
+ eas[die].attr.show = skx_iio_mapping_show;
3773
+ eas[die].attr.store = NULL;
3774
+ eas[die].var = (void *)die;
3775
+ attrs[die] = &eas[die].attr.attr;
3776
+ }
3777
+ skx_iio_mapping_group.attrs = attrs;
3778
+
3779
+ return 0;
3780
+err:
3781
+ for (; die >= 0; die--)
3782
+ kfree(eas[die].attr.attr.name);
3783
+ kfree(eas);
3784
+clear_attrs:
3785
+ kfree(attrs);
3786
+clear_topology:
3787
+ kfree(type->topology);
3788
+clear_attr_update:
3789
+ type->attr_update = NULL;
3790
+ return ret;
3791
+}
3792
+
3793
+static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3794
+{
3795
+ struct attribute **attr = skx_iio_mapping_group.attrs;
3796
+
3797
+ if (!attr)
3798
+ return;
3799
+
3800
+ for (; *attr; attr++)
3801
+ kfree((*attr)->name);
3802
+ kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3803
+ kfree(skx_iio_mapping_group.attrs);
3804
+ skx_iio_mapping_group.attrs = NULL;
3805
+ kfree(type->topology);
3806
+}
3807
+
35103808 static struct intel_uncore_type skx_uncore_iio = {
35113809 .name = "iio",
35123810 .num_counters = 4,
....@@ -3521,6 +3819,9 @@
35213819 .constraints = skx_uncore_iio_constraints,
35223820 .ops = &skx_uncore_iio_ops,
35233821 .format_group = &skx_uncore_iio_format_group,
3822
+ .attr_update = skx_iio_attr_update,
3823
+ .set_mapping = skx_iio_set_mapping,
3824
+ .cleanup_mapping = skx_iio_cleanup_mapping,
35243825 };
35253826
35263827 enum perf_uncore_iio_freerunning_type_id {
....@@ -3963,3 +4264,1061 @@
39634264 }
39644265
39654266 /* end of SKX uncore support */
4267
+
4268
+/* SNR uncore support */
4269
+
4270
+static struct intel_uncore_type snr_uncore_ubox = {
4271
+ .name = "ubox",
4272
+ .num_counters = 2,
4273
+ .num_boxes = 1,
4274
+ .perf_ctr_bits = 48,
4275
+ .fixed_ctr_bits = 48,
4276
+ .perf_ctr = SNR_U_MSR_PMON_CTR0,
4277
+ .event_ctl = SNR_U_MSR_PMON_CTL0,
4278
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4279
+ .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4280
+ .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4281
+ .ops = &ivbep_uncore_msr_ops,
4282
+ .format_group = &ivbep_uncore_format_group,
4283
+};
4284
+
4285
+static struct attribute *snr_uncore_cha_formats_attr[] = {
4286
+ &format_attr_event.attr,
4287
+ &format_attr_umask_ext2.attr,
4288
+ &format_attr_edge.attr,
4289
+ &format_attr_tid_en.attr,
4290
+ &format_attr_inv.attr,
4291
+ &format_attr_thresh8.attr,
4292
+ &format_attr_filter_tid5.attr,
4293
+ NULL,
4294
+};
4295
+static const struct attribute_group snr_uncore_chabox_format_group = {
4296
+ .name = "format",
4297
+ .attrs = snr_uncore_cha_formats_attr,
4298
+};
4299
+
4300
+static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4301
+{
4302
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4303
+
4304
+ reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4305
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
4306
+ reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4307
+ reg1->idx = 0;
4308
+
4309
+ return 0;
4310
+}
4311
+
4312
+static void snr_cha_enable_event(struct intel_uncore_box *box,
4313
+ struct perf_event *event)
4314
+{
4315
+ struct hw_perf_event *hwc = &event->hw;
4316
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4317
+
4318
+ if (reg1->idx != EXTRA_REG_NONE)
4319
+ wrmsrl(reg1->reg, reg1->config);
4320
+
4321
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4322
+}
4323
+
4324
+static struct intel_uncore_ops snr_uncore_chabox_ops = {
4325
+ .init_box = ivbep_uncore_msr_init_box,
4326
+ .disable_box = snbep_uncore_msr_disable_box,
4327
+ .enable_box = snbep_uncore_msr_enable_box,
4328
+ .disable_event = snbep_uncore_msr_disable_event,
4329
+ .enable_event = snr_cha_enable_event,
4330
+ .read_counter = uncore_msr_read_counter,
4331
+ .hw_config = snr_cha_hw_config,
4332
+};
4333
+
4334
+static struct intel_uncore_type snr_uncore_chabox = {
4335
+ .name = "cha",
4336
+ .num_counters = 4,
4337
+ .num_boxes = 6,
4338
+ .perf_ctr_bits = 48,
4339
+ .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4340
+ .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4341
+ .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4342
+ .msr_offset = HSWEP_CBO_MSR_OFFSET,
4343
+ .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4344
+ .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4345
+ .ops = &snr_uncore_chabox_ops,
4346
+ .format_group = &snr_uncore_chabox_format_group,
4347
+};
4348
+
4349
+static struct attribute *snr_uncore_iio_formats_attr[] = {
4350
+ &format_attr_event.attr,
4351
+ &format_attr_umask.attr,
4352
+ &format_attr_edge.attr,
4353
+ &format_attr_inv.attr,
4354
+ &format_attr_thresh9.attr,
4355
+ &format_attr_ch_mask2.attr,
4356
+ &format_attr_fc_mask2.attr,
4357
+ NULL,
4358
+};
4359
+
4360
+static const struct attribute_group snr_uncore_iio_format_group = {
4361
+ .name = "format",
4362
+ .attrs = snr_uncore_iio_formats_attr,
4363
+};
4364
+
4365
+static struct intel_uncore_type snr_uncore_iio = {
4366
+ .name = "iio",
4367
+ .num_counters = 4,
4368
+ .num_boxes = 5,
4369
+ .perf_ctr_bits = 48,
4370
+ .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4371
+ .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4372
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4373
+ .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4374
+ .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4375
+ .msr_offset = SNR_IIO_MSR_OFFSET,
4376
+ .ops = &ivbep_uncore_msr_ops,
4377
+ .format_group = &snr_uncore_iio_format_group,
4378
+};
4379
+
4380
+static struct intel_uncore_type snr_uncore_irp = {
4381
+ .name = "irp",
4382
+ .num_counters = 2,
4383
+ .num_boxes = 5,
4384
+ .perf_ctr_bits = 48,
4385
+ .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4386
+ .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4387
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4388
+ .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4389
+ .msr_offset = SNR_IRP_MSR_OFFSET,
4390
+ .ops = &ivbep_uncore_msr_ops,
4391
+ .format_group = &ivbep_uncore_format_group,
4392
+};
4393
+
4394
+static struct intel_uncore_type snr_uncore_m2pcie = {
4395
+ .name = "m2pcie",
4396
+ .num_counters = 4,
4397
+ .num_boxes = 5,
4398
+ .perf_ctr_bits = 48,
4399
+ .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4400
+ .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4401
+ .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4402
+ .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4403
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4404
+ .ops = &ivbep_uncore_msr_ops,
4405
+ .format_group = &ivbep_uncore_format_group,
4406
+};
4407
+
4408
+static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4409
+{
4410
+ struct hw_perf_event *hwc = &event->hw;
4411
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4412
+ int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4413
+
4414
+ if (ev_sel >= 0xb && ev_sel <= 0xe) {
4415
+ reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4416
+ reg1->idx = ev_sel - 0xb;
4417
+ reg1->config = event->attr.config1 & (0xff << reg1->idx);
4418
+ }
4419
+ return 0;
4420
+}
4421
+
4422
+static struct intel_uncore_ops snr_uncore_pcu_ops = {
4423
+ IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4424
+ .hw_config = snr_pcu_hw_config,
4425
+ .get_constraint = snbep_pcu_get_constraint,
4426
+ .put_constraint = snbep_pcu_put_constraint,
4427
+};
4428
+
4429
+static struct intel_uncore_type snr_uncore_pcu = {
4430
+ .name = "pcu",
4431
+ .num_counters = 4,
4432
+ .num_boxes = 1,
4433
+ .perf_ctr_bits = 48,
4434
+ .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4435
+ .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4436
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4437
+ .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4438
+ .num_shared_regs = 1,
4439
+ .ops = &snr_uncore_pcu_ops,
4440
+ .format_group = &skx_uncore_pcu_format_group,
4441
+};
4442
+
4443
+enum perf_uncore_snr_iio_freerunning_type_id {
4444
+ SNR_IIO_MSR_IOCLK,
4445
+ SNR_IIO_MSR_BW_IN,
4446
+
4447
+ SNR_IIO_FREERUNNING_TYPE_MAX,
4448
+};
4449
+
4450
+static struct freerunning_counters snr_iio_freerunning[] = {
4451
+ [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4452
+ [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4453
+};
4454
+
4455
+static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4456
+ /* Free-Running IIO CLOCKS Counter */
4457
+ INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4458
+ /* Free-Running IIO BANDWIDTH IN Counters */
4459
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4460
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4461
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4462
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4463
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4464
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4465
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4466
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4467
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4468
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4469
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4470
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4471
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4472
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4473
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4474
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4475
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4476
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4477
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4478
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4479
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4480
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4481
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4482
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4483
+ { /* end: all zeroes */ },
4484
+};
4485
+
4486
+static struct intel_uncore_type snr_uncore_iio_free_running = {
4487
+ .name = "iio_free_running",
4488
+ .num_counters = 9,
4489
+ .num_boxes = 5,
4490
+ .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4491
+ .freerunning = snr_iio_freerunning,
4492
+ .ops = &skx_uncore_iio_freerunning_ops,
4493
+ .event_descs = snr_uncore_iio_freerunning_events,
4494
+ .format_group = &skx_uncore_iio_freerunning_format_group,
4495
+};
4496
+
4497
+static struct intel_uncore_type *snr_msr_uncores[] = {
4498
+ &snr_uncore_ubox,
4499
+ &snr_uncore_chabox,
4500
+ &snr_uncore_iio,
4501
+ &snr_uncore_irp,
4502
+ &snr_uncore_m2pcie,
4503
+ &snr_uncore_pcu,
4504
+ &snr_uncore_iio_free_running,
4505
+ NULL,
4506
+};
4507
+
4508
+void snr_uncore_cpu_init(void)
4509
+{
4510
+ uncore_msr_uncores = snr_msr_uncores;
4511
+}
4512
+
4513
+static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4514
+{
4515
+ struct pci_dev *pdev = box->pci_dev;
4516
+ int box_ctl = uncore_pci_box_ctl(box);
4517
+
4518
+ __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4519
+ pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4520
+}
4521
+
4522
+static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4523
+ .init_box = snr_m2m_uncore_pci_init_box,
4524
+ .disable_box = snbep_uncore_pci_disable_box,
4525
+ .enable_box = snbep_uncore_pci_enable_box,
4526
+ .disable_event = snbep_uncore_pci_disable_event,
4527
+ .enable_event = snbep_uncore_pci_enable_event,
4528
+ .read_counter = snbep_uncore_pci_read_counter,
4529
+};
4530
+
4531
+static struct attribute *snr_m2m_uncore_formats_attr[] = {
4532
+ &format_attr_event.attr,
4533
+ &format_attr_umask_ext3.attr,
4534
+ &format_attr_edge.attr,
4535
+ &format_attr_inv.attr,
4536
+ &format_attr_thresh8.attr,
4537
+ NULL,
4538
+};
4539
+
4540
+static const struct attribute_group snr_m2m_uncore_format_group = {
4541
+ .name = "format",
4542
+ .attrs = snr_m2m_uncore_formats_attr,
4543
+};
4544
+
4545
+static struct intel_uncore_type snr_uncore_m2m = {
4546
+ .name = "m2m",
4547
+ .num_counters = 4,
4548
+ .num_boxes = 1,
4549
+ .perf_ctr_bits = 48,
4550
+ .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4551
+ .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4552
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4553
+ .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4554
+ .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4555
+ .ops = &snr_m2m_uncore_pci_ops,
4556
+ .format_group = &snr_m2m_uncore_format_group,
4557
+};
4558
+
4559
+static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4560
+{
4561
+ struct pci_dev *pdev = box->pci_dev;
4562
+ struct hw_perf_event *hwc = &event->hw;
4563
+
4564
+ pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4565
+ pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4566
+}
4567
+
4568
+static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4569
+ .init_box = snr_m2m_uncore_pci_init_box,
4570
+ .disable_box = snbep_uncore_pci_disable_box,
4571
+ .enable_box = snbep_uncore_pci_enable_box,
4572
+ .disable_event = snbep_uncore_pci_disable_event,
4573
+ .enable_event = snr_uncore_pci_enable_event,
4574
+ .read_counter = snbep_uncore_pci_read_counter,
4575
+};
4576
+
4577
+static struct intel_uncore_type snr_uncore_pcie3 = {
4578
+ .name = "pcie3",
4579
+ .num_counters = 4,
4580
+ .num_boxes = 1,
4581
+ .perf_ctr_bits = 48,
4582
+ .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
4583
+ .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
4584
+ .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4585
+ .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4586
+ .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
4587
+ .ops = &snr_pcie3_uncore_pci_ops,
4588
+ .format_group = &skx_uncore_iio_format_group,
4589
+};
4590
+
4591
+enum {
4592
+ SNR_PCI_UNCORE_M2M,
4593
+ SNR_PCI_UNCORE_PCIE3,
4594
+};
4595
+
4596
+static struct intel_uncore_type *snr_pci_uncores[] = {
4597
+ [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4598
+ [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
4599
+ NULL,
4600
+};
4601
+
4602
+static const struct pci_device_id snr_uncore_pci_ids[] = {
4603
+ { /* M2M */
4604
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4605
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4606
+ },
4607
+ { /* end: all zeroes */ }
4608
+};
4609
+
4610
+static struct pci_driver snr_uncore_pci_driver = {
4611
+ .name = "snr_uncore",
4612
+ .id_table = snr_uncore_pci_ids,
4613
+};
4614
+
4615
+static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4616
+ { /* PCIe3 RP */
4617
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4618
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4619
+ },
4620
+ { /* end: all zeroes */ }
4621
+};
4622
+
4623
+static struct pci_driver snr_uncore_pci_sub_driver = {
4624
+ .name = "snr_uncore_sub",
4625
+ .id_table = snr_uncore_pci_sub_ids,
4626
+};
4627
+
4628
+int snr_uncore_pci_init(void)
4629
+{
4630
+ /* SNR UBOX DID */
4631
+ int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4632
+ SKX_GIDNIDMAP, true);
4633
+
4634
+ if (ret)
4635
+ return ret;
4636
+
4637
+ uncore_pci_uncores = snr_pci_uncores;
4638
+ uncore_pci_driver = &snr_uncore_pci_driver;
4639
+ uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4640
+ return 0;
4641
+}
4642
+
4643
+static struct pci_dev *snr_uncore_get_mc_dev(int id)
4644
+{
4645
+ struct pci_dev *mc_dev = NULL;
4646
+ int phys_id, pkg;
4647
+
4648
+ while (1) {
4649
+ mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4650
+ if (!mc_dev)
4651
+ break;
4652
+ phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4653
+ if (phys_id < 0)
4654
+ continue;
4655
+ pkg = topology_phys_to_logical_pkg(phys_id);
4656
+ if (pkg < 0)
4657
+ continue;
4658
+ else if (pkg == id)
4659
+ break;
4660
+ }
4661
+ return mc_dev;
4662
+}
4663
+
4664
+static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4665
+ unsigned int box_ctl, int mem_offset)
4666
+{
4667
+ struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4668
+ struct intel_uncore_type *type = box->pmu->type;
4669
+ resource_size_t addr;
4670
+ u32 pci_dword;
4671
+
4672
+ if (!pdev)
4673
+ return;
4674
+
4675
+ pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4676
+ addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4677
+
4678
+ pci_read_config_dword(pdev, mem_offset, &pci_dword);
4679
+ addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4680
+
4681
+ addr += box_ctl;
4682
+
4683
+ box->io_addr = ioremap(addr, type->mmio_map_size);
4684
+ if (!box->io_addr) {
4685
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4686
+ return;
4687
+ }
4688
+
4689
+ writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4690
+}
4691
+
4692
+static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4693
+{
4694
+ __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4695
+ SNR_IMC_MMIO_MEM0_OFFSET);
4696
+}
4697
+
4698
+static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4699
+{
4700
+ u32 config;
4701
+
4702
+ if (!box->io_addr)
4703
+ return;
4704
+
4705
+ config = readl(box->io_addr);
4706
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
4707
+ writel(config, box->io_addr);
4708
+}
4709
+
4710
+static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4711
+{
4712
+ u32 config;
4713
+
4714
+ if (!box->io_addr)
4715
+ return;
4716
+
4717
+ config = readl(box->io_addr);
4718
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4719
+ writel(config, box->io_addr);
4720
+}
4721
+
4722
+static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4723
+ struct perf_event *event)
4724
+{
4725
+ struct hw_perf_event *hwc = &event->hw;
4726
+
4727
+ if (!box->io_addr)
4728
+ return;
4729
+
4730
+ if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4731
+ return;
4732
+
4733
+ writel(hwc->config | SNBEP_PMON_CTL_EN,
4734
+ box->io_addr + hwc->config_base);
4735
+}
4736
+
4737
+static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4738
+ struct perf_event *event)
4739
+{
4740
+ struct hw_perf_event *hwc = &event->hw;
4741
+
4742
+ if (!box->io_addr)
4743
+ return;
4744
+
4745
+ if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4746
+ return;
4747
+
4748
+ writel(hwc->config, box->io_addr + hwc->config_base);
4749
+}
4750
+
4751
+static struct intel_uncore_ops snr_uncore_mmio_ops = {
4752
+ .init_box = snr_uncore_mmio_init_box,
4753
+ .exit_box = uncore_mmio_exit_box,
4754
+ .disable_box = snr_uncore_mmio_disable_box,
4755
+ .enable_box = snr_uncore_mmio_enable_box,
4756
+ .disable_event = snr_uncore_mmio_disable_event,
4757
+ .enable_event = snr_uncore_mmio_enable_event,
4758
+ .read_counter = uncore_mmio_read_counter,
4759
+};
4760
+
4761
+static struct uncore_event_desc snr_uncore_imc_events[] = {
4762
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
4763
+ INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
4764
+ INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4765
+ INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4766
+ INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4767
+ INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4768
+ INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4769
+ { /* end: all zeroes */ },
4770
+};
4771
+
4772
+static struct intel_uncore_type snr_uncore_imc = {
4773
+ .name = "imc",
4774
+ .num_counters = 4,
4775
+ .num_boxes = 2,
4776
+ .perf_ctr_bits = 48,
4777
+ .fixed_ctr_bits = 48,
4778
+ .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
4779
+ .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
4780
+ .event_descs = snr_uncore_imc_events,
4781
+ .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
4782
+ .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
4783
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4784
+ .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
4785
+ .mmio_offset = SNR_IMC_MMIO_OFFSET,
4786
+ .mmio_map_size = SNR_IMC_MMIO_SIZE,
4787
+ .ops = &snr_uncore_mmio_ops,
4788
+ .format_group = &skx_uncore_format_group,
4789
+};
4790
+
4791
+enum perf_uncore_snr_imc_freerunning_type_id {
4792
+ SNR_IMC_DCLK,
4793
+ SNR_IMC_DDR,
4794
+
4795
+ SNR_IMC_FREERUNNING_TYPE_MAX,
4796
+};
4797
+
4798
+static struct freerunning_counters snr_imc_freerunning[] = {
4799
+ [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
4800
+ [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
4801
+};
4802
+
4803
+static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4804
+ INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
4805
+
4806
+ INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
4807
+ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
4808
+ INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
4809
+ INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
4810
+ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
4811
+ INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
4812
+ { /* end: all zeroes */ },
4813
+};
4814
+
4815
/*
 * Ops for the SNR IMC free-running counters: read-only MMIO counters,
 * no enable/disable control, validated by the generic freerunning
 * hw_config helper.
 */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4821
+
4822
/* Pseudo uncore unit wrapping the SNR IMC free-running counters. */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4833
+
4834
/* All MMIO-based uncore units on SNR; NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

/* Hook the SNR MMIO uncore list into the generic uncore core. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4844
+
4845
+/* end of SNR uncore support */
4846
+
4847
+/* ICX uncore support */
4848
+
4849
/*
 * Per-CHA MSR offsets, indexed by pmu_idx.  The table is deliberately
 * non-monotonic (entries 34+ wrap back to low offsets starting at 0x0),
 * presumably mirroring the hardware's CHA MSR layout — the same table is
 * used both for the PMON registers and the FILTER0 register in
 * icx_cha_hw_config().
 */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};
4856
+
4857
+static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4858
+{
4859
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4860
+ bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4861
+
4862
+ if (tie_en) {
4863
+ reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4864
+ icx_cha_msr_offsets[box->pmu->pmu_idx];
4865
+ reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4866
+ reg1->idx = 0;
4867
+ }
4868
+
4869
+ return 0;
4870
+}
4871
+
4872
/*
 * ICX CHA ops: IVB-EP style MSR box init/control, SNR-style event enable
 * (which also writes the FILTER0 extra register), and the ICX-specific
 * TID-filter hw_config above.
 */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};
4881
+
4882
/*
 * ICX CHA (caching/home agent) PMON unit.  num_boxes is intentionally
 * left unset here; it is filled in at runtime by icx_uncore_cpu_init()
 * from the CAPID6/CAPID7 capability registers.
 */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4896
+
4897
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe units. */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/*
 * Counter constraints for ICX IIO events: each listed event code is
 * restricted to the counter mask given in the second argument.
 */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
4911
+
4912
/* ICX IIO (integrated I/O) PMON unit: six boxes, four 48-bit counters. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4927
+
4928
/* ICX IRP (IIO ring port) PMON unit: six boxes, two 48-bit counters. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4941
+
4942
/* Counter constraints for ICX M2PCIe events. */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* ICX M2PCIe (mesh-to-PCIe) PMON unit: six boxes, four 48-bit counters. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4963
+
4964
/* Free-running counter groups exposed by the ICX IIO. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

/* Per-box MSR offsets for the IIO clock free-running counters. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Per-box MSR offsets for the IIO inbound-bandwidth free-running counters. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

/*
 * Free-running counter layout; the sixth initializer is the per-box
 * offset table used instead of a constant box stride (see struct
 * freerunning_counters).
 */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
4983
+
4984
/*
 * Free-running ICX IIO events: one IOCLK counter and eight per-port
 * inbound bandwidth counters.  The bw_in scale 3.814697266e-6 MiB equals
 * 4 / 2^20, i.e. presumably one count per 4 bytes — TODO confirm against
 * the uncore performance monitoring reference.
 */
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5014
+
5015
/* Pseudo uncore unit wrapping the ICX IIO free-running counters. */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/*
 * All MSR-based uncore units on ICX; NULL-terminated.  Ubox and PCU are
 * unchanged from SKX.
 */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5036
+
5037
+/*
5038
+ * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
5039
+ * registers which located at Device 30, Function 3
5040
+ */
5041
+#define ICX_CAPID6 0x9c
5042
+#define ICX_CAPID7 0xa0
5043
+
5044
+static u64 icx_count_chabox(void)
5045
+{
5046
+ struct pci_dev *dev = NULL;
5047
+ u64 caps = 0;
5048
+
5049
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5050
+ if (!dev)
5051
+ goto out;
5052
+
5053
+ pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5054
+ pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5055
+out:
5056
+ pci_dev_put(dev);
5057
+ return hweight64(caps);
5058
+}
5059
+
5060
/*
 * Register the ICX MSR uncore units.  The CHA box count is discovered at
 * runtime from CAPID6/CAPID7; bail out (with a warning) if it exceeds
 * the size of the static per-CHA offset table.
 */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}
5069
+
5070
/*
 * ICX M2M (mesh-to-memory) PMON unit, PCI based.  Register layout and
 * umask extension are shared with SNR.
 */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
5083
+
5084
/* sysfs format attributes for ICX UPI events (extended umask variant 4). */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* "format" sysfs group exposing the attributes above. */
static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
5097
+
5098
/* ICX UPI (inter-socket link) PMON unit, PCI based: three links. */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};
5111
+
5112
/* Counter constraints for ICX M3UPI events. */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

/* ICX M3UPI (mesh-to-UPI) PMON unit, PCI based: three links. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5137
+
5138
/* Indices into icx_pci_uncores[], matched by the PCI ID table below. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

/* All PCI-based uncore units on ICX; NULL-terminated. */
static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};
5150
+
5151
/*
 * PCI IDs of the ICX uncore PMON devices.  driver_data encodes
 * (device, function, unit type index, box index) via
 * UNCORE_PCI_DEV_FULL_DATA so the uncore core can map each PCI device
 * to the right box of the right unit.
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
5194
+
5195
/* PCI driver stub binding the ID table above; probed by the uncore core. */
static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};
5199
+
5200
+int icx_uncore_pci_init(void)
5201
+{
5202
+ /* ICX UBOX DID */
5203
+ int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5204
+ SKX_GIDNIDMAP, true);
5205
+
5206
+ if (ret)
5207
+ return ret;
5208
+
5209
+ uncore_pci_uncores = icx_pci_uncores;
5210
+ uncore_pci_driver = &icx_uncore_pci_driver;
5211
+ return 0;
5212
+}
5213
+
5214
+static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5215
+{
5216
+ unsigned int box_ctl = box->pmu->type->box_ctl +
5217
+ box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5218
+ int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5219
+ SNR_IMC_MMIO_MEM0_OFFSET;
5220
+
5221
+ __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
5222
+}
5223
+
5224
/*
 * Ops for the ICX IMC: ICX-specific box init (controller/channel BAR
 * math), SNR-style MMIO control and event enable/disable.
 */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
5233
+
5234
/*
 * ICX IMC PMON unit: 12 boxes (4 controllers x 3 channels), accessed
 * via MMIO.
 *
 * NOTE(review): event_descs reuses the SNR cas_count encodings;
 * upstream later gave ICX its own event list because the CAS_COUNT
 * umasks differ between SNR and ICX — TODO confirm against the ICX
 * uncore reference before relying on cas_count_* here.
 */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5252
+
5253
/* Free-running counter groups exposed by the ICX IMC (adds DDRT). */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

/*
 * Per-type layout of the free-running counters; initializer order
 * follows struct freerunning_counters.
 */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};
5266
+
5267
+static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5268
+ INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5269
+
5270
+ INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
5271
+ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
5272
+ INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
5273
+ INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
5274
+ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
5275
+ INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5276
+
5277
+ INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
5278
+ INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
5279
+ INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
5280
+ INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
5281
+ INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
5282
+ INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
5283
+ { /* end: all zeroes */ },
5284
+};
5285
+
5286
+static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5287
+{
5288
+ int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5289
+ SNR_IMC_MMIO_MEM0_OFFSET;
5290
+
5291
+ __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5292
+}
5293
+
5294
/*
 * Ops for the ICX IMC free-running counters: read-only MMIO counters
 * with the ICX-specific box init above.
 */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5300
+
5301
/* Pseudo uncore unit wrapping the ICX IMC free-running counters. */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5312
+
5313
/* All MMIO-based uncore units on ICX; NULL-terminated. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

/* Hook the ICX MMIO uncore list into the generic uncore core. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5323
+
5324
+/* end of ICX uncore support */