forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 748e4f3d702def1a4bff191e0cf93b6a05340f01
kernel/arch/x86/events/perf_event.h
@@ -49,28 +49,61 @@
                unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64 idxmsk64;
        };
-       u64 code;
-       u64 cmask;
-       int weight;
-       int overlap;
-       int flags;
+       u64 code;
+       u64 cmask;
+       int weight;
+       int overlap;
+       int flags;
+       unsigned int size;
 };
+
+static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+{
+       return ((ecode & c->cmask) - c->code) <= (u64)c->size;
+}
+
 /*
  * struct hw_perf_event.flags flags
  */
 #define PERF_X86_EVENT_PEBS_LDLAT      0x0001 /* ld+ldlat data address sampling */
 #define PERF_X86_EVENT_PEBS_ST         0x0002 /* st data address sampling */
 #define PERF_X86_EVENT_PEBS_ST_HSW     0x0004 /* haswell style datala, store */
-#define PERF_X86_EVENT_COMMITTED       0x0008 /* event passed commit_txn */
-#define PERF_X86_EVENT_PEBS_LD_HSW     0x0010 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW     0x0020 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL            0x0040 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC         0x0080 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED   0x0100 /* grant rdpmc permission */
-#define PERF_X86_EVENT_EXCL_ACCT       0x0200 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD     0x0400 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS      0x0800 /* use large PEBS */
+#define PERF_X86_EVENT_PEBS_LD_HSW     0x0008 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW     0x0010 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL            0x0020 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC         0x0040 /* dynamic alloc'd constraint */
+#define PERF_X86_EVENT_RDPMC_ALLOWED   0x0080 /* grant rdpmc permission */
+#define PERF_X86_EVENT_EXCL_ACCT       0x0100 /* accounted EXCL event */
+#define PERF_X86_EVENT_AUTO_RELOAD     0x0200 /* use PEBS auto-reload */
+#define PERF_X86_EVENT_LARGE_PEBS      0x0400 /* use large PEBS */
+#define PERF_X86_EVENT_PEBS_VIA_PT     0x0800 /* use PT buffer for PEBS */
+#define PERF_X86_EVENT_PAIR            0x1000 /* Large Increment per Cycle */
+#define PERF_X86_EVENT_LBR_SELECT      0x2000 /* Save/Restore MSR_LBR_SELECT */
+#define PERF_X86_EVENT_TOPDOWN         0x4000 /* Count Topdown slots/metrics events */
 
+static inline bool is_topdown_count(struct perf_event *event)
+{
+       return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
+}
+
+static inline bool is_metric_event(struct perf_event *event)
+{
+       u64 config = event->attr.config;
+
+       return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
+               ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
+               ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
+}
+
+static inline bool is_slots_event(struct perf_event *event)
+{
+       return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
+}
+
+static inline bool is_topdown_event(struct perf_event *event)
+{
+       return is_metric_event(event) || is_slots_event(event);
+}
 
 struct amd_nb {
        int nb_id;  /* NorthBridge id */
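The new size field turns an event constraint into a range [code, code + size], and constraint_match() covers both bounds with a single unsigned comparison: if the masked event code is below c->code, the subtraction wraps around to a huge value and the <= test fails. A minimal standalone sketch of the trick (values are hypothetical, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* If ecode < code, (ecode - code) wraps to a huge unsigned value, so a
     * single compare tests both ecode >= code and ecode <= code + size. */
    static bool range_match(uint64_t code, uint64_t size, uint64_t ecode)
    {
            return (ecode - code) <= size;
    }

    /* range_match(0xc8, 6, 0xca) -> true  (0xca is inside 0xc8..0xce)
     * range_match(0xc8, 6, 0xc7) -> false (difference wraps to ~0ULL)
     * range_match(0xc8, 0, 0xc8) -> true  (plain constraints have size 0) */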
@@ -80,6 +113,11 @@
 };
 
 #define PEBS_COUNTER_MASK      ((1ULL << MAX_PEBS_EVENTS) - 1)
+#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
+#define PEBS_OUTPUT_OFFSET     61
+#define PEBS_OUTPUT_MASK       (3ull << PEBS_OUTPUT_OFFSET)
+#define PEBS_OUTPUT_PT         (1ull << PEBS_OUTPUT_OFFSET)
+#define PEBS_VIA_PT_MASK       (PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)
 
 /*
  * Flags PEBS can handle without an PMI.
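These new bits configure the PEBS-via-PT feature in the upper part of the PEBS enable MSR: bits 62:61 select the output destination and bit 60 requests a PMI after every record instead of on a buffer threshold. A sketch of how an enable path might compose the value; the counter index and the exact MSR usage here are assumptions, not taken from the patch:

    u64 pebs_enabled = BIT_ULL(hwc->idx);        /* arm PEBS on this counter */

    pebs_enabled |= PEBS_OUTPUT_PT;              /* route records into the PT buffer */
    pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;  /* PMI per record, not per threshold */

    wrmsrl(MSR_IA32_PEBS_ENABLE, pebs_enabled);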
@@ -167,6 +205,17 @@
 #define MAX_LBR_ENTRIES                32
 
 enum {
+       LBR_FORMAT_32           = 0x00,
+       LBR_FORMAT_LIP          = 0x01,
+       LBR_FORMAT_EIP          = 0x02,
+       LBR_FORMAT_EIP_FLAGS    = 0x03,
+       LBR_FORMAT_EIP_FLAGS2   = 0x04,
+       LBR_FORMAT_INFO         = 0x05,
+       LBR_FORMAT_TIME         = 0x06,
+       LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_TIME,
+};
+
+enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
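This enum names the LBR record formats the hardware can report; in the upstream header the active format is read from the low six bits of MSR_IA32_PERF_CAPABILITIES (the first bit-field of union perf_capabilities). A hedged sketch of the detection step:

    u64 perf_cap;

    rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap);

    /* lbr_format is assumed to sit in bits 5:0, per the upstream union */
    if ((perf_cap & 0x3f) > LBR_FORMAT_MAX_KNOWN)
            pr_warn("unrecognized LBR format, disabling LBR\n");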
@@ -186,6 +235,8 @@
                                          they've never been enabled yet */
        int n_txn;                     /* the # last events in the below arrays;
                                          added in the current transaction */
+       int n_txn_pair;
+       int n_txn_metric;
        int assign[X86_PMC_IDX_MAX];   /* event to counter assignment */
        u64 tags[X86_PMC_IDX_MAX];
 
@@ -206,17 +257,30 @@
        u64 pebs_enabled;
        int n_pebs;
        int n_large_pebs;
+       int n_pebs_via_pt;
+       int pebs_output;
+
+       /* Current super set of events hardware configuration */
+       u64 pebs_data_cfg;
+       u64 active_pebs_data_cfg;
+       int pebs_record_size;
 
        /*
         * Intel LBR bits
         */
        int lbr_users;
+       int lbr_pebs_users;
        struct perf_branch_stack lbr_stack;
        struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
-       struct er_account *lbr_sel;
+       union {
+               struct er_account *lbr_sel;
+               struct er_account *lbr_ctl;
+       };
        u64 br_sel;
-       struct x86_perf_task_context *last_task_ctx;
+       void *last_task_ctx;
        int last_log_id;
+       int lbr_select;
+       void *lbr_xsave;
 
        /*
         * Intel host/guest exclude bits
@@ -248,26 +312,46 @@
        u64 tfa_shadow;
 
        /*
+        * Perf Metrics
+        */
+       /* number of accepted metrics events */
+       int n_metric;
+
+       /*
         * AMD specific bits
         */
        struct amd_nb *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64 perf_ctr_virt_mask;
+       int n_pair; /* Large increment events */
 
        void *kfree_on_online[X86_PERF_KFREE_MAX];
+
+       struct pmu *pmu;
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
+#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {        \
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
+       .size = (e) - (c),              \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
 }
 
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
+       __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
+
 #define EVENT_CONSTRAINT(c, n, m) \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
+
+/*
+ * The constraint_match() function only works for 'simple' event codes
+ * and not for extended (AMD64_EVENTSEL_EVENT) events codes.
+ */
+#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
+       __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
 
 #define INTEL_EXCLEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
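A worked example of the new range macro (the event codes are hypothetical): constrain every event code in 0xc8..0xce to counters 0 and 1.

    struct event_constraint frontend_range =
            EVENT_CONSTRAINT_RANGE(0xc8, 0xce, 0x3, ARCH_PERFMON_EVENTSEL_EVENT);

    /* expands to roughly:
     *   .idxmsk64 = 0x3                  counters 0 and 1
     *   .code     = 0xc8
     *   .size     = 0x06                 0xce - 0xc8
     *   .cmask    = ARCH_PERFMON_EVENTSEL_EVENT
     *   .weight   = 2                    HWEIGHT(0x3)
     * so constraint_match() accepts any event code in [0xc8, 0xce]. */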
@@ -304,6 +388,12 @@
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
 
 /*
+ * Constraint on a range of Event codes
+ */
+#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)                  \
+       EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
+
+/*
  * Constraint on the Event code + UMask + fixed-mask
  *
  * filter mask to validate fixed counter events.
@@ -319,6 +409,19 @@
 #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
 #define FIXED_EVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
+
+/*
+ * The special metric counters do not actually exist. They are calculated from
+ * the combination of the FxCtr3 + MSR_PERF_METRICS.
+ *
+ * The special metric counters are mapped to a dummy offset for the scheduler.
+ * The sharing between multiple users of the same metric without multiplexing
+ * is not allowed, even though the hardware supports that in principle.
+ */
+
+#define METRIC_EVENT_CONSTRAINT(c, n)                                  \
+       EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),  \
+                        INTEL_ARCH_EVENT_MASK)
 
 /*
  * Constraint on the Event code + UMask
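Here n is the metric index, so each metric gets its own pseudo-counter bit past the real counters (upstream places INTEL_PMC_IDX_METRIC_BASE above all fixed counters). A hedged usage sketch, mirroring how the upstream driver pins the retiring metric:

    /* one dummy scheduler index per metric; sharing a metric without
     * multiplexing is refused, per the comment above */
    struct event_constraint retiring_constraint =
            METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0);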
@@ -348,7 +451,10 @@
 
 /* Event constraint, but match on all event flags too. */
 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
-       EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+       EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
+
+#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)                    \
+       EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
 
 /* Check only flags, but allow all event/umask */
 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)    \
@@ -363,6 +469,11 @@
 /* Check flags and event code, and set the HSW load flag */
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
+                         ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
+       __EVENT_CONSTRAINT_RANGE(code, end, n,          \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
 
@@ -473,6 +584,10 @@
                 * values > 32bit.
                 */
                u64 full_width_write:1;
+               u64 pebs_baseline:1;
+               u64 perf_metrics:1;
+               u64 pebs_output_pt_available:1;
+               u64 anythread_deprecated:1;
        };
        u64 capabilities;
 };
@@ -565,23 +680,23 @@
        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int perfctr_second_write;
-       bool late_ack;
        u64 (*limit_period)(struct perf_event *event, u64 l);
 
+       /* PMI handler bits */
+       unsigned int late_ack           :1,
+                    enabled_ack        :1,
+                    counter_freezing   :1;
        /*
         * sysfs attrs
         */
        int attr_rdpmc_broken;
        int attr_rdpmc;
        struct attribute **format_attrs;
-       struct attribute **event_attrs;
-       struct attribute **caps_attrs;
 
        ssize_t (*events_sysfs_show)(char *page, u64 config);
-       struct attribute **cpu_events;
+       const struct attribute_group **attr_update;
 
        unsigned long attr_freeze_on_smi;
-       struct attribute **attrs;
 
        /*
         * CPU Hotplug hooks
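late_ack controls where the PMI handler acknowledges the APIC LVTPC entry; acking after the handler body (late) avoids losing a second NMI that fires while the first is still being serviced. A structural sketch of a handler consuming the bit, following the shape of the upstream Intel handler (the body is elided, and the helper name is illustrative):

    static int pmu_handle_irq(struct pt_regs *regs)
    {
            int handled = 0;

            if (!x86_pmu.late_ack)
                    apic_write(APIC_LVTPC, APIC_DM_NMI);  /* early ack */

            /* ... read status, drain PEBS/LBR, process events ... */

            if (x86_pmu.late_ack)
                    apic_write(APIC_LVTPC, APIC_DM_NMI);  /* late ack */

            return handled;
    }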
@@ -604,30 +719,56 @@
        /*
         * Intel DebugStore bits
         */
-       unsigned int bts                :1,
-                    bts_active         :1,
-                    pebs               :1,
-                    pebs_active        :1,
-                    pebs_broken        :1,
-                    pebs_prec_dist     :1,
-                    pebs_no_tlb        :1;
+       unsigned int bts                :1,
+                    bts_active         :1,
+                    pebs               :1,
+                    pebs_active        :1,
+                    pebs_broken        :1,
+                    pebs_prec_dist     :1,
+                    pebs_no_tlb        :1,
+                    pebs_no_isolation  :1;
        int pebs_record_size;
        int pebs_buffer_size;
-       void (*drain_pebs)(struct pt_regs *regs);
+       int max_pebs_events;
+       void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
        struct event_constraint *pebs_constraints;
        void (*pebs_aliases)(struct perf_event *event);
-       int max_pebs_events;
        unsigned long large_pebs_flags;
+       u64 rtm_abort_event;
 
        /*
         * Intel LBR
         */
-       unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
-       int lbr_nr;                              /* hardware stack size */
-       u64 lbr_sel_mask;                        /* LBR_SELECT valid bits */
-       const int *lbr_sel_map;                  /* lbr_select mappings */
+       unsigned int lbr_tos, lbr_from, lbr_to,
+                    lbr_info, lbr_nr;           /* LBR base regs and size */
+       union {
+               u64 lbr_sel_mask;                /* LBR_SELECT valid bits */
+               u64 lbr_ctl_mask;                /* LBR_CTL valid bits */
+       };
+       union {
+               const int *lbr_sel_map;          /* lbr_select mappings */
+               int *lbr_ctl_map;                /* LBR_CTL mappings */
+       };
        bool lbr_double_abort;                   /* duplicated lbr aborts */
        bool lbr_pt_coexist;                     /* (LBR|BTS) may coexist with PT */
+
+       /*
+        * Intel Architectural LBR CPUID Enumeration
+        */
+       unsigned int lbr_depth_mask:8;
+       unsigned int lbr_deep_c_reset:1;
+       unsigned int lbr_lip:1;
+       unsigned int lbr_cpl:1;
+       unsigned int lbr_filter:1;
+       unsigned int lbr_call_stack:1;
+       unsigned int lbr_mispred:1;
+       unsigned int lbr_timed_lbr:1;
+       unsigned int lbr_br_type:1;
+
+       void (*lbr_reset)(void);
+       void (*lbr_read)(struct cpu_hw_events *cpuc);
+       void (*lbr_save)(void *ctx);
+       void (*lbr_restore)(void *ctx);
 
        /*
         * Intel PT/LBR/BTS are exclusive
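The lbr_* bit-fields added here mirror the Architectural LBR CPUID enumeration (leaf 0x1c). A decode sketch; the exact bit positions follow the SDM and should be treated as an assumption, not as what this hunk itself does:

    unsigned int eax, ebx, ecx, edx;

    cpuid(0x1c, &eax, &ebx, &ecx, &edx);

    x86_pmu.lbr_depth_mask   = eax & 0xff;        /* supported depth bitmap */
    x86_pmu.lbr_deep_c_reset = (eax >> 30) & 1;   /* LBRs survive deep C-states */
    x86_pmu.lbr_lip          = (eax >> 31) & 1;   /* records hold LIPs */
    x86_pmu.lbr_cpl          = ebx & 1;           /* CPL filtering */
    x86_pmu.lbr_filter       = (ebx >> 1) & 1;    /* branch-type filtering */
    x86_pmu.lbr_call_stack   = (ebx >> 2) & 1;    /* call-stack mode */
    x86_pmu.lbr_mispred      = ecx & 1;           /* mispredict bit */
    x86_pmu.lbr_timed_lbr    = (ecx >> 1) & 1;    /* cycle counts */
    x86_pmu.lbr_br_type      = (ecx >> 2) & 1;    /* branch type field */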
@@ -635,9 +776,24 @@
        atomic_t lbr_exclusive[x86_lbr_exclusive_max];
 
        /*
+        * Intel perf metrics
+        */
+       u64 (*update_topdown_event)(struct perf_event *event);
+       int (*set_topdown_event_period)(struct perf_event *event);
+
+       /*
+        * perf task context (i.e. struct perf_event_context::task_ctx_data)
+        * switch helper to bridge calls from perf/core to perf/x86.
+        * See struct pmu::swap_task_ctx() usage for examples;
+        */
+       void (*swap_task_ctx)(struct perf_event_context *prev,
+                             struct perf_event_context *next);
+
+       /*
         * AMD bits
         */
        unsigned int amd_nb_constraints : 1;
+       u64 perf_ctr_pair_en;
 
        /*
         * Extra registers for events
@@ -654,17 +810,48 @@
         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
         */
        int (*check_period) (struct perf_event *event, u64 period);
+
+       int (*aux_output_match) (struct perf_event *event);
 };
 
-struct x86_perf_task_context {
-       u64 lbr_from[MAX_LBR_ENTRIES];
-       u64 lbr_to[MAX_LBR_ENTRIES];
-       u64 lbr_info[MAX_LBR_ENTRIES];
-       int tos;
-       int valid_lbrs;
+struct x86_perf_task_context_opt {
        int lbr_callstack_users;
        int lbr_stack_state;
        int log_id;
+};
+
+struct x86_perf_task_context {
+       u64 lbr_sel;
+       int tos;
+       int valid_lbrs;
+       struct x86_perf_task_context_opt opt;
+       struct lbr_entry lbr[MAX_LBR_ENTRIES];
+};
+
+struct x86_perf_task_context_arch_lbr {
+       struct x86_perf_task_context_opt opt;
+       struct lbr_entry entries[];
+};
+
+/*
+ * Add padding to guarantee the 64-byte alignment of the state buffer.
+ *
+ * The structure is dynamically allocated. The size of the LBR state may vary
+ * based on the number of LBR registers.
+ *
+ * Do not put anything after the LBR state.
+ */
+struct x86_perf_task_context_arch_lbr_xsave {
+       struct x86_perf_task_context_opt opt;
+
+       union {
+               struct xregs_state xsave;
+               struct {
+                       struct fxregs_state i387;
+                       struct xstate_header header;
+                       struct arch_lbr_state lbr;
+               } __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
+       };
 };
 
 #define x86_add_quirk(func_)                                           \
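struct x86_perf_task_context_arch_lbr ends in a C99 flexible array member, so its allocation size depends on the LBR depth enumerated at boot rather than the fixed MAX_LBR_ENTRIES of the legacy layout. A sizing sketch (the variable names are illustrative):

    size_t ctx_size = sizeof(struct x86_perf_task_context_arch_lbr) +
                      x86_pmu.lbr_nr * sizeof(struct lbr_entry);

    struct x86_perf_task_context_arch_lbr *ctx =
            kzalloc(ctx_size, GFP_KERNEL);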
@@ -685,6 +872,7 @@
 #define PMU_FL_EXCL_ENABLED    0x8  /* exclusive counter active */
 #define PMU_FL_PEBS_ALL        0x10 /* all events are valid PEBS events */
 #define PMU_FL_TFA             0x20 /* deal with TSX force abort */
+#define PMU_FL_PAIR            0x40 /* merge counters for large incr. events */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -711,7 +899,16 @@
        .event_str_ht   = ht,                                           \
 }
 
+struct pmu *x86_get_pmu(unsigned int cpu);
 extern struct x86_pmu x86_pmu __read_mostly;
+
+static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
+{
+       if (static_cpu_has(X86_FEATURE_ARCH_LBR))
+               return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;
+
+       return &((struct x86_perf_task_context *)ctx)->opt;
+}
 
 static inline bool x86_pmu_has_lbr_callstack(void)
 {
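Because last_task_ctx degraded to a void * earlier in this patch (its layout now depends on whether Architectural LBR is in use), callers reach the shared fields only through task_context_opt(). A usage sketch with a hypothetical helper:

    static void get_lbr_callstack(void *ctx)
    {
            /* works for both x86_perf_task_context and the arch-LBR layout */
            task_context_opt(ctx)->lbr_callstack_users++;
    }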
@@ -779,6 +976,11 @@
 
 void x86_pmu_disable_all(void);
 
+static inline bool is_counter_pair(struct hw_perf_event *hwc)
+{
+       return hwc->flags & PERF_X86_EVENT_PAIR;
+}
+
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
 {
@@ -786,6 +988,14 @@
 
        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
+
+       /*
+        * Add enabled Merge event on next counter
+        * if large increment event being enabled on this counter
+        */
+       if (is_counter_pair(hwc))
+               wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
+
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
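For Large Increment per Cycle events the hardware merges an adjacent even/odd counter pair: the odd counter is programmed with perf_ctr_pair_en (the Merge event) before the even one is enabled, and cleared again on disable (next hunk). Scheduling therefore has to budget an extra counter per paired event, which is what the n_pair and n_txn_pair fields above track. A hypothetical accounting helper, not part of the patch:

    static int counters_needed(struct cpu_hw_events *cpuc, int n_events)
    {
            /* each paired event occupies its own counter plus the next one */
            return n_events + cpuc->n_pair;
    }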
@@ -803,6 +1013,9 @@
        struct hw_perf_event *hwc = &event->hw;
 
        wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
+
+       if (is_counter_pair(hwc))
+               wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
 }
 
 void x86_pmu_enable_event(struct perf_event *event);
@@ -845,8 +1058,6 @@
 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
 ssize_t intel_event_sysfs_show(char *page, u64 config);
 
-struct attribute **merge_attr(struct attribute **a, struct attribute **b);
-
 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
@@ -864,6 +1075,11 @@
 }
 
 #endif /* CONFIG_CPU_SUP_AMD */
+
+static inline int is_pebs_pt(struct perf_event *event)
+{
+       return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
+}
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
@@ -907,7 +1123,12 @@
 
 void reserve_ds_buffers(void);
 
+void release_lbr_buffers(void);
+
+void reserve_lbr_buffers(void);
+
 extern struct event_constraint bts_constraint;
+extern struct event_constraint vlbr_constraint;
 
 void intel_pmu_enable_bts(u64 config);
 
@@ -939,6 +1160,8 @@
 
 extern struct event_constraint intel_skl_pebs_event_constraints[];
 
+extern struct event_constraint intel_icl_pebs_event_constraints[];
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
 
 void intel_pmu_pebs_add(struct perf_event *event);
@@ -957,13 +1180,22 @@
 
 void intel_pmu_auto_reload_read(struct perf_event *event);
 
+void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
+
 void intel_ds_init(void);
+
+void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
+                                struct perf_event_context *next);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
 
 u64 lbr_from_signext_quirk_wr(u64 val);
 
 void intel_pmu_lbr_reset(void);
+
+void intel_pmu_lbr_reset_32(void);
+
+void intel_pmu_lbr_reset_64(void);
 
 void intel_pmu_lbr_add(struct perf_event *event);
 
@@ -974,6 +1206,14 @@
 void intel_pmu_lbr_disable_all(void);
 
 void intel_pmu_lbr_read(void);
+
+void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
+
+void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
+
+void intel_pmu_lbr_save(void *ctx);
+
+void intel_pmu_lbr_restore(void *ctx);
 
 void intel_pmu_lbr_init_core(void);
 
@@ -990,6 +1230,8 @@
 void intel_pmu_lbr_init_skl(void);
 
 void intel_pmu_lbr_init_knl(void);
+
+void intel_pmu_arch_lbr_init(void);
 
 void intel_pmu_pebs_data_source_nhm(void);
 
@@ -1026,6 +1268,14 @@
 {
 }
 
+static inline void release_lbr_buffers(void)
+{
+}
+
+static inline void reserve_lbr_buffers(void)
+{
+}
+
 static inline int intel_pmu_init(void)
 {
        return 0;
@@ -1045,3 +1295,12 @@
        return 0;
 }
 #endif /* CONFIG_CPU_SUP_INTEL */
+
+#if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
+int zhaoxin_pmu_init(void);
+#else
+static inline int zhaoxin_pmu_init(void)
+{
+       return 0;
+}
+#endif /*CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN*/