2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/perf_event.h
@@ -30,6 +30,7 @@
         int (*is_in_guest)(void);
         int (*is_user_mode)(void);
         unsigned long (*get_guest_ip)(void);
+        void (*handle_intel_pt_intr)(void);
 };

 #ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -53,14 +54,14 @@
 #include <linux/atomic.h>
 #include <linux/sysfs.h>
 #include <linux/perf_regs.h>
-#include <linux/workqueue.h>
 #include <linux/cgroup.h>
+#include <linux/refcount.h>
 #include <linux/security.h>
 #include <asm/local.h>

 struct perf_callchain_entry {
         __u64 nr;
-        __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
+        __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
 };

 struct perf_callchain_entry_ctx {
@@ -92,15 +93,27 @@
 /*
  * branch stack layout:
  *  nr: number of taken branches stored in entries[]
+ *  hw_idx: The low level index of raw branch records
+ *          for the most recent branch.
+ *          -1ULL means invalid/unknown.
  *
  * Note that nr can vary from sample to sample
  * branches (to, from) are stored from most recent
  * to least recent, i.e., entries[0] contains the most
  * recent branch.
+ * The entries[] is an abstraction of raw branch records,
+ * which may not be stored in age order in HW, e.g. Intel LBR.
+ * The hw_idx is to expose the low level index of raw
+ * branch record for the most recent branch aka entries[0].
+ * The hw_idx index is between -1 (unknown) and max depth,
+ * which can be retrieved in /sys/devices/cpu/caps/branches.
+ * For the architectures whose raw branch records are
+ * already stored in age order, the hw_idx should be 0.
  */
 struct perf_branch_stack {
         __u64 nr;
-        struct perf_branch_entry entries[0];
+        __u64 hw_idx;
+        struct perf_branch_entry entries[];
 };

 struct task_struct;
@@ -199,17 +212,26 @@
          */
         u64 sample_period;

-        /*
-         * The period we started this sample with.
-         */
-        u64 last_period;
+        union {
+                struct { /* Sampling */
+                        /*
+                         * The period we started this sample with.
+                         */
+                        u64 last_period;

-        /*
-         * However much is left of the current period; note that this is
-         * a full 64bit value and allows for generation of periods longer
-         * than hardware might allow.
-         */
-        local64_t period_left;
+                        /*
+                         * However much is left of the current period;
+                         * note that this is a full 64bit value and
+                         * allows for generation of periods longer
+                         * than hardware might allow.
+                         */
+                        local64_t period_left;
+                };
+                struct { /* Topdown events counting for context switch */
+                        u64 saved_metric;
+                        u64 saved_slots;
+                };
+        };

         /*
          * State for throttling the event, see __perf_event_overflow() and
@@ -241,10 +263,14 @@
 #define PERF_PMU_CAP_NO_INTERRUPT 0x01
 #define PERF_PMU_CAP_NO_NMI 0x02
 #define PERF_PMU_CAP_AUX_NO_SG 0x04
-#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
+#define PERF_PMU_CAP_EXTENDED_REGS 0x08
 #define PERF_PMU_CAP_EXCLUSIVE 0x10
 #define PERF_PMU_CAP_ITRACE 0x20
 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
+#define PERF_PMU_CAP_NO_EXCLUDE 0x80
+#define PERF_PMU_CAP_AUX_OUTPUT 0x100
+
+struct perf_output_handle;

 /**
  * struct pmu - generic performance monitoring unit
@@ -255,6 +281,7 @@
         struct module *module;
         struct device *dev;
         const struct attribute_group **attr_groups;
+        const struct attribute_group **attr_update;
         const char *name;
         int type;

@@ -263,13 +290,11 @@
          */
         int capabilities;

-        int * __percpu pmu_disable_count;
-        struct perf_cpu_context * __percpu pmu_cpu_context;
+        int __percpu *pmu_disable_count;
+        struct perf_cpu_context __percpu *pmu_cpu_context;
         atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
         int task_ctx_nr;
         int hrtimer_interval_ms;
-        u32 events_across_hotplug:1,
-                reserved:31;

         /* number of address filters this PMU can do */
         unsigned int nr_addr_filters;
@@ -291,7 +316,7 @@
          * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
          * -EINVAL -- @event is for this PMU but @event is not valid
          * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
-         * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges
+         * -EACCES -- @event is for this PMU, @event is valid, but no privileges
          *
          * 0 -- @event is for this PMU and valid
          *
@@ -350,7 +375,7 @@
          * ->stop() with PERF_EF_UPDATE will read the counter and update
          * period/count values like ->read() would.
          *
-         * ->start() with PERF_EF_RELOAD will reprogram the the counter
+         * ->start() with PERF_EF_RELOAD will reprogram the counter
          * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
          */
         void (*start) (struct perf_event *event, int flags);
@@ -403,11 +428,21 @@
          */
         void (*sched_task) (struct perf_event_context *ctx,
                         bool sched_in);
-        /*
-         * PMU specific data size
-         */
-        size_t task_ctx_size;

+        /*
+         * Kmem cache of PMU specific data
+         */
+        struct kmem_cache *task_ctx_cache;
+
+        /*
+         * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
+         * can be synchronized using this function. See Intel LBR callstack support
+         * implementation and Perf core context switch handling callbacks for usage
+         * examples.
+         */
+        void (*swap_task_ctx) (struct perf_event_context *prev,
+                        struct perf_event_context *next);
+                        /* optional */

         /*
          * Set up pmu-private data structures for an AUX area
@@ -420,6 +455,19 @@
          * Free pmu-private AUX data structures
          */
         void (*free_aux) (void *aux); /* optional */
+
+        /*
+         * Take a snapshot of the AUX buffer without touching the event
+         * state, so that preempting ->start()/->stop() callbacks does
+         * not interfere with their logic. Called in PMI context.
+         *
+         * Returns the size of AUX data copied to the output handle.
+         *
+         * Optional.
+         */
+        long (*snapshot_aux) (struct perf_event *event,
+                        struct perf_output_handle *handle,
+                        unsigned long size);

         /*
          * Validate address range filters: make sure the HW supports the
@@ -447,6 +495,16 @@
                         /* optional */

         /*
+         * Check if event can be used for aux_output purposes for
+         * events of this PMU.
+         *
+         * Runs from perf_event_open(). Should return 0 for "no match"
+         * or non-zero for "match".
+         */
+        int (*aux_output_match) (struct perf_event *event);
+                        /* optional */
+
+        /*
          * Filter events for PMU-specific reasons.
          */
         int (*filter_match) (struct perf_event *event); /* optional */
@@ -466,7 +524,7 @@
 /**
  * struct perf_addr_filter - address range filter definition
  * @entry: event's filter list linkage
- * @inode: object file's inode for file-based filters
+ * @path: object file's path for file-based filters
  * @offset: filter range offset
  * @size: filter range size (size==0 means single address trigger)
  * @action: filter/start/stop
@@ -506,7 +564,6 @@
  * enum perf_event_state - the states of an event:
  */
 enum perf_event_state {
-        PERF_EVENT_STATE_DORMANT = -5,
         PERF_EVENT_STATE_DEAD = -4,
         PERF_EVENT_STATE_EXIT = -3,
         PERF_EVENT_STATE_ERROR = -2,
@@ -528,9 +585,13 @@
  * PERF_EV_CAP_SOFTWARE: Is a software event.
  * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
  * from any CPU in the package where it is active.
+ * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
+ * cannot be a group leader. If an event with this flag is detached from the
+ * group it is scheduled out and moved into an unrecoverable ERROR state.
  */
 #define PERF_EV_CAP_SOFTWARE BIT(0)
 #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
+#define PERF_EV_CAP_SIBLING BIT(2)

 #define SWEVENT_HLIST_BITS 8
 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -545,9 +606,10 @@
 #define PERF_ATTACH_TASK 0x04
 #define PERF_ATTACH_TASK_DATA 0x08
 #define PERF_ATTACH_ITRACE 0x10
+#define PERF_ATTACH_SCHED_CB 0x20

 struct perf_cgroup;
-struct ring_buffer;
+struct perf_buffer;

 struct pmu_event_list {
         raw_spinlock_t lock;
@@ -619,7 +681,9 @@
         /*
          * timestamp shadows the actual context timing but it can
          * be safely used in NMI interrupt context. It reflects the
-         * context time as it was when the event was last scheduled in.
+         * context time as it was when the event was last scheduled in,
+         * or when ctx_sched_in failed to schedule the event because we
+         * run out of PMC.
          *
          * ctx_time already accounts for ctx->timestamp. Therefore to
          * compute ctx_time for a sample, simply add perf_clock().
@@ -651,7 +715,6 @@

         int oncpu;
         int cpu;
-        cpumask_t readable_on_cpus;

         struct list_head owner_entry;
         struct task_struct *owner;
@@ -660,7 +723,7 @@
         struct mutex mmap_mutex;
         atomic_t mmap_count;

-        struct ring_buffer *rb;
+        struct perf_buffer *rb;
         struct list_head rb_entry;
         unsigned long rcu_batches;
         int rcu_pending;
@@ -682,6 +745,9 @@
         /* vma address array for file-based filders */
         struct perf_addr_filter_range *addr_filter_ranges;
         unsigned long addr_filters_gen;
+
+        /* for aux_output events */
+        struct perf_event *aux_event;

         void (*destroy)(struct perf_event *);
         struct rcu_head rcu_head;
@@ -713,12 +779,6 @@
         void *security;
 #endif
         struct list_head sb_list;
-        /* Is this event shared with other events */
-        bool shared;
-
-        /* TODO: need to cherry-pick 3d3eb5fb85d97. This is just padding for now
-         * to reduce the ABI diff */
-        struct list_head dormant_event_entry;
 #endif /* CONFIG_PERF_EVENTS */
 };

@@ -761,7 +821,12 @@
         int nr_stat;
         int nr_freq;
         int rotate_disable;
-        atomic_t refcount;
+        /*
+         * Set when nr_events != nr_active, except tolerant to events not
+         * necessary to be active due to scheduling constraints, such as cgroups.
+         */
+        int rotate_necessary;
+        refcount_t refcount;
         struct task_struct *task;

         /*
@@ -814,11 +879,18 @@
         int sched_cb_usage;

         int online;
+        /*
+         * Per-CPU storage for iterators used in visit_groups_merge. The default
+         * storage is of size 2 to hold the CPU and any CPU event iterators.
+         */
+        int heap_size;
+        struct perf_event **heap;
+        struct perf_event *heap_default[2];
 };

 struct perf_output_handle {
         struct perf_event *event;
-        struct ring_buffer *rb;
+        struct perf_buffer *rb;
         unsigned long wakeup;
         unsigned long size;
         u64 aux_flags;
@@ -901,6 +973,9 @@
 extern void perf_sched_cb_inc(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
+
+extern void perf_pmu_resched(struct pmu *pmu);
+
 extern int perf_event_refresh(struct perf_event *event, int refresh);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
@@ -949,18 +1024,14 @@
                 u32 reserved;
         } cpu_entry;
         struct perf_callchain_entry *callchain;
+        u64 aux_size;

-        /*
-         * regs_user may point to task_pt_regs or to regs_user_copy, depending
-         * on arch details.
-         */
         struct perf_regs regs_user;
-        struct pt_regs regs_user_copy;
-
         struct perf_regs regs_intr;
         u64 stack_user_size;

         u64 phys_addr;
+        u64 cgroup;
 } ____cacheline_aligned;

 /* default value for data source */
@@ -1002,9 +1073,9 @@
 extern void perf_event_output_backward(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs);
-extern void perf_event_output(struct perf_event *event,
-                struct perf_sample_data *data,
-                struct pt_regs *regs);
+extern int perf_event_output(struct perf_event *event,
+                struct perf_sample_data *data,
+                struct pt_regs *regs);

 static inline bool
 is_default_overflow_handler(struct perf_event *event)
@@ -1027,6 +1098,15 @@

 extern void
 perf_log_lost_samples(struct perf_event *event, u64 lost);
+
+static inline bool event_has_any_exclude_flag(struct perf_event *event)
+{
+        struct perf_event_attr *attr = &event->attr;
+
+        return attr->exclude_idle || attr->exclude_user ||
+               attr->exclude_kernel || attr->exclude_hv ||
+               attr->exclude_guest || attr->exclude_host;
+}

 static inline bool is_sampling_event(struct perf_event *event)
 {
@@ -1064,12 +1144,18 @@
 #endif

 /*
- * Take a snapshot of the regs. Skip ip and frame pointer to
- * the nth caller. We only need a few of the regs:
+ * When generating a perf sample in-line, instead of from an interrupt /
+ * exception, we lack a pt_regs. This is typically used from software events
+ * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
+ *
+ * We typically don't need a full set, but (for x86) do require:
  * - ip for PERF_SAMPLE_IP
  * - cs for user_mode() tests
- * - bp for callchains
- * - eflags, for future purposes, just in case
+ * - sp for PERF_SAMPLE_CALLCHAIN
+ * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
+ *
+ * NOTE: assumes @regs is otherwise already 0 filled; this is important for
+ * things like PERF_SAMPLE_REGS_INTR.
  */
 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
@@ -1142,7 +1228,25 @@
 }

 extern void perf_event_mmap(struct vm_area_struct *vma);
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
+
+extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
+                bool unregister, const char *sym);
+extern void perf_event_bpf_event(struct bpf_prog *prog,
+                enum perf_bpf_event_type type,
+                u16 flags);
+
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
+{
+        /*
+         * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
+         * the callbacks between a !NULL check and dereferences, to ensure
+         * pending stores/changes to the callback pointers are visible before a
+         * non-NULL perf_guest_cbs is visible to readers, and to prevent a
+         * module from unloading callbacks while readers are active.
+         */
+        return rcu_dereference(perf_guest_cbs);
+}
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

@@ -1150,6 +1254,9 @@
 extern void perf_event_comm(struct task_struct *tsk, bool exec);
 extern void perf_event_namespaces(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
+extern void perf_event_text_poke(const void *addr,
+                const void *old_bytes, size_t old_len,
+                const void *new_bytes, size_t new_len);

 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
@@ -1162,6 +1269,8 @@
 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
+extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
+extern void put_callchain_entry(int rctx);

 extern int sysctl_perf_event_max_stack;
 extern int sysctl_perf_event_max_contexts_per_stack;
@@ -1198,15 +1307,12 @@

 extern void perf_sample_event_took(u64 sample_len_ns);

-extern int perf_proc_update_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
-extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
-
+int perf_proc_update_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
+int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp, loff_t *ppos);
+                void *buffer, size_t *lenp, loff_t *ppos);

 /* Access to perf_event_open(2) syscall. */
 #define PERF_SECURITY_OPEN 0
@@ -1216,11 +1322,6 @@
 #define PERF_SECURITY_KERNEL 2
 #define PERF_SECURITY_TRACEPOINT 3

-static inline bool perf_paranoid_any(void)
-{
-        return sysctl_perf_event_paranoid > 2;
-}
-
 static inline int perf_is_paranoid(void)
 {
         return sysctl_perf_event_paranoid > -1;
@@ -1228,7 +1329,7 @@

 static inline int perf_allow_kernel(struct perf_event_attr *attr)
 {
-        if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
+        if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
                 return -EACCES;

         return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
@@ -1236,7 +1337,7 @@

 static inline int perf_allow_cpu(struct perf_event_attr *attr)
 {
-        if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
+        if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
                 return -EACCES;

         return security_perf_event_open(attr, PERF_SECURITY_CPU);
@@ -1244,7 +1345,7 @@

 static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
 {
-        if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
+        if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
                 return -EPERM;

         return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
@@ -1308,11 +1409,14 @@
 extern void perf_event_addr_filters_sync(struct perf_event *event);

 extern int perf_output_begin(struct perf_output_handle *handle,
+                struct perf_sample_data *data,
                 struct perf_event *event, unsigned int size);
 extern int perf_output_begin_forward(struct perf_output_handle *handle,
-                struct perf_event *event,
-                unsigned int size);
+                struct perf_sample_data *data,
+                struct perf_event *event,
+                unsigned int size);
 extern int perf_output_begin_backward(struct perf_output_handle *handle,
+                struct perf_sample_data *data,
                 struct perf_event *event,
                 unsigned int size);

@@ -1321,6 +1425,9 @@
                 const void *buf, unsigned int len);
 extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                 unsigned int len);
+extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
+                struct perf_output_handle *handle,
+                unsigned long from, unsigned long to);
 extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern u64 perf_swevent_set_period(struct perf_event *event);
@@ -1330,6 +1437,8 @@
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 extern int perf_event_account_interrupt(struct perf_event *event);
+extern int perf_event_period(struct perf_event *event, u64 value);
+extern u64 perf_event_pause(struct perf_event *event, bool reset);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
@@ -1389,10 +1498,22 @@
                 (struct perf_guest_info_callbacks *callbacks) { return 0; }

 static inline void perf_event_mmap(struct vm_area_struct *vma) { }
+
+typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
+static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
+                bool unregister, const char *sym) { }
+static inline void perf_event_bpf_event(struct bpf_prog *prog,
+                enum perf_bpf_event_type type,
+                u16 flags) { }
 static inline void perf_event_exec(void) { }
 static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
 static inline void perf_event_namespaces(struct task_struct *tsk) { }
 static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_text_poke(const void *addr,
+                const void *old_bytes,
+                size_t old_len,
+                const void *new_bytes,
+                size_t new_len) { }
 static inline void perf_event_init(void) { }
 static inline int perf_swevent_get_recursion_context(void) { return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx) { }
@@ -1402,6 +1523,14 @@
 static inline int __perf_event_disable(void *info) { return -1; }
 static inline void perf_event_task_tick(void) { }
 static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
+static inline int perf_event_period(struct perf_event *event, u64 value)
+{
+        return -EINVAL;
+}
+static inline u64 perf_event_pause(struct perf_event *event, bool reset)
+{
+        return 0;
+}
 #endif

 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
@@ -1467,4 +1596,8 @@
 #define perf_event_exit_cpu NULL
 #endif

+extern void __weak arch_perf_update_userpage(struct perf_event *event,
+                struct perf_event_mmap_page *userpg,
+                u64 now);
+
 #endif /* _LINUX_PERF_EVENT_H */
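
Note (not part of the patch above): the hunk at -1142 turns perf_guest_cbs into an RCU-protected pointer and adds the perf_get_guest_cbs() accessor, whose comment describes the required single-fetch discipline. The sketch below is an illustration of that consumer pattern under stated assumptions: the function name example_sample_ip is hypothetical, and in-tree NMI-context callers typically rely on their existing RCU-safe context rather than the explicit rcu_read_lock() shown here.

/* Illustrative sketch only -- not part of the diff above. */
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/rcupdate.h>

static u64 example_sample_ip(struct pt_regs *regs)
{
        struct perf_guest_info_callbacks *cbs;
        u64 ip = instruction_pointer(regs);

        rcu_read_lock();
        /* Fetch the callbacks once; do not re-read perf_guest_cbs afterwards. */
        cbs = perf_get_guest_cbs();
        if (cbs && cbs->is_in_guest())
                ip = cbs->get_guest_ip();
        rcu_read_unlock();

        return ip;
}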