2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/kernel/events/core.c
@@ -1192,6 +1192,11 @@
 	return 0;
 }
 
+static int perf_mux_hrtimer_restart_ipi(void *arg)
+{
+	return perf_mux_hrtimer_restart(arg);
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -8665,7 +8670,7 @@
 
 	perf_event_header__init_id(&bpf_event->event_id.header,
 				   &sample, event);
-	ret = perf_output_begin(&handle, data, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				bpf_event->event_id.header.size);
 	if (ret)
 		return;
@@ -8880,8 +8885,8 @@
 		hwc->interrupts = 1;
 	} else {
 		hwc->interrupts++;
-		if (unlikely(throttle
-			     && hwc->interrupts >= max_samples_per_tick)) {
+		if (unlikely(throttle &&
+			     hwc->interrupts > max_samples_per_tick)) {
 			__this_cpu_inc(perf_throttled_count);
 			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
 			hwc->interrupts = MAX_INTERRUPTS;
@@ -10727,8 +10732,7 @@
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 
-		cpu_function_call(cpu,
-			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
+		cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx);
 	}
 	cpus_read_unlock();
 	mutex_unlock(&mux_interval_mutex);
@@ -10765,13 +10769,15 @@
 
 	pmu->dev->groups = pmu->attr_groups;
 	device_initialize(pmu->dev);
-	ret = dev_set_name(pmu->dev, "%s", pmu->name);
-	if (ret)
-		goto free_dev;
 
 	dev_set_drvdata(pmu->dev, pmu);
 	pmu->dev->bus = &pmu_bus;
 	pmu->dev->release = pmu_dev_release;
+
+	ret = dev_set_name(pmu->dev, "%s", pmu->name);
+	if (ret)
+		goto free_dev;
+
 	ret = device_add(pmu->dev);
 	if (ret)
 		goto free_dev;
@@ -11575,7 +11581,7 @@
 	/*
 	 * If its not a per-cpu rb, it must be the same task.
 	 */
-	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
+	if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
 		goto out;
 
 	/*
@@ -11734,12 +11740,12 @@
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
-	/* Do we allow access to perf_event_open(2) ? */
-	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;
 
-	err = perf_copy_attr(attr_uptr, &attr);
+	/* Do we allow access to perf_event_open(2) ? */
+	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
 	if (err)
 		return err;
 
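
Note (illustration only, not part of the patch above): the first and fourth hunks replace a cast of perf_mux_hrtimer_restart() to remote_function_f with a small wrapper whose prototype exactly matches the callback type that cpu_function_call() takes. Calling a function through a mismatched function-pointer type is undefined behaviour in C and, presumably relevant here, trips kernel Control Flow Integrity checking, so the usual fix is a thin adapter. The sketch below shows that pattern in plain userspace C; dispatch(), restart_timer(), restart_timer_ipi(), struct timer_ctx and remote_fn are hypothetical stand-ins, not kernel APIs.

/* Hypothetical stand-ins; only the wrapper pattern mirrors the patch. */
#include <stdio.h>

struct timer_ctx {
	int cpu;
};

typedef int (*remote_fn)(void *arg);		/* callback type the dispatcher expects */

static int dispatch(remote_fn fn, void *arg)	/* plays the role of cpu_function_call() */
{
	return fn(arg);
}

static int restart_timer(struct timer_ctx *ctx)	/* plays the role of perf_mux_hrtimer_restart() */
{
	printf("restart mux timer on cpu %d\n", ctx->cpu);
	return 0;
}

/* Adapter with the exact remote_fn prototype; no function-pointer cast needed. */
static int restart_timer_ipi(void *arg)
{
	return restart_timer(arg);
}

int main(void)
{
	struct timer_ctx ctx = { .cpu = 1 };

	/*
	 * dispatch((remote_fn)restart_timer, &ctx) would call through a
	 * mismatched pointer type: undefined behaviour in C, and a CFI
	 * violation on kernels built with kCFI.  The wrapper avoids that.
	 */
	return dispatch(restart_timer_ipi, &ctx);
}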