@@ -1192,6 +1192,11 @@
 	return 0;
 }
 
+static int perf_mux_hrtimer_restart_ipi(void *arg)
+{
+	return perf_mux_hrtimer_restart(arg);
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
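The new perf_mux_hrtimer_restart_ipi() wrapper gives cpu_function_call() (used further down in this patch) a callback whose prototype matches exactly, instead of casting perf_mux_hrtimer_restart through remote_function_f, presumably to avoid an indirect call through an incompatible pointer type, which is undefined behaviour in C and is flagged by control-flow-integrity checking. A minimal userspace sketch of the same pattern; run_on_cpu(), remote_fn_t, cpu_ctx and restart_timer are invented stand-ins for the kernel APIs, not real ones:

```c
#include <stdio.h>

struct cpu_ctx {
	int interval_ms;
};

/* Hypothetical dispatcher with a fixed callback type, standing in for
 * cpu_function_call()/remote_function_f. */
typedef int (*remote_fn_t)(void *arg);

static int run_on_cpu(remote_fn_t fn, void *arg)
{
	return fn(arg);
}

/* The real handler takes a typed pointer, like perf_mux_hrtimer_restart(). */
static int restart_timer(struct cpu_ctx *ctx)
{
	printf("restart, interval=%dms\n", ctx->interval_ms);
	return 0;
}

/* Thin wrapper with exactly the prototype the dispatcher expects,
 * mirroring perf_mux_hrtimer_restart_ipi() above. */
static int restart_timer_ipi(void *arg)
{
	return restart_timer(arg);
}

int main(void)
{
	struct cpu_ctx ctx = { .interval_ms = 4 };

	/*
	 * run_on_cpu((remote_fn_t)restart_timer, &ctx) would call the
	 * function through an incompatible pointer type; the wrapper
	 * avoids that without changing behaviour.
	 */
	return run_on_cpu(restart_timer_ipi, &ctx);
}
```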
@@ -8665,7 +8670,7 @@
 
 	perf_event_header__init_id(&bpf_event->event_id.header,
 				   &sample, event);
-	ret = perf_output_begin(&handle, data, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				bpf_event->event_id.header.size);
 	if (ret)
 		return;
@@ -8880,8 +8885,8 @@
 		hwc->interrupts = 1;
 	} else {
 		hwc->interrupts++;
-		if (unlikely(throttle
-			     && hwc->interrupts >= max_samples_per_tick)) {
+		if (unlikely(throttle &&
+			     hwc->interrupts > max_samples_per_tick)) {
 			__this_cpu_inc(perf_throttled_count);
 			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
 			hwc->interrupts = MAX_INTERRUPTS;
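Besides the line-wrap cleanup, the comparison changes from >= to >: the throttle path is now taken only once hwc->interrupts exceeds max_samples_per_tick, i.e. one more unthrottled interrupt per tick than before. A standalone sketch of just that boundary (plain C; the loop and helper are invented for illustration and ignore the rest of the throttling logic in this function):

```c
#include <stdio.h>

/* Count interrupts handled before the throttle condition fires, for the
 * old (>=) and new (>) comparisons in the hunk above. */
static int samples_before_throttle(int max_samples_per_tick, int strict)
{
	int interrupts = 0;
	int handled = 0;

	while (1) {
		interrupts++;
		if (strict ? interrupts > max_samples_per_tick
			   : interrupts >= max_samples_per_tick)
			break;		/* would take the throttle path */
		handled++;
	}
	return handled;
}

int main(void)
{
	int max_samples_per_tick = 4;

	printf(">= throttles after %d samples\n",
	       samples_before_throttle(max_samples_per_tick, 0));
	printf(">  throttles after %d samples\n",
	       samples_before_throttle(max_samples_per_tick, 1));
	return 0;
}
```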
@@ -10727,8 +10732,7 @@
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 
-		cpu_function_call(cpu,
-			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
+		cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx);
 	}
 	cpus_read_unlock();
 	mutex_unlock(&mux_interval_mutex);
@@ -10765,13 +10769,15 @@
 
 	pmu->dev->groups = pmu->attr_groups;
 	device_initialize(pmu->dev);
-	ret = dev_set_name(pmu->dev, "%s", pmu->name);
-	if (ret)
-		goto free_dev;
 
 	dev_set_drvdata(pmu->dev, pmu);
 	pmu->dev->bus = &pmu_bus;
 	pmu->dev->release = pmu_dev_release;
+
+	ret = dev_set_name(pmu->dev, "%s", pmu->name);
+	if (ret)
+		goto free_dev;
+
 	ret = device_add(pmu->dev);
 	if (ret)
 		goto free_dev;
@@ -11575,7 +11581,7 @@
 	/*
 	 * If its not a per-cpu rb, it must be the same task.
 	 */
-	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
+	if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
 		goto out;
 
 	/*
@@ -11734,12 +11740,12 @@
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
-	/* Do we allow access to perf_event_open(2) ? */
-	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;
 
-	err = perf_copy_attr(attr_uptr, &attr);
+	/* Do we allow access to perf_event_open(2) ? */
+	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
 	if (err)
 		return err;
 
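With this reordering, security_perf_event_open() now inspects a perf_event_attr that has actually been filled in by perf_copy_attr() from the user pointer, rather than whatever happened to be on the stack. The same copy-then-check shape, reduced to a self-contained userspace sketch; copy_attr_from_user(), may_open_event() and the struct are invented for illustration and are not kernel or libc APIs:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct event_attr {
	unsigned int type;
	unsigned int exclude_kernel;
};

/* Stand-in for perf_copy_attr(): validate and copy the caller's buffer. */
static int copy_attr_from_user(const void *uptr, struct event_attr *attr)
{
	if (!uptr)
		return -EFAULT;
	memcpy(attr, uptr, sizeof(*attr));
	return 0;
}

/* Stand-in for the security hook: it can only make a meaningful decision
 * once it sees the copied-in attributes. */
static int may_open_event(const struct event_attr *attr)
{
	return attr->exclude_kernel ? 0 : -EACCES;
}

static int open_event(const void *uattr)
{
	struct event_attr attr;
	int err;

	/* Copy the attributes in first ... */
	err = copy_attr_from_user(uattr, &attr);
	if (err)
		return err;

	/* ... then let policy look at what was actually copied. */
	return may_open_event(&attr);
}

int main(void)
{
	struct event_attr req = { .type = 0, .exclude_kernel = 1 };

	printf("open_event() -> %d\n", open_event(&req));
	return 0;
}
```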