forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/arch/s390/kernel/perf_cpum_sf.c
@@ -156,8 +156,8 @@
 		}
 	}
 
-	debug_sprintf_event(sfdbg, 5,
-			    "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
+	debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__,
+			    (unsigned long)sfb->sdbt);
 	memset(sfb, 0, sizeof(*sfb));
 }
 
@@ -212,10 +212,11 @@
 	 * the sampling buffer origin.
 	 */
 	if (sfb->sdbt != get_next_sdbt(tail)) {
-		debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
-				    "sampling buffer is not linked: origin=%p"
-				    "tail=%p\n",
-				    (void *) sfb->sdbt, (void *) tail);
+		debug_sprintf_event(sfdbg, 3, "%s: "
+				    "sampling buffer is not linked: origin %#lx"
+				    " tail %#lx\n", __func__,
+				    (unsigned long)sfb->sdbt,
+				    (unsigned long)tail);
 		return -EINVAL;
 	}
 
@@ -264,8 +265,8 @@
 	*tail = (unsigned long) sfb->sdbt + 1;
 	sfb->tail = tail;
 
-	debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
-			    " settings: sdbt=%lu sdb=%lu\n",
+	debug_sprintf_event(sfdbg, 4, "%s: new buffer"
+			    " settings: sdbt %lu sdb %lu\n", __func__,
 			    sfb->num_sdbt, sfb->num_sdb);
 	return rc;
 }
@@ -305,12 +306,13 @@
 	rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
 	if (rc) {
 		free_sampling_buffer(sfb);
-		debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
-			"realloc_sampling_buffer failed with rc=%i\n", rc);
+		debug_sprintf_event(sfdbg, 4, "%s: "
+				    "realloc_sampling_buffer failed with rc %i\n",
+				    __func__, rc);
 	} else
 		debug_sprintf_event(sfdbg, 4,
-			"alloc_sampling_buffer: tear=%p dear=%p\n",
-			sfb->sdbt, (void *) *sfb->sdbt);
+			"%s: tear %#lx dear %#lx\n", __func__,
+			(unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt);
 	return rc;
 }
 
@@ -370,28 +372,33 @@
 
 static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
 {
-	unsigned long n_sdb, freq, factor;
+	unsigned long n_sdb, freq;
 	size_t sample_size;
 
 	/* Calculate sampling buffers using 4K pages
 	 *
-	 * 1. Determine the sample data size which depends on the used
-	 *    sampling functions, for example, basic-sampling or
-	 *    basic-sampling with diagnostic-sampling.
+	 * 1. The sampling size is 32 bytes for basic sampling. This size
+	 *    is the same for all machine types. Diagnostic
+	 *    sampling uses auxiliary data buffer setup which provides the
+	 *    memory for SDBs using Linux common code auxiliary trace
+	 *    setup.
 	 *
-	 * 2. Use the sampling frequency as input. The sampling buffer is
-	 *    designed for almost one second. This can be adjusted through
-	 *    the "factor" variable.
-	 *    In any case, alloc_sampling_buffer() sets the Alert Request
+	 * 2. Function alloc_sampling_buffer() sets the Alert Request
 	 *    Control indicator to trigger a measurement-alert to harvest
-	 *    sample-data-blocks (sdb).
+	 *    sample-data-blocks (SDB). This is done per SDB. This
+	 *    measurement alert interrupt fires quickly enough to handle
+	 *    one SDB; on very high frequency and workloads there might
+	 *    be 2 to 3 SDBs available for sample processing.
+	 *    Currently there is no need to set up an alert request on every
+	 *    n-th page. This is counterproductive as one IRQ triggers
+	 *    a very high number of samples to be processed at one IRQ.
 	 *
-	 * 3. Compute the number of sample-data-blocks and ensure a minimum
-	 *    of CPUM_SF_MIN_SDB. Also ensure the upper limit does not
-	 *    exceed a "calculated" maximum. The symbolic maximum is
-	 *    designed for basic-sampling only and needs to be increased if
-	 *    diagnostic-sampling is active.
-	 *    See also the remarks for these symbolic constants.
+	 * 3. Use the sampling frequency as input.
+	 *    Compute the number of SDBs and ensure a minimum
+	 *    of CPUM_SF_MIN_SDB. Depending on frequency add some more
+	 *    SDBs to handle a higher sampling rate.
+	 *    Use a minimum of CPUM_SF_MIN_SDB and allow for 100 samples
+	 *    (one SDB) for every 10000 HZ frequency increment.
 	 *
 	 * 4. Compute the number of sample-data-block-tables (SDBT) and
 	 *    ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
@@ -399,10 +406,7 @@
 	 */
 	sample_size = sizeof(struct hws_basic_entry);
 	freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
-	factor = 1;
-	n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
-	if (n_sdb < CPUM_SF_MIN_SDB)
-		n_sdb = CPUM_SF_MIN_SDB;
+	n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);
 
 	/* If there is already a sampling buffer allocated, it is very likely
 	 * that the sampling facility is enabled too. If the event to be
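
The new sizing rule above replaces the old one-second heuristic with a flat base of CPUM_SF_MIN_SDB plus one extra SDB (roughly 100 basic samples) per 10000 Hz of sampling frequency. A minimal standalone sketch of how that scales, using an assumed placeholder value for CPUM_SF_MIN_SDB rather than the kernel's real constant:

/* Standalone sketch of the new n_sdb calculation; CPUM_SF_MIN_SDB below is
 * an assumed placeholder, not the kernel's value.
 */
#include <stdio.h>

#define CPUM_SF_MIN_SDB		15	/* placeholder for the kernel constant */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long freqs[] = { 4000, 10000, 50000, 400000 };
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++) {
		/* one extra SDB (about 100 basic samples) per 10000 Hz */
		unsigned long n_sdb = CPUM_SF_MIN_SDB +
				      DIV_ROUND_UP(freqs[i], 10000);

		printf("freq %7lu Hz -> n_sdb %lu\n", freqs[i], n_sdb);
	}
	return 0;
}
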
@@ -417,8 +421,8 @@
 		return 0;
 
 	debug_sprintf_event(sfdbg, 3,
-			    "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
-			    " sample_size=%lu cpuhw=%p\n",
+			    "%s: rate %lu f %lu sdb %lu/%lu"
+			    " sample_size %lu cpuhw %p\n", __func__,
 			    SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
 			    sample_size, cpuhw);
 
@@ -478,8 +482,8 @@
 	if (num)
 		sfb_account_allocs(num, hwc);
 
-	debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
-			    " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
+	debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n",
+			    __func__, OVERFLOW_REG(hwc), ratio, num);
 	OVERFLOW_REG(hwc) = 0;
 }
 
@@ -517,16 +521,15 @@
 	 */
 	rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
 	if (rc)
-		debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
-				    "failed with rc=%i\n", rc);
+		debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n",
+				    __func__, rc);
 
 	if (sfb_has_pending_allocs(sfb, hwc))
-		debug_sprintf_event(sfdbg, 5, "sfb: extend: "
-				    "req=%lu alloc=%lu remaining=%lu\n",
-				    num, sfb->num_sdb - num_old,
+		debug_sprintf_event(sfdbg, 5, "%s: "
+				    "req %lu alloc %lu remaining %lu\n",
+				    __func__, num, sfb->num_sdb - num_old,
 				    sfb_pending_allocs(sfb, hwc));
 }
-
 
 /* Number of perf events counting hardware events */
 static atomic_t num_events;
@@ -552,20 +555,22 @@
 		err = sf_disable();
 		if (err)
 			pr_err("Switching off the sampling facility failed "
-			       "with rc=%i\n", err);
+			       "with rc %i\n", err);
 		debug_sprintf_event(sfdbg, 5,
-				    "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
+				    "%s: initialized: cpuhw %p\n", __func__,
+				    cpusf);
 		break;
 	case PMC_RELEASE:
 		cpusf->flags &= ~PMU_F_RESERVED;
 		err = sf_disable();
 		if (err) {
 			pr_err("Switching off the sampling facility failed "
-			       "with rc=%i\n", err);
+			       "with rc %i\n", err);
 		} else
 			deallocate_buffers(cpusf);
 		debug_sprintf_event(sfdbg, 5,
-				    "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
+				    "%s: released: cpuhw %p\n", __func__,
+				    cpusf);
 		break;
 	}
 	if (err)
@@ -610,13 +615,6 @@
 	hwc->sample_period = period;
 	hwc->last_period = hwc->sample_period;
 	local64_set(&hwc->period_left, hwc->sample_period);
-}
-
-static void hw_reset_registers(struct hw_perf_event *hwc,
-			       unsigned long *sdbt_origin)
-{
-	/* (Re)set to first sample-data-block-table */
-	TEAR_REG(hwc) = (unsigned long) sdbt_origin;
 }
 
 static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
@@ -674,7 +672,7 @@
 	rcu_read_lock();
 
 	perf_prepare_sample(&header, data, event, regs);
-	if (perf_output_begin(&handle, event, header.size))
+	if (perf_output_begin(&handle, data, event, header.size))
 		goto out;
 
 	/* Update the process ID (see also kernel/events/core.c) */
687685 rcu_read_unlock();
688686 }
689687
688
+static unsigned long getrate(bool freq, unsigned long sample,
689
+ struct hws_qsi_info_block *si)
690
+{
691
+ unsigned long rate;
692
+
693
+ if (freq) {
694
+ rate = freq_to_sample_rate(si, sample);
695
+ rate = hw_limit_rate(si, rate);
696
+ } else {
697
+ /* The min/max sampling rates specifies the valid range
698
+ * of sample periods. If the specified sample period is
699
+ * out of range, limit the period to the range boundary.
700
+ */
701
+ rate = hw_limit_rate(si, sample);
702
+
703
+ /* The perf core maintains a maximum sample rate that is
704
+ * configurable through the sysctl interface. Ensure the
705
+ * sampling rate does not exceed this value. This also helps
706
+ * to avoid throttling when pushing samples with
707
+ * perf_event_overflow().
708
+ */
709
+ if (sample_rate_to_freq(si, rate) >
710
+ sysctl_perf_event_sample_rate) {
711
+ debug_sprintf_event(sfdbg, 1, "%s: "
712
+ "Sampling rate exceeds maximum "
713
+ "perf sample rate\n", __func__);
714
+ rate = 0;
715
+ }
716
+ }
717
+ return rate;
718
+}
719
+
720
+/* The sampling information (si) contains information about the
721
+ * min/max sampling intervals and the CPU speed. So calculate the
722
+ * correct sampling interval and avoid the whole period adjust
723
+ * feedback loop.
724
+ *
725
+ * Since the CPU Measurement sampling facility can not handle frequency
726
+ * calculate the sampling interval when frequency is specified using
727
+ * this formula:
728
+ * interval := cpu_speed * 1000000 / sample_freq
729
+ *
730
+ * Returns errno on bad input and zero on success with parameter interval
731
+ * set to the correct sampling rate.
732
+ *
733
+ * Note: This function turns off freq bit to avoid calling function
734
+ * perf_adjust_period(). This causes frequency adjustment in the common
735
+ * code part which causes tremendous variations in the counter values.
736
+ */
737
+static int __hw_perf_event_init_rate(struct perf_event *event,
738
+ struct hws_qsi_info_block *si)
739
+{
740
+ struct perf_event_attr *attr = &event->attr;
741
+ struct hw_perf_event *hwc = &event->hw;
742
+ unsigned long rate;
743
+
744
+ if (attr->freq) {
745
+ if (!attr->sample_freq)
746
+ return -EINVAL;
747
+ rate = getrate(attr->freq, attr->sample_freq, si);
748
+ attr->freq = 0; /* Don't call perf_adjust_period() */
749
+ SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE;
750
+ } else {
751
+ rate = getrate(attr->freq, attr->sample_period, si);
752
+ if (!rate)
753
+ return -EINVAL;
754
+ }
755
+ attr->sample_period = rate;
756
+ SAMPL_RATE(hwc) = rate;
757
+ hw_init_period(hwc, SAMPL_RATE(hwc));
758
+ debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n",
759
+ __func__, event->cpu, event->attr.sample_period,
760
+ event->attr.freq, SAMPLE_FREQ_MODE(hwc));
761
+ return 0;
762
+}
763
+
690764 static int __hw_perf_event_init(struct perf_event *event)
691765 {
692766 struct cpu_hw_sf *cpuhw;
693767 struct hws_qsi_info_block si;
694768 struct perf_event_attr *attr = &event->attr;
695769 struct hw_perf_event *hwc = &event->hw;
696
- unsigned long rate;
697770 int cpu, err;
698771
699772 /* Reserve CPU-measurement sampling facility */
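
A small worked example of the interval formula quoted in the comment above. The cpu_speed value is a made-up placeholder; on real hardware the driver reads it from the QUERY SAMPLING INFORMATION (QSI) block and then clamps the result with hw_limit_rate():

/* Worked example of: interval := cpu_speed * 1000000 / sample_freq.
 * cpu_speed is a placeholder, machine dependent in reality.
 */
#include <stdio.h>

int main(void)
{
	unsigned long cpu_speed = 5000;		/* placeholder value */
	unsigned long sample_freq = 4000;	/* requested samples per second */
	unsigned long interval = cpu_speed * 1000000UL / sample_freq;

	printf("sampling interval: %lu\n", interval);	/* prints 1250000 */
	return 0;
}
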
@@ -741,6 +814,12 @@
 		goto out;
 	}
 
+	if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
+		pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
+		err = -EBUSY;
+		goto out;
+	}
+
 	/* Always enable basic sampling */
 	SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
 
@@ -759,43 +838,9 @@
 	if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
 		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
 
-	/* The sampling information (si) contains information about the
-	 * min/max sampling intervals and the CPU speed. So calculate the
-	 * correct sampling interval and avoid the whole period adjust
-	 * feedback loop.
-	 */
-	rate = 0;
-	if (attr->freq) {
-		if (!attr->sample_freq) {
-			err = -EINVAL;
-			goto out;
-		}
-		rate = freq_to_sample_rate(&si, attr->sample_freq);
-		rate = hw_limit_rate(&si, rate);
-		attr->freq = 0;
-		attr->sample_period = rate;
-	} else {
-		/* The min/max sampling rates specifies the valid range
-		 * of sample periods. If the specified sample period is
-		 * out of range, limit the period to the range boundary.
-		 */
-		rate = hw_limit_rate(&si, hwc->sample_period);
-
-		/* The perf core maintains a maximum sample rate that is
-		 * configurable through the sysctl interface. Ensure the
-		 * sampling rate does not exceed this value. This also helps
-		 * to avoid throttling when pushing samples with
-		 * perf_event_overflow().
-		 */
-		if (sample_rate_to_freq(&si, rate) >
-		    sysctl_perf_event_sample_rate) {
-			err = -EINVAL;
-			debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n");
-			goto out;
-		}
-	}
-	SAMPL_RATE(hwc) = rate;
-	hw_init_period(hwc, SAMPL_RATE(hwc));
+	err = __hw_perf_event_init_rate(event, &si);
+	if (err)
+		goto out;
 
 	/* Initialize sample data overflow accounting */
 	hwc->extra_reg.reg = REG_OVERFLOW;
@@ -836,12 +881,21 @@
 	return err;
 }
 
+static bool is_callchain_event(struct perf_event *event)
+{
+	u64 sample_type = event->attr.sample_type;
+
+	return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
+			      PERF_SAMPLE_STACK_USER);
+}
+
 static int cpumsf_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
 	/* No support for taken branch sampling */
-	if (has_branch_stack(event))
+	/* No support for callchain, stacks and registers */
+	if (has_branch_stack(event) || is_callchain_event(event))
 		return -EOPNOTSUPP;
 
 	switch (event->attr.type) {
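
For reference, a hypothetical userspace perf_event_attr that would now be rejected with -EOPNOTSUPP by cpumsf_pmu_event_init(), because its sample_type hits the mask tested in is_callchain_event(); the event type and period below are placeholders, not values taken from this driver:

/* Sketch: any of PERF_SAMPLE_CALLCHAIN, PERF_SAMPLE_REGS_USER or
 * PERF_SAMPLE_STACK_USER in sample_type is now refused by this PMU.
 */
#include <linux/perf_event.h>
#include <string.h>

static void fill_rejected_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_RAW;		/* placeholder event type */
	attr->sample_period = 10000;		/* placeholder period */
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;
}
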
@@ -867,7 +921,7 @@
 
 	/* Check online status of the CPU to which the event is pinned */
 	if (event->cpu >= 0 && !cpu_online(event->cpu))
-		return -ENODEV;
+		return -ENODEV;
 
 	/* Force reset of idle/hv excludes regardless of what the
 	 * user requested.
@@ -915,9 +969,10 @@
 		 * buffer extents
 		 */
 		sfb_account_overflows(cpuhw, hwc);
-		if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
-			extend_sampling_buffer(&cpuhw->sfb, hwc);
+		extend_sampling_buffer(&cpuhw->sfb, hwc);
 	}
+	/* Rate may be adjusted with ioctl() */
+	cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
 }
 
 /* (Re)enable the PMU and sampling facility */
@@ -927,7 +982,7 @@
 	err = lsctl(&cpuhw->lsctl);
 	if (err) {
 		cpuhw->flags &= ~PMU_F_ENABLED;
-		pr_err("Loading sampling controls failed: op=%i err=%i\n",
+		pr_err("Loading sampling controls failed: op %i err %i\n",
 		       1, err);
 		return;
 	}
@@ -935,10 +990,11 @@
 	/* Load current program parameter */
 	lpp(&S390_lowcore.lpp);
 
-	debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
-			    "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs,
-			    cpuhw->lsctl.ed, cpuhw->lsctl.cd,
-			    (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear);
+	debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
+			    "interval %#lx tear %#lx dear %#lx\n", __func__,
+			    cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
+			    cpuhw->lsctl.cd, cpuhw->lsctl.interval,
+			    cpuhw->lsctl.tear, cpuhw->lsctl.dear);
 }
 
 static void cpumsf_pmu_disable(struct pmu *pmu)
@@ -961,13 +1017,14 @@
 
 	err = lsctl(&inactive);
 	if (err) {
-		pr_err("Loading sampling controls failed: op=%i err=%i\n",
+		pr_err("Loading sampling controls failed: op %i err %i\n",
 		       2, err);
 		return;
 	}
 
 	/* Save state of TEAR and DEAR register contents */
-	if (!qsi(&si)) {
+	err = qsi(&si);
+	if (!err) {
 		/* TEAR/DEAR values are valid only if the sampling facility is
 		 * enabled. Note that cpumsf_pmu_disable() might be called even
 		 * for a disabled sampling facility because cpumsf_pmu_enable()
@@ -978,8 +1035,8 @@
 		cpuhw->lsctl.tear = si.tear;
 		cpuhw->lsctl.dear = si.dear;
 	}
-		debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
-				    "qsi() failed with err=%i\n", err);
+		debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n",
+				    __func__, err);
 
 	cpuhw->flags &= ~PMU_F_ENABLED;
 }
@@ -1092,14 +1149,6 @@
 	local64_add(count, &event->count);
 }
 
-static void debug_sample_entry(struct hws_basic_entry *sample,
-			       struct hws_trailer_entry *te)
-{
-	debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
-			    "sampling data entry: te->f=%i basic.def=%04x (%p)\n",
-			    te->f, sample->def, sample);
-}
-
 /* hw_collect_samples() - Walk through a sample-data-block and collect samples
  * @event: The perf event
  * @sdbt: Sample-data-block table
@@ -1153,7 +1202,11 @@
 			/* Count discarded samples */
 			*overflow += 1;
 		} else {
-			debug_sample_entry(sample, te);
+			debug_sprintf_event(sfdbg, 4,
+					    "%s: Found unknown"
+					    " sampling data entry: te->f %i"
+					    " basic.def %#4x (%p)\n", __func__,
+					    te->f, sample->def, sample);
 			/* Sample slot is not yet written or other record.
 			 *
 			 * This condition can occur if the buffer was reused
@@ -1228,9 +1281,9 @@
 		sampl_overflow += te->overflow;
 
 		/* Timestamps are valid for full sample-data-blocks only */
-		debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
-				    "overflow=%llu timestamp=0x%llx\n",
-				    sdbt, te->overflow,
+		debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
+				    "overflow %llu timestamp %#llx\n",
+				    __func__, (unsigned long)sdbt, te->overflow,
 				    (te->f) ? trailer_timestamp(te) : 0ULL);
 
 		/* Collect all samples from a single sample-data-block and
@@ -1284,9 +1337,11 @@
 	}
 
 	if (sampl_overflow || event_overflow)
-		debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
-				    "overflow stats: sample=%llu event=%llu\n",
-				    sampl_overflow, event_overflow);
+		debug_sprintf_event(sfdbg, 4, "%s: "
+				    "overflows: sample %llu event %llu"
+				    " total %llu num_sdb %llu\n",
+				    __func__, sampl_overflow, event_overflow,
+				    OVERFLOW_REG(hwc), num_sdb);
 }
 
 #define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
@@ -1339,7 +1394,8 @@
 	te = aux_sdb_trailer(aux, aux->alert_mark);
 	te->flags &= ~SDB_TE_ALERT_REQ_MASK;
 
-	debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
+	debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
+			    __func__, i, range_scan, aux->head);
 }
 
 /*
@@ -1372,6 +1428,10 @@
 	 * SDBs between aux->head and aux->empty_mark are already ready
 	 * for new data. range_scan is num of SDBs not within them.
 	 */
+	debug_sprintf_event(sfdbg, 6,
+			    "%s: range %ld head %ld alert %ld empty %ld\n",
+			    __func__, range, aux->head, aux->alert_mark,
+			    aux->empty_mark);
 	if (range > AUX_SDB_NUM_EMPTY(aux)) {
 		range_scan = range - AUX_SDB_NUM_EMPTY(aux);
 		idx = aux->empty_mark + 1;
@@ -1397,15 +1457,11 @@
 	cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
 	cpuhw->lsctl.dear = aux->sdb_index[head];
 
-	debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
-			    "head->alert_mark->empty_mark (num_alert, range)"
-			    "[%lx -> %lx -> %lx] (%lx, %lx) "
-			    "tear index %lx, tear %lx dear %lx\n",
+	debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld "
+			    "index %ld tear %#lx dear %#lx\n", __func__,
 			    aux->head, aux->alert_mark, aux->empty_mark,
-			    AUX_SDB_NUM_ALERT(aux), range,
 			    head / CPUM_SF_SDB_PER_TABLE,
-			    cpuhw->lsctl.tear,
-			    cpuhw->lsctl.dear);
+			    cpuhw->lsctl.tear, cpuhw->lsctl.dear);
 
 	return 0;
 }
@@ -1467,9 +1523,12 @@
 			  unsigned long long *overflow)
 {
 	unsigned long long orig_overflow, orig_flags, new_flags;
-	unsigned long i, range_scan, idx;
+	unsigned long i, range_scan, idx, idx_old;
 	struct hws_trailer_entry *te;
 
+	debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
+			    "empty %ld\n", __func__, range, aux->head,
+			    aux->alert_mark, aux->empty_mark);
 	if (range <= AUX_SDB_NUM_EMPTY(aux))
 		/*
 		 * No need to scan. All SDBs in range are marked as empty.
@@ -1492,7 +1551,7 @@
 	 * indicator fall into this range, set it.
 	 */
 	range_scan = range - AUX_SDB_NUM_EMPTY(aux);
-	idx = aux->empty_mark + 1;
+	idx_old = idx = aux->empty_mark + 1;
 	for (i = 0; i < range_scan; i++, idx++) {
 		te = aux_sdb_trailer(aux, idx);
 		do {
@@ -1512,6 +1571,9 @@
 	/* Update empty_mark to new position */
 	aux->empty_mark = aux->head + range - 1;
 
+	debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld "
+			    "empty %ld\n", __func__, range_scan, idx_old,
+			    idx - 1, aux->empty_mark);
 	return true;
 }
 
@@ -1533,8 +1595,9 @@
 
 	/* Inform user space new data arrived */
 	size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
+	debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__,
+			    size >> PAGE_SHIFT);
 	perf_aux_output_end(handle, size);
-	num_sdb = aux->sfb.num_sdb;
 
 	num_sdb = aux->sfb.num_sdb;
 	while (!done) {
@@ -1544,7 +1607,9 @@
 			pr_err("The AUX buffer with %lu pages for the "
 			       "diagnostic-sampling mode is full\n",
 			       num_sdb);
-			debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n");
+			debug_sprintf_event(sfdbg, 1,
+					    "%s: AUX buffer used up\n",
+					    __func__);
 			break;
 		}
 		if (WARN_ON_ONCE(!aux))
@@ -1566,24 +1631,24 @@
 			size = range << PAGE_SHIFT;
 			perf_aux_output_end(&cpuhw->handle, size);
 			pr_err("Sample data caused the AUX buffer with %lu "
-			       "pages to overflow\n", num_sdb);
-			debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
-					    "overflow %llx\n",
+			       "pages to overflow\n", aux->sfb.num_sdb);
+			debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld "
+					    "overflow %lld\n", __func__,
 					    aux->head, range, overflow);
 		} else {
 			size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
 			perf_aux_output_end(&cpuhw->handle, size);
-			debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
+			debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
 					    "already full, try another\n",
+					    __func__,
 					    aux->head, aux->alert_mark);
 		}
 	}
 
 	if (done)
-		debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
-				    "[%lx -> %lx -> %lx] (%lx, %lx)\n",
-				    aux->head, aux->alert_mark, aux->empty_mark,
-				    AUX_SDB_NUM_ALERT(aux), range);
+		debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
+				    "empty %ld\n", __func__, aux->head,
+				    aux->alert_mark, aux->empty_mark);
 }
 
 /*
@@ -1606,8 +1671,7 @@
 	kfree(aux->sdb_index);
 	kfree(aux);
 
-	debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free "
-			    "%lu SDBTs\n", num_sdbt);
+	debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt);
 }
 
 static void aux_sdb_init(unsigned long sdb)
@@ -1665,7 +1729,7 @@
 	sfb = &aux->sfb;
 
 	/* Allocate sdbt_index for fast reference */
-	n_sdbt = (nr_pages + CPUM_SF_SDB_PER_TABLE - 1) / CPUM_SF_SDB_PER_TABLE;
+	n_sdbt = DIV_ROUND_UP(nr_pages, CPUM_SF_SDB_PER_TABLE);
 	aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
 	if (!aux->sdbt_index)
 		goto no_sdbt_index;
@@ -1715,8 +1779,7 @@
 	 */
 	aux->empty_mark = sfb->num_sdb - 1;
 
-	debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs"
-			    " and %lu SDBs\n",
+	debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__,
 			    sfb->num_sdbt, sfb->num_sdb);
 
 	return aux;
@@ -1737,6 +1800,44 @@
 static void cpumsf_pmu_read(struct perf_event *event)
 {
 	/* Nothing to do ... updates are interrupt-driven */
+}
+
+/* Check if the new sampling period/frequency is appropriate.
+ *
+ * Return non-zero on error and zero on passed checks.
+ */
+static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
+{
+	struct hws_qsi_info_block si;
+	unsigned long rate;
+	bool do_freq;
+
+	memset(&si, 0, sizeof(si));
+	if (event->cpu == -1) {
+		if (qsi(&si))
+			return -ENODEV;
+	} else {
+		/* Event is pinned to a particular CPU, retrieve the per-CPU
+		 * sampling structure for accessing the CPU-specific QSI.
+		 */
+		struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
+
+		si = cpuhw->qsi;
+	}
+
+	do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
+	rate = getrate(do_freq, value, &si);
+	if (!rate)
+		return -EINVAL;
+
+	event->attr.sample_period = rate;
+	SAMPL_RATE(&event->hw) = rate;
+	hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
+	debug_sprintf_event(sfdbg, 4, "%s:"
+			    " cpu %d value %#llx period %#llx freq %d\n",
+			    __func__, event->cpu, value,
+			    event->attr.sample_period, do_freq);
+	return 0;
 }
 
 /* Activate sampling control.
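
A hypothetical userspace sequence that exercises the new callback: PERF_EVENT_IOC_PERIOD on an open sampling event ends up in cpumsf_pmu_check_period(), which recomputes and validates the rate. The raw config value below is only a placeholder standing in for the cpum_sf basic-sampling event:

/* Sketch: change the sampling period of an open event at runtime.
 * Type/config are placeholders; the real values come from the PMU's
 * sysfs event definitions.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t new_period = 20000;	/* placeholder period */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;	/* placeholder event type */
	attr.config = 0xB0000;		/* placeholder for SF_CYCLES_BASIC */
	attr.sample_period = 10000;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* The PMU's check_period callback validates/limits the new value */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period))
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}
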
@@ -1810,7 +1911,7 @@
 	if (!SAMPL_DIAG_MODE(&event->hw)) {
 		cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
 		cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
-		hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
+		TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt;
 	}
 
 	/* Ensure sampling functions are in the disabled state. If disabled,
@@ -1865,10 +1966,30 @@
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
-static struct attribute *cpumsf_pmu_events_attr[] = {
-	CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-	NULL,
-	NULL,
+/* Attribute list for CPU_SF.
+ *
+ * The availability depends on the CPU_MF sampling facility authorization
+ * for basic + diagnostic samples. This is determined at initialization
+ * time by the sampling facility device driver.
+ * If the authorization for basic samples is turned off, it should be
+ * also turned off for diagnostic sampling.
+ *
+ * During initialization of the device driver, check the authorization
+ * level for diagnostic sampling and install the attribute
+ * file for diagnostic sampling if necessary.
+ *
+ * For now install a placeholder to reference all possible attributes:
+ * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
+ * Add another entry for the final NULL pointer.
+ */
+enum {
+	SF_CYCLES_BASIC_ATTR_IDX = 0,
+	SF_CYCLES_BASIC_DIAG_ATTR_IDX,
+	SF_CYCLES_ATTR_MAX
+};
+
+static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
+	[SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
 };
 
 PMU_FORMAT_ATTR(event, "config:0-63");
@@ -1882,10 +2003,12 @@
 	.name = "events",
 	.attrs = cpumsf_pmu_events_attr,
 };
+
 static struct attribute_group cpumsf_pmu_format_group = {
 	.name = "format",
 	.attrs = cpumsf_pmu_format_attr,
 };
+
 static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
 	&cpumsf_pmu_events_group,
 	&cpumsf_pmu_format_group,
@@ -1908,6 +2031,8 @@
 
 	.setup_aux = aux_buffer_setup,
 	.free_aux = aux_buffer_free,
+
+	.check_period = cpumsf_pmu_check_period,
 };
 
 static void cpumf_measurement_alert(struct ext_code ext_code,
@@ -1941,7 +2066,8 @@
 
 	/* Report measurement alerts only for non-PRA codes */
 	if (alert != CPU_MF_INT_SF_PRA)
-		debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert);
+		debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
+				    alert);
 
 	/* Sampling authorization change request */
 	if (alert & CPU_MF_INT_SF_SACA)
@@ -1962,6 +2088,7 @@
 		sf_disable();
 	}
 }
+
 static int cpusf_pmu_setup(unsigned int cpu, int flags)
 {
 	/* Ignore the notification if no events are scheduled on the PMU.
@@ -2018,7 +2145,7 @@
 
 	sfb_set_limits(min, max);
 	pr_info("The sampling buffer limits have changed to: "
-		"min=%lu max=%lu (diag=x%lu)\n",
+		"min %lu max %lu (diag %lu)\n",
 		CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
 	return 0;
 }
@@ -2036,7 +2163,7 @@
 static void __init pr_cpumsf_err(unsigned int reason)
 {
 	pr_err("Sampling facility support for perf is not available: "
-	       "reason=%04x\n", reason);
+	       "reason %#x\n", reason);
 }
 
 static int __init init_cpum_sampling_pmu(void)
@@ -2063,7 +2190,10 @@
 
 	if (si.ad) {
 		sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
-		cpumsf_pmu_events_attr[1] =
+		/* Sampling of diagnostic data authorized,
+		 * install event into attribute list of PMU device.
+		 */
+		cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
 			CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
 	}
 
@@ -2096,5 +2226,6 @@
 out:
 	return err;
 }
+
 arch_initcall(init_cpum_sampling_pmu);
 core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);