hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/arch/powerpc/perf/imc-pmu.c
....@@ -13,6 +13,7 @@
1313 #include <asm/cputhreads.h>
1414 #include <asm/smp.h>
1515 #include <linux/string.h>
16
+#include <linux/spinlock.h>
1617
1718 /* Nest IMC data structures and variables */
1819
....@@ -49,7 +50,7 @@
4950 * core and trace-imc
5051 */
5152 static struct imc_pmu_ref imc_global_refc = {
52
- .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
53
+ .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
5354 .id = 0,
5455 .refc = 0,
5556 };
....@@ -393,7 +394,7 @@
393394 get_hard_smp_processor_id(cpu));
394395 /*
395396 * If this is the last cpu in this chip then, skip the reference
396
- * count mutex lock and make the reference count on this chip zero.
397
+ * count lock and make the reference count on this chip zero.
397398 */
398399 ref = get_nest_pmu_ref(cpu);
399400 if (!ref)
....@@ -455,15 +456,15 @@
455456 /*
456457 * See if we need to disable the nest PMU.
457458 * If no events are currently in use, then we have to take a
458
- * mutex to ensure that we don't race with another task doing
459
+ * lock to ensure that we don't race with another task doing
459460 * enable or disable the nest counters.
460461 */
461462 ref = get_nest_pmu_ref(event->cpu);
462463 if (!ref)
463464 return;
464465
465
- /* Take the mutex lock for this node and then decrement the reference count */
466
- mutex_lock(&ref->lock);
466
+ /* Take the lock for this node and then decrement the reference count */
467
+ spin_lock(&ref->lock);
467468 if (ref->refc == 0) {
468469 /*
469470 * The scenario where this is true is, when perf session is
....@@ -475,7 +476,7 @@
475476 * an OPAL call to disable the engine in that node.
476477 *
477478 */
478
- mutex_unlock(&ref->lock);
479
+ spin_unlock(&ref->lock);
479480 return;
480481 }
481482 ref->refc--;
....@@ -483,7 +484,7 @@
483484 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
484485 get_hard_smp_processor_id(event->cpu));
485486 if (rc) {
486
- mutex_unlock(&ref->lock);
487
+ spin_unlock(&ref->lock);
487488 pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
488489 return;
489490 }
....@@ -491,7 +492,7 @@
491492 WARN(1, "nest-imc: Invalid event reference count\n");
492493 ref->refc = 0;
493494 }
494
- mutex_unlock(&ref->lock);
495
+ spin_unlock(&ref->lock);
495496 }
496497
497498 static int nest_imc_event_init(struct perf_event *event)
....@@ -550,26 +551,25 @@
550551
551552 /*
552553 * Get the imc_pmu_ref struct for this node.
553
- * Take the mutex lock and then increment the count of nest pmu events
554
- * inited.
554
+ * Take the lock and then increment the count of nest pmu events inited.
555555 */
556556 ref = get_nest_pmu_ref(event->cpu);
557557 if (!ref)
558558 return -EINVAL;
559559
560
- mutex_lock(&ref->lock);
560
+ spin_lock(&ref->lock);
561561 if (ref->refc == 0) {
562562 rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
563563 get_hard_smp_processor_id(event->cpu));
564564 if (rc) {
565
- mutex_unlock(&ref->lock);
565
+ spin_unlock(&ref->lock);
566566 pr_err("nest-imc: Unable to start the counters for node %d\n",
567567 node_id);
568568 return rc;
569569 }
570570 }
571571 ++ref->refc;
572
- mutex_unlock(&ref->lock);
572
+ spin_unlock(&ref->lock);
573573
574574 event->destroy = nest_imc_counters_release;
575575 return 0;
....@@ -605,9 +605,8 @@
605605 return -ENOMEM;
606606 mem_info->vbase = page_address(page);
607607
608
- /* Init the mutex */
609608 core_imc_refc[core_id].id = core_id;
610
- mutex_init(&core_imc_refc[core_id].lock);
609
+ spin_lock_init(&core_imc_refc[core_id].lock);
611610
612611 rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
613612 __pa((void *)mem_info->vbase),
....@@ -696,9 +695,8 @@
696695 perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
697696 } else {
698697 /*
699
- * If this is the last cpu in this core then, skip taking refernce
700
- * count mutex lock for this core and directly zero "refc" for
701
- * this core.
698
+ * If this is the last cpu in this core then skip taking reference
699
+ * count lock for this core and directly zero "refc" for this core.
702700 */
703701 opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
704702 get_hard_smp_processor_id(cpu));
....@@ -713,11 +711,11 @@
713711 * last cpu in this core and core-imc event running
714712 * in this cpu.
715713 */
716
- mutex_lock(&imc_global_refc.lock);
714
+ spin_lock(&imc_global_refc.lock);
717715 if (imc_global_refc.id == IMC_DOMAIN_CORE)
718716 imc_global_refc.refc--;
719717
720
- mutex_unlock(&imc_global_refc.lock);
718
+ spin_unlock(&imc_global_refc.lock);
721719 }
722720 return 0;
723721 }
....@@ -732,7 +730,7 @@
732730
733731 static void reset_global_refc(struct perf_event *event)
734732 {
735
- mutex_lock(&imc_global_refc.lock);
733
+ spin_lock(&imc_global_refc.lock);
736734 imc_global_refc.refc--;
737735
738736 /*
....@@ -744,7 +742,7 @@
744742 imc_global_refc.refc = 0;
745743 imc_global_refc.id = 0;
746744 }
747
- mutex_unlock(&imc_global_refc.lock);
745
+ spin_unlock(&imc_global_refc.lock);
748746 }
749747
750748 static void core_imc_counters_release(struct perf_event *event)
....@@ -757,17 +755,17 @@
757755 /*
758756 * See if we need to disable the IMC PMU.
759757 * If no events are currently in use, then we have to take a
760
- * mutex to ensure that we don't race with another task doing
758
+ * lock to ensure that we don't race with another task doing
761759 * enable or disable the core counters.
762760 */
763761 core_id = event->cpu / threads_per_core;
764762
765
- /* Take the mutex lock and decrement the refernce count for this core */
763
+	/* Take the lock and decrement the reference count for this core */
766764 ref = &core_imc_refc[core_id];
767765 if (!ref)
768766 return;
769767
770
- mutex_lock(&ref->lock);
768
+ spin_lock(&ref->lock);
771769 if (ref->refc == 0) {
772770 /*
773771 * The scenario where this is true is, when perf session is
....@@ -779,7 +777,7 @@
779777 * an OPAL call to disable the engine in that core.
780778 *
781779 */
782
- mutex_unlock(&ref->lock);
780
+ spin_unlock(&ref->lock);
783781 return;
784782 }
785783 ref->refc--;
....@@ -787,7 +785,7 @@
787785 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
788786 get_hard_smp_processor_id(event->cpu));
789787 if (rc) {
790
- mutex_unlock(&ref->lock);
788
+ spin_unlock(&ref->lock);
791789 pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
792790 return;
793791 }
....@@ -795,7 +793,7 @@
795793 WARN(1, "core-imc: Invalid event reference count\n");
796794 ref->refc = 0;
797795 }
798
- mutex_unlock(&ref->lock);
796
+ spin_unlock(&ref->lock);
799797
800798 reset_global_refc(event);
801799 }
....@@ -833,7 +831,6 @@
833831 if ((!pcmi->vbase))
834832 return -ENODEV;
835833
836
- /* Get the core_imc mutex for this core */
837834 ref = &core_imc_refc[core_id];
838835 if (!ref)
839836 return -EINVAL;
....@@ -841,22 +838,22 @@
841838 /*
842839 * Core pmu units are enabled only when it is used.
843840 * See if this is triggered for the first time.
844
- * If yes, take the mutex lock and enable the core counters.
841
+ * If yes, take the lock and enable the core counters.
845842 * If not, just increment the count in core_imc_refc struct.
846843 */
847
- mutex_lock(&ref->lock);
844
+ spin_lock(&ref->lock);
848845 if (ref->refc == 0) {
849846 rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
850847 get_hard_smp_processor_id(event->cpu));
851848 if (rc) {
852
- mutex_unlock(&ref->lock);
849
+ spin_unlock(&ref->lock);
853850 pr_err("core-imc: Unable to start the counters for core %d\n",
854851 core_id);
855852 return rc;
856853 }
857854 }
858855 ++ref->refc;
859
- mutex_unlock(&ref->lock);
856
+ spin_unlock(&ref->lock);
860857
861858 /*
862859 * Since the system can run either in accumulation or trace-mode
....@@ -867,7 +864,7 @@
867864 * to know whether any other trace/thread imc
868865 * events are running.
869866 */
870
- mutex_lock(&imc_global_refc.lock);
867
+ spin_lock(&imc_global_refc.lock);
871868 if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
872869 /*
873870 * No other trace/thread imc events are running in
....@@ -876,10 +873,10 @@
876873 imc_global_refc.id = IMC_DOMAIN_CORE;
877874 imc_global_refc.refc++;
878875 } else {
879
- mutex_unlock(&imc_global_refc.lock);
876
+ spin_unlock(&imc_global_refc.lock);
880877 return -EBUSY;
881878 }
882
- mutex_unlock(&imc_global_refc.lock);
879
+ spin_unlock(&imc_global_refc.lock);
883880
884881 event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
885882 event->destroy = core_imc_counters_release;
....@@ -951,10 +948,10 @@
951948 mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
952949
953950 /* Reduce the refc if thread-imc event running on this cpu */
954
- mutex_lock(&imc_global_refc.lock);
951
+ spin_lock(&imc_global_refc.lock);
955952 if (imc_global_refc.id == IMC_DOMAIN_THREAD)
956953 imc_global_refc.refc--;
957
- mutex_unlock(&imc_global_refc.lock);
954
+ spin_unlock(&imc_global_refc.lock);
958955
959956 return 0;
960957 }
....@@ -994,7 +991,7 @@
994991 if (!target)
995992 return -EINVAL;
996993
997
- mutex_lock(&imc_global_refc.lock);
994
+ spin_lock(&imc_global_refc.lock);
998995 /*
999996 * Check if any other trace/core imc events are running in the
1000997 * system, if not set the global id to thread-imc.
....@@ -1003,10 +1000,10 @@
10031000 imc_global_refc.id = IMC_DOMAIN_THREAD;
10041001 imc_global_refc.refc++;
10051002 } else {
1006
- mutex_unlock(&imc_global_refc.lock);
1003
+ spin_unlock(&imc_global_refc.lock);
10071004 return -EBUSY;
10081005 }
1009
- mutex_unlock(&imc_global_refc.lock);
1006
+ spin_unlock(&imc_global_refc.lock);
10101007
10111008 event->pmu->task_ctx_nr = perf_sw_context;
10121009 event->destroy = reset_global_refc;
....@@ -1128,25 +1125,25 @@
11281125 /*
11291126 * imc pmus are enabled only when it is used.
11301127 * See if this is triggered for the first time.
1131
- * If yes, take the mutex lock and enable the counters.
1128
+ * If yes, take the lock and enable the counters.
11321129 * If not, just increment the count in ref count struct.
11331130 */
11341131 ref = &core_imc_refc[core_id];
11351132 if (!ref)
11361133 return -EINVAL;
11371134
1138
- mutex_lock(&ref->lock);
1135
+ spin_lock(&ref->lock);
11391136 if (ref->refc == 0) {
11401137 if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
11411138 get_hard_smp_processor_id(smp_processor_id()))) {
1142
- mutex_unlock(&ref->lock);
1139
+ spin_unlock(&ref->lock);
11431140 pr_err("thread-imc: Unable to start the counter\
11441141 for core %d\n", core_id);
11451142 return -EINVAL;
11461143 }
11471144 }
11481145 ++ref->refc;
1149
- mutex_unlock(&ref->lock);
1146
+ spin_unlock(&ref->lock);
11501147 return 0;
11511148 }
11521149
....@@ -1163,12 +1160,12 @@
11631160 return;
11641161 }
11651162
1166
- mutex_lock(&ref->lock);
1163
+ spin_lock(&ref->lock);
11671164 ref->refc--;
11681165 if (ref->refc == 0) {
11691166 if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
11701167 get_hard_smp_processor_id(smp_processor_id()))) {
1171
- mutex_unlock(&ref->lock);
1168
+ spin_unlock(&ref->lock);
11721169 pr_err("thread-imc: Unable to stop the counters\
11731170 for core %d\n", core_id);
11741171 return;
....@@ -1176,7 +1173,7 @@
11761173 } else if (ref->refc < 0) {
11771174 ref->refc = 0;
11781175 }
1179
- mutex_unlock(&ref->lock);
1176
+ spin_unlock(&ref->lock);
11801177
11811178 /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
11821179 mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
....@@ -1217,9 +1214,8 @@
12171214 }
12181215 }
12191216
1220
- /* Init the mutex, if not already */
12211217 trace_imc_refc[core_id].id = core_id;
1222
- mutex_init(&trace_imc_refc[core_id].lock);
1218
+ spin_lock_init(&trace_imc_refc[core_id].lock);
12231219
12241220 mtspr(SPRN_LDBAR, 0);
12251221 return 0;
....@@ -1239,10 +1235,10 @@
12391235 * Reduce the refc if any trace-imc event running
12401236 * on this cpu.
12411237 */
1242
- mutex_lock(&imc_global_refc.lock);
1238
+ spin_lock(&imc_global_refc.lock);
12431239 if (imc_global_refc.id == IMC_DOMAIN_TRACE)
12441240 imc_global_refc.refc--;
1245
- mutex_unlock(&imc_global_refc.lock);
1241
+ spin_unlock(&imc_global_refc.lock);
12461242
12471243 return 0;
12481244 }
....@@ -1364,17 +1360,17 @@
13641360 }
13651361
13661362 mtspr(SPRN_LDBAR, ldbar_value);
1367
- mutex_lock(&ref->lock);
1363
+ spin_lock(&ref->lock);
13681364 if (ref->refc == 0) {
13691365 if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
13701366 get_hard_smp_processor_id(smp_processor_id()))) {
1371
- mutex_unlock(&ref->lock);
1367
+ spin_unlock(&ref->lock);
13721368 pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
13731369 return -EINVAL;
13741370 }
13751371 }
13761372 ++ref->refc;
1377
- mutex_unlock(&ref->lock);
1373
+ spin_unlock(&ref->lock);
13781374 return 0;
13791375 }
13801376
....@@ -1407,19 +1403,19 @@
14071403 return;
14081404 }
14091405
1410
- mutex_lock(&ref->lock);
1406
+ spin_lock(&ref->lock);
14111407 ref->refc--;
14121408 if (ref->refc == 0) {
14131409 if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
14141410 get_hard_smp_processor_id(smp_processor_id()))) {
1415
- mutex_unlock(&ref->lock);
1411
+ spin_unlock(&ref->lock);
14161412 pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
14171413 return;
14181414 }
14191415 } else if (ref->refc < 0) {
14201416 ref->refc = 0;
14211417 }
1422
- mutex_unlock(&ref->lock);
1418
+ spin_unlock(&ref->lock);
14231419
14241420 trace_imc_event_stop(event, flags);
14251421 }
....@@ -1441,7 +1437,7 @@
14411437 * no other thread is running any core/thread imc
14421438 * events
14431439 */
1444
- mutex_lock(&imc_global_refc.lock);
1440
+ spin_lock(&imc_global_refc.lock);
14451441 if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
14461442 /*
14471443 * No core/thread imc events are running in the
....@@ -1450,10 +1446,10 @@
14501446 imc_global_refc.id = IMC_DOMAIN_TRACE;
14511447 imc_global_refc.refc++;
14521448 } else {
1453
- mutex_unlock(&imc_global_refc.lock);
1449
+ spin_unlock(&imc_global_refc.lock);
14541450 return -EBUSY;
14551451 }
1456
- mutex_unlock(&imc_global_refc.lock);
1452
+ spin_unlock(&imc_global_refc.lock);
14571453
14581454 event->hw.idx = -1;
14591455
....@@ -1525,10 +1521,10 @@
15251521 i = 0;
15261522 for_each_node(nid) {
15271523 /*
1528
- * Mutex lock to avoid races while tracking the number of
1524
+ * Take the lock to avoid races while tracking the number of
15291525 * sessions using the chip's nest pmu units.
15301526 */
1531
- mutex_init(&nest_imc_refc[i].lock);
1527
+ spin_lock_init(&nest_imc_refc[i].lock);
15321528
15331529 /*
15341530 * Loop to init the "id" with the node_id. Variable "i" initialized to