@@ -13,6 +13,7 @@
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 
 /* Nest IMC data structures and variables */
 
@@ -49,7 +50,7 @@
  * core and trace-imc
  */
 static struct imc_pmu_ref imc_global_refc = {
-	.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+	.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
 	.id = 0,
 	.refc = 0,
 };
@@ -393,7 +394,7 @@
 				       get_hard_smp_processor_id(cpu));
 		/*
 		 * If this is the last cpu in this chip then, skip the reference
-		 * count mutex lock and make the reference count on this chip zero.
+		 * count lock and make the reference count on this chip zero.
 		 */
 		ref = get_nest_pmu_ref(cpu);
 		if (!ref)
@@ -455,15 +456,15 @@
 	/*
 	 * See if we need to disable the nest PMU.
 	 * If no events are currently in use, then we have to take a
-	 * mutex to ensure that we don't race with another task doing
+	 * lock to ensure that we don't race with another task doing
 	 * enable or disable the nest counters.
 	 */
 	ref = get_nest_pmu_ref(event->cpu);
 	if (!ref)
 		return;
 
-	/* Take the mutex lock for this node and then decrement the reference count */
-	mutex_lock(&ref->lock);
+	/* Take the lock for this node and then decrement the reference count */
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		/*
 		 * The scenario where this is true is, when perf session is
@@ -475,7 +476,7 @@
 		 * an OPAL call to disable the engine in that node.
 		 *
 		 */
-		mutex_unlock(&ref->lock);
+		spin_unlock(&ref->lock);
 		return;
 	}
 	ref->refc--;
@@ -483,7 +484,7 @@
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
 					    get_hard_smp_processor_id(event->cpu));
 		if (rc) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
 			return;
 		}
@@ -491,7 +492,7 @@
 		WARN(1, "nest-imc: Invalid event reference count\n");
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 }
 
 static int nest_imc_event_init(struct perf_event *event)
@@ -550,26 +551,25 @@
 
 	/*
 	 * Get the imc_pmu_ref struct for this node.
-	 * Take the mutex lock and then increment the count of nest pmu events
-	 * inited.
+	 * Take the lock and then increment the count of nest pmu events inited.
 	 */
 	ref = get_nest_pmu_ref(event->cpu);
 	if (!ref)
 		return -EINVAL;
 
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
 					     get_hard_smp_processor_id(event->cpu));
 		if (rc) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("nest-imc: Unable to start the counters for node %d\n",
 			       node_id);
 			return rc;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 
 	event->destroy = nest_imc_counters_release;
 	return 0;
@@ -605,9 +605,8 @@
 		return -ENOMEM;
 	mem_info->vbase = page_address(page);
 
-	/* Init the mutex */
 	core_imc_refc[core_id].id = core_id;
-	mutex_init(&core_imc_refc[core_id].lock);
+	spin_lock_init(&core_imc_refc[core_id].lock);
 
 	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
 				    __pa((void *)mem_info->vbase),
@@ -696,9 +695,8 @@
 		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
 	} else {
 		/*
-		 * If this is the last cpu in this core then, skip taking refernce
-		 * count mutex lock for this core and directly zero "refc" for
-		 * this core.
+		 * If this is the last cpu in this core then skip taking reference
+		 * count lock for this core and directly zero "refc" for this core.
 		 */
 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 				       get_hard_smp_processor_id(cpu));
@@ -713,11 +711,11 @@
 		 * last cpu in this core and core-imc event running
 		 * in this cpu.
 		 */
-		mutex_lock(&imc_global_refc.lock);
+		spin_lock(&imc_global_refc.lock);
 		if (imc_global_refc.id == IMC_DOMAIN_CORE)
 			imc_global_refc.refc--;
 
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 	}
 	return 0;
 }
@@ -732,7 +730,7 @@
 
 static void reset_global_refc(struct perf_event *event)
 {
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	imc_global_refc.refc--;
 
 	/*
@@ -744,7 +742,7 @@
 		imc_global_refc.refc = 0;
 		imc_global_refc.id = 0;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);
 }
 
 static void core_imc_counters_release(struct perf_event *event)
@@ -757,17 +755,17 @@
 	/*
 	 * See if we need to disable the IMC PMU.
 	 * If no events are currently in use, then we have to take a
-	 * mutex to ensure that we don't race with another task doing
+	 * lock to ensure that we don't race with another task doing
 	 * enable or disable the core counters.
 	 */
 	core_id = event->cpu / threads_per_core;
 
-	/* Take the mutex lock and decrement the refernce count for this core */
+	/* Take the lock and decrement the reference count for this core */
 	ref = &core_imc_refc[core_id];
 	if (!ref)
 		return;
 
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		/*
 		 * The scenario where this is true is, when perf session is
@@ -779,7 +777,7 @@
 		 * an OPAL call to disable the engine in that core.
 		 *
 		 */
-		mutex_unlock(&ref->lock);
+		spin_unlock(&ref->lock);
 		return;
 	}
 	ref->refc--;
@@ -787,7 +785,7 @@
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 					    get_hard_smp_processor_id(event->cpu));
 		if (rc) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
 			return;
 		}
@@ -795,7 +793,7 @@
 		WARN(1, "core-imc: Invalid event reference count\n");
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 
 	reset_global_refc(event);
 }
@@ -833,7 +831,6 @@
 	if ((!pcmi->vbase))
 		return -ENODEV;
 
-	/* Get the core_imc mutex for this core */
 	ref = &core_imc_refc[core_id];
 	if (!ref)
 		return -EINVAL;
@@ -841,22 +838,22 @@
 	/*
 	 * Core pmu units are enabled only when it is used.
 	 * See if this is triggered for the first time.
-	 * If yes, take the mutex lock and enable the core counters.
+	 * If yes, take the lock and enable the core counters.
 	 * If not, just increment the count in core_imc_refc struct.
 	 */
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
 					     get_hard_smp_processor_id(event->cpu));
 		if (rc) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("core-imc: Unable to start the counters for core %d\n",
 			       core_id);
 			return rc;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 
 	/*
 	 * Since the system can run either in accumulation or trace-mode
@@ -867,7 +864,7 @@
 	 * to know whether any other trace/thread imc
 	 * events are running.
 	 */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
 		/*
 		 * No other trace/thread imc events are running in
@@ -876,10 +873,10 @@
 		imc_global_refc.id = IMC_DOMAIN_CORE;
 		imc_global_refc.refc++;
 	} else {
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);
 
 	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
 	event->destroy = core_imc_counters_release;
@@ -951,10 +948,10 @@
 	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 
 	/* Reduce the refc if thread-imc event running on this cpu */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
 		imc_global_refc.refc--;
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);
 
 	return 0;
 }
@@ -994,7 +991,7 @@
 	if (!target)
 		return -EINVAL;
 
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	/*
 	 * Check if any other trace/core imc events are running in the
 	 * system, if not set the global id to thread-imc.
@@ -1003,10 +1000,10 @@
 		imc_global_refc.id = IMC_DOMAIN_THREAD;
 		imc_global_refc.refc++;
 	} else {
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);
 
 	event->pmu->task_ctx_nr = perf_sw_context;
 	event->destroy = reset_global_refc;
@@ -1128,25 +1125,25 @@
 	/*
 	 * imc pmus are enabled only when it is used.
	 * See if this is triggered for the first time.
-	 * If yes, take the mutex lock and enable the counters.
+	 * If yes, take the lock and enable the counters.
 	 * If not, just increment the count in ref count struct.
 	 */
 	ref = &core_imc_refc[core_id];
 	if (!ref)
 		return -EINVAL;
 
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
 		    get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("thread-imc: Unable to start the counter\
 				for core %d\n", core_id);
 			return -EINVAL;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 	return 0;
 }
 
@@ -1163,12 +1160,12 @@
 		return;
 	}
 
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	ref->refc--;
 	if (ref->refc == 0) {
 		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 		    get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("thread-imc: Unable to stop the counters\
 				for core %d\n", core_id);
 			return;
@@ -1176,7 +1173,7 @@
 	} else if (ref->refc < 0) {
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 
 	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
 	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@@ -1217,9 +1214,8 @@
 		}
 	}
 
-	/* Init the mutex, if not already */
 	trace_imc_refc[core_id].id = core_id;
-	mutex_init(&trace_imc_refc[core_id].lock);
+	spin_lock_init(&trace_imc_refc[core_id].lock);
 
 	mtspr(SPRN_LDBAR, 0);
 	return 0;
@@ -1239,10 +1235,10 @@
 	 * Reduce the refc if any trace-imc event running
 	 * on this cpu.
 	 */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
 		imc_global_refc.refc--;
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);
 
 	return 0;
 }
@@ -1364,17 +1360,17 @@
 	}
 
 	mtspr(SPRN_LDBAR, ldbar_value);
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
 				get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
 			return -EINVAL;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 	return 0;
 }
 
@@ -1407,19 +1403,19 @@
 		return;
 	}
 
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	ref->refc--;
 	if (ref->refc == 0) {
 		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
 				get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
 			return;
 		}
 	} else if (ref->refc < 0) {
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 
 	trace_imc_event_stop(event, flags);
 }
@@ -1441,7 +1437,7 @@
 	 * no other thread is running any core/thread imc
 	 * events
 	 */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
 		/*
 		 * No core/thread imc events are running in the
@@ -1450,10 +1446,10 @@
 		imc_global_refc.id = IMC_DOMAIN_TRACE;
 		imc_global_refc.refc++;
 	} else {
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);
 
 	event->hw.idx = -1;
 
@@ -1525,10 +1521,10 @@
 	i = 0;
 	for_each_node(nid) {
 		/*
-		 * Mutex lock to avoid races while tracking the number of
+		 * Take the lock to avoid races while tracking the number of
 		 * sessions using the chip's nest pmu units.
 		 */
-		spin_lock_init(&nest_imc_refc[i].lock);
+		spin_lock_init(&nest_imc_refc[i].lock);
 
 		/*
 		 * Loop to init the "id" with the node_id. Variable "i" initialized to
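
The conversion above is mechanical: each imc_pmu_ref keeps the same id/refc bookkeeping, and only the lock type changes, so the first event still starts the hardware counters and the last one still stops them, now under a lock that does not sleep (which matters in the CPU-hotplug offline paths touched above, where mutex_lock() is not allowed). A minimal stand-alone sketch of that pattern, assuming the kernel spinlock API; imc_hw_start()/imc_hw_stop() are hypothetical stand-ins for the opal_imc_counters_start()/stop() calls in the diff:

#include <linux/spinlock.h>

struct imc_pmu_ref {
	spinlock_t lock;	/* protects id and refc */
	unsigned int id;
	int refc;
};

static struct imc_pmu_ref ref = {
	.lock = __SPIN_LOCK_INITIALIZER(ref.lock),
	.id = 0,
	.refc = 0,
};

/* Hypothetical stand-ins for the opal_imc_counters_start()/stop() calls. */
static int imc_hw_start(void) { return 0; }
static void imc_hw_stop(void) { }

/* First user starts the counters; every user bumps the count. */
static int imc_ref_get(void)
{
	int rc = 0;

	spin_lock(&ref.lock);
	if (ref.refc == 0)
		rc = imc_hw_start();
	if (!rc)
		++ref.refc;
	spin_unlock(&ref.lock);
	return rc;
}

/* Last user stops the counters; the count is clamped at zero,
 * mirroring the "Invalid event reference count" handling above. */
static void imc_ref_put(void)
{
	spin_lock(&ref.lock);
	if (--ref.refc == 0)
		imc_hw_stop();
	else if (ref.refc < 0)
		ref.refc = 0;
	spin_unlock(&ref.lock);
}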