forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/acpi/processor_idle.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * processor_idle - idle state submodule to the ACPI processor driver
  *
@@ -8,20 +9,6 @@
  * - Added processor hotplug support
  * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  * - Added support for C3 on SMP
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 #define pr_fmt(fmt) "ACPI: " fmt
 
@@ -43,6 +30,7 @@
  */
 #ifdef CONFIG_X86
 #include <asm/apic.h>
+#include <asm/cpu.h>
 #endif
 
 #define ACPI_PROCESSOR_CLASS "processor"
@@ -175,18 +163,10 @@
 }
 
 /* Power(C) State timer broadcast control */
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
-                                        struct acpi_processor_cx *cx,
-                                        int broadcast)
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+                                        struct acpi_processor_cx *cx)
 {
-        int state = cx - pr->power.states;
-
-        if (state >= pr->power.timer_broadcast_on_state) {
-                if (broadcast)
-                        tick_broadcast_enter();
-                else
-                        tick_broadcast_exit();
-        }
+        return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
 }
 
 #else
@@ -194,10 +174,11 @@
 static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                     struct acpi_processor_cx *cstate) { }
 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
-                                        struct acpi_processor_cx *cx,
-                                        int broadcast)
+
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+                                        struct acpi_processor_cx *cx)
 {
+        return false;
 }
 
 #endif
@@ -206,17 +187,18 @@
 static void tsc_check_state(int state)
 {
         switch (boot_cpu_data.x86_vendor) {
+        case X86_VENDOR_HYGON:
         case X86_VENDOR_AMD:
         case X86_VENDOR_INTEL:
         case X86_VENDOR_CENTAUR:
+        case X86_VENDOR_ZHAOXIN:
                 /*
                  * AMD Fam10h TSC will tick in all
                  * C/P/S0/S1 states when this bit is set.
                  */
                 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                         return;
-
-                /*FALL THROUGH*/
+                fallthrough;
         default:
                 /* TSC could halt in idle, so notify users */
                 if (state > ACPI_STATE_C1)
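The `fallthrough;` statement above replaces the bare `/*FALL THROUGH*/` comment, which modern compilers no longer recognize when emitting -Wimplicit-fallthrough warnings. A rough sketch of how the kernel defines this pseudo-keyword (the real definition lives in include/linux/compiler_attributes.h; exact details vary with compiler support):

        #if __has_attribute(__fallthrough__)
        # define fallthrough    __attribute__((__fallthrough__))
        #else
        # define fallthrough    do {} while (0)  /* fallthrough */
        #endif
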
@@ -282,6 +264,13 @@
                   pr->power.states[ACPI_STATE_C2].address,
                   pr->power.states[ACPI_STATE_C3].address));
 
+        snprintf(pr->power.states[ACPI_STATE_C2].desc,
+                 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
+                 pr->power.states[ACPI_STATE_C2].address);
+        snprintf(pr->power.states[ACPI_STATE_C3].desc,
+                 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
+                 pr->power.states[ACPI_STATE_C3].address);
+
         return 0;
 }
 
@@ -304,164 +293,20 @@
 
 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 {
-        acpi_status status;
-        u64 count;
-        int current_count;
-        int i, ret = 0;
-        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-        union acpi_object *cst;
+        int ret;
 
         if (nocst)
                 return -ENODEV;
 
-        current_count = 0;
+        ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
+        if (ret)
+                return ret;
 
-        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
-        if (ACPI_FAILURE(status)) {
-                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
-                return -ENODEV;
-        }
+        if (!pr->power.count)
+                return -EFAULT;
 
-        cst = buffer.pointer;
-
-        /* There must be at least 2 elements */
-        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-                pr_err("not enough elements in _CST\n");
-                ret = -EFAULT;
-                goto end;
-        }
-
-        count = cst->package.elements[0].integer.value;
-
-        /* Validate number of power states. */
-        if (count < 1 || count != cst->package.count - 1) {
-                pr_err("count given by _CST is not valid\n");
-                ret = -EFAULT;
-                goto end;
-        }
-
-        /* Tell driver that at least _CST is supported. */
         pr->flags.has_cst = 1;
-
-        for (i = 1; i <= count; i++) {
-                union acpi_object *element;
-                union acpi_object *obj;
-                struct acpi_power_register *reg;
-                struct acpi_processor_cx cx;
-
-                memset(&cx, 0, sizeof(cx));
-
-                element = &(cst->package.elements[i]);
-                if (element->type != ACPI_TYPE_PACKAGE)
-                        continue;
-
-                if (element->package.count != 4)
-                        continue;
-
-                obj = &(element->package.elements[0]);
-
-                if (obj->type != ACPI_TYPE_BUFFER)
-                        continue;
-
-                reg = (struct acpi_power_register *)obj->buffer.pointer;
-
-                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
-                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
-                        continue;
-
-                /* There should be an easy way to extract an integer... */
-                obj = &(element->package.elements[1]);
-                if (obj->type != ACPI_TYPE_INTEGER)
-                        continue;
-
-                cx.type = obj->integer.value;
-                /*
-                 * Some buggy BIOSes won't list C1 in _CST -
-                 * Let acpi_processor_get_power_info_default() handle them later
-                 */
-                if (i == 1 && cx.type != ACPI_STATE_C1)
-                        current_count++;
-
-                cx.address = reg->address;
-                cx.index = current_count + 1;
-
-                cx.entry_method = ACPI_CSTATE_SYSTEMIO;
-                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
-                        if (acpi_processor_ffh_cstate_probe
-                            (pr->id, &cx, reg) == 0) {
-                                cx.entry_method = ACPI_CSTATE_FFH;
-                        } else if (cx.type == ACPI_STATE_C1) {
-                                /*
-                                 * C1 is a special case where FIXED_HARDWARE
-                                 * can be handled in non-MWAIT way as well.
-                                 * In that case, save this _CST entry info.
-                                 * Otherwise, ignore this info and continue.
-                                 */
-                                cx.entry_method = ACPI_CSTATE_HALT;
-                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
-                        } else {
-                                continue;
-                        }
-                        if (cx.type == ACPI_STATE_C1 &&
-                            (boot_option_idle_override == IDLE_NOMWAIT)) {
-                                /*
-                                 * In most cases the C1 space_id obtained from
-                                 * _CST object is FIXED_HARDWARE access mode.
-                                 * But when the option of idle=halt is added,
-                                 * the entry_method type should be changed from
-                                 * CSTATE_FFH to CSTATE_HALT.
-                                 * When the option of idle=nomwait is added,
-                                 * the C1 entry_method type should be
-                                 * CSTATE_HALT.
-                                 */
-                                cx.entry_method = ACPI_CSTATE_HALT;
-                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
-                        }
-                } else {
-                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
-                                 cx.address);
-                }
-
-                if (cx.type == ACPI_STATE_C1) {
-                        cx.valid = 1;
-                }
-
-                obj = &(element->package.elements[2]);
-                if (obj->type != ACPI_TYPE_INTEGER)
-                        continue;
-
-                cx.latency = obj->integer.value;
-
-                obj = &(element->package.elements[3]);
-                if (obj->type != ACPI_TYPE_INTEGER)
-                        continue;
-
-                current_count++;
-                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
-
-                /*
-                 * We support total ACPI_PROCESSOR_MAX_POWER - 1
-                 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
-                 */
-                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
-                        pr_warn("Limiting number of power states to max (%d)\n",
-                                ACPI_PROCESSOR_MAX_POWER);
-                        pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
-                        break;
-                }
-        }
-
-        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
-                          current_count));
-
-        /* Validate number of power states discovered */
-        if (current_count < 2)
-                ret = -EFAULT;
-
-      end:
-        kfree(buffer.pointer);
-
-        return ret;
+        return 0;
 }
 
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
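The open-coded `_CST` package walk removed above now lives in a shared helper, also used by intel_idle, that performs the same validation and fills `pr->power` directly. Its contract, as declared in include/acpi/processor.h (prototype reproduced from memory; parameter names may differ slightly):

        int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
                                        struct acpi_processor_power *info);
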
@@ -652,8 +497,7 @@
         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                 if (pr->power.states[i].valid) {
                         pr->power.count = i;
-                        if (pr->power.states[i].type >= ACPI_STATE_C2)
-                                pr->flags.power = 1;
+                        pr->flags.power = 1;
                 }
         }
 
@@ -686,6 +530,36 @@
         return bm_status;
 }
 
+static void wait_for_freeze(void)
+{
+#ifdef CONFIG_X86
+        /* No delay is needed if we are in guest */
+        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+                return;
+        /*
+         * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
+         * not this code.  Assume that any Intel systems using this
+         * are ancient and may need the dummy wait.  This also assumes
+         * that the motivating chipset issue was Intel-only.
+         */
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+                return;
+#endif
+        /*
+         * Dummy wait op - must do something useless after P_LVL2 read
+         * because chipsets cannot guarantee that STPCLK# signal gets
+         * asserted in time to freeze execution properly
+         *
+         * This workaround has been in place since the original ACPI
+         * implementation was merged, circa 2002.
+         *
+         * If a profile is pointing to this instruction, please first
+         * consider moving your system to a more modern idle
+         * mechanism.
+         */
+        inl(acpi_gbl_FADT.xpm_timer_block.address);
+}
+
 /**
  * acpi_idle_do_entry - enter idle state using the appropriate method
  * @cx: cstate data
@@ -702,10 +576,7 @@
         } else {
                 /* IO port based C-state */
                 inb(cx->address);
-                /* Dummy wait op - must do something useless after P_LVL2 read
-                   because chipsets cannot guarantee that STPCLK# signal
-                   gets asserted in time to freeze execution properly. */
-                inl(acpi_gbl_FADT.xpm_timer_block.address);
+                wait_for_freeze();
         }
 }
 
@@ -726,10 +597,13 @@
                 safe_halt();
         else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                 inb(cx->address);
-                /* See comment in acpi_idle_do_entry() */
-                inl(acpi_gbl_FADT.xpm_timer_block.address);
+                wait_for_freeze();
         } else
                 return -ENODEV;
+
+#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
+        cond_wakeup_cpu0();
+#endif
 }
 
 /* Never reached */
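`cond_wakeup_cpu0()` is why `<asm/cpu.h>` is now included at the top of the file: when CPU0 hotplug is enabled, a CPU0 parked in acpi_idle_play_dead() must be able to come back online. A sketch of the x86 side, reproduced from memory for orientation only (the real implementation lives in arch/x86/kernel/smpboot.c):

        void cond_wakeup_cpu0(void)
        {
                /* Wake up CPU0 if it is parked in play_dead() and the
                 * CPU0-hotplug bring-up path is enabled. */
                if (smp_processor_id() == 0 && enable_start_cpu0)
                        start_cpu0();
        }
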
@@ -747,32 +621,43 @@
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @drv: cpuidle driver
  * @pr: Target processor
  * @cx: Target state context
- * @timer_bc: Whether or not to change timer mode to broadcast
+ * @index: index of target state
  */
-static void acpi_idle_enter_bm(struct acpi_processor *pr,
-                               struct acpi_processor_cx *cx, bool timer_bc)
+static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
+                                        struct acpi_processor *pr,
+                                        struct acpi_processor_cx *cx,
+                                        int index)
 {
-        acpi_unlazy_tlb(smp_processor_id());
-
-        /*
-         * Must be done before busmaster disable as we might need to
-         * access HPET !
-         */
-        if (timer_bc)
-                lapic_timer_state_broadcast(pr, cx, 1);
+        static struct acpi_processor_cx safe_cx = {
+                .entry_method = ACPI_CSTATE_HALT,
+        };
 
         /*
          * disable bus master
          * bm_check implies we need ARB_DIS
         * bm_control implies whether we can do ARB_DIS
         *
-         * That leaves a case where bm_check is set and bm_control is
-         * not set. In that case we cannot do much, we enter C3
-         * without doing anything.
+         * That leaves a case where bm_check is set and bm_control is not set.
+         * In that case we cannot do much, we enter C3 without doing anything.
         */
-        if (pr->flags.bm_control) {
+        bool dis_bm = pr->flags.bm_control;
+
+        /* If we can skip BM, demote to a safe state. */
+        if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
+                dis_bm = false;
+                index = drv->safe_state_index;
+                if (index >= 0) {
+                        cx = this_cpu_read(acpi_cstate[index]);
+                } else {
+                        cx = &safe_cx;
+                        index = -EBUSY;
+                }
+        }
+
+        if (dis_bm) {
                 raw_spin_lock(&c3_lock);
                 c3_cpu_count++;
                 /* Disable bus master arbitration when all CPUs are in C3 */
@@ -781,21 +666,24 @@
                 raw_spin_unlock(&c3_lock);
         }
 
+        rcu_idle_enter();
+
         acpi_idle_do_entry(cx);
 
+        rcu_idle_exit();
+
         /* Re-enable bus master arbitration */
-        if (pr->flags.bm_control) {
+        if (dis_bm) {
                 raw_spin_lock(&c3_lock);
                 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                 c3_cpu_count--;
                 raw_spin_unlock(&c3_lock);
         }
 
-        if (timer_bc)
-                lapic_timer_state_broadcast(pr, cx, 0);
+        return index;
 }
 
-static int acpi_idle_enter(struct cpuidle_device *dev,
+static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
 {
         struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
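The explicit rcu_idle_enter()/rcu_idle_exit() bracketing pairs with the CPUIDLE_FLAG_RCU_IDLE flag set during state setup further down: for states carrying that flag, the cpuidle core leaves RCU-idle handling to the driver so that it can happen after the c3_lock/ARB_DIS work, as close to the hardware idle entry as possible. A hedged sketch of the core-side counterpart in cpuidle_enter_state() (see drivers/cpuidle/cpuidle.c for the real logic):

        if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
                rcu_idle_enter();
        entered_state = target_state->enter(dev, drv, index);
        if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
                rcu_idle_exit();
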
@@ -806,37 +694,26 @@
                 return -EINVAL;
 
         if (cx->type != ACPI_STATE_C1) {
+                if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
+                        return acpi_idle_enter_bm(drv, pr, cx, index);
+
+                /* C2 to C1 demotion. */
                 if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
                         index = ACPI_IDLE_STATE_START;
                         cx = per_cpu(acpi_cstate[index], dev->cpu);
-                } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
-                        if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
-                                acpi_idle_enter_bm(pr, cx, true);
-                                return index;
-                        } else if (drv->safe_state_index >= 0) {
-                                index = drv->safe_state_index;
-                                cx = per_cpu(acpi_cstate[index], dev->cpu);
-                        } else {
-                                acpi_safe_halt();
-                                return -EBUSY;
-                        }
                 }
         }
-
-        lapic_timer_state_broadcast(pr, cx, 1);
 
         if (cx->type == ACPI_STATE_C3)
                 ACPI_FLUSH_CPU_CACHE();
 
         acpi_idle_do_entry(cx);
 
-        lapic_timer_state_broadcast(pr, cx, 0);
-
         return index;
 }
 
-static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
-                                   struct cpuidle_driver *drv, int index)
+static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
+                                            struct cpuidle_driver *drv, int index)
 {
         struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
@@ -844,16 +721,24 @@
                 struct acpi_processor *pr = __this_cpu_read(processors);
 
                 if (unlikely(!pr))
-                        return;
+                        return 0;
 
                 if (pr->flags.bm_check) {
-                        acpi_idle_enter_bm(pr, cx, false);
-                        return;
+                        u8 bm_sts_skip = cx->bm_sts_skip;
+
+                        /* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
+                        cx->bm_sts_skip = 1;
+                        acpi_idle_enter_bm(drv, pr, cx, index);
+                        cx->bm_sts_skip = bm_sts_skip;
+
+                        return 0;
                 } else {
                         ACPI_FLUSH_CPU_CACHE();
                 }
         }
         acpi_idle_do_entry(cx);
+
+        return 0;
 }
 
 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
@@ -861,17 +746,28 @@
 {
         int i, count = ACPI_IDLE_STATE_START;
         struct acpi_processor_cx *cx;
+        struct cpuidle_state *state;
 
         if (max_cstate == 0)
                 max_cstate = 1;
 
         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+                state = &acpi_idle_driver.states[count];
                 cx = &pr->power.states[i];
 
                 if (!cx->valid)
                         continue;
 
                 per_cpu(acpi_cstate[count], dev->cpu) = cx;
+
+                if (lapic_timer_needs_broadcast(pr, cx))
+                        state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+                if (cx->type == ACPI_STATE_C3) {
+                        state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+                        if (pr->flags.bm_check)
+                                state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+                }
 
                 count++;
                 if (count == CPUIDLE_STATE_MAX)
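Setting CPUIDLE_FLAG_TIMER_STOP here is what makes the deleted lapic_timer_state_broadcast() calls unnecessary: the cpuidle core switches to the broadcast timer around state entry on the driver's behalf. Roughly, in cpuidle_enter_state() (a sketch; the real code also demotes to a shallower state when tick_broadcast_enter() fails):

        bool broadcast = !!(drv->states[index].flags & CPUIDLE_FLAG_TIMER_STOP);

        if (broadcast)
                tick_broadcast_enter();
        entered_state = target_state->enter(dev, drv, index);
        if (broadcast)
                tick_broadcast_exit();
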
@@ -944,7 +840,6 @@
 
 static inline void acpi_processor_cstate_first_run_checks(void)
 {
-        acpi_status status;
         static int first_run;
 
         if (first_run)
@@ -956,13 +851,10 @@
                   max_cstate);
         first_run++;
 
-        if (acpi_gbl_FADT.cst_control && !nocst) {
-                status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
-                                            acpi_gbl_FADT.cst_control, 8);
-                if (ACPI_FAILURE(status))
-                        ACPI_EXCEPTION((AE_INFO, status,
-                                        "Notifying BIOS of _CST ability failed"));
-        }
+        if (nocst)
+                return;
+
+        acpi_processor_claim_cst_control();
 }
 
 #else
@@ -1205,6 +1097,11 @@
         return 0;
 }
 
+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+        return -EOPNOTSUPP;
+}
+
 static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 {
         int ret, i;
@@ -1212,6 +1109,11 @@
         acpi_handle handle = pr->handle, pr_ahandle;
         struct acpi_device *d = NULL;
         struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+
+        /* make sure our architecture has support */
+        ret = acpi_processor_ffh_lpi_probe(pr->id);
+        if (ret == -EOPNOTSUPP)
+                return ret;
 
         if (!osc_pc_lpi_support_confirmed)
                 return -EOPNOTSUPP;
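With the weak stub above now returning -EOPNOTSUPP, this early probe makes acpi_processor_get_lpi_info() bail out before parsing _LPI on architectures that do not implement FFH-based LPI states. An architecture that does support them overrides the weak symbol (arm64, for instance, validates the CPU through its cpuidle backend); a deliberately minimal, hypothetical override:

        /* Hypothetical arch-side override of the weak stub. */
        int acpi_processor_ffh_lpi_probe(unsigned int cpu)
        {
                return 0;       /* FFH LPI states usable on this CPU */
        }
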
@@ -1262,11 +1164,6 @@
         pr->flags.power = 1;
 
         return 0;
-}
-
-int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
-{
-        return -ENODEV;
 }
 
 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)