.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * Idle daemon for PowerPC. Idle daemon will handle any action |
---|
3 | 4 | * that needs to be taken when the system becomes idle. |
---|
.. | .. |
---|
12 | 13 | * Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com> |
---|
13 | 14 | * |
---|
14 | 15 | * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org> |
---|
15 | | - * |
---|
16 | | - * This program is free software; you can redistribute it and/or |
---|
17 | | - * modify it under the terms of the GNU General Public License |
---|
18 | | - * as published by the Free Software Foundation; either version |
---|
19 | | - * 2 of the License, or (at your option) any later version. |
---|
20 | 16 | */ |
---|
21 | 17 | |
---|
22 | 18 | #include <linux/sched.h> |
---|
.. | .. |
---|
41 | 37 | { |
---|
42 | 38 | ppc_md.power_save = NULL; |
---|
43 | 39 | cpuidle_disable = IDLE_POWERSAVE_OFF; |
---|
44 | | - return 0; |
---|
| 40 | + return 1; |
---|
45 | 41 | } |
---|
46 | 42 | __setup("powersave=off", powersave_off); |
---|
47 | | - |
---|
48 | | -#ifdef CONFIG_HOTPLUG_CPU |
---|
49 | | -void arch_cpu_idle_dead(void) |
---|
50 | | -{ |
---|
51 | | - sched_preempt_enable_no_resched(); |
---|
52 | | - cpu_die(); |
---|
53 | | -} |
---|
54 | | -#endif |
---|
55 | 43 | |
---|
56 | 44 | void arch_cpu_idle(void) |
---|
57 | 45 | { |
---|
.. | .. |
---|
64 | 52 | * interrupts enabled, some don't. |
---|
65 | 53 | */ |
---|
66 | 54 | if (irqs_disabled()) |
---|
67 | | - local_irq_enable(); |
---|
| 55 | + raw_local_irq_enable(); |
---|
68 | 56 | } else { |
---|
69 | | - local_irq_enable(); |
---|
| 57 | + raw_local_irq_enable(); |
---|
70 | 58 | /* |
---|
71 | 59 | * Go into low thread priority and possibly |
---|
72 | 60 | * low power mode. |
---|
.. | .. |
---|
81 | 69 | |
---|
82 | 70 | int powersave_nap; |
---|
83 | 71 | |
---|
| 72 | +#ifdef CONFIG_PPC_970_NAP |
---|
| 73 | +void power4_idle(void) |
---|
| 74 | +{ |
---|
| 75 | + if (!cpu_has_feature(CPU_FTR_CAN_NAP)) |
---|
| 76 | + return; |
---|
| 77 | + |
---|
| 78 | + if (!powersave_nap) |
---|
| 79 | + return; |
---|
| 80 | + |
---|
| 81 | + if (!prep_irq_for_idle()) |
---|
| 82 | + return; |
---|
| 83 | + |
---|
| 84 | + if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
---|
| 85 | + asm volatile(PPC_DSSALL " ; sync" ::: "memory"); |
---|
| 86 | + |
---|
| 87 | + power4_idle_nap(); |
---|
| 88 | + |
---|
| 89 | + /* |
---|
| 90 | + * power4_idle_nap returns to our caller with interrupts enabled |
---|
| 91 | + * (soft and hard). Our caller can cope with either interrupts |
---|
| 92 | + * disabled or enabled upon return. |
---|
| 93 | + */ |
---|
| 94 | +} |
---|
| 95 | +#endif |
---|
| 96 | + |
---|
84 | 97 | #ifdef CONFIG_SYSCTL |
---|
85 | 98 | /* |
---|
86 | 99 | * Register the sysctl to set/clear powersave_nap. |
---|