
hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/drivers/cpuidle/governors/ladder.c
@@ -27,8 +27,8 @@
 	struct {
 		u32 promotion_count;
 		u32 demotion_count;
-		u32 promotion_time;
-		u32 demotion_time;
+		u64 promotion_time_ns;
+		u64 demotion_time_ns;
 	} threshold;
 	struct {
 		int promotion_count;
@@ -38,7 +38,6 @@
 
 struct ladder_device {
 	struct ladder_device_state states[CPUIDLE_STATE_MAX];
-	int last_state_idx;
 };
 
 static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
@@ -49,12 +48,13 @@
  * @old_idx: the current state index
  * @new_idx: the new target state index
  */
-static inline void ladder_do_selection(struct ladder_device *ldev,
+static inline void ladder_do_selection(struct cpuidle_device *dev,
+				       struct ladder_device *ldev,
 				       int old_idx, int new_idx)
 {
 	ldev->states[old_idx].stats.promotion_count = 0;
 	ldev->states[old_idx].stats.demotion_count = 0;
-	ldev->last_state_idx = new_idx;
+	dev->last_state_idx = new_idx;
 }
 
 /**
@@ -68,55 +68,54 @@
 {
 	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
 	struct ladder_device_state *last_state;
-	int last_residency, last_idx = ldev->last_state_idx;
+	int last_idx = dev->last_state_idx;
 	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
-	int latency_req = cpuidle_governor_latency_req(dev->cpu);
+	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+	s64 last_residency;
 
 	/* Special case when user has set very strict latency requirement */
 	if (unlikely(latency_req == 0)) {
-		ladder_do_selection(ldev, last_idx, 0);
+		ladder_do_selection(dev, ldev, last_idx, 0);
 		return 0;
 	}
 
 	last_state = &ldev->states[last_idx];
 
-	last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
+	last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;
 
 	/* consider promotion */
 	if (last_idx < drv->state_count - 1 &&
-	    !drv->states[last_idx + 1].disabled &&
 	    !dev->states_usage[last_idx + 1].disable &&
-	    last_residency > last_state->threshold.promotion_time &&
-	    drv->states[last_idx + 1].exit_latency <= latency_req) {
+	    last_residency > last_state->threshold.promotion_time_ns &&
+	    drv->states[last_idx + 1].exit_latency_ns <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
-			ladder_do_selection(ldev, last_idx, last_idx + 1);
+			ladder_do_selection(dev, ldev, last_idx, last_idx + 1);
 			return last_idx + 1;
 		}
 	}
 
 	/* consider demotion */
 	if (last_idx > first_idx &&
-	    (drv->states[last_idx].disabled ||
-	     dev->states_usage[last_idx].disable ||
-	     drv->states[last_idx].exit_latency > latency_req)) {
+	    (dev->states_usage[last_idx].disable ||
+	     drv->states[last_idx].exit_latency_ns > latency_req)) {
 		int i;
 
 		for (i = last_idx - 1; i > first_idx; i--) {
-			if (drv->states[i].exit_latency <= latency_req)
+			if (drv->states[i].exit_latency_ns <= latency_req)
 				break;
 		}
-		ladder_do_selection(ldev, last_idx, i);
+		ladder_do_selection(dev, ldev, last_idx, i);
 		return i;
 	}
 
 	if (last_idx > first_idx &&
-	    last_residency < last_state->threshold.demotion_time) {
+	    last_residency < last_state->threshold.demotion_time_ns) {
 		last_state->stats.demotion_count++;
 		last_state->stats.promotion_count = 0;
 		if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
-			ladder_do_selection(ldev, last_idx, last_idx - 1);
+			ladder_do_selection(dev, ldev, last_idx, last_idx - 1);
 			return last_idx - 1;
 		}
 	}
@@ -139,7 +138,7 @@
 	struct ladder_device_state *lstate;
 	struct cpuidle_state *state;
 
-	ldev->last_state_idx = first_idx;
+	dev->last_state_idx = first_idx;
 
 	for (i = first_idx; i < drv->state_count; i++) {
 		state = &drv->states[i];
@@ -152,9 +151,9 @@
 		lstate->threshold.demotion_count = DEMOTION_COUNT;
 
 		if (i < drv->state_count - 1)
-			lstate->threshold.promotion_time = state->exit_latency;
+			lstate->threshold.promotion_time_ns = state->exit_latency_ns;
 		if (i > first_idx)
-			lstate->threshold.demotion_time = state->exit_latency;
+			lstate->threshold.demotion_time_ns = state->exit_latency_ns;
 	}
 
 	return 0;
@@ -167,9 +166,8 @@
  */
 static void ladder_reflect(struct cpuidle_device *dev, int index)
 {
-	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
 	if (index > 0)
-		ldev->last_state_idx = index;
+		dev->last_state_idx = index;
 }
 
 static struct cpuidle_governor ladder_governor = {
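
Taken together, the hunks above make two changes to the ladder governor: the threshold and residency bookkeeping moves from 32-bit microsecond fields (promotion_time, demotion_time, exit_latency, cpuidle_get_last_residency()) to 64-bit nanosecond fields (promotion_time_ns, demotion_time_ns, exit_latency_ns, dev->last_residency_ns), and the governor-private last_state_idx is dropped in favour of dev->last_state_idx on struct cpuidle_device; the drv->states[].disabled checks go away as well, leaving only dev->states_usage[].disable. The user-space sketch below mirrors the promotion/demotion counting with the new nanosecond fields; the two-state table, the constants, and the select_state()/main() harness are illustrative assumptions, not code from this file.

/*
 * Minimal user-space sketch of the ladder promotion/demotion decision
 * after this patch, using nanosecond-resolution fields.  The state table,
 * thresholds and harness below are illustrative assumptions only; the
 * real governor lives in kernel/drivers/cpuidle/governors/ladder.c.
 */
#include <stdint.h>
#include <stdio.h>

#define PROMOTION_COUNT 4
#define DEMOTION_COUNT  1

struct threshold {
	uint32_t promotion_count;
	uint32_t demotion_count;
	uint64_t promotion_time_ns;	/* was u32 promotion_time (us) */
	uint64_t demotion_time_ns;	/* was u32 demotion_time (us) */
};

struct stats {
	int promotion_count;
	int demotion_count;
};

struct ladder_state {
	struct stats stats;
	struct threshold threshold;
	int64_t exit_latency_ns;	/* stands in for drv->states[i].exit_latency_ns */
};

/* Mirror of ladder_do_selection(): reset the counters of the state we leave. */
static int do_selection(struct ladder_state *states, int old_idx, int new_idx)
{
	states[old_idx].stats.promotion_count = 0;
	states[old_idx].stats.demotion_count = 0;
	return new_idx;
}

/*
 * Decide the next state index.  last_residency_ns is assumed to already
 * have the exit latency subtracted, as the kernel does with
 * dev->last_residency_ns - drv->states[last_idx].exit_latency_ns.
 */
static int select_state(struct ladder_state *states, int state_count,
			int last_idx, int64_t last_residency_ns,
			int64_t latency_req_ns)
{
	struct ladder_state *last = &states[last_idx];

	/* consider promotion to a deeper state */
	if (last_idx < state_count - 1 &&
	    last_residency_ns > (int64_t)last->threshold.promotion_time_ns &&
	    states[last_idx + 1].exit_latency_ns <= latency_req_ns) {
		last->stats.promotion_count++;
		last->stats.demotion_count = 0;
		if (last->stats.promotion_count >= (int)last->threshold.promotion_count)
			return do_selection(states, last_idx, last_idx + 1);
	}

	/* consider demotion to a shallower state */
	if (last_idx > 0 &&
	    last_residency_ns < (int64_t)last->threshold.demotion_time_ns) {
		last->stats.demotion_count++;
		last->stats.promotion_count = 0;
		if (last->stats.demotion_count >= (int)last->threshold.demotion_count)
			return do_selection(states, last_idx, last_idx - 1);
	}

	return last_idx;
}

int main(void)
{
	/* Two illustrative states: a shallow one and a deeper one. */
	struct ladder_state states[2] = {
		{ .exit_latency_ns = 1000,
		  .threshold = { PROMOTION_COUNT, DEMOTION_COUNT, 1000, 0 } },
		{ .exit_latency_ns = 50000,
		  .threshold = { PROMOTION_COUNT, DEMOTION_COUNT, 50000, 50000 } },
	};
	int idx = 0;

	/* Repeated long (200 us) residencies under a 100 us latency limit. */
	for (int i = 0; i < 6; i++) {
		idx = select_state(states, 2, idx, 200000, 100000);
		printf("iteration %d -> state %d\n", i, idx);
	}
	return 0;
}

Compiled as an ordinary C program, the index climbs from state 0 to state 1 once PROMOTION_COUNT consecutive long residencies have been observed and then stays there, which is the same counting behaviour the patch preserves while only changing the units and where last_state_idx lives.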