From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] cpufreq: schedutil: unify rate limits and add Rockchip target_load tunable
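
Bring the schedutil governor closer to current upstream code and add a
Rockchip-specific tunable:

- Replace the separate up_rate_limit_us/down_rate_limit_us tunables and
  the min_rate_limit_ns bookkeeping with a single rate_limit_us tunable
  and freq_update_delay_ns.
- Drop the per-CPU iowait boost minimum derived from cpuinfo.min_freq
  and use a fixed IOWAIT_BOOST_MIN of SCHED_CAPACITY_SCALE / 8.
- Re-evaluate need_freq_update against CPUFREQ_NEED_UPDATE_LIMITS and
  let the trace_android_vh_map_util_freq vendor hook override the
  mapped frequency.
- Under CONFIG_ARCH_ROCKCHIP, add a target_load tunable (1..100,
  default 80) and compute the requested frequency as

      freq = (100 * freq / target_load) * util / max

  With the default target_load of 80 this is (100/80) = 1.25 *
  freq * util / max, i.e. roughly the 25% headroom that the generic
  map_util_freq() mapping normally applies.
- Switch the sysfs tunables to default_groups/ATTRIBUTE_GROUPS(), use
  synchronize_rcu() instead of synchronize_sched(), replace
  dynamic_switching with CPUFREQ_GOV_DYNAMIC_SWITCHING, register via
  cpufreq_governor_init(), and export schedutil_cpu_util() and
  schedutil_gov.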
---
kernel/kernel/sched/cpufreq_schedutil.c | 247 ++++++++++++++++++-------------------------------
1 file changed, 92 insertions(+), 155 deletions(-)
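
Note on the iowait boost change: assuming the usual SCHED_CAPACITY_SCALE
of 1024, IOWAIT_BOOST_MIN is 128. The first wakeup after IO boosts the
effective utilization to at least 128; each further "frequent and
successive" IO wakeup doubles the boost (128 -> 256 -> 512 -> 1024), up
to the utilization of the maximum OPP, and without new IO wakeups the
boost is halved on every update until it falls below 128 and is cleared.
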
diff --git a/kernel/kernel/sched/cpufreq_schedutil.c b/kernel/kernel/sched/cpufreq_schedutil.c
index 2d2519b..26cf3c3 100644
--- a/kernel/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/kernel/sched/cpufreq_schedutil.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CPUFreq governor based on scheduler-provided CPU utilization data.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -15,11 +12,16 @@
#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
+#include <trace/hooks/sched.h>
+
+#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
struct sugov_tunables {
struct gov_attr_set attr_set;
- unsigned int up_rate_limit_us;
- unsigned int down_rate_limit_us;
+ unsigned int rate_limit_us;
+#ifdef CONFIG_ARCH_ROCKCHIP
+ unsigned int target_load;
+#endif
};
struct sugov_policy {
@@ -30,12 +32,9 @@
raw_spinlock_t update_lock; /* For shared policies */
u64 last_freq_update_time;
- s64 min_rate_limit_ns;
- s64 up_rate_delay_ns;
- s64 down_rate_delay_ns;
+ s64 freq_update_delay_ns;
unsigned int next_freq;
unsigned int cached_raw_freq;
- unsigned int prev_cached_raw_freq;
/* The next fields are only needed if fast switch cannot be used: */
struct irq_work irq_work;
@@ -59,7 +58,6 @@
u64 last_update;
unsigned long bw_dl;
- unsigned long min;
unsigned long max;
/* The field below is for single-CPU policies only: */
@@ -100,44 +98,19 @@
return true;
}
- /* No need to recalculate next freq for min_rate_limit_us
- * at least. However we might still decide to further rate
- * limit once frequency change direction is decided, according
- * to the separate rate limits.
- */
-
- delta_ns = time - sg_policy->last_freq_update_time;
- return delta_ns >= sg_policy->min_rate_limit_ns;
-}
-
-static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
- unsigned int next_freq)
-{
- s64 delta_ns;
-
delta_ns = time - sg_policy->last_freq_update_time;
- if (next_freq > sg_policy->next_freq &&
- delta_ns < sg_policy->up_rate_delay_ns)
- return true;
-
- if (next_freq < sg_policy->next_freq &&
- delta_ns < sg_policy->down_rate_delay_ns)
- return true;
-
- return false;
+ return delta_ns >= sg_policy->freq_update_delay_ns;
}
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
- if (sg_policy->next_freq == next_freq)
- return false;
-
- if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
- /* Restore cached freq as next_freq is not changed */
- sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
- return false;
+ if (!sg_policy->need_freq_update) {
+ if (sg_policy->next_freq == next_freq)
+ return false;
+ } else {
+ sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
}
sg_policy->next_freq = next_freq;
@@ -149,22 +122,8 @@
static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
- struct cpufreq_policy *policy = sg_policy->policy;
- int cpu;
-
- if (!sugov_update_next_freq(sg_policy, time, next_freq))
- return;
-
- next_freq = cpufreq_driver_fast_switch(policy, next_freq);
- if (!next_freq)
- return;
-
- policy->cur = next_freq;
-
- if (trace_cpu_frequency_enabled()) {
- for_each_cpu(cpu, policy->cpus)
- trace_cpu_frequency(next_freq, cpu);
- }
+ if (sugov_update_next_freq(sg_policy, time, next_freq))
+ cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
}
static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
@@ -207,20 +166,25 @@
struct cpufreq_policy *policy = sg_policy->policy;
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
+ unsigned long next_freq = 0;
- freq = map_util_freq(util, freq, max);
+ trace_android_vh_map_util_freq(util, freq, max, &next_freq, policy,
+ &sg_policy->need_freq_update);
+ if (next_freq)
+ freq = next_freq;
+ else
+#ifdef CONFIG_ARCH_ROCKCHIP
+ freq = div64_ul((u64)(100 * freq / sg_policy->tunables->target_load) * util, max);
+#else
+ freq = map_util_freq(util, freq, max);
+#endif
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
return sg_policy->next_freq;
- sg_policy->need_freq_update = false;
- sg_policy->prev_cached_raw_freq = sg_policy->cached_raw_freq;
sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
-
-extern long
-schedtune_cpu_margin_with(unsigned long util, int cpu, struct task_struct *p);
/*
* This function computes an effective utilization for the given CPU, to be
@@ -233,9 +197,6 @@
*
* Where the cfs,rt and dl util numbers are tracked with the same metric and
* synchronized windows and are thus directly comparable.
- *
- * The @util parameter passed to this function is assumed to be the aggregation
- * of RT and CFS util numbers. The cases of DL and IRQ are managed here.
*
* The cfs,rt,dl utilization are the running times measured with rq->clock_task
* which excludes things like IRQ and steal-time. These latter are then accrued
@@ -252,7 +213,7 @@
unsigned long dl_util, util, irq;
struct rq *rq = cpu_rq(cpu);
- if (sched_feat(SUGOV_RT_MAX_FREQ) && !IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
+ if (!uclamp_is_used() &&
type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
return max;
}
@@ -280,11 +241,7 @@
*/
util = util_cfs + cpu_util_rt(rq);
if (type == FREQUENCY_UTIL)
-#ifdef CONFIG_SCHED_TUNE
- util += schedtune_cpu_margin_with(util, cpu, p);
-#else
util = uclamp_rq_util_with(rq, util, p);
-#endif
dl_util = cpu_util_dl(rq);
@@ -312,9 +269,9 @@
* irq metric. Because IRQ/steal time is hidden from the task clock we
* need to scale the task numbers:
*
- * 1 - irq
- * U' = irq + ------- * U
- * max
+ * max - irq
+ * U' = irq + --------- * U
+ * max
*/
util = scale_irq_capacity(util, irq, max);
util += irq;
@@ -334,18 +291,18 @@
return min(max, util);
}
+EXPORT_SYMBOL_GPL(schedutil_cpu_util);
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
- unsigned long util_cfs = cpu_util_cfs(rq);
- unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+ unsigned long util = cpu_util_cfs(rq);
+ unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu->max = max;
sg_cpu->bw_dl = cpu_bw_dl(rq);
- return schedutil_cpu_util(sg_cpu->cpu, util_cfs, max,
- FREQUENCY_UTIL, NULL);
+ return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}
/**
@@ -356,8 +313,8 @@
*
* The IO wait boost of a task is disabled after a tick since the last update
* of a CPU. If a new IO wait boost is requested after more then a tick, then
- * we enable the boost starting from the minimum frequency, which improves
- * energy efficiency by ignoring sporadic wakeups from IO.
+ * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
+ * efficiency by ignoring sporadic wakeups from IO.
*/
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
bool set_iowait_boost)
@@ -368,7 +325,7 @@
if (delta_ns <= TICK_NSEC)
return false;
- sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
+ sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
sg_cpu->iowait_boost_pending = set_iowait_boost;
return true;
@@ -382,8 +339,9 @@
*
* Each time a task wakes up after an IO operation, the CPU utilization can be
* boosted to a certain utilization which doubles at each "frequent and
- * successive" wakeup from IO, ranging from the utilization of the minimum
- * OPP to the utilization of the maximum OPP.
+ * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
+ * of the maximum OPP.
+ *
* To keep doubling, an IO boost has to be requested at least once per tick,
* otherwise we restart from the utilization of the minimum OPP.
*/
@@ -414,7 +372,7 @@
}
/* First wakeup after IO: start with minimum boost */
- sg_cpu->iowait_boost = sg_cpu->min;
+ sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
/**
@@ -454,7 +412,7 @@
* No boost pending; reduce the boost value.
*/
sg_cpu->iowait_boost >>= 1;
- if (sg_cpu->iowait_boost < sg_cpu->min) {
+ if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
sg_cpu->iowait_boost = 0;
return util;
}
@@ -500,7 +458,7 @@
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long util, max;
unsigned int next_f;
- bool busy;
+ unsigned int cached_freq = sg_policy->cached_raw_freq;
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
@@ -510,9 +468,6 @@
if (!sugov_should_update_freq(sg_policy, time))
return;
- /* Limits may have changed, don't skip frequency update */
- busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
-
util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
util = sugov_iowait_apply(sg_cpu, time, util, max);
@@ -521,11 +476,11 @@
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*/
- if (busy && next_f < sg_policy->next_freq) {
+ if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
next_f = sg_policy->next_freq;
/* Restore cached freq as next_freq has changed */
- sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+ sg_policy->cached_raw_freq = cached_freq;
}
/*
@@ -637,32 +592,15 @@
return container_of(attr_set, struct sugov_tunables, attr_set);
}
-static DEFINE_MUTEX(min_rate_lock);
-
-static void update_min_rate_limit_ns(struct sugov_policy *sg_policy)
-{
- mutex_lock(&min_rate_lock);
- sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
- sg_policy->down_rate_delay_ns);
- mutex_unlock(&min_rate_lock);
-}
-
-static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
- return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
+ return sprintf(buf, "%u\n", tunables->rate_limit_us);
}
-static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
-{
- struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
-
- return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
-}
-
-static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
- const char *buf, size_t count)
+static ssize_t
+rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
struct sugov_policy *sg_policy;
@@ -671,44 +609,52 @@
if (kstrtouint(buf, 10, &rate_limit_us))
return -EINVAL;
- tunables->up_rate_limit_us = rate_limit_us;
+ tunables->rate_limit_us = rate_limit_us;
- list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
- sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
- update_min_rate_limit_ns(sg_policy);
- }
+ list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
+ sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
return count;
}
-static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
- const char *buf, size_t count)
+static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+
+#ifdef CONFIG_ARCH_ROCKCHIP
+static ssize_t target_load_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
- struct sugov_policy *sg_policy;
- unsigned int rate_limit_us;
- if (kstrtouint(buf, 10, &rate_limit_us))
+ return sprintf(buf, "%u\n", tunables->target_load);
+}
+
+static ssize_t
+target_load_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+ unsigned int target_load;
+
+ if (kstrtouint(buf, 10, &target_load))
return -EINVAL;
- tunables->down_rate_limit_us = rate_limit_us;
+ if (!target_load || (target_load > 100))
+ return -EINVAL;
- list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
- sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
- update_min_rate_limit_ns(sg_policy);
- }
+ tunables->target_load = target_load;
return count;
}
-static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
-static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
+static struct governor_attr target_load = __ATTR_RW(target_load);
+#endif
-static struct attribute *sugov_attributes[] = {
- &up_rate_limit_us.attr,
- &down_rate_limit_us.attr,
+static struct attribute *sugov_attrs[] = {
+ &rate_limit_us.attr,
+#ifdef CONFIG_ARCH_ROCKCHIP
+ &target_load.attr,
+#endif
NULL
};
+ATTRIBUTE_GROUPS(sugov);
static void sugov_tunables_free(struct kobject *kobj)
{
@@ -718,14 +664,14 @@
}
static struct kobj_type sugov_tunables_ktype = {
- .default_attrs = sugov_attributes,
+ .default_groups = sugov_groups,
.sysfs_ops = &governor_sysfs_ops,
.release = &sugov_tunables_free,
};
/********************** cpufreq governor interface *********************/
-static struct cpufreq_governor schedutil_gov;
+struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
@@ -868,8 +814,10 @@
goto stop_kthread;
}
- tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
- tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
+ tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
+#ifdef CONFIG_ARCH_ROCKCHIP
+ tunables->target_load = 80;
+#endif
policy->governor_data = sg_policy;
sg_policy->tunables = tunables;
@@ -928,18 +876,14 @@
struct sugov_policy *sg_policy = policy->governor_data;
unsigned int cpu;
- sg_policy->up_rate_delay_ns =
- sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
- sg_policy->down_rate_delay_ns =
- sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
- update_min_rate_limit_ns(sg_policy);
+ sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
sg_policy->last_freq_update_time = 0;
sg_policy->next_freq = 0;
sg_policy->work_in_progress = false;
sg_policy->limits_changed = false;
- sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = 0;
- sg_policy->prev_cached_raw_freq = 0;
+
+ sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -947,9 +891,6 @@
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
- sg_cpu->min =
- (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
- policy->cpuinfo.max_freq;
}
for_each_cpu(cpu, policy->cpus) {
@@ -971,7 +912,7 @@
for_each_cpu(cpu, policy->cpus)
cpufreq_remove_update_util_hook(cpu);
- synchronize_sched();
+ synchronize_rcu();
if (!policy->fast_switch_enabled) {
irq_work_sync(&sg_policy->irq_work);
@@ -992,10 +933,10 @@
sg_policy->limits_changed = true;
}
-static struct cpufreq_governor schedutil_gov = {
+struct cpufreq_governor schedutil_gov = {
.name = "schedutil",
.owner = THIS_MODULE,
- .dynamic_switching = true,
+ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
.init = sugov_init,
.exit = sugov_exit,
.start = sugov_start,
@@ -1010,8 +951,4 @@
}
#endif
-static int __init sugov_register(void)
-{
- return cpufreq_register_governor(&schedutil_gov);
-}
-fs_initcall(sugov_register);
+cpufreq_governor_init(schedutil_gov);
--
Gitblit v1.6.2