From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:20:52 +0000
Subject: [PATCH] cpufreq: interactive: remove task boost and rework transition
 notifier

Drop the CONFIG_ARCH_ROCKCHIP task-boost path (get_freq_for_util(),
task_boost_irq_work(), cpufreq_task_boost() and the per-CPU boost fields),
walk policy->cpus via freq->policy instead of using freq->cpu in the
frequency transition notifier, and call synchronize_rcu() in place of
synchronize_sched().
---
 kernel/drivers/cpufreq/cpufreq_interactive.c | 120 +++++++++---------------------------------------------------
 1 files changed, 18 insertions(+), 102 deletions(-)

diff --git a/kernel/drivers/cpufreq/cpufreq_interactive.c b/kernel/drivers/cpufreq/cpufreq_interactive.c
index 06f7679..a486993 100644
--- a/kernel/drivers/cpufreq/cpufreq_interactive.c
+++ b/kernel/drivers/cpufreq/cpufreq_interactive.c
@@ -126,7 +126,6 @@
 	struct interactive_policy *ipolicy;
 
 	struct irq_work irq_work;
-	struct irq_work boost_irq_work;
 	u64 last_sample_time;
 	unsigned long next_sample_jiffies;
 	bool work_in_progress;
@@ -149,9 +148,6 @@
 	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
 	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
 	int cpu;
-	unsigned int task_boost_freq;
-	unsigned long task_boost_util;
-	u64 task_boos_endtime;
 };
 
 static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);
@@ -427,9 +423,6 @@
 	    new_freq < tunables->touchboost_freq) {
 		new_freq = tunables->touchboost_freq;
 	}
-	if ((now < icpu->task_boos_endtime) && (new_freq < icpu->task_boost_freq)) {
-		new_freq = icpu->task_boost_freq;
-	}
 #endif
 	if (policy->cur >= tunables->hispeed_freq &&
 	    new_freq > policy->cur &&
@@ -676,25 +669,31 @@
 				     unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu, freq->cpu);
+	struct cpufreq_policy *policy = freq->policy;
+	struct interactive_cpu *icpu;
 	unsigned long flags;
+	int cpu;
 
 	if (val != CPUFREQ_POSTCHANGE)
 		return 0;
 
-	if (!down_read_trylock(&icpu->enable_sem))
-		return 0;
+	for_each_cpu(cpu, policy->cpus) {
+		icpu = &per_cpu(interactive_cpu, cpu);
 
-	if (!icpu->ipolicy) {
+		if (!down_read_trylock(&icpu->enable_sem))
+			continue;
+
+		if (!icpu->ipolicy) {
+			up_read(&icpu->enable_sem);
+			continue;
+		}
+
+		spin_lock_irqsave(&icpu->load_lock, flags);
+		update_load(icpu, cpu);
+		spin_unlock_irqrestore(&icpu->load_lock, flags);
+
 		up_read(&icpu->enable_sem);
-		return 0;
 	}
-
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	update_load(icpu, freq->cpu);
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-
-	up_read(&icpu->enable_sem);
 
 	return 0;
 }
@@ -1137,15 +1136,12 @@
 	for_each_cpu(i, policy->cpus)
 		cpufreq_remove_update_util_hook(i);
 
-	synchronize_sched();
+	synchronize_rcu();
 }
 
 static void icpu_cancel_work(struct interactive_cpu *icpu)
 {
 	irq_work_sync(&icpu->irq_work);
-#ifdef CONFIG_ARCH_ROCKCHIP
-	irq_work_sync(&icpu->boost_irq_work);
-#endif
 	icpu->work_in_progress = false;
 	del_timer_sync(&icpu->slack_timer);
 }
@@ -1368,83 +1364,6 @@
 		attr_set = tunables->attr_set;
 		*tunables = backup_tunables[index];
 		tunables->attr_set = attr_set;
-	}
-}
-
-static unsigned int get_freq_for_util(struct cpufreq_policy *policy, unsigned long util)
-{
-	struct cpufreq_frequency_table *pos;
-	unsigned long max_cap, cur_cap;
-	unsigned int freq = 0;
-
-	max_cap = arch_scale_cpu_capacity(NULL, policy->cpu);
-	cpufreq_for_each_valid_entry(pos, policy->freq_table) {
-		freq = pos->frequency;
-
-		cur_cap = max_cap * freq / policy->max;
-		if (cur_cap > util)
-			break;
-	}
-
-	return freq;
-}
-
-static void task_boost_irq_work(struct irq_work *irq_work)
-{
-	struct interactive_cpu *pcpu;
-	struct interactive_policy *ipolicy;
-	unsigned long flags[2];
-	u64 now, prev_boos_endtime;
-	unsigned int boost_freq;
-
-	pcpu = container_of(irq_work, struct interactive_cpu, boost_irq_work);
-	if (!down_read_trylock(&pcpu->enable_sem))
-		return;
-
-	ipolicy = pcpu->ipolicy;
-	if (!ipolicy)
-		goto out;
-
-	if (ipolicy->policy->cur == ipolicy->policy->max)
-		goto out;
-
-	now = ktime_to_us(ktime_get());
-	prev_boos_endtime = pcpu->task_boos_endtime;;
-	pcpu->task_boos_endtime = now + ipolicy->tunables->sampling_rate;
-	boost_freq = get_freq_for_util(ipolicy->policy, pcpu->task_boost_util);
-	if ((now < prev_boos_endtime) && (boost_freq <= pcpu->task_boost_freq))
-		goto out;
-	pcpu->task_boost_freq = boost_freq;
-
-	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
-	spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
-	if (pcpu->target_freq < pcpu->task_boost_freq) {
-		pcpu->target_freq = pcpu->task_boost_freq;
-		cpumask_set_cpu(pcpu->cpu, &speedchange_cpumask);
-		wake_up_process(speedchange_task);
-	}
-	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
-	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
-
-out:
-	up_read(&pcpu->enable_sem);
-}
-
-extern unsigned long capacity_curr_of(int cpu);
-
-void cpufreq_task_boost(int cpu, unsigned long util)
-{
-	struct interactive_cpu *pcpu = &per_cpu(interactive_cpu, cpu);
-	unsigned long cap, min_util;
-
-	if (!speedchange_task)
-		return;
-
-	min_util = util + (util >> 2);
-	cap = capacity_curr_of(cpu);
-	if (min_util > cap) {
-		pcpu->task_boost_util = min_util;
-		irq_work_queue(&pcpu->boost_irq_work);
 	}
 }
 #endif
@@ -1675,9 +1594,6 @@
 		icpu = &per_cpu(interactive_cpu, cpu);
 
 		init_irq_work(&icpu->irq_work, irq_work);
-#ifdef CONFIG_ARCH_ROCKCHIP
-		init_irq_work(&icpu->boost_irq_work, task_boost_irq_work);
-#endif
 		spin_lock_init(&icpu->load_lock);
 		spin_lock_init(&icpu->target_freq_lock);
 		init_rwsem(&icpu->enable_sem);
-- 
Gitblit v1.6.2