```diff
@@ -126,7 +126,6 @@
 	struct interactive_policy *ipolicy;
 
 	struct irq_work irq_work;
-	struct irq_work boost_irq_work;
 	u64 last_sample_time;
 	unsigned long next_sample_jiffies;
 	bool work_in_progress;
```
```diff
@@ -149,9 +148,6 @@
 	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
 	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
 	int cpu;
-	unsigned int task_boost_freq;
-	unsigned long task_boost_util;
-	u64 task_boos_endtime;
 };
 
 static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);
```
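With `boost_irq_work` and the three task-boost fields gone (note the removed field was spelled `task_boos_endtime` in the source; the typo leaves with the feature rather than being fixed), the per-CPU state shrinks back to the stock interactive layout. A sketch of the result, showing only the fields visible in these two hunks:

```c
/* Per-CPU governor state after the removal (sketch: fields not visible
 * in this diff are elided). */
struct interactive_cpu {
	struct interactive_policy *ipolicy;

	struct irq_work irq_work;
	u64 last_sample_time;
	unsigned long next_sample_jiffies;
	bool work_in_progress;

	/* ... */

	u64 pol_hispeed_val_time;	/* policy hispeed_validate_time */
	u64 loc_hispeed_val_time;	/* per-cpu hispeed_validate_time */
	int cpu;
};
```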
```diff
@@ -427,9 +423,6 @@
 	    new_freq < tunables->touchboost_freq) {
 		new_freq = tunables->touchboost_freq;
 	}
-	if ((now < icpu->task_boos_endtime) && (new_freq < icpu->task_boost_freq)) {
-		new_freq = icpu->task_boost_freq;
-	}
 #endif
 	if (policy->cur >= tunables->hispeed_freq &&
 	    new_freq > policy->cur &&
```
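Only the task-boost floor disappears here; the touchboost floor just above it survives. Both follow the same pattern: raise `new_freq` to a boost frequency while a boost window is still open. A minimal sketch of that clamp (the parameter names are illustrative, not taken from the source):

```c
/* Generic boost-floor clamp, as the surviving touchboost branch applies it.
 * 'boost_end' and 'boost_freq' are hypothetical names for illustration. */
static unsigned int apply_boost_floor(unsigned int new_freq, u64 now,
				      u64 boost_end, unsigned int boost_freq)
{
	if (now < boost_end && new_freq < boost_freq)
		new_freq = boost_freq;	/* raise to the floor while boosted */

	return new_freq;
}
```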
```diff
@@ -676,25 +669,31 @@
 				  unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu, freq->cpu);
+	struct cpufreq_policy *policy = freq->policy;
+	struct interactive_cpu *icpu;
 	unsigned long flags;
+	int cpu;
 
 	if (val != CPUFREQ_POSTCHANGE)
 		return 0;
 
-	if (!down_read_trylock(&icpu->enable_sem))
-		return 0;
+	for_each_cpu(cpu, policy->cpus) {
+		icpu = &per_cpu(interactive_cpu, cpu);
 
-	if (!icpu->ipolicy) {
+		if (!down_read_trylock(&icpu->enable_sem))
+			continue;
+
+		if (!icpu->ipolicy) {
+			up_read(&icpu->enable_sem);
+			continue;
+		}
+
+		spin_lock_irqsave(&icpu->load_lock, flags);
+		update_load(icpu, cpu);
+		spin_unlock_irqrestore(&icpu->load_lock, flags);
+
 		up_read(&icpu->enable_sem);
-		return 0;
 	}
-
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	update_load(icpu, freq->cpu);
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-
-	up_read(&icpu->enable_sem);
 
 	return 0;
 }
```
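This rework appears to track the upstream cpufreq change (merged around v5.1) that invokes transition notifiers once per policy and replaces the `cpu` field of `struct cpufreq_freqs` with a `policy` pointer, so the callback must now walk `policy->cpus` itself. A minimal notifier skeleton under that per-policy API, assuming such a kernel (the `demo_` names are placeholders):

```c
#include <linux/cpufreq.h>
#include <linux/notifier.h>

static int demo_trans_notifier(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	/* One invocation now covers every CPU sharing the policy. */
	for_each_cpu(cpu, freq->policy->cpus)
		pr_debug("cpu%d switched to %u kHz\n", cpu, freq->new);

	return 0;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_trans_notifier,
};
/* Registered via cpufreq_register_notifier(&demo_nb, CPUFREQ_TRANSITION_NOTIFIER). */
```

Note the `down_read_trylock()`/`continue` pairing in the new loop body: a CPU whose governor state is mid-setup or teardown is simply skipped, instead of aborting the notifier for the whole policy as the old per-cpu version did with `return 0`.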
```diff
@@ -1137,15 +1136,12 @@
 	for_each_cpu(i, policy->cpus)
 		cpufreq_remove_update_util_hook(i);
 
-	synchronize_sched();
+	synchronize_rcu();
 }
 
 static void icpu_cancel_work(struct interactive_cpu *icpu)
 {
 	irq_work_sync(&icpu->irq_work);
-#ifdef CONFIG_ARCH_ROCKCHIP
-	irq_work_sync(&icpu->boost_irq_work);
-#endif
 	icpu->work_in_progress = false;
 	del_timer_sync(&icpu->slack_timer);
 }
```
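`synchronize_sched()` was removed by the RCU flavor consolidation (kernels from around v4.20/v5.0 onward): plain `synchronize_rcu()` now also waits out preempt- and irq-disabled regions, which is exactly the context the update-util hooks run in. The teardown ordering the hunk preserves, annotated as a sketch:

```c
/* Detach the hooks from every CPU first, then wait one full grace
 * period so a hook invocation still in flight on another CPU has
 * finished before the governor's per-CPU state is reused or freed. */
for_each_cpu(i, policy->cpus)
	cpufreq_remove_update_util_hook(i);

synchronize_rcu();	/* covers the preempt-disabled hook callers too */
```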
```diff
@@ -1368,83 +1364,6 @@
 		attr_set = tunables->attr_set;
 		*tunables = backup_tunables[index];
 		tunables->attr_set = attr_set;
-	}
-}
-
-static unsigned int get_freq_for_util(struct cpufreq_policy *policy, unsigned long util)
-{
-	struct cpufreq_frequency_table *pos;
-	unsigned long max_cap, cur_cap;
-	unsigned int freq = 0;
-
-	max_cap = arch_scale_cpu_capacity(NULL, policy->cpu);
-	cpufreq_for_each_valid_entry(pos, policy->freq_table) {
-		freq = pos->frequency;
-
-		cur_cap = max_cap * freq / policy->max;
-		if (cur_cap > util)
-			break;
-	}
-
-	return freq;
-}
-
-static void task_boost_irq_work(struct irq_work *irq_work)
-{
-	struct interactive_cpu *pcpu;
-	struct interactive_policy *ipolicy;
-	unsigned long flags[2];
-	u64 now, prev_boos_endtime;
-	unsigned int boost_freq;
-
-	pcpu = container_of(irq_work, struct interactive_cpu, boost_irq_work);
-	if (!down_read_trylock(&pcpu->enable_sem))
-		return;
-
-	ipolicy = pcpu->ipolicy;
-	if (!ipolicy)
-		goto out;
-
-	if (ipolicy->policy->cur == ipolicy->policy->max)
-		goto out;
-
-	now = ktime_to_us(ktime_get());
-	prev_boos_endtime = pcpu->task_boos_endtime;;
-	pcpu->task_boos_endtime = now + ipolicy->tunables->sampling_rate;
-	boost_freq = get_freq_for_util(ipolicy->policy, pcpu->task_boost_util);
-	if ((now < prev_boos_endtime) && (boost_freq <= pcpu->task_boost_freq))
-		goto out;
-	pcpu->task_boost_freq = boost_freq;
-
-	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
-	spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
-	if (pcpu->target_freq < pcpu->task_boost_freq) {
-		pcpu->target_freq = pcpu->task_boost_freq;
-		cpumask_set_cpu(pcpu->cpu, &speedchange_cpumask);
-		wake_up_process(speedchange_task);
-	}
-	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
-	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
-
-out:
-	up_read(&pcpu->enable_sem);
-}
-
-extern unsigned long capacity_curr_of(int cpu);
-
-void cpufreq_task_boost(int cpu, unsigned long util)
-{
-	struct interactive_cpu *pcpu = &per_cpu(interactive_cpu, cpu);
-	unsigned long cap, min_util;
-
-	if (!speedchange_task)
-		return;
-
-	min_util = util + (util >> 2);
-	cap = capacity_curr_of(cpu);
-	if (min_util > cap) {
-		pcpu->task_boost_util = min_util;
-		irq_work_queue(&pcpu->boost_irq_work);
-	}
-}
 	}
 }
 #endif
```
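The whole Rockchip task-boost path exits here. In the removed code, `cpufreq_task_boost()` added 25% headroom to a task's utilization (`util + (util >> 2)`) and, when that exceeded the CPU's current capacity, queued `boost_irq_work`; the handler mapped the boosted utilization back to a frequency via `get_freq_for_util()` and woke the speedchange thread. (The stray double semicolon and the `task_boos_endtime` spelling above are reproduced verbatim from the removed source.) The frequency lookup walked the table for the first entry whose linearly scaled capacity exceeds the target utilization; a standalone sketch of that walk, with hypothetical table and capacity values:

```c
#include <stdio.h>

/* Sketch of the removed get_freq_for_util() selection: pick the lowest
 * table frequency whose scaled capacity exceeds 'util', where
 * cur_cap = max_cap * freq / policy->max as in the original. */
static unsigned int freq_for_util(const unsigned int *table, int n,
				  unsigned long max_cap,
				  unsigned int policy_max, unsigned long util)
{
	unsigned int freq = 0;
	int i;

	for (i = 0; i < n; i++) {
		freq = table[i];
		if (max_cap * freq / policy_max > util)
			break;	/* first frequency with enough capacity */
	}

	return freq;	/* falls through to the fastest entry if none suffices */
}

int main(void)
{
	const unsigned int table[] = { 408000, 816000, 1200000, 1608000 };

	/* util 600 out of max_cap 1024 on a 1.608 GHz policy -> 1200000 */
	printf("%u kHz\n", freq_for_util(table, 4, 1024, 1608000, 600));
	return 0;
}
```

Because capacity is assumed to scale linearly with frequency, the walk simply returns the table's last entry when no frequency is fast enough for the requested utilization.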
```diff
@@ -1675,9 +1594,6 @@
 		icpu = &per_cpu(interactive_cpu, cpu);
 
 		init_irq_work(&icpu->irq_work, irq_work);
-#ifdef CONFIG_ARCH_ROCKCHIP
-		init_irq_work(&icpu->boost_irq_work, task_boost_irq_work);
-#endif
 		spin_lock_init(&icpu->load_lock);
 		spin_lock_init(&icpu->target_freq_lock);
 		init_rwsem(&icpu->enable_sem);
```