2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -1,61 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Versatile Express SPC CPUFreq Interface driver
  *
- * It provides necessary ops to arm_big_little cpufreq driver.
+ * Copyright (C) 2013 - 2019 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
  *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/device.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
 #include <linux/types.h>
 
-#include "arm_big_little.h"
+/* Currently we support only two clusters */
+#define A15_CLUSTER	0
+#define A7_CLUSTER	1
+#define MAX_CLUSTERS	2
 
-static int ve_spc_init_opp_table(const struct cpumask *cpumask)
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled()	bL_switching_enabled
+#define set_switching_enabled(x)	(bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled()	false
+#define set_switching_enabled(x)	do { } while (0)
+#define bL_switch_request(...)		do { } while (0)
+#define bL_switcher_put_enabled()	do { } while (0)
+#define bL_switcher_get_enabled()	do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
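/*
 * Annotation, not part of the patch: under the in-kernel switcher (IKS) the
 * A7 cluster's clock runs at twice the "virtual" rate seen by cpufreq, so
 * ACTUAL_FREQ()/VIRT_FREQ() just shift by one bit for the A7. A minimal,
 * standalone sketch of that arithmetic (macro copies lifted from the hunk
 * above):
 */
#include <assert.h>

#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)   ((cluster == A7_CLUSTER) ? freq >> 1 : freq)

int main(void)
{
	/* a virtual 500 MHz request on the A7 programs the clock to 1 GHz */
	assert(ACTUAL_FREQ(A7_CLUSTER, 500000u) == 1000000u);
	/* A15 rates pass through unchanged */
	assert(VIRT_FREQ(A15_CLUSTER, 1200000u) == 1200000u);
	/* the two macros are inverses for even A7 rates */
	assert(VIRT_FREQ(A7_CLUSTER, ACTUAL_FREQ(A7_CLUSTER, 700000u)) == 700000u);
	return 0;
}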
+static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min;	/* (Big) clock frequencies should be above this value */
+static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
 {
-	struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+	return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+	return is_bL_switching_enabled() ?
+		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+	int j;
+	u32 max_freq = 0, cpu_freq;
+
+	for_each_online_cpu(j) {
+		cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+		if (cluster == per_cpu(physical_cluster, j) &&
+		    max_freq < cpu_freq)
+			max_freq = cpu_freq;
+	}
+
+	return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+	u32 cur_cluster = per_cpu(physical_cluster, cpu);
+	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+	/* For switcher we use virtual A7 clock rates */
+	if (is_bL_switching_enabled())
+		rate = VIRT_FREQ(cur_cluster, rate);
+
+	return rate;
+}
+
+static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
+{
+	if (is_bL_switching_enabled())
+		return per_cpu(cpu_last_req_freq, cpu);
+	else
+		return clk_get_cpu_rate(cpu);
+}
+
+static unsigned int
+ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+	u32 new_rate, prev_rate;
+	int ret;
+	bool bLs = is_bL_switching_enabled();
+
+	mutex_lock(&cluster_lock[new_cluster]);
+
+	if (bLs) {
+		prev_rate = per_cpu(cpu_last_req_freq, cpu);
+		per_cpu(cpu_last_req_freq, cpu) = rate;
+		per_cpu(physical_cluster, cpu) = new_cluster;
+
+		new_rate = find_cluster_maxfreq(new_cluster);
+		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+	} else {
+		new_rate = rate;
+	}
+
+	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+	if (!ret) {
+		/*
+		 * FIXME: clk_set_rate hasn't returned an error here however it
+		 * may be that clk_change_rate failed due to hardware or
+		 * firmware issues and wasn't able to report that due to the
+		 * current design of the clk core layer. To work around this
+		 * problem we will read back the clock rate and check it is
+		 * correct. This needs to be removed once clk core is fixed.
+		 */
+		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+			ret = -EIO;
+	}
+
+	if (WARN_ON(ret)) {
+		if (bLs) {
+			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+			per_cpu(physical_cluster, cpu) = old_cluster;
+		}
+
+		mutex_unlock(&cluster_lock[new_cluster]);
+
+		return ret;
+	}
+
+	mutex_unlock(&cluster_lock[new_cluster]);
+
+	/* Recalc freq for old cluster when switching clusters */
+	if (old_cluster != new_cluster) {
+		/* Switch cluster */
+		bL_switch_request(cpu, new_cluster);
+
+		mutex_lock(&cluster_lock[old_cluster]);
+
+		/* Set freq of old cluster if there are cpus left on it */
+		new_rate = find_cluster_maxfreq(old_cluster);
+		new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+		if (new_rate &&
+		    clk_set_rate(clk[old_cluster], new_rate * 1000)) {
+			pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+			       __func__, ret, old_cluster);
+		}
+		mutex_unlock(&cluster_lock[old_cluster]);
+	}
+
+	return 0;
+}
+
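/*
 * Annotation, not part of the patch: the FIXME block above guards against a
 * clk_set_rate() that returns 0 even though firmware silently rejected the
 * rate. A minimal sketch of the same readback-and-verify pattern, assuming
 * <linux/clk.h>; set_rate_checked() is a hypothetical helper name:
 */
static int set_rate_checked(struct clk *c, unsigned long rate_hz)
{
	int ret = clk_set_rate(c, rate_hz);

	if (ret)
		return ret;
	/* the clk core may swallow hardware/firmware failures; read back */
	if (clk_get_rate(c) != rate_hz)
		return -EIO;
	return 0;
}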
+/* Set clock frequency */
+static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
+				     unsigned int index)
+{
+	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+	unsigned int freqs_new;
+
+	cur_cluster = cpu_to_cluster(cpu);
+	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+	freqs_new = freq_table[cur_cluster][index].frequency;
+
+	if (is_bL_switching_enabled()) {
+		if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
+			new_cluster = A7_CLUSTER;
+		else if (actual_cluster == A7_CLUSTER &&
+			 freqs_new > clk_little_max)
+			new_cluster = A15_CLUSTER;
+	}
+
+	return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+				       freqs_new);
+}
+
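/*
 * Annotation, not part of the patch: in IKS mode the merged table spans both
 * clusters, so a target below the slowest A15 OPP (clk_big_min) migrates the
 * CPU to the A7, and one above the fastest virtual A7 OPP (clk_little_max)
 * migrates it to the A15. The decision above, isolated as a pure function
 * for illustration (pick_cluster() is a hypothetical name):
 */
static unsigned int pick_cluster(unsigned int actual, unsigned int freq_new,
				 unsigned int big_min, unsigned int little_max)
{
	if (actual == A15_CLUSTER && freq_new < big_min)
		return A7_CLUSTER;	/* too slow for big; go LITTLE */
	if (actual == A7_CLUSTER && freq_new > little_max)
		return A15_CLUSTER;	/* too fast for LITTLE; go big */
	return actual;			/* stay on the current cluster */
}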
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+	int count;
+
+	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+		;
+
+	return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+	struct cpufreq_frequency_table *pos;
+	u32 min_freq = ~0;
+
+	cpufreq_for_each_entry(pos, table)
+		if (pos->frequency < min_freq)
+			min_freq = pos->frequency;
+	return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+	struct cpufreq_frequency_table *pos;
+	u32 max_freq = 0;
+
+	cpufreq_for_each_entry(pos, table)
+		if (pos->frequency > max_freq)
+			max_freq = pos->frequency;
+	return max_freq;
+}
+
+static bool search_frequency(struct cpufreq_frequency_table *table, int size,
+			     unsigned int freq)
+{
+	int count;
+
+	for (count = 0; count < size; count++) {
+		if (table[count].frequency == freq)
+			return true;
+	}
+
+	return false;
+}
+
+static int merge_cluster_tables(void)
+{
+	int i, j, k = 0, count = 1;
+	struct cpufreq_frequency_table *table;
+
+	for (i = 0; i < MAX_CLUSTERS; i++)
+		count += get_table_count(freq_table[i]);
+
+	table = kcalloc(count, sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	freq_table[MAX_CLUSTERS] = table;
+
+	/* Add in reverse order to get freqs in increasing order */
+	for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
+		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+		     j++) {
+			if (i == A15_CLUSTER &&
+			    search_frequency(table, count, freq_table[i][j].frequency))
+				continue; /* skip duplicates */
+			table[k++].frequency =
+				VIRT_FREQ(i, freq_table[i][j].frequency);
+		}
+	}
+
+	table[k].driver_data = k;
+	table[k].frequency = CPUFREQ_TABLE_END;
+
+	return 0;
+}
+
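/*
 * Annotation, not part of the patch: a standalone worked example of the merge
 * above with hypothetical OPPs. A7 actual rates {700, 800, 1000} MHz become
 * virtual {350, 400, 500} MHz and are added first; the A15 rates
 * {500, 600, 700} MHz follow, with the duplicate 500 MHz skipped, giving
 * {350, 400, 500, 600, 700} MHz in increasing order.
 */
#include <stdbool.h>
#include <stdio.h>

#define A15	0
#define A7	1
#define VIRT(cl, f)	(((cl) == A7) ? (f) >> 1 : (f))

static bool seen(const unsigned int *t, int n, unsigned int f)
{
	for (int i = 0; i < n; i++)
		if (t[i] == f)
			return true;
	return false;
}

int main(void)
{
	unsigned int a7[]  = { 700000, 800000, 1000000 };	/* kHz, actual */
	unsigned int a15[] = { 500000, 600000, 700000 };	/* kHz */
	unsigned int merged[8];
	int j, k = 0;

	for (j = 0; j < 3; j++)			/* A7 first: lower virtual rates */
		merged[k++] = VIRT(A7, a7[j]);
	for (j = 0; j < 3; j++)			/* A15 next, skipping duplicates */
		if (!seen(merged, k, a15[j]))
			merged[k++] = VIRT(A15, a15[j]);

	for (j = 0; j < k; j++)
		printf("%u\n", merged[j]);	/* 350000 ... 700000 */
	return 0;
}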
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+					    const struct cpumask *cpumask)
+{
+	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+	if (!freq_table[cluster])
+		return;
+
+	clk_put(clk[cluster]);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+					   const struct cpumask *cpumask)
+{
+	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	int i;
+
+	if (atomic_dec_return(&cluster_usage[cluster]))
+		return;
+
+	if (cluster < MAX_CLUSTERS)
+		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
+
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+
+		if (!cdev)
+			return;
+
+		_put_cluster_clk_and_freq_table(cdev, cpumask);
+	}
+
+	/* free virtual table */
+	kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+					   const struct cpumask *cpumask)
+{
+	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+	int ret;
+
+	if (freq_table[cluster])
+		return 0;
+
 	/*
 	 * platform specific SPC code must initialise the opp table
 	 * so just check if the OPP count is non-zero
 	 */
-	return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+	ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+	if (ret)
+		goto out;
+
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+	if (ret)
+		goto out;
+
+	clk[cluster] = clk_get(cpu_dev, NULL);
+	if (!IS_ERR(clk[cluster]))
+		return 0;
+
+	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+		__func__, cpu_dev->id, cluster);
+	ret = PTR_ERR(clk[cluster]);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+out:
+	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+		cluster);
+	return ret;
 }
 
-static int ve_spc_get_transition_latency(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+					  const struct cpumask *cpumask)
 {
-	return 1000000; /* 1 ms */
+	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	int i, ret;
+
+	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+		return 0;
+
+	if (cluster < MAX_CLUSTERS) {
+		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
+		if (ret)
+			atomic_dec(&cluster_usage[cluster]);
+		return ret;
+	}
+
+	/*
+	 * Get data for all clusters and fill virtual cluster with a merge of
+	 * both
+	 */
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+
+		if (!cdev)
+			return -ENODEV;
+
+		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
+		if (ret)
+			goto put_clusters;
+	}
+
+	ret = merge_cluster_tables();
+	if (ret)
+		goto put_clusters;
+
+	/* Assuming 2 cluster, set clk_big_min and clk_little_max */
+	clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
+	clk_little_max = VIRT_FREQ(A7_CLUSTER,
+				   get_table_max(freq_table[A7_CLUSTER]));
+
+	return 0;
+
+put_clusters:
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+
+		if (!cdev)
+			return -ENODEV;
+
+		_put_cluster_clk_and_freq_table(cdev, cpumask);
+	}
+
+	atomic_dec(&cluster_usage[cluster]);
+
+	return ret;
 }
 
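/*
 * Annotation, not part of the patch: cluster resources are shared by every
 * CPU in a cluster, so the first get_cluster_clk_and_freq_table() caller does
 * the setup and the last put_cluster_clk_and_freq_table() caller tears it
 * down. A minimal sketch of that first-in/last-out refcount pattern, assuming
 * <linux/atomic.h>; setup_cluster()/teardown_cluster() are hypothetical
 * stand-ins for the _get/_put helpers:
 */
extern int setup_cluster(void);		/* hypothetical */
extern void teardown_cluster(void);	/* hypothetical */

static atomic_t usage;

static int get_shared(void)
{
	int ret;

	if (atomic_inc_return(&usage) != 1)
		return 0;		/* already set up by an earlier caller */

	ret = setup_cluster();
	if (ret)
		atomic_dec(&usage);	/* roll back the count on failure */
	return ret;
}

static void put_shared(void)
{
	if (atomic_dec_return(&usage))
		return;			/* still in use elsewhere */
	teardown_cluster();
}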
-static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
-	.name	= "vexpress-spc",
-	.get_transition_latency	= ve_spc_get_transition_latency,
-	.init_opp_table		= ve_spc_init_opp_table,
+/* Per-CPU initialization */
+static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
+{
+	u32 cur_cluster = cpu_to_cluster(policy->cpu);
+	struct device *cpu_dev;
+	int ret;
+
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+		       policy->cpu);
+		return -ENODEV;
+	}
+
+	if (cur_cluster < MAX_CLUSTERS) {
+		int cpu;
+
+		dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
+
+		for_each_cpu(cpu, policy->cpus)
+			per_cpu(physical_cluster, cpu) = cur_cluster;
+	} else {
+		/* Assumption: during init, we are always running on A15 */
+		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+	}
+
+	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+	if (ret)
+		return ret;
+
+	policy->freq_table = freq_table[cur_cluster];
+	policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
+
+	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
+
+	if (is_bL_switching_enabled())
+		per_cpu(cpu_last_req_freq, policy->cpu) =
+					clk_get_cpu_rate(policy->cpu);
+
+	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+	return 0;
+}
+
+static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	struct device *cpu_dev;
+	int cur_cluster = cpu_to_cluster(policy->cpu);
+
+	if (cur_cluster < MAX_CLUSTERS) {
+		cpufreq_cooling_unregister(cdev[cur_cluster]);
+		cdev[cur_cluster] = NULL;
+	}
+
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+		       policy->cpu);
+		return -ENODEV;
+	}
+
+	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
+	return 0;
+}
+
+static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	int cur_cluster = cpu_to_cluster(policy->cpu);
+
+	/* Do not register a cpu_cooling device if we are in IKS mode */
+	if (cur_cluster >= MAX_CLUSTERS)
+		return;
+
+	cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
+}
+
+static struct cpufreq_driver ve_spc_cpufreq_driver = {
+	.name			= "vexpress-spc",
+	.flags			= CPUFREQ_STICKY |
+					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify			= cpufreq_generic_frequency_table_verify,
+	.target_index		= ve_spc_cpufreq_set_target,
+	.get			= ve_spc_cpufreq_get_rate,
+	.init			= ve_spc_cpufreq_init,
+	.exit			= ve_spc_cpufreq_exit,
+	.ready			= ve_spc_cpufreq_ready,
+	.attr			= cpufreq_generic_attr,
 };
+
+#ifdef CONFIG_BL_SWITCHER
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+					unsigned long action, void *_arg)
+{
+	pr_debug("%s: action: %ld\n", __func__, action);
+
+	switch (action) {
+	case BL_NOTIFY_PRE_ENABLE:
+	case BL_NOTIFY_PRE_DISABLE:
+		cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+		break;
+
+	case BL_NOTIFY_POST_ENABLE:
+		set_switching_enabled(true);
+		cpufreq_register_driver(&ve_spc_cpufreq_driver);
+		break;
+
+	case BL_NOTIFY_POST_DISABLE:
+		set_switching_enabled(false);
+		cpufreq_register_driver(&ve_spc_cpufreq_driver);
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+	.notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+static int __bLs_register_notifier(void)
+{
+	return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
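/*
 * Annotation, not part of the patch: the notifier unregisters the driver
 * before an IKS enable/disable transition and re-registers it afterwards
 * because the switch flips cpu_to_cluster() between the two-cluster view and
 * the single virtual-cluster view, so every policy and frequency table must
 * be rebuilt through ve_spc_cpufreq_init().
 */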
 
 static int ve_spc_cpufreq_probe(struct platform_device *pdev)
 {
-	return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+	int ret, i;
+
+	set_switching_enabled(bL_switcher_get_enabled());
+
+	for (i = 0; i < MAX_CLUSTERS; i++)
+		mutex_init(&cluster_lock[i]);
+
+	ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
+	if (ret) {
+		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+			__func__, ve_spc_cpufreq_driver.name, ret);
+	} else {
+		ret = __bLs_register_notifier();
+		if (ret)
+			cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+		else
+			pr_info("%s: Registered platform driver: %s\n",
+				__func__, ve_spc_cpufreq_driver.name);
+	}
+
+	bL_switcher_put_enabled();
+	return ret;
 }
 
 static int ve_spc_cpufreq_remove(struct platform_device *pdev)
 {
-	bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+	bL_switcher_get_enabled();
+	__bLs_unregister_notifier();
+	cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+	bL_switcher_put_enabled();
+	pr_info("%s: Un-registered platform driver: %s\n", __func__,
+		ve_spc_cpufreq_driver.name);
 	return 0;
 }
 
@@ -68,4 +591,8 @@
 };
 module_platform_driver(ve_spc_cpufreq_platdrv);
 
-MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:vexpress-spc-cpufreq");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
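/*
 * Annotation, not part of the patch: MODULE_ALIAS() lets the module load by
 * the platform-device name "vexpress-spc-cpufreq", so the driver binds when
 * platform code registers a matching device. A sketch of such a registration
 * (the VExpress SPC platform code is assumed to do something along these
 * lines):
 *
 *	platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
 */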