2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
kernel/drivers/clk/clk.c
....@@ -1,11 +1,7 @@
1
+// SPDX-License-Identifier: GPL-2.0
12 /*
23 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
34 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
5
- *
6
- * This program is free software; you can redistribute it and/or modify
7
- * it under the terms of the GNU General Public License version 2 as
8
- * published by the Free Software Foundation.
95 *
106 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
117 */
....@@ -25,10 +21,6 @@
2521 #include <linux/pm_runtime.h>
2622 #include <linux/sched.h>
2723 #include <linux/clkdev.h>
28
-#include <linux/uaccess.h>
29
-#include <linux/of_platform.h>
30
-#include <linux/pm_opp.h>
31
-#include <linux/regulator/consumer.h>
3224
3325 #include "clk.h"
3426
....@@ -51,17 +43,15 @@
5143 NULL,
5244 };
5345
54
-/*
55
- * clk_rate_change_list is used during clk_core_set_rate_nolock() calls to
56
- * handle vdd_class vote tracking. core->rate_change_node is added to
57
- * clk_rate_change_list when core->new_rate requires a different voltage level
58
- * (core->new_vdd_class_vote) than core->vdd_class_vote. Elements are removed
59
- * from the list after unvoting core->vdd_class_vote immediately before
60
- * returning from clk_core_set_rate_nolock().
61
- */
62
-static LIST_HEAD(clk_rate_change_list);
63
-
6446 /*** private data structures ***/
47
+
48
+struct clk_parent_map {
49
+ const struct clk_hw *hw;
50
+ struct clk_core *core;
51
+ const char *fw_name;
52
+ const char *name;
53
+ int index;
54
+};
6555
6656 struct clk_core {
6757 const char *name;
....@@ -69,11 +59,11 @@
6959 struct clk_hw *hw;
7060 struct module *owner;
7161 struct device *dev;
62
+ struct device_node *of_node;
7263 struct clk_core *parent;
73
- const char **parent_names;
74
- struct clk_core **parents;
75
- unsigned int num_parents;
76
- unsigned int new_parent_index;
64
+ struct clk_parent_map *parents;
65
+ u8 num_parents;
66
+ u8 new_parent_index;
7767 unsigned long rate;
7868 unsigned long req_rate;
7969 unsigned long new_rate;
....@@ -87,8 +77,6 @@
8777 unsigned int enable_count;
8878 unsigned int prepare_count;
8979 unsigned int protect_count;
90
- bool need_handoff_enable;
91
- bool need_handoff_prepare;
9280 unsigned long min_rate;
9381 unsigned long max_rate;
9482 unsigned long accuracy;
....@@ -98,17 +86,11 @@
9886 struct hlist_node child_node;
9987 struct hlist_head clks;
10088 unsigned int notifier_count;
101
-#ifdef CONFIG_COMMON_CLK_DEBUGFS
89
+#ifdef CONFIG_DEBUG_FS
10290 struct dentry *dentry;
10391 struct hlist_node debug_node;
10492 #endif
10593 struct kref ref;
106
- struct clk_vdd_class *vdd_class;
107
- int vdd_class_vote;
108
- int new_vdd_class_vote;
109
- struct list_head rate_change_node;
110
- unsigned long *rate_max;
111
- int num_rate_max;
11294 };
11395
11496 #define CREATE_TRACE_POINTS
....@@ -116,6 +98,7 @@
11698
11799 struct clk {
118100 struct clk_core *core;
101
+ struct device *dev;
119102 const char *dev_id;
120103 const char *con_id;
121104 unsigned long min_rate;
....@@ -353,17 +336,124 @@
353336 return NULL;
354337 }
355338
339
+#ifdef CONFIG_OF
340
+static int of_parse_clkspec(const struct device_node *np, int index,
341
+ const char *name, struct of_phandle_args *out_args);
342
+static struct clk_hw *
343
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
344
+#else
345
+static inline int of_parse_clkspec(const struct device_node *np, int index,
346
+ const char *name,
347
+ struct of_phandle_args *out_args)
348
+{
349
+ return -ENOENT;
350
+}
351
+static inline struct clk_hw *
352
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
353
+{
354
+ return ERR_PTR(-ENOENT);
355
+}
356
+#endif
357
+
358
+/**
359
+ * clk_core_get - Find the clk_core parent of a clk
360
+ * @core: clk to find parent of
361
+ * @p_index: parent index to search for
362
+ *
363
+ * This is the preferred method for clk providers to find the parent of a
364
+ * clk when that parent is external to the clk controller. The parent_names
365
+ * array is indexed and treated as a local name matching a string in the device
366
+ * node's 'clock-names' property or as the 'con_id' matching the device's
367
+ * dev_name() in a clk_lookup. This allows clk providers to use their own
368
+ * namespace instead of looking for a globally unique parent string.
369
+ *
370
+ * For example the following DT snippet would allow a clock registered by the
371
+ * clock-controller@c001 that has a clk_init_data::parent_data array
372
+ * with 'xtal' in the 'name' member to find the clock provided by the
373
+ * clock-controller@f00abcd without needing to get the globally unique name of
374
+ * the xtal clk.
375
+ *
376
+ * parent: clock-controller@f00abcd {
377
+ * reg = <0xf00abcd 0xabcd>;
378
+ * #clock-cells = <0>;
379
+ * };
380
+ *
381
+ * clock-controller@c001 {
382
+ * reg = <0xc001 0xf00d>;
383
+ * clocks = <&parent>;
384
+ * clock-names = "xtal";
385
+ * #clock-cells = <1>;
386
+ * };
387
+ *
388
+ * Returns: -ENOENT when the provider can't be found or the clk doesn't
389
+ * exist in the provider or the name can't be found in the DT node or
390
+ * in a clkdev lookup. NULL when the provider knows about the clk but it
391
+ * isn't provided on this system.
392
+ * A valid clk_core pointer when the clk can be found in the provider.
393
+ */
394
+static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
395
+{
396
+ const char *name = core->parents[p_index].fw_name;
397
+ int index = core->parents[p_index].index;
398
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
399
+ struct device *dev = core->dev;
400
+ const char *dev_id = dev ? dev_name(dev) : NULL;
401
+ struct device_node *np = core->of_node;
402
+ struct of_phandle_args clkspec;
403
+
404
+ if (np && (name || index >= 0) &&
405
+ !of_parse_clkspec(np, index, name, &clkspec)) {
406
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
407
+ of_node_put(clkspec.np);
408
+ } else if (name) {
409
+ /*
410
+ * If the DT search above couldn't find the provider fallback to
411
+ * looking up via clkdev based clk_lookups.
412
+ */
413
+ hw = clk_find_hw(dev_id, name);
414
+ }
415
+
416
+ if (IS_ERR(hw))
417
+ return ERR_CAST(hw);
418
+
419
+ return hw->core;
420
+}
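
/*
 * Editor's sketch (not part of this commit): how a provider might describe
 * the external "xtal" parent from the DT example above via the
 * clk_init_data::parent_data array the kerneldoc mentions, so that
 * clk_core_get() can resolve it through 'clock-names' with a clkdev
 * fallback. my_gate_parents, my_gate_ops and my_gate_init are hypothetical.
 */
static const struct clk_parent_data my_gate_parents[] = {
	/* matched against 'clock-names' = "xtal"; .name is a global fallback */
	{ .fw_name = "xtal", .name = "xtal" },
};

static const struct clk_init_data my_gate_init = {
	.name = "my_gate",
	.ops = &my_gate_ops,			/* defined elsewhere */
	.parent_data = my_gate_parents,
	.num_parents = ARRAY_SIZE(my_gate_parents),
};
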
421
+
422
+static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
423
+{
424
+ struct clk_parent_map *entry = &core->parents[index];
425
+ struct clk_core *parent = ERR_PTR(-ENOENT);
426
+
427
+ if (entry->hw) {
428
+ parent = entry->hw->core;
429
+ /*
430
+ * We have a direct reference but it isn't registered yet?
431
+ * Orphan it and let clk_reparent() update the orphan status
432
+ * when the parent is registered.
433
+ */
434
+ if (!parent)
435
+ parent = ERR_PTR(-EPROBE_DEFER);
436
+ } else {
437
+ parent = clk_core_get(core, index);
438
+ if (PTR_ERR(parent) == -ENOENT && entry->name)
439
+ parent = clk_core_lookup(entry->name);
440
+ }
441
+
442
+ /* Only cache it if it's not an error */
443
+ if (!IS_ERR(parent))
444
+ entry->core = parent;
445
+}
446
+
356447 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
357448 u8 index)
358449 {
359
- if (!core || index >= core->num_parents)
450
+ if (!core || index >= core->num_parents || !core->parents)
360451 return NULL;
361452
362
- if (!core->parents[index])
363
- core->parents[index] =
364
- clk_core_lookup(core->parent_names[index]);
453
+ if (!core->parents[index].core)
454
+ clk_core_fill_parent_index(core, index);
365455
366
- return core->parents[index];
456
+ return core->parents[index].core;
367457 }
368458
369459 struct clk_hw *
....@@ -384,23 +474,18 @@
384474
385475 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
386476 {
387
- unsigned long ret;
477
+ if (!core)
478
+ return 0;
388479
389
- if (!core) {
390
- ret = 0;
391
- goto out;
392
- }
480
+ if (!core->num_parents || core->parent)
481
+ return core->rate;
393482
394
- ret = core->rate;
395
-
396
- if (!core->num_parents)
397
- goto out;
398
-
399
- if (!core->parent)
400
- ret = 0;
401
-
402
-out:
403
- return ret;
483
+ /*
484
+ * Clk must have a parent because num_parents > 0 but the parent isn't
485
+ * known yet. Best to return 0 as the rate of this clk until we can
486
+ * properly recalc the rate based on the parent's rate.
487
+ */
488
+ return 0;
404489 }
405490
406491 unsigned long clk_hw_get_rate(const struct clk_hw *hw)
....@@ -409,19 +494,13 @@
409494 }
410495 EXPORT_SYMBOL_GPL(clk_hw_get_rate);
411496
412
-static unsigned long __clk_get_accuracy(struct clk_core *core)
497
+static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
413498 {
414499 if (!core)
415500 return 0;
416501
417502 return core->accuracy;
418503 }
419
-
420
-unsigned long __clk_get_flags(struct clk *clk)
421
-{
422
- return !clk ? 0 : clk->core->flags;
423
-}
424
-EXPORT_SYMBOL_GPL(__clk_get_flags);
425504
426505 unsigned long clk_hw_get_flags(const struct clk_hw *hw)
427506 {
....@@ -439,6 +518,7 @@
439518 {
440519 return clk_core_rate_is_protected(hw->core);
441520 }
521
+EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
442522
443523 bool clk_hw_is_enabled(const struct clk_hw *hw)
444524 {
....@@ -541,6 +621,8 @@
541621 {
542622 struct clk *clk_user;
543623
624
+ lockdep_assert_held(&prepare_lock);
625
+
544626 *min_rate = core->min_rate;
545627 *max_rate = core->max_rate;
546628
....@@ -549,6 +631,24 @@
549631
550632 hlist_for_each_entry(clk_user, &core->clks, clks_node)
551633 *max_rate = min(*max_rate, clk_user->max_rate);
634
+}
635
+
636
+static bool clk_core_check_boundaries(struct clk_core *core,
637
+ unsigned long min_rate,
638
+ unsigned long max_rate)
639
+{
640
+ struct clk *user;
641
+
642
+ lockdep_assert_held(&prepare_lock);
643
+
644
+ if (min_rate > core->max_rate || max_rate < core->min_rate)
645
+ return false;
646
+
647
+ hlist_for_each_entry(user, &core->clks, clks_node)
648
+ if (min_rate > user->max_rate || max_rate < user->min_rate)
649
+ return false;
650
+
651
+ return true;
552652 }
553653
554654 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
....@@ -560,29 +660,15 @@
560660 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
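
/*
 * Editor's sketch (not from this commit): a provider clamping its clk to a
 * hardware limit at registration time. clk_core_get_boundaries() above then
 * aggregates this with every consumer's requested range. 'pll' and the
 * rates are hypothetical.
 */
/* in a hypothetical provider probe, after clk_hw_register(): */
clk_hw_set_rate_range(&pll->hw, 600000000UL, 1200000000UL);
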
561661
562662 /*
563
- * Aggregate the rate of all the enabled child nodes and exclude that
564
- * of the child node for which this request was made.
565
- */
566
-unsigned long clk_aggregate_rate(struct clk_hw *hw,
567
- const struct clk_core *parent)
568
-{
569
- struct clk_core *child;
570
- unsigned long aggre_rate = 0;
571
-
572
- hlist_for_each_entry(child, &parent->children, child_node) {
573
- if (child->enable_count &&
574
- strcmp(child->name, hw->init->name))
575
- aggre_rate = max(child->rate, aggre_rate);
576
- }
577
-
578
- return aggre_rate;
579
-}
580
-EXPORT_SYMBOL_GPL(clk_aggregate_rate);
581
-
582
-/*
663
+ * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
664
+ * @hw: mux type clk to determine rate on
665
+ * @req: rate request, also used to return preferred parent and frequencies
666
+ *
583667 * Helper for finding best parent to provide a given frequency. This can be used
584668 * directly as a determine_rate callback (e.g. for a mux), or from a more
585669 * complex clock that may combine a mux with other operations.
670
+ *
671
+ * Returns: 0 on success, a negative errno value on error
586672 */
587673 int __clk_mux_determine_rate(struct clk_hw *hw,
588674 struct clk_rate_request *req)
....@@ -597,201 +683,6 @@
597683 return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
598684 }
599685 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
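
/*
 * Editor's sketch (not from this commit): the typical way a mux driver uses
 * the helper above, wiring it in directly as its determine_rate op.
 * my_mux_get_parent and my_mux_set_parent are hypothetical.
 */
static const struct clk_ops my_mux_ops = {
	.determine_rate = __clk_mux_determine_rate,
	.get_parent = my_mux_get_parent,
	.set_parent = my_mux_set_parent,
};
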
600
-
601
-/*
602
- * Find the voltage level required for a given clock rate.
603
- */
604
-static int clk_find_vdd_level(struct clk_core *clk, unsigned long rate)
605
-{
606
- int level;
607
-
608
- /*
609
- * For certain PLLs, due to the limitation in the bits allocated for
610
- * programming the fractional divider, the actual rate of the PLL will
611
- * be slightly higher than the requested rate (in the order of several
612
- * Hz). To accommodate this difference, convert the FMAX rate and the
613
- * clock frequency to KHz and use that for deriving the voltage level.
614
- */
615
- for (level = 0; level < clk->num_rate_max; level++)
616
- if (DIV_ROUND_CLOSEST(rate, 1000) <=
617
- DIV_ROUND_CLOSEST(clk->rate_max[level], 1000) &&
618
- clk->rate_max[level] > 0)
619
- break;
620
-
621
- if (level == clk->num_rate_max) {
622
- pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
623
- clk->name);
624
- return -EINVAL;
625
- }
626
-
627
- return level;
628
-}
629
-
630
-/*
631
- * Update voltage level given the current votes.
632
- */
633
-static int clk_update_vdd(struct clk_vdd_class *vdd_class)
634
-{
635
- int level, rc = 0, i, ignore;
636
- struct regulator **r = vdd_class->regulator;
637
- int *uv = vdd_class->vdd_uv;
638
- int n_reg = vdd_class->num_regulators;
639
- int cur_lvl = vdd_class->cur_level;
640
- int max_lvl = vdd_class->num_levels - 1;
641
- int cur_base = cur_lvl * n_reg;
642
- int new_base;
643
-
644
- /* aggregate votes */
645
- for (level = max_lvl; level > 0; level--)
646
- if (vdd_class->level_votes[level])
647
- break;
648
-
649
- if (level == cur_lvl)
650
- return 0;
651
-
652
- max_lvl = max_lvl * n_reg;
653
- new_base = level * n_reg;
654
-
655
- for (i = 0; i < vdd_class->num_regulators; i++) {
656
- pr_debug("Set Voltage level Min %d, Max %d\n", uv[new_base + i],
657
- uv[max_lvl + i]);
658
- rc = regulator_set_voltage(r[i], uv[new_base + i], INT_MAX);
659
- if (rc)
660
- goto set_voltage_fail;
661
-
662
- if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
663
- rc = regulator_enable(r[i]);
664
- else if (level == 0)
665
- rc = regulator_disable(r[i]);
666
- if (rc)
667
- goto enable_disable_fail;
668
- }
669
-
670
- if (vdd_class->set_vdd && !vdd_class->num_regulators)
671
- rc = vdd_class->set_vdd(vdd_class, level);
672
-
673
- if (!rc)
674
- vdd_class->cur_level = level;
675
-
676
- return rc;
677
-
678
-enable_disable_fail:
679
- regulator_set_voltage(r[i], uv[cur_base + i], INT_MAX);
680
-
681
-set_voltage_fail:
682
- for (i--; i >= 0; i--) {
683
- regulator_set_voltage(r[i], uv[cur_base + i], INT_MAX);
684
- if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
685
- regulator_disable(r[i]);
686
- else if (level == 0)
687
- ignore = regulator_enable(r[i]);
688
- }
689
-
690
- return rc;
691
-}
692
-
693
-/*
694
- * Vote for a voltage level.
695
- */
696
-int clk_vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
697
-{
698
- int rc = 0;
699
-
700
- if (level >= vdd_class->num_levels)
701
- return -EINVAL;
702
-
703
- mutex_lock(&vdd_class->lock);
704
-
705
- vdd_class->level_votes[level]++;
706
-
707
- rc = clk_update_vdd(vdd_class);
708
- if (rc)
709
- vdd_class->level_votes[level]--;
710
-
711
- mutex_unlock(&vdd_class->lock);
712
-
713
- return rc;
714
-}
715
-EXPORT_SYMBOL_GPL(clk_vote_vdd_level);
716
-
717
-/*
718
- * Remove vote for a voltage level.
719
- */
720
-int clk_unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
721
-{
722
- int rc = 0;
723
-
724
- if (level >= vdd_class->num_levels)
725
- return -EINVAL;
726
-
727
- mutex_lock(&vdd_class->lock);
728
-
729
- if (WARN(!vdd_class->level_votes[level],
730
- "Reference counts are incorrect for %s level %d\n",
731
- vdd_class->class_name, level)) {
732
- rc = -EINVAL;
733
- goto out;
734
- }
735
-
736
- vdd_class->level_votes[level]--;
737
-
738
- rc = clk_update_vdd(vdd_class);
739
- if (rc)
740
- vdd_class->level_votes[level]++;
741
-
742
-out:
743
- mutex_unlock(&vdd_class->lock);
744
- return rc;
745
-}
746
-EXPORT_SYMBOL_GPL(clk_unvote_vdd_level);
747
-
748
-/*
749
- * Vote for a voltage level corresponding to a clock's rate.
750
- */
751
-int clk_vote_rate_vdd(struct clk_core *core, unsigned long rate)
752
-{
753
- int level;
754
-
755
- if (!core->vdd_class)
756
- return 0;
757
-
758
- level = clk_find_vdd_level(core, rate);
759
- if (level < 0)
760
- return level;
761
-
762
- return clk_vote_vdd_level(core->vdd_class, level);
763
-}
764
-EXPORT_SYMBOL_GPL(clk_vote_rate_vdd);
765
-
766
-/*
767
- * Remove vote for a voltage level corresponding to a clock's rate.
768
- */
769
-void clk_unvote_rate_vdd(struct clk_core *core, unsigned long rate)
770
-{
771
- int level;
772
-
773
- if (!core->vdd_class)
774
- return;
775
-
776
- level = clk_find_vdd_level(core, rate);
777
- if (level < 0)
778
- return;
779
-
780
- clk_unvote_vdd_level(core->vdd_class, level);
781
-}
782
-EXPORT_SYMBOL_GPL(clk_unvote_rate_vdd);
783
-
784
-static bool clk_is_rate_level_valid(struct clk_core *core, unsigned long rate)
785
-{
786
- int level;
787
-
788
- if (!core->vdd_class)
789
- return true;
790
-
791
- level = clk_find_vdd_level(core, rate);
792
-
793
- return level >= 0;
794
-}
795686
796687 /*** clk api ***/
797688
....@@ -901,7 +792,7 @@
901792 * clk_rate_exclusive_get - get exclusivity over the clk rate control
902793 * @clk: the clk over which the exclusivity of rate control is requested
903794 *
904
- * clk_rate_exlusive_get() begins a critical section during which a clock
795
+ * clk_rate_exclusive_get() begins a critical section during which a clock
905796 * consumer cannot tolerate any other consumer making any operation on the
906797 * clock which could result in a rate change or rate glitch. Exclusive clocks
907798 * cannot have their rate changed, either directly or indirectly due to changes
....@@ -957,17 +848,9 @@
957848 if (core->ops->unprepare)
958849 core->ops->unprepare(core->hw);
959850
960
- clk_pm_runtime_put(core);
961
-
962851 trace_clk_unprepare_complete(core);
963
-
964
- if (core->vdd_class) {
965
- clk_unvote_vdd_level(core->vdd_class, core->vdd_class_vote);
966
- core->vdd_class_vote = 0;
967
- core->new_vdd_class_vote = 0;
968
- }
969
-
970852 clk_core_unprepare(core->parent);
853
+ clk_pm_runtime_put(core);
971854 }
972855
973856 static void clk_core_unprepare_lock(struct clk_core *core)
....@@ -1017,28 +900,13 @@
1017900
1018901 trace_clk_prepare(core);
1019902
1020
- ret = clk_vote_rate_vdd(core, core->rate);
1021
- if (ret) {
1022
- clk_core_unprepare(core->parent);
1023
- return ret;
1024
- }
1025
- if (core->vdd_class) {
1026
- core->vdd_class_vote
1027
- = clk_find_vdd_level(core, core->rate);
1028
- core->new_vdd_class_vote = core->vdd_class_vote;
1029
- }
1030
-
1031903 if (core->ops->prepare)
1032904 ret = core->ops->prepare(core->hw);
1033905
1034906 trace_clk_prepare_complete(core);
1035907
1036
- if (ret) {
1037
- clk_unvote_rate_vdd(core, core->rate);
1038
- core->vdd_class_vote = 0;
1039
- core->new_vdd_class_vote = 0;
908
+ if (ret)
1040909 goto unprepare;
1041
- }
1042910 }
1043911
1044912 core->prepare_count++;
....@@ -1199,6 +1067,101 @@
11991067 }
12001068
12011069 /**
1070
+ * clk_gate_restore_context - restore context for poweroff
1071
+ * @hw: the clk_hw pointer of clock whose state is to be restored
1072
+ *
1073
+ * The clock gate restore context function enables or disables gate
1074
+ * clocks based on their enable_count. It is used when the clock
1075
+ * context has been lost (for example across a power-off) and each
1076
+ * gate must be re-enabled or re-disabled to match the state recorded
1077
+ * before the context was lost.
1078
+ */
1079
+void clk_gate_restore_context(struct clk_hw *hw)
1080
+{
1081
+ struct clk_core *core = hw->core;
1082
+
1083
+ if (core->enable_count)
1084
+ core->ops->enable(hw);
1085
+ else
1086
+ core->ops->disable(hw);
1087
+}
1088
+EXPORT_SYMBOL_GPL(clk_gate_restore_context);
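
/*
 * Editor's sketch (not from this commit): a gate driver opting into context
 * restore by pointing its restore_context op at the generic helper above,
 * which replays enable_count through these enable/disable ops.
 * my_gate_enable and my_gate_disable are hypothetical.
 */
static const struct clk_ops my_gate_ops = {
	.enable = my_gate_enable,
	.disable = my_gate_disable,
	.restore_context = clk_gate_restore_context,
};
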
1089
+
1090
+static int clk_core_save_context(struct clk_core *core)
1091
+{
1092
+ struct clk_core *child;
1093
+ int ret = 0;
1094
+
1095
+ hlist_for_each_entry(child, &core->children, child_node) {
1096
+ ret = clk_core_save_context(child);
1097
+ if (ret < 0)
1098
+ return ret;
1099
+ }
1100
+
1101
+ if (core->ops && core->ops->save_context)
1102
+ ret = core->ops->save_context(core->hw);
1103
+
1104
+ return ret;
1105
+}
1106
+
1107
+static void clk_core_restore_context(struct clk_core *core)
1108
+{
1109
+ struct clk_core *child;
1110
+
1111
+ if (core->ops && core->ops->restore_context)
1112
+ core->ops->restore_context(core->hw);
1113
+
1114
+ hlist_for_each_entry(child, &core->children, child_node)
1115
+ clk_core_restore_context(child);
1116
+}
1117
+
1118
+/**
1119
+ * clk_save_context - save clock context for poweroff
1120
+ *
1121
+ * Saves the context of the clock registers for power states in which
1122
+ * their contents will be lost. Occurs deep within the suspend code.
1123
+ * Returns 0 on success.
1124
+ */
1125
+int clk_save_context(void)
1126
+{
1127
+ struct clk_core *clk;
1128
+ int ret;
1129
+
1130
+ hlist_for_each_entry(clk, &clk_root_list, child_node) {
1131
+ ret = clk_core_save_context(clk);
1132
+ if (ret < 0)
1133
+ return ret;
1134
+ }
1135
+
1136
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1137
+ ret = clk_core_save_context(clk);
1138
+ if (ret < 0)
1139
+ return ret;
1140
+ }
1141
+
1142
+ return 0;
1143
+}
1144
+EXPORT_SYMBOL_GPL(clk_save_context);
1145
+
1146
+/**
1147
+ * clk_restore_context - restore clock context after poweroff
1148
+ *
1150
+ *
1151
+ */
1152
+void clk_restore_context(void)
1153
+{
1154
+ struct clk_core *core;
1155
+
1156
+ hlist_for_each_entry(core, &clk_root_list, child_node)
1157
+ clk_core_restore_context(core);
1158
+
1159
+ hlist_for_each_entry(core, &clk_orphan_list, child_node)
1160
+ clk_core_restore_context(core);
1161
+}
1162
+EXPORT_SYMBOL_GPL(clk_restore_context);
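
/*
 * Editor's sketch (not from this commit): where the two tree walkers above
 * are expected to be called from, e.g. a platform's noirq suspend/resume
 * hooks. The my_pm_* names are hypothetical.
 */
static int my_pm_suspend_noirq(struct device *dev)
{
	return clk_save_context();
}

static int my_pm_resume_noirq(struct device *dev)
{
	clk_restore_context();
	return 0;
}
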
1163
+
1164
+/**
12021165 * clk_enable - ungate a clock
12031166 * @clk: the clk being ungated
12041167 *
....@@ -1241,7 +1204,7 @@
12411204 clk_core_unprepare_lock(core);
12421205 }
12431206
1244
-static void clk_unprepare_unused_subtree(struct clk_core *core)
1207
+static void __init clk_unprepare_unused_subtree(struct clk_core *core)
12451208 {
12461209 struct clk_core *child;
12471210
....@@ -1253,19 +1216,6 @@
12531216 if (dev_has_sync_state(core->dev) &&
12541217 !(core->flags & CLK_DONT_HOLD_STATE))
12551218 return;
1256
-
1257
- /*
1258
- * setting CLK_ENABLE_HAND_OFF flag triggers this conditional
1259
- *
1260
- * need_handoff_prepare implies this clk was already prepared by
1261
- * __clk_init. now we have a proper user, so unset the flag in our
1262
- * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
1263
- * for details.
1264
- */
1265
- if (core->need_handoff_prepare) {
1266
- core->need_handoff_prepare = false;
1267
- clk_core_unprepare(core);
1268
- }
12691219
12701220 if (core->prepare_count)
12711221 return;
....@@ -1288,7 +1238,7 @@
12881238 clk_pm_runtime_put(core);
12891239 }
12901240
1291
-static void clk_disable_unused_subtree(struct clk_core *core)
1241
+static void __init clk_disable_unused_subtree(struct clk_core *core)
12921242 {
12931243 struct clk_core *child;
12941244 unsigned long flags;
....@@ -1301,21 +1251,6 @@
13011251 if (dev_has_sync_state(core->dev) &&
13021252 !(core->flags & CLK_DONT_HOLD_STATE))
13031253 return;
1304
-
1305
- /*
1306
- * setting CLK_ENABLE_HAND_OFF flag triggers this conditional
1307
- *
1308
- * need_handoff_enable implies this clk was already enabled by
1309
- * __clk_init. now we have a proper user, so unset the flag in our
1310
- * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
1311
- * for details.
1312
- */
1313
- if (core->need_handoff_enable) {
1314
- core->need_handoff_enable = false;
1315
- flags = clk_enable_lock();
1316
- clk_core_disable(core);
1317
- clk_enable_unlock(flags);
1318
- }
13191254
13201255 if (core->flags & CLK_OPS_PARENT_ENABLE)
13211256 clk_core_prepare_enable(core->parent);
....@@ -1353,7 +1288,7 @@
13531288 clk_core_disable_unprepare(core->parent);
13541289 }
13551290
1356
-static bool clk_ignore_unused;
1291
+static bool clk_ignore_unused __initdata;
13571292 static int __init clk_ignore_unused_setup(char *__unused)
13581293 {
13591294 clk_ignore_unused = true;
....@@ -1361,7 +1296,7 @@
13611296 }
13621297 __setup("clk_ignore_unused", clk_ignore_unused_setup);
13631298
1364
-static int clk_disable_unused(void)
1299
+static int __init clk_disable_unused(void)
13651300 {
13661301 struct clk_core *core;
13671302
....@@ -1476,10 +1411,7 @@
14761411
14771412 static bool clk_core_can_round(struct clk_core * const core)
14781413 {
1479
- if (core->ops->determine_rate || core->ops->round_rate)
1480
- return true;
1481
-
1482
- return false;
1414
+ return core->ops->determine_rate || core->ops->round_rate;
14831415 }
14841416
14851417 static int clk_core_round_rate_nolock(struct clk_core *core,
....@@ -1521,6 +1453,21 @@
15211453 }
15221454 EXPORT_SYMBOL_GPL(__clk_determine_rate);
15231455
1456
+/**
1457
+ * clk_hw_round_rate() - round the given rate for a hw clk
1458
+ * @hw: the hw clk for which we are rounding a rate
1459
+ * @rate: the rate which is to be rounded
1460
+ *
1461
+ * Takes in a rate as input and rounds it to a rate that the clk can actually
1462
+ * use.
1463
+ *
1464
+ * Context: prepare_lock must be held.
1465
+ * For clk providers to call from within clk_ops such as .round_rate,
1466
+ * .determine_rate.
1467
+ *
1468
+ * Return: returns rounded rate of hw clk if clk supports round_rate operation
1469
+ * else returns the parent rate.
1470
+ */
15241471 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
15251472 {
15261473 int ret;
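
/*
 * Editor's aside (not part of this commit): a provider-side sketch of the
 * "call from within clk_ops" usage the kerneldoc above describes - a fixed
 * /2 divider's determine_rate op snapping the request to what its parent
 * can supply. my_div_determine_rate is hypothetical and assumes a parent
 * exists.
 */
static int my_div_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						  req->rate * 2);
	req->rate = req->best_parent_rate / 2;
	return 0;
}
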
....@@ -1642,18 +1589,12 @@
16421589 __clk_recalc_accuracies(child);
16431590 }
16441591
1645
-static long clk_core_get_accuracy(struct clk_core *core)
1592
+static long clk_core_get_accuracy_recalc(struct clk_core *core)
16461593 {
1647
- unsigned long accuracy;
1648
-
1649
- clk_prepare_lock();
16501594 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
16511595 __clk_recalc_accuracies(core);
16521596
1653
- accuracy = __clk_get_accuracy(core);
1654
- clk_prepare_unlock();
1655
-
1656
- return accuracy;
1597
+ return clk_core_get_accuracy_no_lock(core);
16571598 }
16581599
16591600 /**
....@@ -1667,10 +1608,16 @@
16671608 */
16681609 long clk_get_accuracy(struct clk *clk)
16691610 {
1611
+ long accuracy;
1612
+
16701613 if (!clk)
16711614 return 0;
16721615
1673
- return clk_core_get_accuracy(clk->core);
1616
+ clk_prepare_lock();
1617
+ accuracy = clk_core_get_accuracy_recalc(clk->core);
1618
+ clk_prepare_unlock();
1619
+
1620
+ return accuracy;
16741621 }
16751622 EXPORT_SYMBOL_GPL(clk_get_accuracy);
16761623
....@@ -1724,19 +1671,12 @@
17241671 __clk_recalc_rates(child, msg);
17251672 }
17261673
1727
-static unsigned long clk_core_get_rate(struct clk_core *core)
1674
+static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
17281675 {
1729
- unsigned long rate;
1730
-
1731
- clk_prepare_lock();
1732
-
17331676 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
17341677 __clk_recalc_rates(core, 0);
17351678
1736
- rate = clk_core_get_rate_nolock(core);
1737
- clk_prepare_unlock();
1738
-
1739
- return rate;
1679
+ return clk_core_get_rate_nolock(core);
17401680 }
17411681
17421682 /**
....@@ -1749,10 +1689,16 @@
17491689 */
17501690 unsigned long clk_get_rate(struct clk *clk)
17511691 {
1692
+ unsigned long rate;
1693
+
17521694 if (!clk)
17531695 return 0;
17541696
1755
- return clk_core_get_rate(clk->core);
1697
+ clk_prepare_lock();
1698
+ rate = clk_core_get_rate_recalc(clk->core);
1699
+ clk_prepare_unlock();
1700
+
1701
+ return rate;
17561702 }
17571703 EXPORT_SYMBOL_GPL(clk_get_rate);
17581704
....@@ -1764,12 +1710,58 @@
17641710 if (!parent)
17651711 return -EINVAL;
17661712
1767
- for (i = 0; i < core->num_parents; i++)
1768
- if (clk_core_get_parent_by_index(core, i) == parent)
1713
+ for (i = 0; i < core->num_parents; i++) {
1714
+ /* Found it first try! */
1715
+ if (core->parents[i].core == parent)
17691716 return i;
17701717
1771
- return -EINVAL;
1718
+ /* Something else is here, so keep looking */
1719
+ if (core->parents[i].core)
1720
+ continue;
1721
+
1722
+ /* Maybe core hasn't been cached but the hw is all we know? */
1723
+ if (core->parents[i].hw) {
1724
+ if (core->parents[i].hw == parent->hw)
1725
+ break;
1726
+
1727
+ /* Didn't match, but we're expecting a clk_hw */
1728
+ continue;
1729
+ }
1730
+
1731
+ /* Maybe it hasn't been cached (clk_set_parent() path) */
1732
+ if (parent == clk_core_get(core, i))
1733
+ break;
1734
+
1735
+ /* Fallback to comparing globally unique names */
1736
+ if (core->parents[i].name &&
1737
+ !strcmp(parent->name, core->parents[i].name))
1738
+ break;
1739
+ }
1740
+
1741
+ if (i == core->num_parents)
1742
+ return -EINVAL;
1743
+
1744
+ core->parents[i].core = parent;
1745
+ return i;
17721746 }
1747
+
1748
+/**
1749
+ * clk_hw_get_parent_index - return the index of the parent clock
1750
+ * @hw: clk_hw associated with the clk being consumed
1751
+ *
1752
+ * Fetches and returns the index of the parent clock. Returns -EINVAL if the given
1753
+ * clock does not have a current parent.
1754
+ */
1755
+int clk_hw_get_parent_index(struct clk_hw *hw)
1756
+{
1757
+ struct clk_hw *parent = clk_hw_get_parent(hw);
1758
+
1759
+ if (WARN_ON(parent == NULL))
1760
+ return -EINVAL;
1761
+
1762
+ return clk_fetch_parent_index(hw->core, parent->core);
1763
+}
1764
+EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
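
/*
 * Editor's sketch (not from this commit): recording the mux position in a
 * save_context op so it can be reprogrammed after power loss.
 * struct my_mux, to_my_mux() and my_mux_save_context are hypothetical.
 */
struct my_mux {				/* hypothetical driver state */
	struct clk_hw hw;
	int saved_parent_idx;
};
#define to_my_mux(_hw) container_of(_hw, struct my_mux, hw)

static int my_mux_save_context(struct clk_hw *hw)
{
	struct my_mux *mux = to_my_mux(hw);
	int idx = clk_hw_get_parent_index(hw);

	if (idx < 0)
		return idx;
	mux->saved_parent_idx = idx;
	return 0;
}
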
17731765
17741766 static void clk_core_hold_state(struct clk_core *core)
17751767 {
....@@ -1979,59 +1971,12 @@
19791971 return ret;
19801972 }
19811973
1982
-/*
1983
- * Vote for the voltage level required for core->new_rate. Keep track of all
1984
- * clocks with a changed voltage level in clk_rate_change_list.
1985
- */
1986
-static int clk_vote_new_rate_vdd(struct clk_core *core)
1987
-{
1988
- int cur_level, next_level;
1989
- int ret;
1990
-
1991
- if (IS_ERR_OR_NULL(core) || !core->vdd_class)
1992
- return 0;
1993
-
1994
- if (!clk_core_is_prepared(core))
1995
- return 0;
1996
-
1997
- cur_level = core->new_vdd_class_vote;
1998
- next_level = clk_find_vdd_level(core, core->new_rate);
1999
- if (cur_level == next_level)
2000
- return 0;
2001
-
2002
- ret = clk_vote_vdd_level(core->vdd_class, next_level);
2003
- if (ret)
2004
- return ret;
2005
-
2006
- core->new_vdd_class_vote = next_level;
2007
-
2008
- if (list_empty(&core->rate_change_node)) {
2009
- list_add(&core->rate_change_node, &clk_rate_change_list);
2010
- } else {
2011
- /*
2012
- * A different new_rate has been determined for a clock that
2013
- * was already encountered in the clock tree traversal so the
2014
- * level that was previously voted for it should be removed.
2015
- */
2016
- ret = clk_unvote_vdd_level(core->vdd_class, cur_level);
2017
- if (ret)
2018
- return ret;
2019
- }
2020
-
2021
- return 0;
2022
-}
2023
-
2024
-static int clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1974
+static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
20251975 struct clk_core *new_parent, u8 p_index)
20261976 {
20271977 struct clk_core *child;
2028
- int ret;
20291978
20301979 core->new_rate = new_rate;
2031
- ret = clk_vote_new_rate_vdd(core);
2032
- if (ret)
2033
- return ret;
2034
-
20351980 core->new_parent = new_parent;
20361981 core->new_parent_index = p_index;
20371982 /* include clk in new parent's PRE_RATE_CHANGE notifications */
....@@ -2041,12 +1986,8 @@
20411986
20421987 hlist_for_each_entry(child, &core->children, child_node) {
20431988 child->new_rate = clk_recalc(child, new_rate);
2044
- ret = clk_calc_subtree(child, child->new_rate, NULL, 0);
2045
- if (ret)
2046
- return ret;
1989
+ clk_calc_subtree(child, child->new_rate, NULL, 0);
20471990 }
2048
-
2049
- return 0;
20501991 }
20511992
20521993 /*
....@@ -2125,23 +2066,12 @@
21252066 }
21262067 }
21272068
2128
- /*
2129
- * Certain PLLs only have 16 bits to program the fractional divider.
2130
- * Hence the programmed rate might be slightly different than the
2131
- * requested one.
2132
- */
21332069 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2134
- (DIV_ROUND_CLOSEST(best_parent_rate, 1000) !=
2135
- DIV_ROUND_CLOSEST(parent->rate, 1000)))
2070
+ best_parent_rate != parent->rate)
21362071 top = clk_calc_new_rates(parent, best_parent_rate);
21372072
21382073 out:
2139
- if (!clk_is_rate_level_valid(core, rate))
2140
- return NULL;
2141
-
2142
- ret = clk_calc_subtree(core, new_rate, parent, p_index);
2143
- if (ret)
2144
- return NULL;
2074
+ clk_calc_subtree(core, new_rate, parent, p_index);
21452075
21462076 return top;
21472077 }
....@@ -2166,6 +2096,13 @@
21662096 fail_clk = core;
21672097 }
21682098
2099
+ if (core->ops->pre_rate_change) {
2100
+ ret = core->ops->pre_rate_change(core->hw, core->rate,
2101
+ core->new_rate);
2102
+ if (ret)
2103
+ fail_clk = core;
2104
+ }
2105
+
21692106 hlist_for_each_entry(child, &core->children, child_node) {
21702107 /* Skip children who will be reparented to another clock */
21712108 if (child->new_parent && child->new_parent != core)
....@@ -2185,14 +2122,11 @@
21852122 return fail_clk;
21862123 }
21872124
2188
-static int clk_core_set_rate_nolock(struct clk_core *core,
2189
- unsigned long req_rate);
2190
-
21912125 /*
21922126 * walk down a subtree and set the new rates notifying the rate
21932127 * change on the way
21942128 */
2195
-static int clk_change_rate(struct clk_core *core)
2129
+static void clk_change_rate(struct clk_core *core)
21962130 {
21972131 struct clk_core *child;
21982132 struct hlist_node *tmp;
....@@ -2201,7 +2135,6 @@
22012135 bool skip_set_rate = false;
22022136 struct clk_core *old_parent;
22032137 struct clk_core *parent = NULL;
2204
- int rc = 0;
22052138
22062139 old_rate = core->rate;
22072140
....@@ -2213,9 +2146,8 @@
22132146 best_parent_rate = core->parent->rate;
22142147 }
22152148
2216
- rc = clk_pm_runtime_get(core);
2217
- if (rc)
2218
- return rc;
2149
+ if (clk_pm_runtime_get(core))
2150
+ return;
22192151
22202152 if (core->flags & CLK_SET_RATE_UNGATE) {
22212153 unsigned long flags;
....@@ -2225,8 +2157,6 @@
22252157 clk_core_enable(core);
22262158 clk_enable_unlock(flags);
22272159 }
2228
-
2229
- trace_clk_set_rate(core, core->new_rate);
22302160
22312161 if (core->new_parent && core->new_parent != core->parent) {
22322162 old_parent = __clk_set_parent_before(core, core->new_parent);
....@@ -2248,14 +2178,10 @@
22482178 if (core->flags & CLK_OPS_PARENT_ENABLE)
22492179 clk_core_prepare_enable(parent);
22502180
2251
- if (!skip_set_rate && core->ops->set_rate) {
2252
- rc = core->ops->set_rate(core->hw, core->new_rate,
2253
- best_parent_rate);
2254
- if (rc) {
2255
- trace_clk_set_rate_complete(core, core->new_rate);
2256
- goto err_set_rate;
2257
- }
2258
- }
2181
+ trace_clk_set_rate(core, core->new_rate);
2182
+
2183
+ if (!skip_set_rate && core->ops->set_rate)
2184
+ core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
22592185
22602186 trace_clk_set_rate_complete(core, core->new_rate);
22612187
....@@ -2279,6 +2205,9 @@
22792205 if (core->flags & CLK_RECALC_NEW_RATES)
22802206 (void)clk_calc_new_rates(core, core->new_rate);
22812207
2208
+ if (core->ops->post_rate_change)
2209
+ core->ops->post_rate_change(core->hw, old_rate, core->rate);
2210
+
22822211 /*
22832212 * Use safe iteration, as change_rate can actually swap parents
22842213 * for certain clock types.
....@@ -2287,24 +2216,14 @@
22872216 /* Skip children who will be reparented to another clock */
22882217 if (child->new_parent && child->new_parent != core)
22892218 continue;
2290
- rc = clk_change_rate(child);
2291
- if (rc)
2292
- goto err_set_rate;
2219
+ clk_change_rate(child);
22932220 }
22942221
22952222 /* handle the new child who might not be in core->children yet */
22962223 if (core->new_child)
2297
- rc = clk_change_rate(core->new_child);
2224
+ clk_change_rate(core->new_child);
22982225
2299
- /* handle a changed clock that needs to readjust its rate */
2300
- if (core->flags & CLK_KEEP_REQ_RATE && core->req_rate
2301
- && core->new_rate != old_rate
2302
- && core->new_rate != core->req_rate)
2303
- clk_core_set_rate_nolock(core, core->req_rate);
2304
-
2305
-err_set_rate:
23062226 clk_pm_runtime_put(core);
2307
- return rc;
23082227 }
23092228
23102229 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
....@@ -2334,85 +2253,12 @@
23342253 return ret ? 0 : req.rate;
23352254 }
23362255
2337
-/*
2338
- * Unvote for the voltage level required for each core->new_vdd_class_vote in
2339
- * clk_rate_change_list. This is used when undoing voltage requests after an
2340
- * error is encountered before any physical rate changing.
2341
- */
2342
-static void clk_unvote_new_rate_vdd(void)
2343
-{
2344
- struct clk_core *core;
2345
-
2346
- list_for_each_entry(core, &clk_rate_change_list, rate_change_node) {
2347
- clk_unvote_vdd_level(core->vdd_class, core->new_vdd_class_vote);
2348
- core->new_vdd_class_vote = core->vdd_class_vote;
2349
- }
2350
-}
2351
-
2352
-/*
2353
- * Unvote for the voltage level required for each core->vdd_class_vote in
2354
- * clk_rate_change_list.
2355
- */
2356
-static int clk_unvote_old_rate_vdd(void)
2357
-{
2358
- struct clk_core *core;
2359
- int ret;
2360
-
2361
- list_for_each_entry(core, &clk_rate_change_list, rate_change_node) {
2362
- ret = clk_unvote_vdd_level(core->vdd_class,
2363
- core->vdd_class_vote);
2364
- if (ret)
2365
- return ret;
2366
- }
2367
-
2368
- return 0;
2369
-}
2370
-
2371
-/*
2372
- * In the case that rate setting fails, apply the max voltage level needed
2373
- * by either the old or new rate for each changed clock.
2374
- */
2375
-static void clk_vote_safe_vdd(void)
2376
-{
2377
- struct clk_core *core;
2378
-
2379
- list_for_each_entry(core, &clk_rate_change_list, rate_change_node) {
2380
- if (core->vdd_class_vote > core->new_vdd_class_vote) {
2381
- clk_vote_vdd_level(core->vdd_class,
2382
- core->vdd_class_vote);
2383
- clk_unvote_vdd_level(core->vdd_class,
2384
- core->new_vdd_class_vote);
2385
- core->new_vdd_class_vote = core->vdd_class_vote;
2386
- }
2387
- }
2388
-}
2389
-
2390
-static void clk_cleanup_vdd_votes(void)
2391
-{
2392
- struct clk_core *core, *temp;
2393
-
2394
- list_for_each_entry_safe(core, temp, &clk_rate_change_list,
2395
- rate_change_node) {
2396
- core->vdd_class_vote = core->new_vdd_class_vote;
2397
- list_del_init(&core->rate_change_node);
2398
- }
2399
-}
2400
-
24012256 static int clk_core_set_rate_nolock(struct clk_core *core,
24022257 unsigned long req_rate)
24032258 {
24042259 struct clk_core *top, *fail_clk;
24052260 unsigned long rate;
24062261 int ret = 0;
2407
- /*
2408
- * The prepare lock ensures mutual exclusion with other tasks.
2409
- * set_rate_nesting_count is a static so that it can be incremented in
2410
- * the case of reentrancy caused by a set_rate() ops callback itself
2411
- * calling clk_set_rate(). That way, the voltage level votes for the
2412
- * old rates are safely removed when the original invocation of this
2413
- * function completes.
2414
- */
2415
- static unsigned int set_rate_nesting_count;
24162262
24172263 if (!core)
24182264 return 0;
....@@ -2427,63 +2273,31 @@
24272273 if (clk_core_rate_is_protected(core))
24282274 return -EBUSY;
24292275
2430
- set_rate_nesting_count++;
2431
-
24322276 /* calculate new rates and get the topmost changed clock */
24332277 top = clk_calc_new_rates(core, req_rate);
2434
- if (!top) {
2435
- ret = -EINVAL;
2436
- goto pre_rate_change_err;
2437
- }
2278
+ if (!top)
2279
+ return -EINVAL;
24382280
24392281 ret = clk_pm_runtime_get(core);
24402282 if (ret)
2441
- goto pre_rate_change_err;
2283
+ return ret;
24422284
24432285 /* notify that we are about to change rates */
24442286 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
24452287 if (fail_clk) {
2446
- pr_debug("%s: failed to set %s clock to run at %lu\n", __func__,
2447
- fail_clk->name, req_rate);
2288
+ pr_debug("%s: failed to set %s rate\n", __func__,
2289
+ fail_clk->name);
24482290 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
24492291 ret = -EBUSY;
2450
- clk_pm_runtime_put(core);
2451
- goto pre_rate_change_err;
2292
+ goto err;
24522293 }
2453
-
2454
- core->req_rate = req_rate;
24552294
24562295 /* change the rates */
2457
- ret = clk_change_rate(top);
2458
- set_rate_nesting_count--;
2459
- if (ret) {
2460
- pr_err("%s: failed to set %s clock to run at %lu\n", __func__,
2461
- top->name, req_rate);
2462
- clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2463
- clk_vote_safe_vdd();
2464
- goto post_rate_change_err;
2465
- }
2296
+ clk_change_rate(top);
24662297
2467
-post_rate_change_err:
2468
- /*
2469
- * Only remove vdd_class level votes for old clock rates after all
2470
- * nested clk_set_rate() calls have completed.
2471
- */
2472
- if (set_rate_nesting_count == 0) {
2473
- ret |= clk_unvote_old_rate_vdd();
2474
- clk_cleanup_vdd_votes();
2475
- }
2476
-
2298
+ core->req_rate = req_rate;
2299
+err:
24772300 clk_pm_runtime_put(core);
2478
-
2479
- return ret;
2480
-
2481
-pre_rate_change_err:
2482
- set_rate_nesting_count--;
2483
- if (set_rate_nesting_count == 0) {
2484
- clk_unvote_new_rate_vdd();
2485
- clk_cleanup_vdd_votes();
2486
- }
24872301
24882302 return ret;
24892303 }
....@@ -2534,7 +2348,7 @@
25342348 EXPORT_SYMBOL_GPL(clk_set_rate);
25352349
25362350 /**
2537
- * clk_set_rate_exclusive - specify a new rate get exclusive control
2351
+ * clk_set_rate_exclusive - specify a new rate and get exclusive control
25382352 * @clk: the clk whose rate is being changed
25392353 * @rate: the new rate for clk
25402354 *
....@@ -2542,7 +2356,7 @@
25422356 * within a critical section
25432357 *
25442358 * This can be used initially to ensure that at least 1 consumer is
2545
- * statisfied when several consumers are competing for exclusivity over the
2359
+ * satisfied when several consumers are competing for exclusivity over the
25462360 * same clock provider.
25472361 *
25482362 * The exclusivity is not applied if setting the rate failed.
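
/*
 * Editor's sketch (not from this commit): the typical consumer pairing of
 * the exclusive-rate API described above. my_do_transfer and the rate are
 * hypothetical; 'clk' comes from an earlier clk_get().
 */
static int my_do_transfer(struct clk *clk)
{
	int ret;

	ret = clk_set_rate_exclusive(clk, 104000000);
	if (ret)
		return ret;

	/* ...rate-sensitive work: no other consumer may change the rate... */

	clk_rate_exclusive_put(clk);
	return 0;
}
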
....@@ -2614,6 +2428,11 @@
26142428 clk->min_rate = min;
26152429 clk->max_rate = max;
26162430
2431
+ if (!clk_core_check_boundaries(clk->core, min, max)) {
2432
+ ret = -EINVAL;
2433
+ goto out;
2434
+ }
2435
+
26172436 rate = clk_core_get_rate_nolock(clk->core);
26182437 if (rate < min || rate > max) {
26192438 /*
....@@ -2642,6 +2461,7 @@
26422461 }
26432462 }
26442463
2464
+out:
26452465 if (clk->exclusive_count)
26462466 clk_core_rate_protect(clk->core);
26472467
....@@ -2744,6 +2564,7 @@
27442564 bool clk_has_parent(struct clk *clk, struct clk *parent)
27452565 {
27462566 struct clk_core *core, *parent_core;
2567
+ int i;
27472568
27482569 /* NULL clocks should be nops, so return success if either is NULL. */
27492570 if (!clk || !parent)
....@@ -2756,8 +2577,11 @@
27562577 if (core->parent == parent_core)
27572578 return true;
27582579
2759
- return match_string(core->parent_names, core->num_parents,
2760
- parent_core->name) >= 0;
2580
+ for (i = 0; i < core->num_parents; i++)
2581
+ if (!strcmp(core->parents[i].name, parent_core->name))
2582
+ return true;
2583
+
2584
+ return false;
27612585 }
27622586 EXPORT_SYMBOL_GPL(clk_has_parent);
27632587
....@@ -2773,10 +2597,10 @@
27732597 if (!core)
27742598 return 0;
27752599
2776
- if (core->parent == parent && !(core->flags & CLK_IS_MEASURE))
2600
+ if (core->parent == parent)
27772601 return 0;
27782602
2779
- /* verify ops for for multi-parent clks */
2603
+ /* verify ops for multi-parent clks */
27802604 if (core->num_parents > 1 && !core->ops->set_parent)
27812605 return -EPERM;
27822606
....@@ -2825,6 +2649,12 @@
28252649
28262650 return ret;
28272651 }
2652
+
2653
+int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2654
+{
2655
+ return clk_core_set_parent_nolock(hw->core, parent->core);
2656
+}
2657
+EXPORT_SYMBOL_GPL(clk_hw_set_parent);
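
/*
 * Editor's sketch (not from this commit): a provider's restore_context op
 * reparenting back to a previously saved index with the new helper above.
 * to_my_mux() and saved_parent_idx are hypothetical driver state captured
 * by a matching save_context op.
 */
static void my_mux_restore_context(struct clk_hw *hw)
{
	struct my_mux *mux = to_my_mux(hw);
	struct clk_hw *parent;

	parent = clk_hw_get_parent_by_index(hw, mux->saved_parent_idx);
	if (parent)
		clk_hw_set_parent(hw, parent);
}
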
28282658
28292659 /**
28302660 * clk_set_parent - switch the parent of a mux clk
....@@ -2944,12 +2774,14 @@
29442774 {
29452775 int ret;
29462776
2947
- clk_prepare_lock();
2777
+ lockdep_assert_held(&prepare_lock);
2778
+ if (!core->ops->get_phase)
2779
+ return 0;
2780
+
29482781 /* Always try to update cached phase if possible */
2949
- if (core->ops->get_phase)
2950
- core->phase = core->ops->get_phase(core->hw);
2951
- ret = core->phase;
2952
- clk_prepare_unlock();
2782
+ ret = core->ops->get_phase(core->hw);
2783
+ if (ret >= 0)
2784
+ core->phase = ret;
29532785
29542786 return ret;
29552787 }
....@@ -2963,10 +2795,16 @@
29632795 */
29642796 int clk_get_phase(struct clk *clk)
29652797 {
2798
+ int ret;
2799
+
29662800 if (!clk)
29672801 return 0;
29682802
2969
- return clk_core_get_phase(clk->core);
2803
+ clk_prepare_lock();
2804
+ ret = clk_core_get_phase(clk->core);
2805
+ clk_prepare_unlock();
2806
+
2807
+ return ret;
29702808 }
29712809 EXPORT_SYMBOL_GPL(clk_get_phase);
29722810
....@@ -3162,26 +3000,9 @@
31623000 }
31633001 EXPORT_SYMBOL_GPL(clk_is_match);
31643002
3165
-int clk_set_flags(struct clk *clk, unsigned long flags)
3166
-{
3167
- if (!clk)
3168
- return 0;
3169
-
3170
- if (!clk->core->ops->set_flags)
3171
- return -EINVAL;
3172
-
3173
- return clk->core->ops->set_flags(clk->core->hw, flags);
3174
-}
3175
-EXPORT_SYMBOL_GPL(clk_set_flags);
3176
-
3177
-void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
3178
-{
3179
-}
3180
-EXPORT_SYMBOL(clk_debug_print_hw);
3181
-
31823003 /*** debugfs support ***/
31833004
3184
-#ifdef CONFIG_COMMON_CLK_DEBUGFS
3005
+#ifdef CONFIG_DEBUG_FS
31853006 #include <linux/debugfs.h>
31863007
31873008 static struct dentry *rootdir;
....@@ -3197,25 +3018,28 @@
31973018 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
31983019 int level)
31993020 {
3200
- if (!c)
3201
- return;
3021
+ int phase;
32023022
3203
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
3023
+ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
32043024 level * 3 + 1, "",
32053025 30 - level * 3, c->name,
32063026 c->enable_count, c->prepare_count, c->protect_count,
3207
- clk_core_get_rate(c), clk_core_get_accuracy(c),
3208
- clk_core_get_phase(c),
3209
- clk_core_get_scaled_duty_cycle(c, 100000));
3027
+ clk_core_get_rate_recalc(c),
3028
+ clk_core_get_accuracy_recalc(c));
3029
+
3030
+ phase = clk_core_get_phase(c);
3031
+ if (phase >= 0)
3032
+ seq_printf(s, "%5d", phase);
3033
+ else
3034
+ seq_puts(s, "-----");
3035
+
3036
+ seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
32103037 }
32113038
32123039 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
32133040 int level)
32143041 {
32153042 struct clk_core *child;
3216
-
3217
- if (!c)
3218
- return;
32193043
32203044 clk_summary_show_one(s, c, level);
32213045
....@@ -3246,17 +3070,23 @@
32463070
32473071 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
32483072 {
3249
- if (!c)
3250
- return;
3073
+ int phase;
3074
+ unsigned long min_rate, max_rate;
3075
+
3076
+ clk_core_get_boundaries(c, &min_rate, &max_rate);
32513077
32523078 /* This should be JSON format, i.e. elements separated with a comma */
32533079 seq_printf(s, "\"%s\": { ", c->name);
32543080 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
32553081 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
32563082 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3257
- seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
3258
- seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
3259
- seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
3083
+ seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
3084
+ seq_printf(s, "\"min_rate\": %lu,", min_rate);
3085
+ seq_printf(s, "\"max_rate\": %lu,", max_rate);
3086
+ seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
3087
+ phase = clk_core_get_phase(c);
3088
+ if (phase >= 0)
3089
+ seq_printf(s, "\"phase\": %d,", phase);
32603090 seq_printf(s, "\"duty_cycle\": %u",
32613091 clk_core_get_scaled_duty_cycle(c, 100000));
32623092 }
....@@ -3264,9 +3094,6 @@
32643094 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
32653095 {
32663096 struct clk_core *child;
3267
-
3268
- if (!c)
3269
- return;
32703097
32713098 clk_dump_one(s, c, level);
32723099
....@@ -3303,6 +3130,70 @@
33033130 }
33043131 DEFINE_SHOW_ATTRIBUTE(clk_dump);
33053132
3133
+#ifdef CONFIG_ANDROID_BINDER_IPC
3134
+#define CLOCK_ALLOW_WRITE_DEBUGFS
3135
+#else
3136
+#undef CLOCK_ALLOW_WRITE_DEBUGFS
3137
+#endif
3138
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3139
+/*
3140
+ * This can be dangerous, therefore don't provide any real compile time
3141
+ * configuration option for this feature.
3142
+ * People who want to use this will need to modify the source code directly.
3143
+ */
3144
+static int clk_rate_set(void *data, u64 val)
3145
+{
3146
+ struct clk_core *core = data;
3147
+ int ret;
3148
+
3149
+ clk_prepare_lock();
3150
+ ret = clk_core_set_rate_nolock(core, val);
3151
+ clk_prepare_unlock();
3152
+
3153
+ return ret;
3154
+}
3155
+
3156
+#define clk_rate_mode 0644
3157
+
3158
+static int clk_prepare_enable_set(void *data, u64 val)
3159
+{
3160
+ struct clk_core *core = data;
3161
+ int ret = 0;
3162
+
3163
+ if (val)
3164
+ ret = clk_prepare_enable(core->hw->clk);
3165
+ else
3166
+ clk_disable_unprepare(core->hw->clk);
3167
+
3168
+ return ret;
3169
+}
3170
+
3171
+static int clk_prepare_enable_get(void *data, u64 *val)
3172
+{
3173
+ struct clk_core *core = data;
3174
+
3175
+ *val = core->enable_count && core->prepare_count;
3176
+ return 0;
3177
+}
3178
+
3179
+DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3180
+ clk_prepare_enable_set, "%llu\n");
3181
+
3182
+#else
3183
+#define clk_rate_set NULL
3184
+#define clk_rate_mode 0444
3185
+#endif
3186
+
3187
+static int clk_rate_get(void *data, u64 *val)
3188
+{
3189
+ struct clk_core *core = data;
3190
+
3191
+ *val = core->rate;
3192
+ return 0;
3193
+}
3194
+
3195
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3196
+
33063197 static const struct {
33073198 unsigned long flag;
33083199 const char *name;
....@@ -3312,7 +3203,6 @@
33123203 ENTRY(CLK_SET_PARENT_GATE),
33133204 ENTRY(CLK_SET_RATE_PARENT),
33143205 ENTRY(CLK_IGNORE_UNUSED),
3315
- ENTRY(CLK_IS_BASIC),
33163206 ENTRY(CLK_GET_RATE_NOCACHE),
33173207 ENTRY(CLK_SET_RATE_NO_REPARENT),
33183208 ENTRY(CLK_GET_ACCURACY_NOCACHE),
....@@ -3321,8 +3211,6 @@
33213211 ENTRY(CLK_IS_CRITICAL),
33223212 ENTRY(CLK_OPS_PARENT_ENABLE),
33233213 ENTRY(CLK_DUTY_CYCLE_PARENT),
3324
- ENTRY(CLK_DONT_HOLD_STATE),
3325
- ENTRY(CLK_KEEP_REQ_RATE),
33263214 #undef ENTRY
33273215 };
33283216
....@@ -3347,19 +3235,64 @@
33473235 }
33483236 DEFINE_SHOW_ATTRIBUTE(clk_flags);
33493237
3238
+static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3239
+ unsigned int i, char terminator)
3240
+{
3241
+ struct clk_core *parent;
3242
+
3243
+ /*
3244
+ * Go through the following options to fetch a parent's name.
3245
+ *
3246
+ * 1. Fetch the registered parent clock and use its name
3247
+ * 2. Use the global (fallback) name if specified
3248
+ * 3. Use the local fw_name if provided
3249
+ * 4. Fetch parent clock's clock-output-name if DT index was set
3250
+ *
3251
+ * This may still fail in some cases, such as when the parent is
3252
+ * specified directly via a struct clk_hw pointer, but it isn't
3253
+ * registered (yet).
3254
+ */
3255
+ parent = clk_core_get_parent_by_index(core, i);
3256
+ if (parent)
3257
+ seq_puts(s, parent->name);
3258
+ else if (core->parents[i].name)
3259
+ seq_puts(s, core->parents[i].name);
3260
+ else if (core->parents[i].fw_name)
3261
+ seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3262
+ else if (core->parents[i].index >= 0)
3263
+ seq_puts(s,
3264
+ of_clk_get_parent_name(core->of_node,
3265
+ core->parents[i].index));
3266
+ else
3267
+ seq_puts(s, "(missing)");
3268
+
3269
+ seq_putc(s, terminator);
3270
+}
3271
+
33503272 static int possible_parents_show(struct seq_file *s, void *data)
33513273 {
33523274 struct clk_core *core = s->private;
33533275 int i;
33543276
33553277 for (i = 0; i < core->num_parents - 1; i++)
3356
- seq_printf(s, "%s ", core->parent_names[i]);
3278
+ possible_parent_show(s, core, i, ' ');
33573279
3358
- seq_printf(s, "%s\n", core->parent_names[i]);
3280
+ possible_parent_show(s, core, i, '\n');
33593281
33603282 return 0;
33613283 }
33623284 DEFINE_SHOW_ATTRIBUTE(possible_parents);
3285
+
3286
+static int current_parent_show(struct seq_file *s, void *data)
3287
+{
3288
+ struct clk_core *core = s->private;
3289
+
3290
+ if (core->parent)
3291
+ seq_printf(s, "%s\n", core->parent->name);
3292
+
3293
+ return 0;
3294
+}
3295
+DEFINE_SHOW_ATTRIBUTE(current_parent);
33633296
33643297 static int clk_duty_cycle_show(struct seq_file *s, void *data)
33653298 {
....@@ -3372,210 +3305,33 @@
33723305 }
33733306 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
33743307
3375
-static int clock_debug_rate_set(void *data, u64 val)
3308
+static int clk_min_rate_show(struct seq_file *s, void *data)
33763309 {
3377
- struct clk_core *core = data;
3378
- int ret;
3310
+ struct clk_core *core = s->private;
3311
+ unsigned long min_rate, max_rate;
33793312
3380
- ret = clk_set_rate(core->hw->clk, val);
3381
- if (ret)
3382
- pr_err("clk_set_rate(%lu) failed (%d)\n",
3383
- (unsigned long)val, ret);
3384
-
3385
- return ret;
3386
-}
3387
-
3388
-static int clock_debug_rate_get(void *data, u64 *val)
3389
-{
3390
- struct clk_core *core = data;
3391
-
3392
- *val = core->hw->core->rate;
3313
+ clk_prepare_lock();
3314
+ clk_core_get_boundaries(core, &min_rate, &max_rate);
3315
+ clk_prepare_unlock();
3316
+ seq_printf(s, "%lu\n", min_rate);
33933317
33943318 return 0;
33953319 }
3320
+DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
33963321
3397
-DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
3398
- clock_debug_rate_set, "%llu\n");
3399
-
3400
-static int clock_available_parent_show(struct seq_file *s, void *data)
3322
+static int clk_max_rate_show(struct seq_file *s, void *data)
34013323 {
3402
- struct clk_core *core = (struct clk_core *)s->private;
3403
- int i;
3324
+ struct clk_core *core = s->private;
3325
+ unsigned long min_rate, max_rate;
34043326
3405
- for (i = 0; i < core->num_parents; i++) {
3406
- if (!core->parents[i])
3407
- continue;
3408
- seq_printf(s, "%s ", core->parents[i]->name);
3409
- }
3410
- seq_puts(s, "\n");
3327
+ clk_prepare_lock();
3328
+ clk_core_get_boundaries(core, &min_rate, &max_rate);
3329
+ clk_prepare_unlock();
3330
+ seq_printf(s, "%lu\n", max_rate);
34113331
34123332 return 0;
34133333 }
3414
-
3415
-static int clock_available_parent_open(struct inode *inode, struct file *file)
3416
-{
3417
- return single_open(file, clock_available_parent_show, inode->i_private);
3418
-}
3419
-
3420
-static const struct file_operations clock_available_parent_fops = {
3421
- .open = clock_available_parent_open,
3422
- .read = seq_read,
3423
- .llseek = seq_lseek,
3424
- .release = single_release,
3425
-};
3426
-
3427
-static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
3428
- size_t cnt, loff_t *ppos)
3429
-{
3430
- char name[256] = {0};
3431
- struct clk_core *core = filp->private_data;
3432
- struct clk_core *p = core->parent;
3433
-
3434
- snprintf(name, sizeof(name), "%s\n", p ? p->name : "None\n");
3435
-
3436
- return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
3437
-}
3438
-
3439
-static ssize_t clock_parent_write(struct file *filp, const char __user *buf,
3440
- size_t cnt, loff_t *ppos)
3441
-{
3442
- char temp[256] = {0};
3443
- char name[256] = {0};
3444
- struct clk_core *core = filp->private_data;
3445
- unsigned int ret, i;
3446
-
3447
- if (copy_from_user(temp, buf, cnt))
3448
- return -EINVAL;
3449
-
3450
- ret = sscanf(temp, "%s", name);
3451
- if (ret != 1)
3452
- return -EINVAL;
3453
-
3454
- for (i = 0; i < core->num_parents; i++) {
3455
- if (!core->parents[i])
3456
- continue;
3457
- if (!strcmp(core->parents[i]->name, name)) {
3458
- if (core->parents[i] != core->parent)
3459
- clk_core_set_parent_nolock(core,
3460
- core->parents[i]);
3461
- break;
3462
- }
3463
- }
3464
-
3465
- return cnt;
3466
-}
3467
-
3468
-static const struct file_operations clock_parent_fops = {
3469
- .open = simple_open,
3470
- .read = clock_parent_read,
3471
- .write = clock_parent_write,
3472
-};
3473
-
3474
-static int clock_debug_enable_set(void *data, u64 val)
3475
-{
3476
- struct clk_core *core = data;
3477
- int rc = 0;
3478
-
3479
- if (val)
3480
- rc = clk_prepare_enable(core->hw->clk);
3481
- else
3482
- clk_disable_unprepare(core->hw->clk);
3483
-
3484
- return rc;
3485
-}
3486
-
3487
-static int clock_debug_enable_get(void *data, u64 *val)
3488
-{
3489
- struct clk_core *core = data;
3490
- int enabled = 0;
3491
-
3492
- enabled = core->enable_count;
3493
-
3494
- *val = enabled;
3495
-
3496
- return 0;
3497
-}
3498
-
3499
-DEFINE_DEBUGFS_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
3500
- clock_debug_enable_set, "%lld\n");
3501
-
3502
-#define clock_debug_output(m, c, fmt, ...) \
3503
-do { \
3504
- if (m) \
3505
- seq_printf(m, fmt, ##__VA_ARGS__); \
3506
- else if (c) \
3507
- pr_cont(fmt, ##__VA_ARGS__); \
3508
- else \
3509
- pr_info(fmt, ##__VA_ARGS__); \
3510
-} while (0)
3511
-
3512
-static int clock_debug_print_clock(struct clk_core *c, struct seq_file *s)
3513
-{
3514
- char *start = "";
3515
- struct clk *clk;
3516
-
3517
- if (!c || !c->prepare_count)
3518
- return 0;
3519
-
3520
- clk = c->hw->clk;
3521
-
3522
- clock_debug_output(s, 0, "\t");
3523
-
3524
- do {
3525
- clock_debug_output(s, 1, "%s%s:%u:%u [%ld]", start,
3526
- clk->core->name,
3527
- clk->core->prepare_count,
3528
- clk->core->enable_count,
3529
- clk->core->rate);
3530
- start = " -> ";
3531
- } while ((clk = clk_get_parent(clk)));
3532
-
3533
- clock_debug_output(s, 1, "\n");
3534
-
3535
- return 1;
3536
-}
3537
-
3538
-/*
3539
- * clock_debug_print_enabled_clocks() - Print names of enabled clocks
3540
- */
3541
-static void clock_debug_print_enabled_clocks(struct seq_file *s)
3542
-{
3543
- struct clk_core *core;
3544
- int cnt = 0;
3545
-
3546
- clock_debug_output(s, 0, "Enabled clocks:\n");
3547
-
3548
- mutex_lock(&clk_debug_lock);
3549
-
3550
- hlist_for_each_entry(core, &clk_debug_list, debug_node)
3551
- cnt += clock_debug_print_clock(core, s);
3552
-
3553
- mutex_unlock(&clk_debug_lock);
3554
-
3555
- if (cnt)
3556
- clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
3557
- else
3558
- clock_debug_output(s, 0, "No clocks enabled.\n");
3559
-}
3560
-
3561
-static int enabled_clocks_show(struct seq_file *s, void *unused)
3562
-{
3563
- clock_debug_print_enabled_clocks(s);
3564
-
3565
- return 0;
3566
-}
3567
-
3568
-static int enabled_clocks_open(struct inode *inode, struct file *file)
3569
-{
3570
- return single_open(file, enabled_clocks_show, inode->i_private);
3571
-}
3572
-
3573
-static const struct file_operations clk_enabled_list_fops = {
3574
- .open = enabled_clocks_open,
3575
- .read = seq_read,
3576
- .llseek = seq_lseek,
3577
- .release = seq_release,
3578
-};
3334
+DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
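DEFINE_SHOW_ATTRIBUTE() generates the single_open() boilerplate from one foo_show() routine, which is why the hand-rolled open handlers and file_operations tables above could be deleted. A sketch of roughly what the macro in include/linux/seq_file.h expands to for clk_min_rate (an illustration of the expansion, not code from this patch):

static int clk_min_rate_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the clk_core handed to debugfs_create_file() */
	return single_open(file, clk_min_rate_show, inode->i_private);
}

static const struct file_operations clk_min_rate_fops = {
	.owner   = THIS_MODULE,
	.open    = clk_min_rate_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};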
35793335
35803336 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
35813337 {
....@@ -3587,22 +3343,27 @@
35873343 root = debugfs_create_dir(core->name, pdentry);
35883344 core->dentry = root;
35893345
3590
- debugfs_create_file("clk_rate", 0444, root, core,
3591
- &clock_rate_fops);
3346
+ debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3347
+ &clk_rate_fops);
3348
+ debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3349
+ debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
35923350 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
35933351 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
35943352 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
35953353 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3596
- debugfs_create_file("clk_enable_count", 0444, root, core,
3597
- &clock_enable_fops);
3354
+ debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
35983355 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
35993356 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
36003357 debugfs_create_file("clk_duty_cycle", 0444, root, core,
36013358 &clk_duty_cycle_fops);
3602
- debugfs_create_file("clk_available_parent", 0444, root, core,
3603
- &clock_available_parent_fops);
3604
- debugfs_create_file("clk_parent", 0444, root, core,
3605
- &clock_parent_fops);
3359
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3360
+ debugfs_create_file("clk_prepare_enable", 0644, root, core,
3361
+ &clk_prepare_enable_fops);
3362
+#endif
3363
+
3364
+ if (core->num_parents > 0)
3365
+ debugfs_create_file("clk_parent", 0444, root, core,
3366
+ &current_parent_fops);
36063367
36073368 if (core->num_parents > 1)
36083369 debugfs_create_file("clk_possible_parents", 0444, root, core,
....@@ -3659,6 +3420,24 @@
36593420 {
36603421 struct clk_core *core;
36613422
3423
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3424
+ pr_warn("\n");
3425
+ pr_warn("********************************************************************\n");
3426
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3427
+ pr_warn("** **\n");
3428
+ pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
3429
+ pr_warn("** **\n");
3430
+ pr_warn("** This means that this kernel is built to expose clk operations **\n");
3431
+ pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
3432
+ pr_warn("** to userspace, which may compromise security on your system. **\n");
3433
+ pr_warn("** **\n");
3434
+ pr_warn("** If you see this message and you are not debugging the **\n");
3435
+ pr_warn("** kernel, report this immediately to your vendor! **\n");
3436
+ pr_warn("** **\n");
3437
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3438
+ pr_warn("********************************************************************\n");
3439
+#endif
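CLOCK_ALLOW_WRITE_DEBUGFS is a compile-time opt-in (a #define in this file, deliberately not a Kconfig symbol), and the banner above shouts at boot whenever a kernel was built with it. A sketch of the kind of writable attribute the guard unlocks; this set/get pair is an illustrative reconstruction of clk_prepare_enable_fops, whose actual definition lives earlier in the file:

static int clk_prepare_enable_set(void *data, u64 val)
{
	struct clk_core *core = data;
	int ret = 0;

	if (val)
		ret = clk_prepare_enable(core->hw->clk);
	else
		clk_disable_unprepare(core->hw->clk);

	return ret;
}

static int clk_prepare_enable_get(void *data, u64 *val)
{
	struct clk_core *core = data;

	*val = core->enable_count && core->prepare_count;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
			 clk_prepare_enable_set, "%llu\n");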
3440
+
36623441 rootdir = debugfs_create_dir("clk", NULL);
36633442
36643443 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
....@@ -3669,9 +3448,6 @@
36693448 &clk_summary_fops);
36703449 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
36713450 &clk_dump_fops);
3672
-
3673
- debugfs_create_file("clk_enabled_list", 0444, rootdir,
3674
- &clk_debug_list, &clk_enabled_list_fops);
36753451
36763452 mutex_lock(&clk_debug_lock);
36773453 hlist_for_each_entry(core, &clk_debug_list, debug_node)
....@@ -3685,14 +3461,52 @@
36853461 late_initcall(clk_debug_init);
36863462 #else
36873463 static inline void clk_debug_register(struct clk_core *core) { }
3688
-static inline void clk_debug_reparent(struct clk_core *core,
3689
- struct clk_core *new_parent)
3690
-{
3691
-}
36923464 static inline void clk_debug_unregister(struct clk_core *core)
36933465 {
36943466 }
36953467 #endif
3468
+
3469
+static void clk_core_reparent_orphans_nolock(void)
3470
+{
3471
+ struct clk_core *orphan;
3472
+ struct hlist_node *tmp2;
3473
+
3474
+ /*
3475
+ * walk the list of orphan clocks and reparent any that newly find a

3476
+ * parent.
3477
+ */
3478
+ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3479
+ struct clk_core *parent = __clk_init_parent(orphan);
3480
+
3481
+ /*
3482
+ * We need to use __clk_set_parent_before() and _after() to
3483
+ * properly migrate any prepare/enable count of the orphan
3484
+ * clock. This is important for CLK_IS_CRITICAL clocks, which
3485
+ * are enabled during init but might not have a parent yet.
3486
+ */
3487
+ if (parent) {
3488
+ /* update the clk tree topology */
3489
+ __clk_set_parent_before(orphan, parent);
3490
+ __clk_set_parent_after(orphan, parent, NULL);
3491
+ __clk_recalc_accuracies(orphan);
3492
+ __clk_recalc_rates(orphan, 0);
3493
+ __clk_core_update_orphan_hold_state(orphan);
3494
+
3495
+ /*
3496
+ * __clk_init_parent() will set the initial req_rate to
3497
+ * 0 if the clock doesn't have clk_ops::recalc_rate and
3498
+ * is an orphan when it's registered.
3499
+ *
3500
+ * 'req_rate' is used by clk_set_rate_range() and
3501
+ * clk_put() to trigger a clk_set_rate() call whenever
3502
+ * the boundaries are modified. Let's make sure
3503
+ * 'req_rate' is set to something non-zero so that
3504
+ * clk_set_rate_range() doesn't drop the frequency.
3505
+ */
3506
+ orphan->req_rate = orphan->rate;
3507
+ }
3508
+ }
3509
+}
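The _safe iterator is load-bearing here: __clk_set_parent_before() moves the orphan onto its new parent's children list while the walk over clk_orphan_list is still in flight. A freestanding sketch of the pattern (the list and node names below are illustrative):

struct hlist_head orphans;	/* stand-in for clk_orphan_list */
struct clk_core *pos;
struct hlist_node *tmp;

/*
 * hlist_for_each_entry_safe() saves the next node in 'tmp' before the
 * loop body runs, so deleting or re-linking 'pos' cannot derail the
 * walk; the plain iterator would follow 'pos' into its new list.
 */
hlist_for_each_entry_safe(pos, tmp, &orphans, child_node)
	hlist_del(&pos->child_node);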
36963510
36973511 /**
36983512 * __clk_core_init - initialize the data structures in a struct clk_core
....@@ -3703,15 +3517,23 @@
37033517 */
37043518 static int __clk_core_init(struct clk_core *core)
37053519 {
3706
- int i, ret;
3707
- struct clk_core *orphan;
3708
- struct hlist_node *tmp2;
3520
+ int ret;
3521
+ struct clk_core *parent;
37093522 unsigned long rate;
3523
+ int phase;
37103524
37113525 if (!core)
37123526 return -EINVAL;
37133527
37143528 clk_prepare_lock();
3529
+
3530
+ /*
3531
+ * Set hw->core after grabbing the prepare_lock to synchronize with
3532
+ * callers of clk_core_fill_parent_index() where we treat hw->core
3533
+ * being NULL as the clk not being registered yet. This is crucial so
3534
+ * that clks aren't parented until their parent is fully registered.
3535
+ */
3536
+ core->hw->core = core;
37153537
37163538 ret = clk_pm_runtime_get(core);
37173539 if (ret)
....@@ -3757,13 +3579,27 @@
37573579 goto out;
37583580 }
37593581
3760
- /* throw a WARN if any entries in parent_names are NULL */
3761
- for (i = 0; i < core->num_parents; i++)
3762
- WARN(!core->parent_names[i],
3763
- "%s: invalid NULL in %s's .parent_names\n",
3764
- __func__, core->name);
3582
+ /*
3583
+ * optional platform-specific magic
3584
+ *
3585
+ * The .init callback is not used by any of the basic clock types, but
3586
+ * exists for weird hardware that must perform initialization magic for
3587
+ * CCF to get an accurate view of the clock for any other callbacks. It may
3588
+ * also be used to perform dynamic allocations. Such allocations
3589
+ * must be freed in the terminate() callback.
3590
+ * This callback shall not be used to initialize the parameter state,
3591
+ * such as rate, parent, etc ...
3592
+ *
3593
+ * If it exists, this callback should be called before any other callback of
3594
+ * the clock.
3595
+ */
3596
+ if (core->ops->init) {
3597
+ ret = core->ops->init(core->hw);
3598
+ if (ret)
3599
+ goto out;
3600
+ }
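The terminate() counterpart referenced in the comment is invoked from clk_unregister() later in this patch, so anything .init allocates has a defined teardown point. A hedged sketch of a provider pairing the two hooks for a dynamic allocation (the foo_clk driver and its fields are invented for illustration):

struct foo_clk {
	struct clk_hw hw;
	u32 *saved_regs;	/* illustrative dynamic state */
};

static int foo_clk_init(struct clk_hw *hw)
{
	struct foo_clk *foo = container_of(hw, struct foo_clk, hw);

	foo->saved_regs = kcalloc(8, sizeof(*foo->saved_regs), GFP_KERNEL);
	return foo->saved_regs ? 0 : -ENOMEM;
}

static void foo_clk_terminate(struct clk_hw *hw)
{
	struct foo_clk *foo = container_of(hw, struct foo_clk, hw);

	kfree(foo->saved_regs);
}

static const struct clk_ops foo_clk_ops = {
	.init      = foo_clk_init,
	.terminate = foo_clk_terminate,
	/* rate/gate callbacks elided */
};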
37653601
3766
- core->parent = __clk_init_parent(core);
3602
+ parent = core->parent = __clk_init_parent(core);
37673603
37683604 /*
37693605 * Populate core->parent if parent has already been clk_core_init'd. If
....@@ -3775,10 +3611,9 @@
37753611 * clocks and re-parent any that are children of the clock currently
37763612 * being clk_init'd.
37773613 */
3778
- if (core->parent) {
3779
- hlist_add_head(&core->child_node,
3780
- &core->parent->children);
3781
- core->orphan = core->parent->orphan;
3614
+ if (parent) {
3615
+ hlist_add_head(&core->child_node, &parent->children);
3616
+ core->orphan = parent->orphan;
37823617 } else if (!core->num_parents) {
37833618 hlist_add_head(&core->child_node, &clk_root_list);
37843619 core->orphan = false;
....@@ -3786,17 +3621,6 @@
37863621 hlist_add_head(&core->child_node, &clk_orphan_list);
37873622 core->orphan = true;
37883623 }
3789
-
3790
- /*
3791
- * optional platform-specific magic
3792
- *
3793
- * The .init callback is not used by any of the basic clock types, but
3794
- * exists for weird hardware that must perform initialization magic.
3795
- * Please consider other ways of solving initialization problems before
3796
- * using this callback, as its use is discouraged.
3797
- */
3798
- if (core->ops->init)
3799
- core->ops->init(core->hw);
38003624
38013625 /*
38023626 * Set clk's accuracy. The preferred method is to use
....@@ -3807,21 +3631,24 @@
38073631 */
38083632 if (core->ops->recalc_accuracy)
38093633 core->accuracy = core->ops->recalc_accuracy(core->hw,
3810
- __clk_get_accuracy(core->parent));
3811
- else if (core->parent)
3812
- core->accuracy = core->parent->accuracy;
3634
+ clk_core_get_accuracy_no_lock(parent));
3635
+ else if (parent)
3636
+ core->accuracy = parent->accuracy;
38133637 else
38143638 core->accuracy = 0;
38153639
38163640 /*
3817
- * Set clk's phase.
3641
+ * Set clk's phase by clk_core_get_phase() caching the phase.
38183642 * Since a phase is by definition relative to its parent, just
38193643 * query the current clock phase, or just assume it's in phase.
38203644 */
3821
- if (core->ops->get_phase)
3822
- core->phase = core->ops->get_phase(core->hw);
3823
- else
3824
- core->phase = 0;
3645
+ phase = clk_core_get_phase(core);
3646
+ if (phase < 0) {
3647
+ ret = phase;
3648
+ pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3649
+ core->name);
3650
+ goto out;
3651
+ }
38253652
38263653 /*
38273654 * Set clk's duty cycle.
....@@ -3836,9 +3663,9 @@
38363663 */
38373664 if (core->ops->recalc_rate)
38383665 rate = core->ops->recalc_rate(core->hw,
3839
- clk_core_get_rate_nolock(core->parent));
3840
- else if (core->parent)
3841
- rate = core->parent->rate;
3666
+ clk_core_get_rate_nolock(parent));
3667
+ else if (parent)
3668
+ rate = parent->rate;
38423669 else
38433670 rate = 0;
38443671 core->rate = core->req_rate = rate;
....@@ -3853,96 +3680,36 @@
38533680 if (core->flags & CLK_IS_CRITICAL) {
38543681 unsigned long flags;
38553682
3856
- clk_core_prepare(core);
3683
+ ret = clk_core_prepare(core);
3684
+ if (ret) {
3685
+ pr_warn("%s: critical clk '%s' failed to prepare\n",
3686
+ __func__, core->name);
3687
+ goto out;
3688
+ }
38573689
38583690 flags = clk_enable_lock();
3859
- clk_core_enable(core);
3691
+ ret = clk_core_enable(core);
38603692 clk_enable_unlock(flags);
3693
+ if (ret) {
3694
+ pr_warn("%s: critical clk '%s' failed to enable\n",
3695
+ __func__, core->name);
3696
+ clk_core_unprepare(core);
3697
+ goto out;
3698
+ }
38613699 }
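Providers opt a clock into this prepare+enable-at-init path with the CLK_IS_CRITICAL flag in clk_init_data; a minimal sketch (the name and ops are illustrative):

static const struct clk_init_data bus_init = {
	.name        = "bus_always_on",	/* illustrative */
	.ops         = &bus_gate_ops,	/* assumed provider ops */
	.flags       = CLK_IS_CRITICAL,	/* framework keeps it running */
	.num_parents = 0,
};

With the error handling added above, a critical clock that cannot be prepared or enabled now fails registration cleanly instead of leaving a half-enabled clock behind.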
38623700
38633701 clk_core_hold_state(core);
3702
+ clk_core_reparent_orphans_nolock();
38643703
3865
- /*
3866
- * walk the list of orphan clocks and reparent any that newly finds a
3867
- * parent.
3868
- */
3869
- hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3870
- struct clk_core *parent = __clk_init_parent(orphan);
3871
-
3872
- /*
3873
- * We need to use __clk_set_parent_before() and _after() to
3874
- * to properly migrate any prepare/enable count of the orphan
3875
- * clock. This is important for CLK_IS_CRITICAL clocks, which
3876
- * are enabled during init but might not have a parent yet.
3877
- */
3878
- if (parent) {
3879
- /* update the clk tree topology */
3880
- __clk_set_parent_before(orphan, parent);
3881
- __clk_set_parent_after(orphan, parent, NULL);
3882
- __clk_recalc_accuracies(orphan);
3883
- __clk_recalc_rates(orphan, 0);
3884
- __clk_core_update_orphan_hold_state(orphan);
3885
- }
3886
- }
3887
-
3888
- /*
3889
- * optional platform-specific magic
3890
- *
3891
- * The .init callback is not used by any of the basic clock types, but
3892
- * exists for weird hardware that must perform initialization magic.
3893
- * Please consider other ways of solving initialization problems before
3894
- * using this callback, as its use is discouraged.
3895
- */
3896
- if (core->ops->init)
3897
- core->ops->init(core->hw);
3898
-
3899
- if (core->flags & CLK_IS_CRITICAL) {
3900
- unsigned long flags;
3901
-
3902
- clk_core_prepare(core);
3903
-
3904
- flags = clk_enable_lock();
3905
- clk_core_enable(core);
3906
- clk_enable_unlock(flags);
3907
- }
3908
-
3909
- /*
3910
- * enable clocks with the CLK_ENABLE_HAND_OFF flag set
3911
- *
3912
- * This flag causes the framework to enable the clock at registration
3913
- * time, which is sometimes necessary for clocks that would cause a
3914
- * system crash when gated (e.g. cpu, memory, etc). The prepare_count
3915
- * is migrated over to the first clk consumer to call clk_prepare().
3916
- * Similarly the clk's enable_count is migrated to the first consumer
3917
- * to call clk_enable().
3918
- */
3919
- if (core->flags & CLK_ENABLE_HAND_OFF) {
3920
- unsigned long flags;
3921
-
3922
- /*
3923
- * Few clocks might have hardware gating which would be
3924
- * required to be ON before prepare/enabling the clocks. So
3925
- * check if the clock has been turned ON earlier and we should
3926
- * prepare/enable those clocks.
3927
- */
3928
- if (clk_core_is_enabled(core)) {
3929
- core->need_handoff_prepare = true;
3930
- core->need_handoff_enable = true;
3931
- ret = clk_core_prepare(core);
3932
- if (ret)
3933
- goto out;
3934
- flags = clk_enable_lock();
3935
- clk_core_enable(core);
3936
- clk_enable_unlock(flags);
3937
- }
3938
- }
39393704
39403705 kref_init(&core->ref);
39413706 out:
39423707 clk_pm_runtime_put(core);
39433708 unlock:
3944
- if (ret)
3709
+ if (ret) {
39453710 hlist_del_init(&core->child_node);
3711
+ core->hw->core = NULL;
3712
+ }
39463713
39473714 clk_prepare_unlock();
39483715
....@@ -3952,8 +3719,38 @@
39523719 return ret;
39533720 }
39543721
3955
-static struct clk *clk_hw_create_clk(struct clk_hw *hw, const char *dev_id,
3956
- const char *con_id)
3722
+/**
3723
+ * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3724
+ * @core: clk to add consumer to
3725
+ * @clk: consumer to link to a clk
3726
+ */
3727
+static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3728
+{
3729
+ clk_prepare_lock();
3730
+ hlist_add_head(&clk->clks_node, &core->clks);
3731
+ clk_prepare_unlock();
3732
+}
3733
+
3734
+/**
3735
+ * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3736
+ * @clk: consumer to unlink
3737
+ */
3738
+static void clk_core_unlink_consumer(struct clk *clk)
3739
+{
3740
+ lockdep_assert_held(&prepare_lock);
3741
+ hlist_del(&clk->clks_node);
3742
+}
3743
+
3744
+/**
3745
+ * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core
3746
+ * @core: clk to allocate a consumer for
3747
+ * @dev_id: string describing device name
3748
+ * @con_id: connection ID string on device
3749
+ *
3750
+ * Returns: clk consumer left unlinked from the consumer list
3751
+ */
3752
+static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3753
+ const char *con_id)
39573754 {
39583755 struct clk *clk;
39593756
....@@ -3961,56 +3758,192 @@
39613758 if (!clk)
39623759 return ERR_PTR(-ENOMEM);
39633760
3964
- clk->core = hw->core;
3761
+ clk->core = core;
39653762 clk->dev_id = dev_id;
39663763 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
39673764 clk->max_rate = ULONG_MAX;
39683765
3969
- clk_prepare_lock();
3970
- hlist_add_head(&clk->clks_node, &hw->core->clks);
3971
- clk_prepare_unlock();
3972
-
39733766 return clk;
39743767 }
39753768
3976
-struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
3977
- const char *con_id, bool with_orphans)
3769
+/**
3770
+ * free_clk - Free a clk consumer
3771
+ * @clk: clk consumer to free
3772
+ *
3773
+ * Note, this assumes the clk has been unlinked from the clk_core consumer
3774
+ * list.
3775
+ */
3776
+static void free_clk(struct clk *clk)
39783777 {
3979
- /* This is to allow this function to be chained to others */
3980
- if (!hw || IS_ERR(hw))
3981
- return (struct clk *) hw;
3982
-
3983
- if (hw->core->orphan && !with_orphans)
3984
- return ERR_PTR(-EPROBE_DEFER);
3985
-
3986
- return clk_hw_create_clk(hw, dev_id, con_id);
3987
-}
3988
-
3989
-void __clk_free_clk(struct clk *clk)
3990
-{
3991
- clk_prepare_lock();
3992
- hlist_del(&clk->clks_node);
3993
- clk_prepare_unlock();
3994
-
39953778 kfree_const(clk->con_id);
39963779 kfree(clk);
39973780 }
39983781
39993782 /**
4000
- * clk_register - allocate a new clock, register it and return an opaque cookie
4001
- * @dev: device that is registering this clock
4002
- * @hw: link to hardware-specific clock data
3783
+ * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
3784
+ * a clk_hw
3785
+ * @dev: clk consumer device
3786
+ * @hw: clk_hw associated with the clk being consumed
3787
+ * @dev_id: string describing device name
3788
+ * @con_id: connection ID string on device
40033789 *
4004
- * clk_register is the primary interface for populating the clock tree with new
4005
- * clock nodes. It returns a pointer to the newly allocated struct clk which
4006
- * cannot be dereferenced by driver code but may be used in conjunction with the
4007
- * rest of the clock API. In the event of an error clk_register will return an
4008
- * error code; drivers must test for an error code after calling clk_register.
3790
+ * This is the main function used to create a clk pointer for use by clk
3791
+ * consumers. It connects a consumer to the clk_core and clk_hw structures
3792
+ * used by the framework and clk provider respectively.
40093793 */
4010
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3794
+struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3795
+ const char *dev_id, const char *con_id)
40113796 {
4012
- int i, ret;
3797
+ struct clk *clk;
40133798 struct clk_core *core;
3799
+
3800
+ /* This is to allow this function to be chained to others */
3801
+ if (IS_ERR_OR_NULL(hw))
3802
+ return ERR_CAST(hw);
3803
+
3804
+ core = hw->core;
3805
+ clk = alloc_clk(core, dev_id, con_id);
3806
+ if (IS_ERR(clk))
3807
+ return clk;
3808
+ clk->dev = dev;
3809
+
3810
+ if (!try_module_get(core->owner)) {
3811
+ free_clk(clk);
3812
+ return ERR_PTR(-ENOENT);
3813
+ }
3814
+
3815
+ kref_get(&core->ref);
3816
+ clk_core_link_consumer(core, clk);
3817
+
3818
+ return clk;
3819
+}
3820
+
3821
+/**
3822
+ * clk_hw_get_clk - get clk consumer given a clk_hw
3823
+ * @hw: clk_hw associated with the clk being consumed
3824
+ * @con_id: connection ID string on device
3825
+ *
3826
+ * Returns: new clk consumer
3827
+ * This is the function to be used by providers which need
3828
+ * to get a consumer clk and act on the clock element
3829
+ * Calls to this function must be balanced with calls to clk_put()
3830
+ */
3831
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
3832
+{
3833
+ struct device *dev = hw->core->dev;
3834
+ const char *name = dev ? dev_name(dev) : NULL;
3835
+
3836
+ return clk_hw_create_clk(dev, hw, name, con_id);
3837
+}
3838
+EXPORT_SYMBOL(clk_hw_get_clk);
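A provider that wants to operate on one of its own clocks through the consumer API can now do so without a DT lookup. A hedged usage sketch; foo and its hw member are assumed to come from the provider's own registration:

/* somewhere in a provider driver, after clk_hw_register(dev, &foo->hw): */
struct clk *self;

self = clk_hw_get_clk(&foo->hw, "self");	/* con_id is illustrative */
if (!IS_ERR(self)) {
	clk_set_rate(self, 100000000);
	clk_put(self);				/* balance the get */
}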
3839
+
3840
+static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3841
+{
3842
+ const char *dst;
3843
+
3844
+ if (!src) {
3845
+ if (must_exist)
3846
+ return -EINVAL;
3847
+ return 0;
3848
+ }
3849
+
3850
+ *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3851
+ if (!dst)
3852
+ return -ENOMEM;
3853
+
3854
+ return 0;
3855
+}
3856
+
3857
+static int clk_core_populate_parent_map(struct clk_core *core,
3858
+ const struct clk_init_data *init)
3859
+{
3860
+ u8 num_parents = init->num_parents;
3861
+ const char * const *parent_names = init->parent_names;
3862
+ const struct clk_hw **parent_hws = init->parent_hws;
3863
+ const struct clk_parent_data *parent_data = init->parent_data;
3864
+ int i, ret = 0;
3865
+ struct clk_parent_map *parents, *parent;
3866
+
3867
+ if (!num_parents)
3868
+ return 0;
3869
+
3870
+ /*
3871
+ * Avoid unnecessary string look-ups of clk_core's possible parents by
3872
+ * having a cache of names/clk_hw pointers to clk_core pointers.
3873
+ */
3874
+ parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3875
+ core->parents = parents;
3876
+ if (!parents)
3877
+ return -ENOMEM;
3878
+
3879
+ /* Copy everything over because it might be __initdata */
3880
+ for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3881
+ parent->index = -1;
3882
+ if (parent_names) {
3883
+ /* throw a WARN if any entries are NULL */
3884
+ WARN(!parent_names[i],
3885
+ "%s: invalid NULL in %s's .parent_names\n",
3886
+ __func__, core->name);
3887
+ ret = clk_cpy_name(&parent->name, parent_names[i],
3888
+ true);
3889
+ } else if (parent_data) {
3890
+ parent->hw = parent_data[i].hw;
3891
+ parent->index = parent_data[i].index;
3892
+ ret = clk_cpy_name(&parent->fw_name,
3893
+ parent_data[i].fw_name, false);
3894
+ if (!ret)
3895
+ ret = clk_cpy_name(&parent->name,
3896
+ parent_data[i].name,
3897
+ false);
3898
+ } else if (parent_hws) {
3899
+ parent->hw = parent_hws[i];
3900
+ } else {
3901
+ ret = -EINVAL;
3902
+ WARN(1, "Must specify parents if num_parents > 0\n");
3903
+ }
3904
+
3905
+ if (ret) {
3906
+ do {
3907
+ kfree_const(parents[i].name);
3908
+ kfree_const(parents[i].fw_name);
3909
+ } while (--i >= 0);
3910
+ kfree(parents);
3911
+
3912
+ return ret;
3913
+ }
3914
+ }
3915
+
3916
+ return 0;
3917
+}
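clk_init_data accepts parents in three mutually exclusive forms (legacy global names, clk_parent_data, or direct clk_hw pointers), and this function normalizes whichever one is given into the clk_parent_map cache. A sketch of the modern clk_parent_data form (all names are illustrative):

static const struct clk_parent_data mux_parents[] = {
	{ .fw_name = "xo",  .name = "xo_board" },	/* DT clock-names first, legacy name as fallback */
	{ .fw_name = "pll", .name = "gpll0" },
	{ .hw = &local_pll.hw },			/* direct clk_hw link */
};

static const struct clk_init_data mux_init = {
	.name        = "foo_mux",
	.ops         = &foo_mux_ops,			/* assumed provider ops */
	.parent_data = mux_parents,
	.num_parents = ARRAY_SIZE(mux_parents),
};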
3918
+
3919
+static void clk_core_free_parent_map(struct clk_core *core)
3920
+{
3921
+ int i = core->num_parents;
3922
+
3923
+ if (!core->num_parents)
3924
+ return;
3925
+
3926
+ while (--i >= 0) {
3927
+ kfree_const(core->parents[i].name);
3928
+ kfree_const(core->parents[i].fw_name);
3929
+ }
3930
+
3931
+ kfree(core->parents);
3932
+}
3933
+
3934
+static struct clk *
3935
+__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3936
+{
3937
+ int ret;
3938
+ struct clk_core *core;
3939
+ const struct clk_init_data *init = hw->init;
3940
+
3941
+ /*
3942
+ * The init data is not supposed to be used outside of the registration path.
3943
+ * Set it to NULL so that provider drivers can't use it either and so that
3944
+ * we catch use of hw->init early on in the core.
3945
+ */
3946
+ hw->init = NULL;
40143947
40153948 core = kzalloc(sizeof(*core), GFP_KERNEL);
40163949 if (!core) {
....@@ -4018,90 +3951,108 @@
40183951 goto fail_out;
40193952 }
40203953
4021
- core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
3954
+ core->name = kstrdup_const(init->name, GFP_KERNEL);
40223955 if (!core->name) {
40233956 ret = -ENOMEM;
40243957 goto fail_name;
40253958 }
40263959
4027
- if (WARN_ON(!hw->init->ops)) {
3960
+ if (WARN_ON(!init->ops)) {
40283961 ret = -EINVAL;
40293962 goto fail_ops;
40303963 }
4031
- core->ops = hw->init->ops;
3964
+ core->ops = init->ops;
40323965
40333966 if (dev && pm_runtime_enabled(dev))
40343967 core->rpm_enabled = true;
40353968 core->dev = dev;
3969
+ core->of_node = np;
40363970 if (dev && dev->driver)
40373971 core->owner = dev->driver->owner;
40383972 core->hw = hw;
4039
- core->flags = hw->init->flags;
4040
- core->num_parents = hw->init->num_parents;
3973
+ core->flags = init->flags;
3974
+ core->num_parents = init->num_parents;
40413975 core->min_rate = 0;
40423976 core->max_rate = ULONG_MAX;
4043
- core->vdd_class = hw->init->vdd_class;
4044
- core->rate_max = hw->init->rate_max;
4045
- core->num_rate_max = hw->init->num_rate_max;
4046
- hw->core = core;
40473977
4048
- /* allocate local copy in case parent_names is __initdata */
4049
- core->parent_names = kcalloc(core->num_parents, sizeof(char *),
4050
- GFP_KERNEL);
4051
-
4052
- if (!core->parent_names) {
4053
- ret = -ENOMEM;
4054
- goto fail_parent_names;
4055
- }
4056
-
4057
-
4058
- /* copy each string name in case parent_names is __initdata */
4059
- for (i = 0; i < core->num_parents; i++) {
4060
- core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
4061
- GFP_KERNEL);
4062
- if (!core->parent_names[i]) {
4063
- ret = -ENOMEM;
4064
- goto fail_parent_names_copy;
4065
- }
4066
- }
4067
-
4068
- /* avoid unnecessary string look-ups of clk_core's possible parents. */
4069
- core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
4070
- GFP_KERNEL);
4071
- if (!core->parents) {
4072
- ret = -ENOMEM;
3978
+ ret = clk_core_populate_parent_map(core, init);
3979
+ if (ret)
40733980 goto fail_parents;
4074
- };
40753981
40763982 INIT_HLIST_HEAD(&core->clks);
4077
- INIT_LIST_HEAD(&core->rate_change_node);
40783983
4079
- hw->clk = clk_hw_create_clk(hw, NULL, NULL);
3984
+ /*
3985
+ * Don't call clk_hw_create_clk() here because that would pin the
3986
+ * provider module to itself and prevent it from ever being removed.
3987
+ */
3988
+ hw->clk = alloc_clk(core, NULL, NULL);
40803989 if (IS_ERR(hw->clk)) {
40813990 ret = PTR_ERR(hw->clk);
4082
- goto fail_parents;
3991
+ goto fail_create_clk;
40833992 }
3993
+
3994
+ clk_core_link_consumer(core, hw->clk);
40843995
40853996 ret = __clk_core_init(core);
40863997 if (!ret)
40873998 return hw->clk;
40883999
4089
- __clk_free_clk(hw->clk);
4000
+ clk_prepare_lock();
4001
+ clk_core_unlink_consumer(hw->clk);
4002
+ clk_prepare_unlock();
4003
+
4004
+ free_clk(hw->clk);
40904005 hw->clk = NULL;
40914006
4007
+fail_create_clk:
4008
+ clk_core_free_parent_map(core);
40924009 fail_parents:
4093
- kfree(core->parents);
4094
-fail_parent_names_copy:
4095
- while (--i >= 0)
4096
- kfree_const(core->parent_names[i]);
4097
- kfree(core->parent_names);
4098
-fail_parent_names:
40994010 fail_ops:
41004011 kfree_const(core->name);
41014012 fail_name:
41024013 kfree(core);
41034014 fail_out:
41044015 return ERR_PTR(ret);
4016
+}
4017
+
4018
+/**
4019
+ * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
4020
+ * @dev: Device to get device node of
4021
+ *
4022
+ * Return: device node pointer of @dev, or the device node pointer of
4023
+ * @dev->parent if dev doesn't have a device node, or NULL if neither
4024
+ * @dev nor @dev->parent has a device node.
4025
+ */
4026
+static struct device_node *dev_or_parent_of_node(struct device *dev)
4027
+{
4028
+ struct device_node *np;
4029
+
4030
+ if (!dev)
4031
+ return NULL;
4032
+
4033
+ np = dev_of_node(dev);
4034
+ if (!np)
4035
+ np = dev_of_node(dev->parent);
4036
+
4037
+ return np;
4038
+}
4039
+
4040
+/**
4041
+ * clk_register - allocate a new clock, register it and return an opaque cookie
4042
+ * @dev: device that is registering this clock
4043
+ * @hw: link to hardware-specific clock data
4044
+ *
4045
+ * clk_register is the *deprecated* interface for populating the clock tree with
4046
+ * new clock nodes. Use clk_hw_register() instead.
4047
+ *
4048
+ * Returns: a pointer to the newly allocated struct clk which
4049
+ * cannot be dereferenced by driver code but may be used in conjunction with the
4050
+ * rest of the clock API. In the event of an error clk_register will return an
4051
+ * error code; drivers must test for an error code after calling clk_register.
4052
+ */
4053
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4054
+{
4055
+ return __clk_register(dev, dev_or_parent_of_node(dev), hw);
41054056 }
41064057 EXPORT_SYMBOL_GPL(clk_register);
41074058
....@@ -4117,23 +4068,36 @@
41174068 */
41184069 int clk_hw_register(struct device *dev, struct clk_hw *hw)
41194070 {
4120
- return PTR_ERR_OR_ZERO(clk_register(dev, hw));
4071
+ return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
4072
+ hw));
41214073 }
41224074 EXPORT_SYMBOL_GPL(clk_hw_register);
4075
+
4076
+/*
4077
+ * of_clk_hw_register - register a clk_hw and return an error code
4078
+ * @node: device_node of device that is registering this clock
4079
+ * @hw: link to hardware-specific clock data
4080
+ *
4081
+ * of_clk_hw_register() is the primary interface for populating the clock tree
4082
+ * with new clock nodes when a struct device is not available, but a struct
4083
+ * device_node is. It returns an integer equal to zero indicating success or
4084
+ * less than zero indicating failure. Drivers must test for an error code after
4085
+ * calling of_clk_hw_register().
4086
+ */
4087
+int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
4088
+{
4089
+ return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
4090
+}
4091
+EXPORT_SYMBOL_GPL(of_clk_hw_register);
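of_clk_hw_register() suits CLK_OF_DECLARE-style early init code, where a device_node exists long before any struct device does. A hedged usage sketch (foo_hw is assumed to be a statically allocated clk_hw set up elsewhere):

static void __init foo_clk_of_setup(struct device_node *np)
{
	struct clk_hw *hw = &foo_hw;	/* assumed, statically allocated */
	int ret;

	ret = of_clk_hw_register(np, hw);
	if (ret) {
		pr_err("%pOF: failed to register clk: %d\n", np, ret);
		return;
	}

	of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_of_setup);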
41234092
41244093 /* Free memory allocated for a clock. */
41254094 static void __clk_release(struct kref *ref)
41264095 {
41274096 struct clk_core *core = container_of(ref, struct clk_core, ref);
4128
- int i = core->num_parents;
41294097
41304098 lockdep_assert_held(&prepare_lock);
41314099
4132
- kfree(core->parents);
4133
- while (--i >= 0)
4134
- kfree_const(core->parent_names[i]);
4135
-
4136
- kfree(core->parent_names);
4100
+ clk_core_free_parent_map(core);
41374101 kfree_const(core->name);
41384102 kfree(core);
41394103 }
....@@ -4180,8 +4144,8 @@
41804144 struct clk_core *child;
41814145
41824146 for (i = 0; i < root->num_parents; i++)
4183
- if (root->parents[i] == target)
4184
- root->parents[i] = NULL;
4147
+ if (root->parents[i].core == target)
4148
+ root->parents[i].core = NULL;
41854149
41864150 hlist_for_each_entry(child, &root->children, child_node)
41874151 clk_core_evict_parent_cache_subtree(child, target);
....@@ -4208,6 +4172,7 @@
42084172 void clk_unregister(struct clk *clk)
42094173 {
42104174 unsigned long flags;
4175
+ const struct clk_ops *ops;
42114176
42124177 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
42134178 return;
....@@ -4216,7 +4181,8 @@
42164181
42174182 clk_prepare_lock();
42184183
4219
- if (clk->core->ops == &clk_nodrv_ops) {
4184
+ ops = clk->core->ops;
4185
+ if (ops == &clk_nodrv_ops) {
42204186 pr_err("%s: unregistered clock: %s\n", __func__,
42214187 clk->core->name);
42224188 goto unlock;
....@@ -4228,6 +4194,9 @@
42284194 flags = clk_enable_lock();
42294195 clk->core->ops = &clk_nodrv_ops;
42304196 clk_enable_unlock(flags);
4197
+
4198
+ if (ops->terminate)
4199
+ ops->terminate(clk->core->hw);
42314200
42324201 if (!hlist_empty(&clk->core->children)) {
42334202 struct clk_core *child;
....@@ -4252,6 +4221,7 @@
42524221 __func__, clk->core->name);
42534222
42544223 kref_put(&clk->core->ref, __clk_release);
4224
+ free_clk(clk);
42554225 unlock:
42564226 clk_prepare_unlock();
42574227 }
....@@ -4267,174 +4237,14 @@
42674237 }
42684238 EXPORT_SYMBOL_GPL(clk_hw_unregister);
42694239
4270
-static void devm_clk_release(struct device *dev, void *res)
4240
+static void devm_clk_unregister_cb(struct device *dev, void *res)
42714241 {
42724242 clk_unregister(*(struct clk **)res);
42734243 }
42744244
4275
-static void devm_clk_hw_release(struct device *dev, void *res)
4245
+static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
42764246 {
42774247 clk_hw_unregister(*(struct clk_hw **)res);
4278
-}
4279
-
4280
-#define MAX_LEN_OPP_HANDLE 50
4281
-#define LEN_OPP_HANDLE 16
4282
-
4283
-static int derive_device_list(struct device **device_list,
4284
- struct clk_core *core,
4285
- struct device_node *np,
4286
- char *clk_handle_name, int count)
4287
-{
4288
- int j;
4289
- struct platform_device *pdev;
4290
- struct device_node *dev_node;
4291
-
4292
- for (j = 0; j < count; j++) {
4293
- device_list[j] = NULL;
4294
- dev_node = of_parse_phandle(np, clk_handle_name, j);
4295
- if (!dev_node) {
4296
- pr_err("Unable to get device_node pointer for %s opp-handle (%s)\n",
4297
- core->name, clk_handle_name);
4298
- return -ENODEV;
4299
- }
4300
-
4301
- pdev = of_find_device_by_node(dev_node);
4302
- if (!pdev) {
4303
- pr_err("Unable to find platform_device node for %s opp-handle\n",
4304
- core->name);
4305
- return -ENODEV;
4306
- }
4307
- device_list[j] = &pdev->dev;
4308
- }
4309
- return 0;
4310
-}
4311
-
4312
-static int clk_get_voltage(struct clk_core *core, unsigned long rate, int n)
4313
-{
4314
- struct clk_vdd_class *vdd;
4315
- int level, corner;
4316
-
4317
- /* Use the first regulator in the vdd class for the OPP table. */
4318
- vdd = core->vdd_class;
4319
- if (vdd->num_regulators > 1) {
4320
- corner = vdd->vdd_uv[vdd->num_regulators * n];
4321
- } else {
4322
- level = clk_find_vdd_level(core, rate);
4323
- if (level < 0) {
4324
- pr_err("Could not find vdd level\n");
4325
- return -EINVAL;
4326
- }
4327
- corner = vdd->vdd_uv[level];
4328
- }
4329
-
4330
- if (!corner) {
4331
- pr_err("%s: Unable to find vdd level for rate %lu\n",
4332
- core->name, rate);
4333
- return -EINVAL;
4334
- }
4335
-
4336
- return corner;
4337
-}
4338
-
4339
-static int clk_add_and_print_opp(struct clk_hw *hw,
4340
- struct device **device_list, int count,
4341
- unsigned long rate, int uv, int n)
4342
-{
4343
- struct clk_core *core = hw->core;
4344
- int j, ret = 0;
4345
-
4346
- for (j = 0; j < count; j++) {
4347
- ret = dev_pm_opp_add(device_list[j], rate, uv);
4348
- if (ret) {
4349
- pr_err("%s: couldn't add OPP for %lu - err: %d\n",
4350
- core->name, rate, ret);
4351
- return ret;
4352
- }
4353
-
4354
- if (n == 0 || n == core->num_rate_max - 1 ||
4355
- rate == clk_hw_round_rate(hw, INT_MAX))
4356
- pr_info("%s: set OPP pair(%lu Hz: %u uV) on %s\n",
4357
- core->name, rate, uv,
4358
- dev_name(device_list[j]));
4359
- }
4360
- return ret;
4361
-}
4362
-
4363
-static void clk_populate_clock_opp_table(struct device_node *np,
4364
- struct clk_hw *hw)
4365
-{
4366
- struct device **device_list;
4367
- struct clk_core *core = hw->core;
4368
- char clk_handle_name[MAX_LEN_OPP_HANDLE];
4369
- int n, len, count, uv, ret;
4370
- unsigned long rate = 0, rrate = 0;
4371
-
4372
- if (!core || !core->num_rate_max)
4373
- return;
4374
-
4375
- if (strlen(core->name) + LEN_OPP_HANDLE < MAX_LEN_OPP_HANDLE) {
4376
- ret = snprintf(clk_handle_name, ARRAY_SIZE(clk_handle_name),
4377
- "qcom,%s-opp-handle", core->name);
4378
- if (ret < strlen(core->name) + LEN_OPP_HANDLE) {
4379
- pr_err("%s: Failed to hold clk_handle_name\n",
4380
- core->name);
4381
- return;
4382
- }
4383
- } else {
4384
- pr_err("clk name (%s) too large to fit in clk_handle_name\n",
4385
- core->name);
4386
- return;
4387
- }
4388
-
4389
- if (of_find_property(np, clk_handle_name, &len)) {
4390
- count = len/sizeof(u32);
4391
-
4392
- device_list = kmalloc_array(count, sizeof(struct device *),
4393
- GFP_KERNEL);
4394
- if (!device_list)
4395
- return;
4396
-
4397
- ret = derive_device_list(device_list, core, np,
4398
- clk_handle_name, count);
4399
- if (ret < 0) {
4400
- pr_err("Failed to fill device_list for %s\n",
4401
- clk_handle_name);
4402
- goto err_derive_device_list;
4403
- }
4404
- } else {
4405
- pr_debug("Unable to find %s\n", clk_handle_name);
4406
- return;
4407
- }
4408
-
4409
- for (n = 0; ; n++) {
4410
- rrate = clk_hw_round_rate(hw, rate + 1);
4411
- if (!rrate) {
4412
- pr_err("clk_round_rate failed for %s\n",
4413
- core->name);
4414
- goto err_derive_device_list;
4415
- }
4416
-
4417
- /*
4418
- * If clk_hw_round_rate gives the same value on consecutive
4419
- * iterations, exit the loop since we're at the maximum clock
4420
- * frequency.
4421
- */
4422
- if (rate == rrate)
4423
- break;
4424
- rate = rrate;
4425
-
4426
- uv = clk_get_voltage(core, rate, n);
4427
- if (uv < 0)
4428
- goto err_derive_device_list;
4429
-
4430
- ret = clk_add_and_print_opp(hw, device_list, count,
4431
- rate, uv, n);
4432
- if (ret)
4433
- goto err_derive_device_list;
4434
- }
4435
-
4436
-err_derive_device_list:
4437
- kfree(device_list);
44384248 }
44394249
44404250 /**
....@@ -4442,16 +4252,17 @@
44424252 * @dev: device that is registering this clock
44434253 * @hw: link to hardware-specific clock data
44444254 *
4445
- * Managed clk_register(). Clocks returned from this function are
4446
- * automatically clk_unregister()ed on driver detach. See clk_register() for
4447
- * more information.
4255
+ * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
4256
+ *
4257
+ * Clocks returned from this function are automatically clk_unregister()ed on
4258
+ * driver detach. See clk_register() for more information.
44484259 */
44494260 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
44504261 {
44514262 struct clk *clk;
44524263 struct clk **clkp;
44534264
4454
- clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4265
+ clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
44554266 if (!clkp)
44564267 return ERR_PTR(-ENOMEM);
44574268
....@@ -4463,7 +4274,6 @@
44634274 devres_free(clkp);
44644275 }
44654276
4466
- clk_populate_clock_opp_table(dev->of_node, hw);
44674277 return clk;
44684278 }
44694279 EXPORT_SYMBOL_GPL(devm_clk_register);
....@@ -4482,7 +4292,7 @@
44824292 struct clk_hw **hwp;
44834293 int ret;
44844294
4485
- hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
4295
+ hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
44864296 if (!hwp)
44874297 return -ENOMEM;
44884298
....@@ -4494,7 +4304,6 @@
44944304 devres_free(hwp);
44954305 }
44964306
4497
- clk_populate_clock_opp_table(dev->of_node, hw);
44984307 return ret;
44994308 }
45004309 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
....@@ -4518,6 +4327,7 @@
45184327
45194328 /**
45204329 * devm_clk_unregister - resource managed clk_unregister()
4330
+ * @dev: device that is unregistering the clock data
45214331 * @clk: clock to unregister
45224332 *
45234333 * Deallocate a clock allocated with devm_clk_register(). Normally
....@@ -4526,7 +4336,7 @@
45264336 */
45274337 void devm_clk_unregister(struct device *dev, struct clk *clk)
45284338 {
4529
- WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
4339
+ WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
45304340 }
45314341 EXPORT_SYMBOL_GPL(devm_clk_unregister);
45324342
....@@ -4541,28 +4351,58 @@
45414351 */
45424352 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
45434353 {
4544
- WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
4354
+ WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
45454355 hw));
45464356 }
45474357 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
45484358
4359
+static void devm_clk_release(struct device *dev, void *res)
4360
+{
4361
+ clk_put(*(struct clk **)res);
4362
+}
4363
+
4364
+/**
4365
+ * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
4366
+ * @dev: device that is registering this clock
4367
+ * @hw: clk_hw associated with the clk being consumed
4368
+ * @con_id: connection ID string on device
4369
+ *
4370
+ * Managed clk_hw_get_clk(). Clocks obtained with this function are
4371
+ * automatically clk_put() on driver detach. See clk_put()
4372
+ * for more information.
4373
+ */
4374
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
4375
+ const char *con_id)
4376
+{
4377
+ struct clk *clk;
4378
+ struct clk **clkp;
4379
+
4380
+ /* This should not happen because it would mean we have drivers
4381
+ * passing around clk_hw pointers instead of having the caller use
4382
+ * proper clk_get() style APIs
4383
+ */
4384
+ WARN_ON_ONCE(dev != hw->core->dev);
4385
+
4386
+ clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4387
+ if (!clkp)
4388
+ return ERR_PTR(-ENOMEM);
4389
+
4390
+ clk = clk_hw_get_clk(hw, con_id);
4391
+ if (!IS_ERR(clk)) {
4392
+ *clkp = clk;
4393
+ devres_add(dev, clkp);
4394
+ } else {
4395
+ devres_free(clkp);
4396
+ }
4397
+
4398
+ return clk;
4399
+}
4400
+EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
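Usage mirrors clk_hw_get_clk() minus the explicit clk_put(). A sketch for a provider probe (driver and hw names are illustrative):

static int foo_probe(struct platform_device *pdev)
{
	struct clk *self;
	int ret;

	ret = devm_clk_hw_register(&pdev->dev, &foo_hw);
	if (ret)
		return ret;

	/* the matching clk_put() happens automatically on driver detach */
	self = devm_clk_hw_get_clk(&pdev->dev, &foo_hw, NULL);
	if (IS_ERR(self))
		return PTR_ERR(self);

	return clk_prepare_enable(self);
}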
4401
+
45494402 /*
45504403 * clkdev helpers
45514404 */
4552
-int __clk_get(struct clk *clk)
4553
-{
4554
- struct clk_core *core = !clk ? NULL : clk->core;
45554405
4556
- if (core) {
4557
- if (!try_module_get(core->owner))
4558
- return 0;
4559
-
4560
- kref_get(&core->ref);
4561
- }
4562
- return 1;
4563
-}
4564
-
4565
-/* keep in sync with __clk_free_clk */
45664406 void __clk_put(struct clk *clk)
45674407 {
45684408 struct module *owner;
....@@ -4596,8 +4436,7 @@
45964436
45974437 module_put(owner);
45984438
4599
- kfree_const(clk->con_id);
4600
- kfree(clk);
4439
+ free_clk(clk);
46014440 }
46024441
46034442 /*** clk rate change notifiers ***/
....@@ -4702,13 +4541,58 @@
47024541 }
47034542 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
47044543
4544
+struct clk_notifier_devres {
4545
+ struct clk *clk;
4546
+ struct notifier_block *nb;
4547
+};
4548
+
4549
+static void devm_clk_notifier_release(struct device *dev, void *res)
4550
+{
4551
+ struct clk_notifier_devres *devres = res;
4552
+
4553
+ clk_notifier_unregister(devres->clk, devres->nb);
4554
+}
4555
+
4556
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
4557
+ struct notifier_block *nb)
4558
+{
4559
+ struct clk_notifier_devres *devres;
4560
+ int ret;
4561
+
4562
+ devres = devres_alloc(devm_clk_notifier_release,
4563
+ sizeof(*devres), GFP_KERNEL);
4564
+
4565
+ if (!devres)
4566
+ return -ENOMEM;
4567
+
4568
+ ret = clk_notifier_register(clk, nb);
4569
+ if (!ret) {
4570
+ devres->clk = clk;
4571
+ devres->nb = nb;
4572
+ } else {
4573
+ devres_free(devres);
4574
+ }
4575
+
4576
+ return ret;
4577
+}
4578
+EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
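A consumer-side sketch of the notifier this helper manages; the callback receives a struct clk_notifier_data and the usual PRE/POST/ABORT_RATE_CHANGE events (the regulator use case in the comments is illustrative):

static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* e.g. raise a supply before jumping to ndata->new_rate */
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		/* lower it again now that ndata->old_rate is gone */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block foo_nb = { .notifier_call = foo_clk_notify };

/* in probe, unregistered automatically on detach: */
/* devm_clk_notifier_register(dev, clk, &foo_nb); */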
4579
+
47054580 #ifdef CONFIG_OF
4581
+static void clk_core_reparent_orphans(void)
4582
+{
4583
+ clk_prepare_lock();
4584
+ clk_core_reparent_orphans_nolock();
4585
+ clk_prepare_unlock();
4586
+}
4587
+
47064588 /**
47074589 * struct of_clk_provider - Clock provider registration structure
47084590 * @link: Entry in global list of clock providers
47094591 * @node: Pointer to device tree node of clock provider
47104592 * @get: Get clock callback. Returns NULL or a struct clk for the
47114593 * given clock specifier
4594
+ * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
4595
+ * struct clk_hw for the given clock specifier
47124596 * @data: context pointer to be passed into @get callback
47134597 */
47144598 struct of_clk_provider {
....@@ -4720,8 +4604,9 @@
47204604 void *data;
47214605 };
47224606
4607
+extern struct of_device_id __clk_of_table;
47234608 static const struct of_device_id __clk_of_table_sentinel
4724
- __used __section(__clk_of_table_end);
4609
+ __used __section("__clk_of_table_end");
47254610
47264611 static LIST_HEAD(of_clk_providers);
47274612 static DEFINE_MUTEX(of_clk_mutex);
....@@ -4773,6 +4658,8 @@
47734658 * @np: Device node pointer associated with clock provider
47744659 * @clk_src_get: callback for decoding clock
47754660 * @data: context pointer for @clk_src_get callback.
4661
+ *
4662
+ * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
47764663 */
47774664 int of_clk_add_provider(struct device_node *np,
47784665 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
....@@ -4781,6 +4668,9 @@
47814668 {
47824669 struct of_clk_provider *cp;
47834670 int ret;
4671
+
4672
+ if (!np)
4673
+ return 0;
47844674
47854675 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
47864676 if (!cp)
....@@ -4795,9 +4685,13 @@
47954685 mutex_unlock(&of_clk_mutex);
47964686 pr_debug("Added clock from %pOF\n", np);
47974687
4688
+ clk_core_reparent_orphans();
4689
+
47984690 ret = of_clk_set_defaults(np, true);
47994691 if (ret < 0)
48004692 of_clk_del_provider(np);
4693
+
4694
+ fwnode_dev_initialized(&np->fwnode, true);
48014695
48024696 return ret;
48034697 }
....@@ -4817,6 +4711,9 @@
48174711 struct of_clk_provider *cp;
48184712 int ret;
48194713
4714
+ if (!np)
4715
+ return 0;
4716
+
48204717 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
48214718 if (!cp)
48224719 return -ENOMEM;
....@@ -4829,6 +4726,8 @@
48294726 list_add(&cp->link, &of_clk_providers);
48304727 mutex_unlock(&of_clk_mutex);
48314728 pr_debug("Added clk_hw provider from %pOF\n", np);
4729
+
4730
+ clk_core_reparent_orphans();
48324731
48334732 ret = of_clk_set_defaults(np, true);
48344733 if (ret < 0)
....@@ -4843,6 +4742,39 @@
48434742 of_clk_del_provider(*(struct device_node **)res);
48444743 }
48454744
4745
+/*
4746
+ * We allow a child device to use its parent device as the clock provider node
4747
+ * for cases like MFD sub-devices where the child device driver wants to use
4748
+ * devm_*() APIs but not list the device in DT as a sub-node.
4749
+ */
4750
+static struct device_node *get_clk_provider_node(struct device *dev)
4751
+{
4752
+ struct device_node *np, *parent_np;
4753
+
4754
+ np = dev->of_node;
4755
+ parent_np = dev->parent ? dev->parent->of_node : NULL;
4756
+
4757
+ if (!of_find_property(np, "#clock-cells", NULL))
4758
+ if (of_find_property(parent_np, "#clock-cells", NULL))
4759
+ np = parent_np;
4760
+
4761
+ return np;
4762
+}
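For the MFD case this enables, a child driver can register a provider even though the #clock-cells property sits on the parent's node. A hedged sketch (allocation of the onecell data is elided):

/* MFD child probe: the parent device owns the DT node with #clock-cells */
static int foo_child_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *data;

	/* ... allocate data and register data->hws[] via devm_clk_hw_register() ... */

	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
					   data);
}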
4763
+
4764
+/**
4765
+ * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4766
+ * @dev: Device acting as the clock provider (used for DT node and lifetime)
4767
+ * @get: callback for decoding clk_hw
4768
+ * @data: context pointer for @get callback
4769
+ *
4770
+ * Registers clock provider for given device's node. If the device has no DT
4771
+ * node or if the device node lacks clock provider information (#clock-cells)
4772
+ * then the parent device's node is scanned for this information. If the parent node
4773
+ * has #clock-cells, it is used for registration. The provider is
4774
+ * automatically released at device exit.
4775
+ *
4776
+ * Return: 0 on success or an errno on failure.
4777
+ */
48464778 int devm_of_clk_add_hw_provider(struct device *dev,
48474779 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
48484780 void *data),
....@@ -4856,7 +4788,7 @@
48564788 if (!ptr)
48574789 return -ENOMEM;
48584790
4859
- np = dev->of_node;
4791
+ np = get_clk_provider_node(dev);
48604792 ret = of_clk_add_hw_provider(np, get, data);
48614793 if (!ret) {
48624794 *ptr = np;
....@@ -4877,10 +4809,14 @@
48774809 {
48784810 struct of_clk_provider *cp;
48794811
4812
+ if (!np)
4813
+ return;
4814
+
48804815 mutex_lock(&of_clk_mutex);
48814816 list_for_each_entry(cp, &of_clk_providers, link) {
48824817 if (cp->node == np) {
48834818 list_del(&cp->link);
4819
+ fwnode_dev_initialized(&np->fwnode, false);
48844820 of_node_put(cp->node);
48854821 kfree(cp);
48864822 break;
....@@ -4900,16 +4836,95 @@
49004836 return *np == data;
49014837 }
49024838
4839
+/**
4840
+ * devm_of_clk_del_provider() - Remove clock provider registered using devm
4841
+ * @dev: Device to whose lifetime the clock provider was bound
4842
+ */
49034843 void devm_of_clk_del_provider(struct device *dev)
49044844 {
49054845 int ret;
4846
+ struct device_node *np = get_clk_provider_node(dev);
49064847
49074848 ret = devres_release(dev, devm_of_clk_release_provider,
4908
- devm_clk_provider_match, dev->of_node);
4849
+ devm_clk_provider_match, np);
49094850
49104851 WARN_ON(ret);
49114852 }
49124853 EXPORT_SYMBOL(devm_of_clk_del_provider);
4854
+
4855
+/**
4856
+ * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4857
+ * @np: device node to parse clock specifier from
4858
+ * @index: index of phandle to parse clock out of. If index < 0, @name is used
4859
+ * @name: clock name to find and parse. If name is NULL, the index is used
4860
+ * @out_args: Result of parsing the clock specifier
4861
+ *
4862
+ * Parses a device node's "clocks" and "clock-names" properties to find the
4863
+ * phandle and cells for the index or name that is desired. The resulting clock
4864
+ * specifier is placed into @out_args, or an errno is returned when there's a
4865
+ * parsing error. The @index argument is ignored if @name is non-NULL.
4866
+ *
4867
+ * Example:
4868
+ *
4869
+ * phandle1: clock-controller@1 {
4870
+ * #clock-cells = <2>;
4871
+ * }
4872
+ *
4873
+ * phandle2: clock-controller@2 {
4874
+ * #clock-cells = <1>;
4875
+ * }
4876
+ *
4877
+ * clock-consumer@3 {
4878
+ * clocks = <&phandle1 1 2 &phandle2 3>;
4879
+ * clock-names = "name1", "name2";
4880
+ * }
4881
+ *
4882
+ * To get a device_node for the `clock-controller@2' node you may call this
4883
+ * function in a few different ways:
4884
+ *
4885
+ * of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4886
+ * of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4887
+ * of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4888
+ *
4889
+ * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4890
+ * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
4891
+ * the "clock-names" property of @np.
4892
+ */
4893
+static int of_parse_clkspec(const struct device_node *np, int index,
4894
+ const char *name, struct of_phandle_args *out_args)
4895
+{
4896
+ int ret = -ENOENT;
4897
+
4898
+ /* Walk up the tree of devices looking for a clock property that matches */
4899
+ while (np) {
4900
+ /*
4901
+ * For named clocks, first look up the name in the
4902
+ * "clock-names" property. If it cannot be found, then index
4903
+ * will be an error code and of_parse_phandle_with_args() will
4904
+ * return -EINVAL.
4905
+ */
4906
+ if (name)
4907
+ index = of_property_match_string(np, "clock-names", name);
4908
+ ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4909
+ index, out_args);
4910
+ if (!ret)
4911
+ break;
4912
+ if (name && index >= 0)
4913
+ break;
4914
+
4915
+ /*
4916
+ * No matching clock found on this node. If the parent node
4917
+ * has a "clock-ranges" property, then we can try one of its
4918
+ * clocks.
4919
+ */
4920
+ np = np->parent;
4921
+ if (np && !of_get_property(np, "clock-ranges", NULL))
4922
+ break;
4923
+ index = 0;
4924
+ }
4925
+
4926
+ return ret;
4927
+}
49134928
49144929 static struct clk_hw *
49154930 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
....@@ -4926,38 +4941,26 @@
49264941 return __clk_get_hw(clk);
49274942 }
49284943
4929
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
4930
- const char *dev_id, const char *con_id,
4931
- bool with_orphans)
4944
+static struct clk_hw *
4945
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
49324946 {
49334947 struct of_clk_provider *provider;
4934
- struct clk *clk = ERR_PTR(-EPROBE_DEFER);
4935
- struct clk_hw *hw;
4948
+ struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
49364949
49374950 if (!clkspec)
49384951 return ERR_PTR(-EINVAL);
49394952
4940
- /* Check if we have such a provider in our array */
49414953 mutex_lock(&of_clk_mutex);
49424954 list_for_each_entry(provider, &of_clk_providers, link) {
49434955 if (provider->node == clkspec->np) {
49444956 hw = __of_clk_get_hw_from_provider(provider, clkspec);
4945
- clk = __clk_create_clk(hw, dev_id, con_id,
4946
- with_orphans);
4947
- }
4948
-
4949
- if (!IS_ERR(clk)) {
4950
- if (!__clk_get(clk)) {
4951
- __clk_free_clk(clk);
4952
- clk = ERR_PTR(-ENOENT);
4953
- }
4954
-
4955
- break;
4957
+ if (!IS_ERR(hw))
4958
+ break;
49564959 }
49574960 }
49584961 mutex_unlock(&of_clk_mutex);
49594962
4960
- return clk;
4963
+ return hw;
49614964 }
49624965
49634966 /**
....@@ -4970,27 +4973,61 @@
49704973 */
49714974 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
49724975 {
4973
- return __of_clk_get_from_provider(clkspec, NULL, __func__, false);
4974
-}
4976
+ struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
49754977
4976
-/**
4977
- * of_clk_get_from_provider_with_orphans() - Lookup clock from a clock provider
4978
- * @clkspec: pointer to a clock specifier data structure
4979
- *
4980
- * This function looks up a struct clk from the registered list of clock
4981
- * providers, an input is a clock specifier data structure as returned
4982
- * from the of_parse_phandle_with_args() function call.
4983
- *
4984
- * The difference to of_clk_get_from_provider() is that this function will
4985
- * also successfully lookup orphan-clocks, as it in some cases may be
4986
- * necessary to access such orphan-clocks as well.
4987
- */
4988
-struct clk *
4989
-of_clk_get_from_provider_with_orphans(struct of_phandle_args *clkspec)
4990
-{
4991
- return __of_clk_get_from_provider(clkspec, NULL, __func__, true);
4978
+ return clk_hw_create_clk(NULL, hw, NULL, __func__);
49924979 }
49934980 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4981
+
4982
+struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4983
+ const char *con_id)
4984
+{
4985
+ int ret;
4986
+ struct clk_hw *hw;
4987
+ struct of_phandle_args clkspec;
4988
+
4989
+ ret = of_parse_clkspec(np, index, con_id, &clkspec);
4990
+ if (ret)
4991
+ return ERR_PTR(ret);
4992
+
4993
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
4994
+ of_node_put(clkspec.np);
4995
+
4996
+ return hw;
4997
+}
4998
+
4999
+static struct clk *__of_clk_get(struct device_node *np,
5000
+ int index, const char *dev_id,
5001
+ const char *con_id)
5002
+{
5003
+ struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
5004
+
5005
+ return clk_hw_create_clk(NULL, hw, dev_id, con_id);
5006
+}
5007
+
5008
+struct clk *of_clk_get(struct device_node *np, int index)
5009
+{
5010
+ return __of_clk_get(np, index, np->full_name, NULL);
5011
+}
5012
+EXPORT_SYMBOL(of_clk_get);
5013
+
5014
+/**
5015
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
5016
+ * @np: pointer to clock consumer node
5017
+ * @name: name of consumer's clock input, or NULL for the first clock reference
5018
+ *
5019
+ * This function parses the clocks and clock-names properties,
5020
+ * and uses them to look up the struct clk from the registered list of clock
5021
+ * providers.
5022
+ */
5023
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
5024
+{
5025
+ if (!np)
5026
+ return ERR_PTR(-ENOENT);
5027
+
5028
+ return __of_clk_get(np, 0, np->full_name, name);
5029
+}
5030
+EXPORT_SYMBOL(of_clk_get_by_name);
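A consumer-side sketch pairing these two lookups with the DT example shown in the of_parse_clkspec() comment above:

/* clocks = <&phandle1 1 2 &phandle2 3>; clock-names = "name1", "name2"; */
struct clk *a = of_clk_get(np, 0);			/* first specifier */
struct clk *b = of_clk_get_by_name(np, "name2");	/* via clock-names */

if (!IS_ERR(a) && !IS_ERR(b)) {
	/* use the clocks ... */
	clk_put(b);
	clk_put(a);
}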
49945031
49955032 /**
49965033 * of_clk_get_parent_count() - Count the number of clocks a device node has
....@@ -4998,7 +5035,7 @@
49985035 *
49995036 * Returns: The number of clocks that are possible parents of this node
50005037 */
5001
-unsigned int of_clk_get_parent_count(struct device_node *np)
5038
+unsigned int of_clk_get_parent_count(const struct device_node *np)
50025039 {
50035040 int count;
50045041
....@@ -5010,7 +5047,7 @@
50105047 }
50115048 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
50125049
5013
-const char *of_clk_get_parent_name(struct device_node *np, int index)
5050
+const char *of_clk_get_parent_name(const struct device_node *np, int index)
50145051 {
50155052 struct of_phandle_args clkspec;
50165053 struct property *prop;
....@@ -5150,8 +5187,8 @@
51505187 *
51515188 * Return: error code or zero on success
51525189 */
5153
-int of_clk_detect_critical(struct device_node *np,
5154
- int index, unsigned long *flags)
5190
+int of_clk_detect_critical(struct device_node *np, int index,
5191
+ unsigned long *flags)
51555192 {
51565193 struct property *prop;
51575194 const __be32 *cur;
....@@ -5296,12 +5333,12 @@
52965333 return cnt;
52975334 }
52985335
5299
-static const struct file_operations clk_rate_fops = {
5300
- .open = clk_rate_open,
5301
- .read = seq_read,
5302
- .llseek = seq_lseek,
5303
- .release = single_release,
5304
- .write = clk_rate_write,
5336
+static const struct proc_ops clk_rate_proc_ops = {
5337
+ .proc_open = clk_rate_open,
5338
+ .proc_read = seq_read,
5339
+ .proc_write = clk_rate_write,
5340
+ .proc_lseek = seq_lseek,
5341
+ .proc_release = single_release,
53055342 };
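This hunk is the mechanical file_operations-to-proc_ops conversion that procfs has required since v5.6; only the field names change, not the handlers. A minimal freestanding sketch of wiring up such a table (the entry name is illustrative):

static int demo_show(struct seq_file *s, void *v)
{
	seq_puts(s, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct proc_ops demo_proc_ops = {
	.proc_open    = demo_open,
	.proc_read    = seq_read,
	.proc_lseek   = seq_lseek,
	.proc_release = single_release,
};

/* proc_create("clk_demo", 0444, NULL, &demo_proc_ops); */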
53065343
53075344 static int clk_enable_show(struct seq_file *s, void *v)
....@@ -5357,12 +5394,12 @@
53575394 return cnt;
53585395 }
53595396
5360
-static const struct file_operations clk_enable_fops = {
5361
- .open = clk_enable_open,
5362
- .read = seq_read,
5363
- .llseek = seq_lseek,
5364
- .release = single_release,
5365
- .write = clk_enable_write,
5397
+static const struct proc_ops clk_enable_proc_ops = {
5398
+ .proc_open = clk_enable_open,
5399
+ .proc_read = seq_read,
5400
+ .proc_write = clk_enable_write,
5401
+ .proc_lseek = seq_lseek,
5402
+ .proc_release = single_release,
53665403 };
53675404
53685405 static int clk_parent_show(struct seq_file *s, void *v)
....@@ -5416,12 +5453,12 @@
54165453 return cnt;
54175454 }
54185455
5419
-static const struct file_operations clk_parent_fops = {
5420
- .open = clk_parent_open,
5421
- .read = seq_read,
5422
- .llseek = seq_lseek,
5423
- .release = single_release,
5424
- .write = clk_parent_write,
5456
+static const struct proc_ops clk_parent_proc_ops = {
5457
+ .proc_open = clk_parent_open,
5458
+ .proc_read = seq_read,
5459
+ .proc_write = clk_parent_write,
5460
+ .proc_lseek = seq_lseek,
5461
+ .proc_release = single_release,
54255462 };
54265463
54275464 static void clk_proc_summary_show_one(struct seq_file *s, struct clk_core *c,
....@@ -5434,7 +5471,8 @@
54345471 level * 3 + 1, "",
54355472 30 - level * 3, c->name,
54365473 c->enable_count, c->prepare_count, c->protect_count,
5437
- clk_core_get_rate(c), clk_core_get_accuracy(c),
5474
+ clk_core_get_rate_recalc(c),
5475
+ clk_core_get_accuracy_recalc(c),
54385476 clk_core_get_phase(c),
54395477 clk_core_get_scaled_duty_cycle(c, 100000));
54405478 }
....@@ -5487,15 +5525,15 @@
54875525 if (!proc_clk_root)
54885526 return -EINVAL;
54895527
5490
- ent = proc_create("rate", 0644, proc_clk_root, &clk_rate_fops);
5528
+ ent = proc_create("rate", 0644, proc_clk_root, &clk_rate_proc_ops);
54915529 if (!ent)
54925530 goto fail;
54935531
5494
- ent = proc_create("enable", 0644, proc_clk_root, &clk_enable_fops);
5532
+ ent = proc_create("enable", 0644, proc_clk_root, &clk_enable_proc_ops);
54955533 if (!ent)
54965534 goto fail;
54975535
5498
- ent = proc_create("parent", 0644, proc_clk_root, &clk_parent_fops);
5536
+ ent = proc_create("parent", 0644, proc_clk_root, &clk_parent_proc_ops);
54995537 if (!ent)
55005538 goto fail;
55015539