[...]
 #define AUTOIDLE_LOW		0x1
 
 static LIST_HEAD(autoidle_clks);
-static LIST_HEAD(clk_hw_omap_clocks);
+
+/*
+ * we have some non-atomic read/write
+ * operations behind it, so lets
+ * take one lock for handling autoidle
+ * of all clocks
+ */
+static DEFINE_SPINLOCK(autoidle_spinlock);
+
+static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
+{
+	if (clk->ops && clk->ops->deny_idle) {
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&autoidle_spinlock, irqflags);
+		clk->autoidle_count++;
+		if (clk->autoidle_count == 1)
+			clk->ops->deny_idle(clk);
+
+		spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+	}
+	return 0;
+}
+
+static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
+{
+	if (clk->ops && clk->ops->allow_idle) {
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&autoidle_spinlock, irqflags);
+		clk->autoidle_count--;
+		if (clk->autoidle_count == 0)
+			clk->ops->allow_idle(clk);
+
+		spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+	}
+	return 0;
+}
 
 /**
  * omap2_clk_deny_idle - disable autoidle on an OMAP clock
[...]
  */
 int omap2_clk_deny_idle(struct clk *clk)
 {
-	struct clk_hw_omap *c;
+	struct clk_hw *hw;
 
-	c = to_clk_hw_omap(__clk_get_hw(clk));
-	if (c->ops && c->ops->deny_idle)
-		c->ops->deny_idle(c);
-	return 0;
+	if (!clk)
+		return -EINVAL;
+
+	hw = __clk_get_hw(clk);
+
+	if (omap2_clk_is_hw_omap(hw)) {
+		struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+		return _omap2_clk_deny_idle(c);
+	}
+
+	return -EINVAL;
 }
 
 /**
[...]
  */
 int omap2_clk_allow_idle(struct clk *clk)
 {
-	struct clk_hw_omap *c;
+	struct clk_hw *hw;
 
-	c = to_clk_hw_omap(__clk_get_hw(clk));
-	if (c->ops && c->ops->allow_idle)
-		c->ops->allow_idle(c);
-	return 0;
+	if (!clk)
+		return -EINVAL;
+
+	hw = __clk_get_hw(clk);
+
+	if (omap2_clk_is_hw_omap(hw)) {
+		struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+		return _omap2_clk_allow_idle(c);
+	}
+
+	return -EINVAL;
 }
 
 static void _allow_autoidle(struct clk_ti_autoidle *clk)
[...]
 }
 
 /**
- * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
- * @hw: struct clk_hw * to initialize
- *
- * Add an OMAP clock @clk to the internal list of OMAP clocks. Used
- * temporarily for autoidle handling, until this support can be
- * integrated into the common clock framework code in some way. No
- * return value.
- */
-void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
-{
-	struct clk_hw_omap *c;
-
-	if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
-		return;
-
-	c = to_clk_hw_omap(hw);
-	list_add(&c->node, &clk_hw_omap_clocks);
-}
-
-/**
  * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
  * support it
  *
[...]
  */
 int omap2_clk_enable_autoidle_all(void)
 {
-	struct clk_hw_omap *c;
+	int ret;
 
-	list_for_each_entry(c, &clk_hw_omap_clocks, node)
-		if (c->ops && c->ops->allow_idle)
-			c->ops->allow_idle(c);
+	ret = omap2_clk_for_each(_omap2_clk_allow_idle);
+	if (ret)
+		return ret;
 
 	_clk_generic_allow_autoidle_all();
 
[...]
  */
 int omap2_clk_disable_autoidle_all(void)
 {
-	struct clk_hw_omap *c;
+	int ret;
 
-	list_for_each_entry(c, &clk_hw_omap_clocks, node)
-		if (c->ops && c->ops->deny_idle)
-			c->ops->deny_idle(c);
+	ret = omap2_clk_for_each(_omap2_clk_deny_idle);
+	if (ret)
+		return ret;
 
 	_clk_generic_deny_autoidle_all();
 
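
For reference, here is a minimal user-space sketch of the refcounting behaviour the new _omap2_clk_deny_idle()/_omap2_clk_allow_idle() helpers introduce: the hardware deny_idle/allow_idle ops are only invoked on the 0->1 and 1->0 transitions of autoidle_count, so overlapping callers (for example omap2_clk_disable_autoidle_all() plus an individual omap2_clk_deny_idle() user) no longer undo each other. The struct and function names below are illustrative stand-ins, not the kernel API, and the spinlock is left out because this model is single-threaded.

/*
 * Stand-alone model of the autoidle refcounting above. The "hardware"
 * op only fires on the first deny and the last allow.
 */
#include <stdio.h>

struct fake_clk {
	int autoidle_count;	/* models clk_hw_omap::autoidle_count */
	int autoidle_enabled;	/* models the hardware autoidle bit */
};

static void deny_idle(struct fake_clk *clk)
{
	if (++clk->autoidle_count == 1)
		clk->autoidle_enabled = 0;	/* hw op on first denier only */
}

static void allow_idle(struct fake_clk *clk)
{
	if (--clk->autoidle_count == 0)
		clk->autoidle_enabled = 1;	/* hw op on last allower only */
}

int main(void)
{
	struct fake_clk clk = { .autoidle_count = 0, .autoidle_enabled = 1 };

	deny_idle(&clk);	/* e.g. omap2_clk_disable_autoidle_all() */
	deny_idle(&clk);	/* e.g. a driver calling omap2_clk_deny_idle() */
	allow_idle(&clk);	/* first release: autoidle must stay off */
	printf("after one allow:  enabled=%d count=%d\n",
	       clk.autoidle_enabled, clk.autoidle_count);	/* 0, 1 */
	allow_idle(&clk);	/* last release: autoidle re-enabled */
	printf("after both allow: enabled=%d count=%d\n",
	       clk.autoidle_enabled, clk.autoidle_count);	/* 1, 0 */
	return 0;
}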
---|