 ..  |  ..  |
 130 | 130 | /*
 131 | 131 |  * Caller must hold devtree_lock.
 132 | 132 |  */
 133 |     | -static void __of_free_phandle_cache(void)
     | 133 | +static struct device_node** __of_free_phandle_cache(void)
 134 | 134 | {
 135 | 135 | 	u32 cache_entries = phandle_cache_mask + 1;
 136 | 136 | 	u32 k;
     | 137 | +	struct device_node **shadow;
 137 | 138 | 
 138 | 139 | 	if (!phandle_cache)
 139 |     | -		return;
     | 140 | +		return NULL;
 140 | 141 | 
 141 | 142 | 	for (k = 0; k < cache_entries; k++)
 142 | 143 | 		of_node_put(phandle_cache[k]);
 143 | 144 | 
 144 |     | -	kfree(phandle_cache);
     | 145 | +	shadow = phandle_cache;
 145 | 146 | 	phandle_cache = NULL;
     | 147 | +	return shadow;
 146 | 148 | }
 147 | 149 | 
 148 | 150 | int of_free_phandle_cache(void)
 149 | 151 | {
 150 | 152 | 	unsigned long flags;
     | 153 | +	struct device_node **shadow;
 151 | 154 | 
 152 | 155 | 	raw_spin_lock_irqsave(&devtree_lock, flags);
 153 | 156 | 
 154 |     | -	__of_free_phandle_cache();
     | 157 | +	shadow = __of_free_phandle_cache();
 155 | 158 | 
 156 | 159 | 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 157 |     | -
     | 160 | +	kfree(shadow);
 158 | 161 | 	return 0;
 159 | 162 | }
 160 | 163 | #if !defined(CONFIG_MODULES)
 ..  |  ..  |
 189 | 192 | 	u32 cache_entries;
 190 | 193 | 	struct device_node *np;
 191 | 194 | 	u32 phandles = 0;
     | 195 | +	struct device_node **shadow;
 192 | 196 | 
 193 | 197 | 	raw_spin_lock_irqsave(&devtree_lock, flags);
 194 | 198 | 
 195 |     | -	__of_free_phandle_cache();
     | 199 | +	shadow = __of_free_phandle_cache();
 196 | 200 | 
 197 | 201 | 	for_each_of_allnodes(np)
 198 | 202 | 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 ..  |  ..  |
 200 | 204 | 
 201 | 205 | 	if (!phandles)
 202 | 206 | 		goto out;
     | 207 | +	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 203 | 208 | 
 204 | 209 | 	cache_entries = roundup_pow_of_two(phandles);
 205 | 210 | 	phandle_cache_mask = cache_entries - 1;
 206 | 211 | 
 207 | 212 | 	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
 208 | 213 | 				GFP_ATOMIC);
     | 214 | +	raw_spin_lock_irqsave(&devtree_lock, flags);
 209 | 215 | 	if (!phandle_cache)
 210 | 216 | 		goto out;
 211 | 217 | 
 ..  |  ..  |
 217 | 223 | 
 218 | 224 | out:
 219 | 225 | 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
     | 226 | +	kfree(shadow);
 220 | 227 | }
 221 | 228 | 
 222 | 229 | void __init of_core_init(void)