@@ -24,7 +24,6 @@
 #include <linux/of.h>
 #include <linux/iommu.h>
 #include <linux/rculist.h>
-#include <linux/local_lock.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -191,13 +190,7 @@
 	return ret;
 }
 
-struct tce_page {
-	__be64 * page;
-	local_lock_t lock;
-};
-static DEFINE_PER_CPU(struct tce_page, tce_page) = {
-	.lock = INIT_LOCAL_LOCK(lock),
-};
+static DEFINE_PER_CPU(__be64 *, tce_page);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				    long npages, unsigned long uaddr,
@@ -219,10 +212,9 @@
 					   direction, attrs);
 	}
 
-	/* to protect tcep and the page behind it */
-	local_lock_irqsave(&tce_page.lock, flags);
+	local_irq_save(flags);	/* to protect tcep and the page behind it */
 
-	tcep = __this_cpu_read(tce_page.page);
+	tcep = __this_cpu_read(tce_page);
 
 	/* This is safe to do since interrupts are off when we're called
 	 * from iommu_alloc{,_sg}()
@@ -231,12 +223,12 @@
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
 		/* If allocation fails, fall back to the loop implementation */
 		if (!tcep) {
-			local_unlock_irqrestore(&tce_page.lock, flags);
+			local_irq_restore(flags);
 			return tce_build_pSeriesLP(tbl->it_index, tcenum,
 					tbl->it_page_shift,
 					npages, uaddr, direction, attrs);
 		}
-		__this_cpu_write(tce_page.page, tcep);
+		__this_cpu_write(tce_page, tcep);
 	}
 
 	rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -266,7 +258,7 @@
 		tcenum += limit;
 	} while (npages > 0 && !rc);
 
-	local_unlock_irqrestore(&tce_page.lock, flags);
+	local_irq_restore(flags);
 
 	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
 		ret = (int)rc;
@@ -437,17 +429,16 @@
 				     DMA_BIDIRECTIONAL, 0);
 	}
 
-	/* to protect tcep and the page behind it */
-	local_lock_irq(&tce_page.lock);
-	tcep = __this_cpu_read(tce_page.page);
+	local_irq_disable();	/* to protect tcep and the page behind it */
+	tcep = __this_cpu_read(tce_page);
 
 	if (!tcep) {
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
 		if (!tcep) {
-			local_unlock_irq(&tce_page.lock);
+			local_irq_enable();
 			return -ENOMEM;
 		}
-		__this_cpu_write(tce_page.page, tcep);
+		__this_cpu_write(tce_page, tcep);
 	}
 
 	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
@@ -490,7 +481,7 @@
 
 	/* error cleanup: caller will clear whole range */
 
-	local_unlock_irq(&tce_page.lock);
+	local_irq_enable();
 	return rc;
 }
 
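
For orientation, below is a minimal sketch of the pattern these hunks restore: a bare per-CPU page pointer whose lazy allocation and use are serialized simply by disabling interrupts on the local CPU, instead of through a local_lock embedded in a wrapper struct. The names demo_page and demo_fill are illustrative only and do not appear in the patch.

#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/irqflags.h>
#include <linux/errno.h>

/* Per-CPU scratch-page pointer, mirroring the restored tce_page above. */
static DEFINE_PER_CPU(u64 *, demo_page);

/* Hypothetical helper (not in the patch): write one value into this
 * CPU's scratch page, allocating the page lazily on first use.
 */
static int demo_fill(u64 val)
{
	unsigned long flags;
	u64 *p;

	local_irq_save(flags);	/* protects p and the page behind it */

	p = __this_cpu_read(demo_page);
	if (!p) {
		/* Interrupts are off, so the allocation must be atomic. */
		p = (u64 *)__get_free_page(GFP_ATOMIC);
		if (!p) {
			local_irq_restore(flags);
			return -ENOMEM;	/* caller falls back to a slower path */
		}
		__this_cpu_write(demo_page, p);
	}

	p[0] = val;	/* nothing else on this CPU can run here */

	local_irq_restore(flags);
	return 0;
}

The invariant, stated by the patch's own comments ("to protect tcep and the page behind it", "This is safe to do since interrupts are off"), is that both the __this_cpu_read()/__this_cpu_write() accesses and every store into the page happen with interrupts disabled, so no interrupt handler or preempting task on the same CPU can race with them.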