@@ -38,6 +38,7 @@
 #include <linux/of.h>
 #include <linux/iommu.h>
 #include <linux/rculist.h>
+#include <linux/locallock.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -212,6 +213,7 @@
 }
 
 static DEFINE_PER_CPU(__be64 *, tce_page);
+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				    long npages, unsigned long uaddr,
@@ -233,7 +235,8 @@
 					   direction, attrs);
 	}
 
-	local_irq_save(flags);	/* to protect tcep and the page behind it */
+	/* to protect tcep and the page behind it */
+	local_lock_irqsave(tcp_page_lock, flags);
 
 	tcep = __this_cpu_read(tce_page);
 
@@ -244,7 +247,7 @@
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
 		/* If allocation fails, fall back to the loop implementation */
 		if (!tcep) {
-			local_irq_restore(flags);
+			local_unlock_irqrestore(tcp_page_lock, flags);
 			return tce_build_pSeriesLP(tbl->it_index, tcenum,
 					tbl->it_page_shift,
 					npages, uaddr, direction, attrs);
@@ -279,7 +282,7 @@
 		tcenum += limit;
 	} while (npages > 0 && !rc);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(tcp_page_lock, flags);
 
 	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
 		ret = (int)rc;
@@ -450,13 +453,14 @@
 				DMA_BIDIRECTIONAL, 0);
 	}
 
-	local_irq_disable();	/* to protect tcep and the page behind it */
+	/* to protect tcep and the page behind it */
+	local_lock_irq(tcp_page_lock);
 	tcep = __this_cpu_read(tce_page);
 
 	if (!tcep) {
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
 		if (!tcep) {
-			local_irq_enable();
+			local_unlock_irq(tcp_page_lock);
 			return -ENOMEM;
 		}
 		__this_cpu_write(tce_page, tcep);
@@ -502,7 +506,7 @@
 
 	/* error cleanup: caller will clear whole range */
 
-	local_irq_enable();
+	local_unlock_irq(tcp_page_lock);
 	return rc;
 }
 
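
For reference, a minimal sketch of the locallock pattern this patch applies, assuming the pre-5.8 PREEMPT_RT <linux/locallock.h> API used above; the names example_lock, example_count and example_inc() are hypothetical and only illustrate the idiom, they are not part of the patch:

	/*
	 * Sketch: protect per-CPU data with a local lock instead of
	 * disabling interrupts directly. On !RT the lock maps to
	 * local_irq_save()/restore(); on RT it becomes a per-CPU
	 * sleeping lock, so the section stays preemptible.
	 */
	#include <linux/percpu.h>
	#include <linux/locallock.h>

	static DEFINE_PER_CPU(unsigned long, example_count);	/* hypothetical per-CPU data */
	static DEFINE_LOCAL_IRQ_LOCK(example_lock);		/* hypothetical lock */

	static void example_inc(void)
	{
		unsigned long flags;

		/* serialize access to this CPU's example_count */
		local_lock_irqsave(example_lock, flags);
		__this_cpu_inc(example_count);
		local_unlock_irqrestore(example_lock, flags);
	}

The patch follows the same shape: the tce_page per-CPU buffer keeps its existing protection semantics on !RT, while RT gains a named, lockdep-visible lock instead of a bare local_irq_save()/local_irq_disable().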