@@ -38,6 +38,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/srcu.h>
+#include <linux/cpu.h>
+#include <linux/locallock.h>
 
 #include "rcu.h"
 #include "rcu_segcblist.h"
@@ -461,21 +463,6 @@
 }
 
 /*
- * Track online CPUs to guide callback workqueue placement.
- */
-DEFINE_PER_CPU(bool, srcu_online);
-
-void srcu_online_cpu(unsigned int cpu)
-{
-	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
-}
-
-void srcu_offline_cpu(unsigned int cpu)
-{
-	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
-}
-
-/*
  * Place the workqueue handler on the specified CPU if online, otherwise
  * just run it whereever. This is useful for placing workqueue handlers
  * that are to invoke the specified CPU's callbacks.
@@ -486,12 +473,12 @@
 {
 	bool ret;
 
-	preempt_disable();
-	if (READ_ONCE(per_cpu(srcu_online, cpu)))
+	cpus_read_lock();
+	if (cpu_online(cpu))
 		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
 	else
 		ret = queue_delayed_work(wq, dwork, delay);
-	preempt_enable();
+	cpus_read_unlock();
 	return ret;
 }
 
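
The hunk above replaces the per-CPU srcu_online bookkeeping and its preempt_disable() section with cpus_read_lock(), which keeps cpu_online() stable for the duration of the queueing call without relying on a preempt-disabled region, something the RT tree generally tries to avoid around code that may take sleeping locks. For reference, the same pattern in isolation; this is an illustrative sketch only, with hypothetical names for everything except the kernel APIs already used in the hunk:

#include <linux/cpu.h>
#include <linux/workqueue.h>

/*
 * Illustrative sketch, not part of the patch: queue a delayed work item on a
 * preferred CPU while the hotplug read lock keeps cpu_online() stable, and
 * fall back to an unbound queue if that CPU is offline.
 */
static bool my_queue_work_on_cpu(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *dwork,
				 unsigned long delay)
{
	bool ret;

	cpus_read_lock();		/* hold off CPU hotplug; may sleep */
	if (cpu_online(cpu))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	cpus_read_unlock();
	return ret;
}
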
@@ -774,6 +761,8 @@
  * negligible when amoritized over that time period, and the extra latency
  * of a needlessly non-expedited grace period is similarly negligible.
  */
+static DEFINE_LOCAL_IRQ_LOCK(sp_llock);
+
 static bool srcu_might_be_idle(struct srcu_struct *sp)
 {
 	unsigned long curseq;
@@ -782,13 +771,13 @@
 	unsigned long t;
 
 	/* If the local srcu_data structure has callbacks, not idle. */
-	local_irq_save(flags);
+	local_lock_irqsave(sp_llock, flags);
 	sdp = this_cpu_ptr(sp->sda);
 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
-		local_irq_restore(flags);
+		local_unlock_irqrestore(sp_llock, flags);
 		return false; /* Callbacks already present, so not idle. */
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(sp_llock, flags);
 
 	/*
 	 * No local callbacks, so probabalistically probe global state.
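
In this hunk the bare local_irq_save()/local_irq_restore() around the per-CPU srcu_data access becomes the named local lock sp_llock defined above. As I understand the RT tree's linux/locallock.h, a lock defined with DEFINE_LOCAL_IRQ_LOCK() behaves like local_irq_save() on a non-RT build but maps to a per-CPU sleeping lock on PREEMPT_RT, so the section stays preemptible while still serializing access to the per-CPU data and documenting what it protects. A minimal sketch of the same pattern, with made-up names:

#include <linux/percpu.h>
#include <linux/locallock.h>	/* RT-tree header added in the first hunk */

/*
 * Illustrative sketch only: guard per-CPU data with a named local lock
 * instead of a bare local_irq_save().  All identifiers here are hypothetical.
 */
static DEFINE_LOCAL_IRQ_LOCK(my_llock);
static DEFINE_PER_CPU(unsigned long, my_events);

static void my_record_event(void)
{
	unsigned long flags;

	local_lock_irqsave(my_llock, flags);	/* irqs off on !RT, per-CPU lock on RT */
	this_cpu_inc(my_events);
	local_unlock_irqrestore(my_llock, flags);
}
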
@@ -866,7 +855,7 @@
 		return;
 	}
 	rhp->func = func;
-	local_irq_save(flags);
+	local_lock_irqsave(sp_llock, flags);
 	sdp = this_cpu_ptr(sp->sda);
 	spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
@@ -882,7 +871,8 @@
 		sdp->srcu_gp_seq_needed_exp = s;
 		needexp = true;
 	}
-	spin_unlock_irqrestore_rcu_node(sdp, flags);
+	spin_unlock_rcu_node(sdp);
+	local_unlock_irqrestore(sp_llock, flags);
 	if (needgp)
 		srcu_funnel_gp_start(sp, sdp, s, do_norm);
 	else if (needexp)
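
Because the interrupt-disabling part of this critical section is now provided by sp_llock rather than by the node lock itself, the combined spin_unlock_irqrestore_rcu_node() has to be split: the inner node lock is dropped first, then the outer local lock taken by local_lock_irqsave() earlier in the function (see the previous hunk). A sketch of the nesting, with hypothetical lock names standing in for the SRCU-internal ones:

#include <linux/spinlock.h>
#include <linux/locallock.h>

/*
 * Illustrative sketch only: the local lock is the outer lock and the
 * spinlock the inner one, so they are released innermost-first.
 */
static DEFINE_LOCAL_IRQ_LOCK(my_llock);
static DEFINE_RAW_SPINLOCK(my_node_lock);

static void my_update(void)
{
	unsigned long flags;

	local_lock_irqsave(my_llock, flags);	/* outer: per-CPU local lock */
	raw_spin_lock(&my_node_lock);		/* inner: node spinlock */
	/* ... update the protected state ... */
	raw_spin_unlock(&my_node_lock);		/* drop the inner lock first */
	local_unlock_irqrestore(my_llock, flags); /* then the outer local lock */
}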