commit 9999e48639b3cecb08ffb37358bcba3b48161b29
Date:   2024-05-10
File:   kernel/drivers/xen/events/events_base.c
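In short: the patch drops the global evtchn_rwlock and relies on RCU instead. The event handling loop, the late-EOI worker and xen_irq_lateeoi() become RCU read-side critical sections, and struct irq_info gains an rcu_work so that xen_free_irq() can hand the actual freeing to queue_rcu_work(); the info pointer is cleared and the memory released only after a grace period, when no CPU can still be using the IRQ.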
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/irqnr.h>
 #include <linux/pci.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/cpuhotplug.h>
 #include <linux/atomic.h>
@@ -94,6 +95,7 @@
 struct irq_info {
 	struct list_head list;
 	struct list_head eoi_list;
+	struct rcu_work rwork;
 	short refcnt;
 	short spurious_cnt;
 	short type;		/* type */
@@ -142,22 +144,12 @@
 static DEFINE_MUTEX(irq_mapping_update_lock);
 
 /*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
-
-/*
  * Lock hierarchy:
  *
  * irq_mapping_update_lock
- *   evtchn_rwlock
- *     IRQ-desc lock
- *       percpu eoi_list_lock
- *         irq_info->lock
+ *   IRQ-desc lock
+ *     percpu eoi_list_lock
+ *       irq_info->lock
  */
 
 static LIST_HEAD(xen_irq_list_head);
@@ -270,6 +262,22 @@
 		legacy_info_ptrs[irq] = info;
 	else
 		irq_set_chip_data(irq, info);
+}
+
+static void delayed_free_irq(struct work_struct *work)
+{
+	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+					     rwork);
+	unsigned int irq = info->irq;
+
+	/* Remove the info pointer only now, with no potential users left. */
+	set_info_for_irq(irq, NULL);
+
+	kfree(info);
+
+	/* Legacy IRQ descriptors are managed by the arch. */
+	if (irq >= nr_legacy_irqs())
+		irq_free_desc(irq);
 }
 
 /* Constructors for packed IRQ information. */
@@ -606,33 +614,36 @@
 
 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
 
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
 
 	while (true) {
-		spin_lock(&eoi->eoi_list_lock);
+		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
 
 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
 						eoi_list);
 
-		if (info == NULL || now < info->eoi_time) {
-			spin_unlock(&eoi->eoi_list_lock);
+		if (info == NULL)
+			break;
+
+		if (now < info->eoi_time) {
+			mod_delayed_work_on(info->eoi_cpu, system_wq,
+					    &eoi->delayed,
+					    info->eoi_time - now);
 			break;
 		}
 
 		list_del_init(&info->eoi_list);
 
-		spin_unlock(&eoi->eoi_list_lock);
+		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
 		info->eoi_time = 0;
 
 		xen_irq_lateeoi_locked(info, false);
 	}
 
-	if (info)
-		mod_delayed_work_on(info->eoi_cpu, system_wq,
-				    &eoi->delayed, info->eoi_time - now);
+	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
 }
 
 static void xen_cpu_init_eoi(unsigned int cpu)
@@ -647,16 +658,15 @@
 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
 {
 	struct irq_info *info;
-	unsigned long flags;
 
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
 
 	info = info_for_irq(irq);
 
 	if (info)
 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
 
@@ -675,6 +685,7 @@
 
 	info->type = IRQT_UNBOUND;
 	info->refcnt = -1;
+	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
 
 	set_info_for_irq(irq, info);
 
@@ -727,31 +738,18 @@
 static void xen_free_irq(unsigned irq)
 {
 	struct irq_info *info = info_for_irq(irq);
-	unsigned long flags;
 
 	if (WARN_ON(!info))
 		return;
-
-	write_lock_irqsave(&evtchn_rwlock, flags);
 
 	if (!list_empty(&info->eoi_list))
 		lateeoi_list_del(info);
 
 	list_del(&info->list);
 
-	set_info_for_irq(irq, NULL);
-
 	WARN_ON(info->refcnt > 0);
 
-	write_unlock_irqrestore(&evtchn_rwlock, flags);
-
-	kfree(info);
-
-	/* Legacy IRQ descriptors are managed by the arch. */
-	if (irq < nr_legacy_irqs())
-		return;
-
-	irq_free_desc(irq);
+	queue_rcu_work(system_wq, &info->rwork);
 }
 
 static void xen_evtchn_close(evtchn_port_t port)
@@ -1639,7 +1637,14 @@
 	int cpu = smp_processor_id();
 	struct evtchn_loop_ctrl ctrl = { 0 };
 
-	read_lock(&evtchn_rwlock);
+	/*
+	 * When closing an event channel the associated IRQ must not be freed
+	 * until all cpus have left the event handling loop. This is ensured
+	 * by taking the rcu_read_lock() while handling events, as freeing of
+	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
+	 * channel.
+	 */
+	rcu_read_lock();
 
 	do {
 		vcpu_info->evtchn_upcall_pending = 0;
@@ -1652,7 +1657,7 @@
 
 	} while (vcpu_info->evtchn_upcall_pending);
 
-	read_unlock(&evtchn_rwlock);
+	rcu_read_unlock();
 
 	/*
 	 * Increment irq_epoch only now to defer EOIs only for
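
For context, here is a minimal, self-contained sketch (kernel-style C, not part of the patch) of the RCU deferred-free pattern the diff adopts. All names (demo_obj, demo_ptr, demo_remove, ...) are made up for illustration, and the sketch unpublishes the pointer up front, whereas the real patch deliberately keeps the info pointer published until delayed_free_irq() runs.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct rcu_work rwork;	/* carries the object across a grace period */
	int payload;
};

static struct demo_obj __rcu *demo_ptr;

/* Reader: the object cannot be freed while we are inside the
 * RCU read-side critical section. */
static int demo_read(void)
{
	struct demo_obj *obj;
	int val = -1;

	rcu_read_lock();
	obj = rcu_dereference(demo_ptr);
	if (obj)
		val = obj->payload;
	rcu_read_unlock();

	return val;
}

/* Runs from the system workqueue only after a full grace period,
 * i.e. after every pre-existing reader has left its critical section. */
static void demo_free_fn(struct work_struct *work)
{
	struct demo_obj *obj = container_of(to_rcu_work(work),
					    struct demo_obj, rwork);

	kfree(obj);
}

/* Remover: unpublish the pointer, then defer the actual kfree(). */
static void demo_remove(void)
{
	struct demo_obj *obj = rcu_dereference_protected(demo_ptr, true);

	if (!obj)
		return;

	rcu_assign_pointer(demo_ptr, NULL);
	INIT_RCU_WORK(&obj->rwork, demo_free_fn);
	queue_rcu_work(system_wq, &obj->rwork);
}

queue_rcu_work() rather than call_rcu() is the right tool in the patch because the final teardown (irq_free_desc()) can sleep and so must run from process context, not from the softirq context in which plain RCU callbacks execute.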