forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-10-16
Commit: 50a212ec906f7524620675f0c57357691c26c81f
File:   kernel/drivers/xen/events/events_base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Xen event channels
  *
@@ -28,10 +29,11 @@
 #include <linux/irq.h>
 #include <linux/moduleparam.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/irqnr.h>
 #include <linux/pci.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/cpuhotplug.h>
 #include <linux/atomic.h>
@@ -40,6 +42,7 @@
 #ifdef CONFIG_X86
 #include <asm/desc.h>
 #include <asm/ptrace.h>
+#include <asm/idtentry.h>
 #include <asm/irq.h>
 #include <asm/io_apic.h>
 #include <asm/i8259.h>
@@ -68,6 +71,64 @@
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "xen."
 
+/* Interrupt types. */
+enum xen_irq_type {
+	IRQT_UNBOUND = 0,
+	IRQT_PIRQ,
+	IRQT_VIRQ,
+	IRQT_IPI,
+	IRQT_EVTCHN
+};
+
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
+ *           guest, or GSI (real passthrough IRQ) of the device.
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info {
+	struct list_head list;
+	struct list_head eoi_list;
+	struct rcu_work rwork;
+	short refcnt;
+	short spurious_cnt;
+	short type;		/* type */
+	u8 mask_reason;		/* Why is event channel masked */
+#define EVT_MASK_REASON_EXPLICIT	0x01
+#define EVT_MASK_REASON_TEMPORARY	0x02
+#define EVT_MASK_REASON_EOI_PENDING	0x04
+	u8 is_active;		/* Is event just being handled? */
+	unsigned irq;
+	evtchn_port_t evtchn;	/* event channel */
+	unsigned short cpu;	/* cpu bound */
+	unsigned short eoi_cpu;	/* EOI must happen on this cpu-1 */
+	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
+	u64 eoi_time;		/* Time in jiffies when to EOI. */
+	raw_spinlock_t lock;
+
+	union {
+		unsigned short virq;
+		enum ipi_vector ipi;
+		struct {
+			unsigned short pirq;
+			unsigned short gsi;
+			unsigned char vector;
+			unsigned char flags;
+			uint16_t domid;
+		} pirq;
+	} u;
+};
+
+#define PIRQ_NEEDS_EOI	(1 << 0)
+#define PIRQ_SHAREABLE	(1 << 1)
+#define PIRQ_MSI_GROUP	(1 << 2)
+
 static uint __read_mostly event_loop_timeout = 2;
 module_param(event_loop_timeout, uint, 0644);
 
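Note on the new mask_reason field: the EVT_MASK_REASON_* bits let several independent reasons for masking a channel overlap, and the channel may be unmasked only once all of them are cleared. A condensed sketch of the locking pattern this implies (modeled on the do_mask()/do_unmask() helpers used elsewhere in this file; not the patch text itself):

static void do_mask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	if (!info->mask_reason)		/* first reason: really mask */
		mask_evtchn(info->evtchn);

	info->mask_reason |= reason;

	raw_spin_unlock_irqrestore(&info->lock, flags);
}

static void do_unmask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	info->mask_reason &= ~reason;

	if (!info->mask_reason)		/* last reason gone: unmask */
		unmask_evtchn(info->evtchn);

	raw_spin_unlock_irqrestore(&info->lock, flags);
}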
@@ -83,22 +144,12 @@
 static DEFINE_MUTEX(irq_mapping_update_lock);
 
 /*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
-
-/*
  * Lock hierarchy:
  *
  * irq_mapping_update_lock
- *   evtchn_rwlock
- *     IRQ-desc lock
- *       percpu eoi_list_lock
- *         irq_info->lock
+ *   IRQ-desc lock
+ *     percpu eoi_list_lock
+ *       irq_info->lock
 */
 
 static LIST_HEAD(xen_irq_list_head);
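The hierarchy is listed outermost-first: evtchn_rwlock drops out entirely because RCU read sections do not participate in lock ordering. A purely illustrative path taking both ends of the remaining hierarchy would nest like this (hypothetical function, not from this file):

/* Illustrative nesting only. */
static void example_update_path(struct irq_info *info)
{
	unsigned long flags;

	mutex_lock(&irq_mapping_update_lock);		/* outermost */
	raw_spin_lock_irqsave(&info->lock, flags);	/* innermost */

	/* ... update the mapping and per-IRQ state ... */

	raw_spin_unlock_irqrestore(&info->lock, flags);
	mutex_unlock(&irq_mapping_update_lock);
}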
@@ -109,7 +160,7 @@
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-int **evtchn_to_irq;
+static int **evtchn_to_irq;
 #ifdef CONFIG_X86
 static unsigned long *pirq_eoi_map;
 #endif
@@ -152,7 +203,7 @@
 	}
 }
 
-static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
 {
 	unsigned row;
 	unsigned col;
@@ -187,7 +238,7 @@
 	return 0;
 }
 
-int get_evtchn_to_irq(unsigned evtchn)
+int get_evtchn_to_irq(evtchn_port_t evtchn)
 {
 	if (evtchn >= xen_evtchn_max_channels())
 		return -1;
@@ -197,7 +248,7 @@
 }
 
 /* Get info for IRQ */
-struct irq_info *info_for_irq(unsigned irq)
+static struct irq_info *info_for_irq(unsigned irq)
 {
 	if (irq < nr_legacy_irqs())
 		return legacy_info_ptrs[irq];
@@ -213,11 +264,27 @@
 	irq_set_chip_data(irq, info);
 }
 
+static void delayed_free_irq(struct work_struct *work)
+{
+	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+					     rwork);
+	unsigned int irq = info->irq;
+
+	/* Remove the info pointer only now, with no potential users left. */
+	set_info_for_irq(irq, NULL);
+
+	kfree(info);
+
+	/* Legacy IRQ descriptors are managed by the arch. */
+	if (irq >= nr_legacy_irqs())
+		irq_free_desc(irq);
+}
+
 /* Constructors for packed IRQ information. */
 static int xen_irq_info_common_setup(struct irq_info *info,
 				     unsigned irq,
 				     enum xen_irq_type type,
-				     unsigned evtchn,
+				     evtchn_port_t evtchn,
 				     unsigned short cpu)
 {
 	int ret;
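delayed_free_irq() is an rcu_work callback: queue_rcu_work() (used by xen_free_irq() further down) first waits out an RCU grace period and then runs the callback on a workqueue, so the free runs in process context only after every rcu_read_lock() reader is gone. The generic shape of that pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct widget {				/* hypothetical object */
	struct rcu_work rwork;
	int payload;
};

static void widget_free_fn(struct work_struct *work)
{
	struct widget *w = container_of(to_rcu_work(work),
					struct widget, rwork);

	kfree(w);	/* runs after a full grace period, may sleep */
}

/* At creation time. */
static void widget_init(struct widget *w)
{
	INIT_RCU_WORK(&w->rwork, widget_free_fn);
}

/* At teardown: returns immediately, frees later. */
static void widget_destroy(struct widget *w)
{
	queue_rcu_work(system_wq, &w->rwork);
}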
@@ -237,11 +304,11 @@
 
 	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
-	return xen_evtchn_port_setup(info);
+	return xen_evtchn_port_setup(evtchn);
 }
 
 static int xen_irq_info_evtchn_setup(unsigned irq,
-				     unsigned evtchn)
+				     evtchn_port_t evtchn)
 {
 	struct irq_info *info = info_for_irq(irq);
 
@@ -250,7 +317,7 @@
 
 static int xen_irq_info_ipi_setup(unsigned cpu,
 				  unsigned irq,
-				  unsigned evtchn,
+				  evtchn_port_t evtchn,
 				  enum ipi_vector ipi)
 {
 	struct irq_info *info = info_for_irq(irq);
@@ -264,7 +331,7 @@
 
 static int xen_irq_info_virq_setup(unsigned cpu,
 				   unsigned irq,
-				   unsigned evtchn,
+				   evtchn_port_t evtchn,
 				   unsigned virq)
 {
 	struct irq_info *info = info_for_irq(irq);
@@ -277,7 +344,7 @@
 }
 
 static int xen_irq_info_pirq_setup(unsigned irq,
-				   unsigned evtchn,
+				   evtchn_port_t evtchn,
 				   unsigned pirq,
 				   unsigned gsi,
 				   uint16_t domid,
@@ -303,7 +370,7 @@
 /*
  * Accessors for packed IRQ information.
  */
-unsigned int evtchn_from_irq(unsigned irq)
+evtchn_port_t evtchn_from_irq(unsigned irq)
 {
 	const struct irq_info *info = NULL;
 
@@ -315,7 +382,7 @@
 	return info->evtchn;
 }
 
-unsigned irq_from_evtchn(unsigned int evtchn)
+unsigned int irq_from_evtchn(evtchn_port_t evtchn)
 {
 	return get_evtchn_to_irq(evtchn);
 }
@@ -361,12 +428,12 @@
 	return info_for_irq(irq)->type;
 }
 
-unsigned cpu_from_irq(unsigned irq)
+static unsigned cpu_from_irq(unsigned irq)
 {
 	return info_for_irq(irq)->cpu;
 }
 
-unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
 {
 	int irq = get_evtchn_to_irq(evtchn);
 	unsigned ret = 0;
@@ -420,16 +487,16 @@
 	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }
 
-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
 {
-	int irq = get_evtchn_to_irq(chn);
+	int irq = get_evtchn_to_irq(evtchn);
 	struct irq_info *info = info_for_irq(irq);
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
 	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
 #endif
-	xen_evtchn_port_bind_to_cpu(info, cpu);
+	xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
 
 	info->cpu = cpu;
 }
@@ -444,7 +511,7 @@
  */
 void notify_remote_via_irq(int irq)
 {
-	int evtchn = evtchn_from_irq(irq);
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
 
 	if (VALID_EVTCHN(evtchn))
 		notify_remote_via_evtchn(evtchn);
@@ -547,33 +614,36 @@
 
 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
 
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
 
 	while (true) {
-		spin_lock(&eoi->eoi_list_lock);
+		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
 
 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
 						eoi_list);
 
-		if (info == NULL || now < info->eoi_time) {
-			spin_unlock(&eoi->eoi_list_lock);
+		if (info == NULL)
+			break;
+
+		if (now < info->eoi_time) {
+			mod_delayed_work_on(info->eoi_cpu, system_wq,
+					    &eoi->delayed,
+					    info->eoi_time - now);
 			break;
 		}
 
 		list_del_init(&info->eoi_list);
 
-		spin_unlock(&eoi->eoi_list_lock);
+		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
 		info->eoi_time = 0;
 
 		xen_irq_lateeoi_locked(info, false);
 	}
 
-	if (info)
-		mod_delayed_work_on(info->eoi_cpu, system_wq,
-				    &eoi->delayed, info->eoi_time - now);
+	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
 }
 
 static void xen_cpu_init_eoi(unsigned int cpu)
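Besides switching the outer lock to rcu_read_lock(), this hunk moves the re-arming of the delayed work inside the spinlocked region; the old code dereferenced info after dropping the list lock, when the entry could already have been removed. The same control flow, reduced to a generic deadline-list drain (all names hypothetical):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct timed_entry {			/* hypothetical */
	struct list_head node;
	u64 due;			/* jiffies_64 deadline */
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);
static struct delayed_work drain_work;	/* INIT_DELAYED_WORK() at setup */

static void drain_worker(struct work_struct *work)
{
	struct timed_entry *e;
	unsigned long flags;
	u64 now = get_jiffies_64();

	while (true) {
		spin_lock_irqsave(&pending_lock, flags);

		e = list_first_entry_or_null(&pending, struct timed_entry,
					     node);
		if (e == NULL)
			break;		/* unlocked below */

		if (now < e->due) {
			/* Re-arm while still holding the lock, so a
			 * concurrent insert at the head cannot be missed. */
			mod_delayed_work(system_wq, &drain_work, e->due - now);
			break;		/* unlocked below */
		}

		list_del_init(&e->node);
		spin_unlock_irqrestore(&pending_lock, flags);

		kfree(e);		/* the "process one entry" step */
	}

	spin_unlock_irqrestore(&pending_lock, flags);
}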
@@ -588,16 +658,15 @@
 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
 {
 	struct irq_info *info;
-	unsigned long flags;
 
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
 
 	info = info_for_irq(irq);
 
 	if (info)
 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
 
@@ -616,6 +685,7 @@
 
 	info->type = IRQT_UNBOUND;
 	info->refcnt = -1;
+	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
 
 	set_info_for_irq(irq, info);
 
@@ -668,34 +738,21 @@
 static void xen_free_irq(unsigned irq)
 {
 	struct irq_info *info = info_for_irq(irq);
-	unsigned long flags;
 
 	if (WARN_ON(!info))
 		return;
-
-	write_lock_irqsave(&evtchn_rwlock, flags);
 
 	if (!list_empty(&info->eoi_list))
 		lateeoi_list_del(info);
 
 	list_del(&info->list);
 
-	set_info_for_irq(irq, NULL);
-
 	WARN_ON(info->refcnt > 0);
 
-	write_unlock_irqrestore(&evtchn_rwlock, flags);
-
-	kfree(info);
-
-	/* Legacy IRQ descriptors are managed by the arch. */
-	if (irq < nr_legacy_irqs())
-		return;
-
-	irq_free_desc(irq);
+	queue_rcu_work(system_wq, &info->rwork);
 }
 
-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(evtchn_port_t port)
 {
 	struct evtchn_close close;
 
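xen_free_irq() now only unlinks the info and queues the rcu_work; unpublishing the pointer and freeing happen later in delayed_free_irq(). The teardown ordering that makes this safe, condensed into a comment (illustrative, not patch text):

/*
 * Condensed teardown ordering:
 *
 *   1. xen_evtchn_close(port)   - hypervisor delivers no new events
 *   2. xen_free_irq(irq)        - unlink info, queue_rcu_work()
 *   3. <grace period elapses>   - all rcu_read_lock() event loops finish
 *   4. delayed_free_irq()       - set_info_for_irq(irq, NULL),
 *                                 kfree(info), irq_free_desc(irq)
 *
 * A CPU still inside the event loop when the channel is closed can keep
 * using its irq_info pointer; step 4 cannot start before that CPU exits.
 */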
@@ -729,7 +786,7 @@
 static void eoi_pirq(struct irq_data *data)
 {
 	struct irq_info *info = info_for_irq(data->irq);
-	int evtchn = info ? info->evtchn : 0;
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
 	int rc = 0;
 
@@ -764,7 +821,7 @@
 {
 	struct evtchn_bind_pirq bind_pirq;
 	struct irq_info *info = info_for_irq(irq);
-	int evtchn = evtchn_from_irq(irq);
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
 	int rc;
 
 	BUG_ON(info->type != IRQT_PIRQ);
@@ -792,7 +849,7 @@
 	info->evtchn = evtchn;
 	bind_evtchn_to_cpu(evtchn, 0);
 
-	rc = xen_evtchn_port_setup(info);
+	rc = xen_evtchn_port_setup(evtchn);
 	if (rc)
 		goto err;
 
@@ -818,7 +875,7 @@
 {
 	unsigned int irq = data->irq;
 	struct irq_info *info = info_for_irq(irq);
-	unsigned evtchn = evtchn_from_irq(irq);
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
 
 	BUG_ON(info->type != IRQT_PIRQ);
 
@@ -858,7 +915,7 @@
 
 static void __unbind_from_irq(unsigned int irq)
 {
-	int evtchn = evtchn_from_irq(irq);
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
 	struct irq_info *info = info_for_irq(irq);
 
 	if (info->refcnt > 0) {
@@ -1138,8 +1195,8 @@
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
-	int evtchn, irq;
-	int ret;
+	evtchn_port_t evtchn;
+	int ret, irq;
 
 	mutex_lock(&irq_mapping_update_lock);
 
@@ -1193,14 +1250,6 @@
 					       chip);
 }
 
-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-				   evtchn_port_t remote_port)
-{
-	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
-						   &xen_dynamic_chip);
-}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
-
 int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
 					   evtchn_port_t remote_port)
 {
@@ -1209,10 +1258,11 @@
 }
 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
 
-static int find_virq(unsigned int virq, unsigned int cpu)
+static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
 {
 	struct evtchn_status status;
-	int port, rc = -ENOENT;
+	evtchn_port_t port;
+	int rc = -ENOENT;
 
 	memset(&status, 0, sizeof(status));
 	for (port = 0; port < xen_evtchn_max_channels(); port++) {
@@ -1224,7 +1274,7 @@
 		if (status.status != EVTCHNSTAT_virq)
 			continue;
 		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
-			rc = port;
+			*evtchn = port;
 			break;
 		}
 	}
@@ -1247,7 +1297,8 @@
 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
 	struct evtchn_bind_virq bind_virq;
-	int evtchn, irq, ret;
+	evtchn_port_t evtchn = 0;
+	int irq, ret;
 
 	mutex_lock(&irq_mapping_update_lock);
 
@@ -1273,9 +1324,8 @@
 		evtchn = bind_virq.port;
 	else {
 		if (ret == -EEXIST)
-			ret = find_virq(virq, cpu);
+			ret = find_virq(virq, cpu, &evtchn);
 		BUG_ON(ret < 0);
-		evtchn = ret;
 	}
 
 	ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
@@ -1367,19 +1417,6 @@
 	return irq;
 }
 
-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-					  evtchn_port_t remote_port,
-					  irq_handler_t handler,
-					  unsigned long irqflags,
-					  const char *devname,
-					  void *dev_id)
-{
-	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
-				remote_port, handler, irqflags, devname,
-				dev_id, &xen_dynamic_chip);
-}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
-
 int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
 						  evtchn_port_t remote_port,
 						  irq_handler_t handler,
@@ -1463,7 +1500,7 @@
 }
 EXPORT_SYMBOL_GPL(xen_set_irq_priority);
 
-int evtchn_make_refcounted(unsigned int evtchn)
+int evtchn_make_refcounted(evtchn_port_t evtchn)
 {
 	int irq = get_evtchn_to_irq(evtchn);
 	struct irq_info *info;
@@ -1484,7 +1521,7 @@
 }
 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
 
-int evtchn_get(unsigned int evtchn)
+int evtchn_get(evtchn_port_t evtchn)
 {
 	int irq;
 	struct irq_info *info;
@@ -1517,7 +1554,7 @@
 }
 EXPORT_SYMBOL_GPL(evtchn_get);
 
-void evtchn_put(unsigned int evtchn)
+void evtchn_put(evtchn_port_t evtchn)
 {
 	int irq = get_evtchn_to_irq(evtchn);
 	if (WARN_ON(irq == -1))
@@ -1594,33 +1631,33 @@
 	generic_handle_irq(irq);
 }
 
-static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-
 static void __xen_evtchn_do_upcall(void)
 {
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-	int cpu = get_cpu();
-	unsigned count;
+	int cpu = smp_processor_id();
 	struct evtchn_loop_ctrl ctrl = { 0 };
 
-	read_lock(&evtchn_rwlock);
+	/*
+	 * When closing an event channel the associated IRQ must not be freed
+	 * until all cpus have left the event handling loop. This is ensured
+	 * by taking the rcu_read_lock() while handling events, as freeing of
+	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
+	 * channel.
+	 */
+	rcu_read_lock();
 
 	do {
 		vcpu_info->evtchn_upcall_pending = 0;
-
-		if (__this_cpu_inc_return(xed_nesting_count) - 1)
-			goto out;
 
 		xen_evtchn_handle_events(cpu, &ctrl);
 
 		BUG_ON(!irqs_disabled());
 
-		count = __this_cpu_read(xed_nesting_count);
-		__this_cpu_write(xed_nesting_count, 0);
-	} while (count != 1 || vcpu_info->evtchn_upcall_pending);
+		virt_rmb(); /* Hypervisor can set upcall pending. */
 
-out:
-	read_unlock(&evtchn_rwlock);
+	} while (vcpu_info->evtchn_upcall_pending);
+
+	rcu_read_unlock();
 
 	/*
 	 * Increment irq_epoch only now to defer EOIs only for
@@ -1628,8 +1665,6 @@
 	 * above.
 	 */
 	__this_cpu_inc(irq_epoch);
-
-	put_cpu();
 }
 
 void xen_evtchn_do_upcall(struct pt_regs *regs)
@@ -1637,9 +1672,6 @@
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	irq_enter();
-#ifdef CONFIG_X86
-	inc_irq_stat(irq_hv_callback_count);
-#endif
 
 	__xen_evtchn_do_upcall();
 
@@ -1654,7 +1686,7 @@
 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
 
 /* Rebind a new event channel to an existing irq. */
-void rebind_evtchn_irq(int evtchn, int irq)
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
 {
 	struct irq_info *info = info_for_irq(irq);
 
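The upcall loop above is the reader this whole conversion protects: everything it can still reach stays valid for the duration of one rcu_read_lock() section. Its RCU skeleton, condensed into a hypothetical helper (illustrative, not the actual loop):

#include <linux/irq.h>
#include <linux/rcupdate.h>

static void handle_one_event(evtchn_port_t port)
{
	int irq;

	rcu_read_lock();

	irq = get_evtchn_to_irq(port);	/* may race with a closing channel */
	if (irq != -1)
		generic_handle_irq(irq);	/* irq_info cannot be freed here */

	rcu_read_unlock();	/* only now may delayed_free_irq() run */
}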
@@ -1876,7 +1908,8 @@
 static void restore_cpu_virqs(unsigned int cpu)
 {
 	struct evtchn_bind_virq bind_virq;
-	int virq, irq, evtchn;
+	evtchn_port_t evtchn;
+	int virq, irq;
 
 	for (virq = 0; virq < NR_VIRQS; virq++) {
 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
@@ -1901,7 +1934,8 @@
 static void restore_cpu_ipis(unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
-	int ipi, irq, evtchn;
+	evtchn_port_t evtchn;
+	int ipi, irq;
 
 	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
@@ -1934,7 +1968,7 @@
 EXPORT_SYMBOL(xen_clear_irq_pending);
 void xen_set_irq_pending(int irq)
 {
-	int evtchn = evtchn_from_irq(irq);
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
 
 	if (VALID_EVTCHN(evtchn))
 		set_evtchn(evtchn);
@@ -1942,7 +1976,7 @@
 
 bool xen_test_irq_pending(int irq)
 {
-	int evtchn = evtchn_from_irq(irq);
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
 	bool ret = false;
 
 	if (VALID_EVTCHN(evtchn))
@@ -2078,30 +2112,34 @@
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
  * vcpu and we don't need PCI support or APIC interactions. */
-void xen_callback_vector(void)
+void xen_setup_callback_vector(void)
 {
-	int rc;
 	uint64_t callback_via;
 
 	if (xen_have_vector_callback) {
 		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
-		rc = xen_set_callback_via(callback_via);
-		if (rc) {
+		if (xen_set_callback_via(callback_via)) {
 			pr_err("Request for Xen HVM callback vector failed\n");
 			xen_have_vector_callback = 0;
-			return;
 		}
-		pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
-		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
-				xen_hvm_callback_vector);
 	}
 }
+
+static __init void xen_alloc_callback_vector(void)
+{
+	if (!xen_have_vector_callback)
+		return;
+
+	pr_info("Xen HVM callback vector for event delivery is enabled\n");
+	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
+}
 #else
-void xen_callback_vector(void) {}
+void xen_setup_callback_vector(void) {}
+static inline void xen_alloc_callback_vector(void) {}
 #endif
 
-static bool fifo_events = true;
-module_param(fifo_events, bool, 0);
+bool xen_fifo_events = true;
+module_param_named(fifo_events, xen_fifo_events, bool, 0);
 
 static int xen_evtchn_cpu_prepare(unsigned int cpu)
 {
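The split separates the repeatable hypercall half (which can also be re-run after suspend/resume, hence not __init) from the one-shot IDT setup, which now refers to the idtentry handler asm_sysvec_xen_hvm_callback from <asm/idtentry.h>. The resulting boot-time ordering, as wired up in xen_init_IRQ() in the final hunk below:

/* Boot path (see the xen_init_IRQ() hunk below):
 *   xen_setup_callback_vector() - hypercall: tell Xen which vector to raise
 *   xen_alloc_callback_vector() - __init: install the IDT entry for it
 * A resume path would re-run only the hypercall half.
 */
if (xen_feature(XENFEAT_hvm_callback_vector)) {
	xen_setup_callback_vector();
	xen_alloc_callback_vector();
}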
@@ -2128,12 +2166,14 @@
 void __init xen_init_IRQ(void)
 {
 	int ret = -EINVAL;
-	unsigned int evtchn;
+	evtchn_port_t evtchn;
 
-	if (fifo_events)
+	if (xen_fifo_events)
 		ret = xen_evtchn_fifo_init();
-	if (ret < 0)
+	if (ret < 0) {
 		xen_evtchn_2l_init();
+		xen_fifo_events = false;
+	}
 
 	xen_cpu_init_eoi(smp_processor_id());
 
@@ -2153,12 +2193,13 @@
 
 #ifdef CONFIG_X86
 	if (xen_pv_domain()) {
-		irq_ctx_init(smp_processor_id());
 		if (xen_initial_domain())
 			pci_xen_initial_domain();
 	}
-	if (xen_feature(XENFEAT_hvm_callback_vector))
-		xen_callback_vector();
+	if (xen_feature(XENFEAT_hvm_callback_vector)) {
+		xen_setup_callback_vector();
+		xen_alloc_callback_vector();
+	}
 
 	if (xen_hvm_domain()) {
 		native_init_IRQ();