hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/xen/events/events_base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Xen event channels
  *
@@ -28,7 +29,7 @@
 #include <linux/irq.h>
 #include <linux/moduleparam.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/irqnr.h>
 #include <linux/pci.h>
@@ -40,6 +41,7 @@
 #ifdef CONFIG_X86
 #include <asm/desc.h>
 #include <asm/ptrace.h>
+#include <asm/idtentry.h>
 #include <asm/irq.h>
 #include <asm/io_apic.h>
 #include <asm/i8259.h>
@@ -67,6 +69,63 @@
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "xen."
+
+/* Interrupt types. */
+enum xen_irq_type {
+        IRQT_UNBOUND = 0,
+        IRQT_PIRQ,
+        IRQT_VIRQ,
+        IRQT_IPI,
+        IRQT_EVTCHN
+};
+
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM
+ *           guest, or GSI (real passthrough IRQ) of the device.
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info {
+        struct list_head list;
+        struct list_head eoi_list;
+        short refcnt;
+        short spurious_cnt;
+        short type;             /* type */
+        u8 mask_reason;         /* Why is event channel masked */
+#define EVT_MASK_REASON_EXPLICIT        0x01
+#define EVT_MASK_REASON_TEMPORARY       0x02
+#define EVT_MASK_REASON_EOI_PENDING     0x04
+        u8 is_active;           /* Is event just being handled? */
+        unsigned irq;
+        evtchn_port_t evtchn;   /* event channel */
+        unsigned short cpu;     /* cpu bound */
+        unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
+        unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+        u64 eoi_time;           /* Time in jiffies when to EOI. */
+        raw_spinlock_t lock;
+
+        union {
+                unsigned short virq;
+                enum ipi_vector ipi;
+                struct {
+                        unsigned short pirq;
+                        unsigned short gsi;
+                        unsigned char vector;
+                        unsigned char flags;
+                        uint16_t domid;
+                } pirq;
+        } u;
+};
+
+#define PIRQ_NEEDS_EOI  (1 << 0)
+#define PIRQ_SHAREABLE  (1 << 1)
+#define PIRQ_MSI_GROUP  (1 << 2)
 
 static uint __read_mostly event_loop_timeout = 2;
 module_param(event_loop_timeout, uint, 0644);
@@ -109,7 +168,7 @@
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-int **evtchn_to_irq;
+static int **evtchn_to_irq;
 #ifdef CONFIG_X86
 static unsigned long *pirq_eoi_map;
 #endif
@@ -152,7 +211,7 @@
         }
 }
 
-static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
 {
         unsigned row;
         unsigned col;
@@ -187,7 +246,7 @@
         return 0;
 }
 
-int get_evtchn_to_irq(unsigned evtchn)
+int get_evtchn_to_irq(evtchn_port_t evtchn)
 {
         if (evtchn >= xen_evtchn_max_channels())
                 return -1;
@@ -197,7 +256,7 @@
 }
 
 /* Get info for IRQ */
-struct irq_info *info_for_irq(unsigned irq)
+static struct irq_info *info_for_irq(unsigned irq)
 {
         if (irq < nr_legacy_irqs())
                 return legacy_info_ptrs[irq];
@@ -217,7 +276,7 @@
 static int xen_irq_info_common_setup(struct irq_info *info,
                                      unsigned irq,
                                      enum xen_irq_type type,
-                                     unsigned evtchn,
+                                     evtchn_port_t evtchn,
                                      unsigned short cpu)
 {
         int ret;
@@ -237,11 +296,11 @@
 
         irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
-        return xen_evtchn_port_setup(info);
+        return xen_evtchn_port_setup(evtchn);
 }
 
 static int xen_irq_info_evtchn_setup(unsigned irq,
-                                     unsigned evtchn)
+                                     evtchn_port_t evtchn)
 {
         struct irq_info *info = info_for_irq(irq);
 
@@ -250,7 +309,7 @@
 
 static int xen_irq_info_ipi_setup(unsigned cpu,
                                   unsigned irq,
-                                  unsigned evtchn,
+                                  evtchn_port_t evtchn,
                                   enum ipi_vector ipi)
 {
         struct irq_info *info = info_for_irq(irq);
@@ -264,7 +323,7 @@
 
 static int xen_irq_info_virq_setup(unsigned cpu,
                                    unsigned irq,
-                                   unsigned evtchn,
+                                   evtchn_port_t evtchn,
                                    unsigned virq)
 {
         struct irq_info *info = info_for_irq(irq);
@@ -277,7 +336,7 @@
 }
 
 static int xen_irq_info_pirq_setup(unsigned irq,
-                                   unsigned evtchn,
+                                   evtchn_port_t evtchn,
                                    unsigned pirq,
                                    unsigned gsi,
                                    uint16_t domid,
@@ -303,7 +362,7 @@
 /*
  * Accessors for packed IRQ information.
  */
-unsigned int evtchn_from_irq(unsigned irq)
+evtchn_port_t evtchn_from_irq(unsigned irq)
 {
         const struct irq_info *info = NULL;
 
@@ -315,7 +374,7 @@
         return info->evtchn;
 }
 
-unsigned irq_from_evtchn(unsigned int evtchn)
+unsigned int irq_from_evtchn(evtchn_port_t evtchn)
 {
         return get_evtchn_to_irq(evtchn);
 }
@@ -361,12 +420,12 @@
         return info_for_irq(irq)->type;
 }
 
-unsigned cpu_from_irq(unsigned irq)
+static unsigned cpu_from_irq(unsigned irq)
 {
         return info_for_irq(irq)->cpu;
 }
 
-unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
 {
         int irq = get_evtchn_to_irq(evtchn);
         unsigned ret = 0;
@@ -420,16 +479,16 @@
         return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }
 
-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
 {
-        int irq = get_evtchn_to_irq(chn);
+        int irq = get_evtchn_to_irq(evtchn);
         struct irq_info *info = info_for_irq(irq);
 
         BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
         cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
 #endif
-        xen_evtchn_port_bind_to_cpu(info, cpu);
+        xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
 
         info->cpu = cpu;
 }
@@ -444,7 +503,7 @@
  */
 void notify_remote_via_irq(int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
 
         if (VALID_EVTCHN(evtchn))
                 notify_remote_via_evtchn(evtchn);
@@ -695,7 +754,7 @@
         irq_free_desc(irq);
 }
 
-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(evtchn_port_t port)
 {
         struct evtchn_close close;
 
@@ -729,7 +788,7 @@
 static void eoi_pirq(struct irq_data *data)
 {
         struct irq_info *info = info_for_irq(data->irq);
-        int evtchn = info ? info->evtchn : 0;
+        evtchn_port_t evtchn = info ? info->evtchn : 0;
         struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
         int rc = 0;
 
@@ -764,7 +823,7 @@
 {
         struct evtchn_bind_pirq bind_pirq;
         struct irq_info *info = info_for_irq(irq);
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
         int rc;
 
         BUG_ON(info->type != IRQT_PIRQ);
@@ -792,7 +851,7 @@
         info->evtchn = evtchn;
         bind_evtchn_to_cpu(evtchn, 0);
 
-        rc = xen_evtchn_port_setup(info);
+        rc = xen_evtchn_port_setup(evtchn);
         if (rc)
                 goto err;
 
@@ -818,7 +877,7 @@
 {
         unsigned int irq = data->irq;
         struct irq_info *info = info_for_irq(irq);
-        unsigned evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
 
         BUG_ON(info->type != IRQT_PIRQ);
 
@@ -858,7 +917,7 @@
 
 static void __unbind_from_irq(unsigned int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
         struct irq_info *info = info_for_irq(irq);
 
         if (info->refcnt > 0) {
@@ -1138,8 +1197,8 @@
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
-        int evtchn, irq;
-        int ret;
+        evtchn_port_t evtchn;
+        int ret, irq;
 
         mutex_lock(&irq_mapping_update_lock);
 
@@ -1193,14 +1252,6 @@
                                                    chip);
 }
 
-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-                                   evtchn_port_t remote_port)
-{
-        return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
-                                                   &xen_dynamic_chip);
-}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
-
 int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
                                            evtchn_port_t remote_port)
 {
@@ -1209,10 +1260,11 @@
 }
 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
 
-static int find_virq(unsigned int virq, unsigned int cpu)
+static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
 {
         struct evtchn_status status;
-        int port, rc = -ENOENT;
+        evtchn_port_t port;
+        int rc = -ENOENT;
 
         memset(&status, 0, sizeof(status));
         for (port = 0; port < xen_evtchn_max_channels(); port++) {
@@ -1224,7 +1276,7 @@
                 if (status.status != EVTCHNSTAT_virq)
                         continue;
                 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
-                        rc = port;
+                        *evtchn = port;
                         break;
                 }
         }
@@ -1247,7 +1299,8 @@
 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
         struct evtchn_bind_virq bind_virq;
-        int evtchn, irq, ret;
+        evtchn_port_t evtchn = 0;
+        int irq, ret;
 
         mutex_lock(&irq_mapping_update_lock);
 
@@ -1273,9 +1326,8 @@
                 evtchn = bind_virq.port;
         else {
                 if (ret == -EEXIST)
-                        ret = find_virq(virq, cpu);
+                        ret = find_virq(virq, cpu, &evtchn);
                 BUG_ON(ret < 0);
-                evtchn = ret;
         }
 
         ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
@@ -1367,19 +1419,6 @@
         return irq;
 }
 
-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-                                          evtchn_port_t remote_port,
-                                          irq_handler_t handler,
-                                          unsigned long irqflags,
-                                          const char *devname,
-                                          void *dev_id)
-{
-        return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
-                                remote_port, handler, irqflags, devname,
-                                dev_id, &xen_dynamic_chip);
-}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
-
 int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
                                                   evtchn_port_t remote_port,
                                                   irq_handler_t handler,
@@ -1463,7 +1502,7 @@
 }
 EXPORT_SYMBOL_GPL(xen_set_irq_priority);
 
-int evtchn_make_refcounted(unsigned int evtchn)
+int evtchn_make_refcounted(evtchn_port_t evtchn)
 {
         int irq = get_evtchn_to_irq(evtchn);
         struct irq_info *info;
@@ -1484,7 +1523,7 @@
 }
 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
 
-int evtchn_get(unsigned int evtchn)
+int evtchn_get(evtchn_port_t evtchn)
 {
         int irq;
         struct irq_info *info;
@@ -1517,7 +1556,7 @@
 }
 EXPORT_SYMBOL_GPL(evtchn_get);
 
-void evtchn_put(unsigned int evtchn)
+void evtchn_put(evtchn_port_t evtchn)
 {
         int irq = get_evtchn_to_irq(evtchn);
         if (WARN_ON(irq == -1))
@@ -1594,13 +1633,10 @@
         generic_handle_irq(irq);
 }
 
-static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-
 static void __xen_evtchn_do_upcall(void)
 {
         struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-        int cpu = get_cpu();
-        unsigned count;
+        int cpu = smp_processor_id();
         struct evtchn_loop_ctrl ctrl = { 0 };
 
         read_lock(&evtchn_rwlock);
@@ -1608,18 +1644,14 @@
         do {
                 vcpu_info->evtchn_upcall_pending = 0;
 
-                if (__this_cpu_inc_return(xed_nesting_count) - 1)
-                        goto out;
-
                 xen_evtchn_handle_events(cpu, &ctrl);
 
                 BUG_ON(!irqs_disabled());
 
-                count = __this_cpu_read(xed_nesting_count);
-                __this_cpu_write(xed_nesting_count, 0);
-        } while (count != 1 || vcpu_info->evtchn_upcall_pending);
+                virt_rmb(); /* Hypervisor can set upcall pending. */
 
-out:
+        } while (vcpu_info->evtchn_upcall_pending);
+
         read_unlock(&evtchn_rwlock);
 
         /*
@@ -1628,8 +1660,6 @@
          * above.
          */
         __this_cpu_inc(irq_epoch);
-
-        put_cpu();
 }
 
 void xen_evtchn_do_upcall(struct pt_regs *regs)
@@ -1637,9 +1667,6 @@
         struct pt_regs *old_regs = set_irq_regs(regs);
 
         irq_enter();
-#ifdef CONFIG_X86
-        inc_irq_stat(irq_hv_callback_count);
-#endif
 
         __xen_evtchn_do_upcall();
 
@@ -1654,7 +1681,7 @@
 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
 
 /* Rebind a new event channel to an existing irq. */
-void rebind_evtchn_irq(int evtchn, int irq)
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
 {
         struct irq_info *info = info_for_irq(irq);
 
@@ -1876,7 +1903,8 @@
 static void restore_cpu_virqs(unsigned int cpu)
 {
         struct evtchn_bind_virq bind_virq;
-        int virq, irq, evtchn;
+        evtchn_port_t evtchn;
+        int virq, irq;
 
         for (virq = 0; virq < NR_VIRQS; virq++) {
                 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
@@ -1901,7 +1929,8 @@
 static void restore_cpu_ipis(unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
-        int ipi, irq, evtchn;
+        evtchn_port_t evtchn;
+        int ipi, irq;
 
         for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
@@ -1934,7 +1963,7 @@
 EXPORT_SYMBOL(xen_clear_irq_pending);
 void xen_set_irq_pending(int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
 
         if (VALID_EVTCHN(evtchn))
                 set_evtchn(evtchn);
@@ -1942,7 +1971,7 @@
 
 bool xen_test_irq_pending(int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
         bool ret = false;
 
         if (VALID_EVTCHN(evtchn))
@@ -2078,30 +2107,34 @@
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
  * vcpu and we don't need PCI support or APIC interactions. */
-void xen_callback_vector(void)
+void xen_setup_callback_vector(void)
 {
-        int rc;
         uint64_t callback_via;
 
         if (xen_have_vector_callback) {
                 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
-                rc = xen_set_callback_via(callback_via);
-                if (rc) {
+                if (xen_set_callback_via(callback_via)) {
                         pr_err("Request for Xen HVM callback vector failed\n");
                         xen_have_vector_callback = 0;
-                        return;
                 }
-                pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
-                alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
-                                xen_hvm_callback_vector);
         }
 }
+
+static __init void xen_alloc_callback_vector(void)
+{
+        if (!xen_have_vector_callback)
+                return;
+
+        pr_info("Xen HVM callback vector for event delivery is enabled\n");
+        alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
+}
 #else
-void xen_callback_vector(void) {}
+void xen_setup_callback_vector(void) {}
+static inline void xen_alloc_callback_vector(void) {}
 #endif
 
-static bool fifo_events = true;
-module_param(fifo_events, bool, 0);
+bool xen_fifo_events = true;
+module_param_named(fifo_events, xen_fifo_events, bool, 0);
 
 static int xen_evtchn_cpu_prepare(unsigned int cpu)
 {
@@ -2128,12 +2161,14 @@
 void __init xen_init_IRQ(void)
 {
         int ret = -EINVAL;
-        unsigned int evtchn;
+        evtchn_port_t evtchn;
 
-        if (fifo_events)
+        if (xen_fifo_events)
                 ret = xen_evtchn_fifo_init();
-        if (ret < 0)
+        if (ret < 0) {
                 xen_evtchn_2l_init();
+                xen_fifo_events = false;
+        }
 
         xen_cpu_init_eoi(smp_processor_id());
 
@@ -2153,12 +2188,13 @@
 
 #ifdef CONFIG_X86
         if (xen_pv_domain()) {
-                irq_ctx_init(smp_processor_id());
                 if (xen_initial_domain())
                         pci_xen_initial_domain();
         }
-        if (xen_feature(XENFEAT_hvm_callback_vector))
-                xen_callback_vector();
+        if (xen_feature(XENFEAT_hvm_callback_vector)) {
+                xen_setup_callback_vector();
+                xen_alloc_callback_vector();
+        }
 
         if (xen_hvm_domain()) {
                 native_init_IRQ();
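
A note on one recurring pattern in the hunks above: find_virq() no longer folds the found event channel and the error status into a single int return; the port now travels back through the new evtchn_port_t *evtchn out-parameter while the return value carries only success or -ENOENT, which is why bind_virq_to_irq() can drop the old "evtchn = ret;" line. The following standalone sketch illustrates that out-parameter convention in isolation; it is not kernel code, and the names port_t and lookup_port are invented for the example.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t port_t;        /* stand-in for evtchn_port_t */

/*
 * Return 0 on success and hand the result back through an out-parameter,
 * instead of multiplexing "port number or negative errno" into one int.
 */
static int lookup_port(unsigned int key, port_t *out)
{
        if (key >= 16)
                return -1;      /* not found; *out is left untouched */
        *out = (port_t)(100 + key);
        return 0;
}

int main(void)
{
        port_t port = 0;

        if (lookup_port(3, &port) == 0)
                printf("found port %u\n", (unsigned int)port);  /* prints: found port 103 */
        else
                printf("lookup failed\n");
        return 0;
}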