...
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Xen event channels
  *
...
 #include <linux/irq.h>
 #include <linux/moduleparam.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/irqnr.h>
 #include <linux/pci.h>
...
 #ifdef CONFIG_X86
 #include <asm/desc.h>
 #include <asm/ptrace.h>
+#include <asm/idtentry.h>
 #include <asm/irq.h>
 #include <asm/io_apic.h>
 #include <asm/i8259.h>
...

 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "xen."
+
+/* Interrupt types. */
+enum xen_irq_type {
+        IRQT_UNBOUND = 0,
+        IRQT_PIRQ,
+        IRQT_VIRQ,
+        IRQT_IPI,
+        IRQT_EVTCHN
+};
+
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM
+ *           guest, or GSI (real passthrough IRQ) of the device.
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info {
+        struct list_head list;
+        struct list_head eoi_list;
+        short refcnt;
+        short spurious_cnt;
+        short type;             /* type */
+        u8 mask_reason;         /* Why is event channel masked */
+#define EVT_MASK_REASON_EXPLICIT        0x01
+#define EVT_MASK_REASON_TEMPORARY       0x02
+#define EVT_MASK_REASON_EOI_PENDING     0x04
+        u8 is_active;           /* Is event just being handled? */
+        unsigned irq;
+        evtchn_port_t evtchn;   /* event channel */
+        unsigned short cpu;     /* cpu bound */
+        unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
+        unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+        u64 eoi_time;           /* Time in jiffies when to EOI. */
+        raw_spinlock_t lock;
+
+        union {
+                unsigned short virq;
+                enum ipi_vector ipi;
+                struct {
+                        unsigned short pirq;
+                        unsigned short gsi;
+                        unsigned char vector;
+                        unsigned char flags;
+                        uint16_t domid;
+                } pirq;
+        } u;
+};
+
+#define PIRQ_NEEDS_EOI          (1 << 0)
+#define PIRQ_SHAREABLE          (1 << 1)
+#define PIRQ_MSI_GROUP          (1 << 2)

 static uint __read_mostly event_loop_timeout = 2;
 module_param(event_loop_timeout, uint, 0644);
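The hunk above moves struct irq_info out of the internal header and into events_base.c, and adds the mask_reason bitmask, the is_active flag and a per-channel raw spinlock. As a rough illustration of how such a reason bitmask is typically driven (a sketch only: the sketch_* helpers below are hypothetical and not part of this diff, and the existing mask_evtchn()/unmask_evtchn() per-port primitives are assumed), masking becomes reference-style per reason, and the channel is only really unmasked once every reason bit has been cleared:

    static void sketch_do_mask(struct irq_info *info, u8 reason)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&info->lock, flags);
            if (!info->mask_reason)
                    mask_evtchn(info->evtchn);      /* first reason: really mask */
            info->mask_reason |= reason;
            raw_spin_unlock_irqrestore(&info->lock, flags);
    }

    static void sketch_do_unmask(struct irq_info *info, u8 reason)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&info->lock, flags);
            info->mask_reason &= ~reason;
            if (!info->mask_reason)
                    unmask_evtchn(info->evtchn);    /* last reason gone: unmask */
            raw_spin_unlock_irqrestore(&info->lock, flags);
    }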
...
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

-int **evtchn_to_irq;
+static int **evtchn_to_irq;
 #ifdef CONFIG_X86
 static unsigned long *pirq_eoi_map;
 #endif
...
         }
 }

-static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
 {
         unsigned row;
         unsigned col;
...
         return 0;
 }

-int get_evtchn_to_irq(unsigned evtchn)
+int get_evtchn_to_irq(evtchn_port_t evtchn)
 {
         if (evtchn >= xen_evtchn_max_channels())
                 return -1;
...
 }

 /* Get info for IRQ */
-struct irq_info *info_for_irq(unsigned irq)
+static struct irq_info *info_for_irq(unsigned irq)
 {
         if (irq < nr_legacy_irqs())
                 return legacy_info_ptrs[irq];
...
 static int xen_irq_info_common_setup(struct irq_info *info,
                                      unsigned irq,
                                      enum xen_irq_type type,
-                                     unsigned evtchn,
+                                     evtchn_port_t evtchn,
                                      unsigned short cpu)
 {
         int ret;
...

         irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

-        return xen_evtchn_port_setup(info);
+        return xen_evtchn_port_setup(evtchn);
 }

 static int xen_irq_info_evtchn_setup(unsigned irq,
-                                     unsigned evtchn)
+                                     evtchn_port_t evtchn)
 {
         struct irq_info *info = info_for_irq(irq);

...

 static int xen_irq_info_ipi_setup(unsigned cpu,
                                   unsigned irq,
-                                  unsigned evtchn,
+                                  evtchn_port_t evtchn,
                                   enum ipi_vector ipi)
 {
         struct irq_info *info = info_for_irq(irq);
...

 static int xen_irq_info_virq_setup(unsigned cpu,
                                    unsigned irq,
-                                   unsigned evtchn,
+                                   evtchn_port_t evtchn,
                                    unsigned virq)
 {
         struct irq_info *info = info_for_irq(irq);
...
 }

 static int xen_irq_info_pirq_setup(unsigned irq,
-                                   unsigned evtchn,
+                                   evtchn_port_t evtchn,
                                    unsigned pirq,
                                    unsigned gsi,
                                    uint16_t domid,
...
 /*
  * Accessors for packed IRQ information.
  */
-unsigned int evtchn_from_irq(unsigned irq)
+evtchn_port_t evtchn_from_irq(unsigned irq)
 {
         const struct irq_info *info = NULL;

...
         return info->evtchn;
 }

-unsigned irq_from_evtchn(unsigned int evtchn)
+unsigned int irq_from_evtchn(evtchn_port_t evtchn)
 {
         return get_evtchn_to_irq(evtchn);
 }
...
         return info_for_irq(irq)->type;
 }

-unsigned cpu_from_irq(unsigned irq)
+static unsigned cpu_from_irq(unsigned irq)
 {
         return info_for_irq(irq)->cpu;
 }

-unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
 {
         int irq = get_evtchn_to_irq(evtchn);
         unsigned ret = 0;
...
         return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }

-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
 {
-        int irq = get_evtchn_to_irq(chn);
+        int irq = get_evtchn_to_irq(evtchn);
         struct irq_info *info = info_for_irq(irq);

         BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
         cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
 #endif
-        xen_evtchn_port_bind_to_cpu(info, cpu);
+        xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);

         info->cpu = cpu;
 }
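In the hunk above the per-ABI hook xen_evtchn_port_bind_to_cpu() stops taking a struct irq_info pointer and instead receives the raw event channel, the new CPU and the CPU the channel was previously bound to. A sketch of what a 2-level style backend could do with the extra argument (names and the per-CPU bitmap below are hypothetical, illustration only; the real hook implementations live in the per-ABI code such as events_2l.c):

    /* Hypothetical per-CPU bitmap, for illustration only. */
    static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(EVTCHN_2L_NR_CHANNELS)],
                          example_evtchn_mask);

    static void example_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
                                    unsigned int old_cpu)
    {
            /* Drop the port from the CPU it used to be accounted to... */
            clear_bit(evtchn, per_cpu(example_evtchn_mask, old_cpu));
            /* ...and charge it to the new one. */
            set_bit(evtchn, per_cpu(example_evtchn_mask, cpu));
    }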
...
  */
 void notify_remote_via_irq(int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);

         if (VALID_EVTCHN(evtchn))
                 notify_remote_via_evtchn(evtchn);
...
         irq_free_desc(irq);
 }

-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(evtchn_port_t port)
 {
         struct evtchn_close close;

...
 static void eoi_pirq(struct irq_data *data)
 {
         struct irq_info *info = info_for_irq(data->irq);
-        int evtchn = info ? info->evtchn : 0;
+        evtchn_port_t evtchn = info ? info->evtchn : 0;
         struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
         int rc = 0;

...
 {
         struct evtchn_bind_pirq bind_pirq;
         struct irq_info *info = info_for_irq(irq);
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
         int rc;

         BUG_ON(info->type != IRQT_PIRQ);
...
         info->evtchn = evtchn;
         bind_evtchn_to_cpu(evtchn, 0);

-        rc = xen_evtchn_port_setup(info);
+        rc = xen_evtchn_port_setup(evtchn);
         if (rc)
                 goto err;

...
 {
         unsigned int irq = data->irq;
         struct irq_info *info = info_for_irq(irq);
-        unsigned evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);

         BUG_ON(info->type != IRQT_PIRQ);

...

 static void __unbind_from_irq(unsigned int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
         struct irq_info *info = info_for_irq(irq);

         if (info->refcnt > 0) {
...
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
-        int evtchn, irq;
-        int ret;
+        evtchn_port_t evtchn;
+        int ret, irq;

         mutex_lock(&irq_mapping_update_lock);

...
                                              chip);
 }

-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-                                   evtchn_port_t remote_port)
-{
-        return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
-                                                   &xen_dynamic_chip);
-}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
-
 int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
                                            evtchn_port_t remote_port)
 {
...
 }
 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);

-static int find_virq(unsigned int virq, unsigned int cpu)
+static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
 {
         struct evtchn_status status;
-        int port, rc = -ENOENT;
+        evtchn_port_t port;
+        int rc = -ENOENT;

         memset(&status, 0, sizeof(status));
         for (port = 0; port < xen_evtchn_max_channels(); port++) {
...
                 if (status.status != EVTCHNSTAT_virq)
                         continue;
                 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
-                        rc = port;
+                        *evtchn = port;
                         break;
                 }
         }
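find_virq() now reports the port through an out parameter and keeps its return value as a pure status code, which is what lets bind_virq_to_irq() in the next hunk drop the old evtchn = ret assignment. A minimal sketch of the new calling convention (mirroring that caller; illustration only, with virq and cpu taken from the surrounding code):

    evtchn_port_t evtchn = 0;
    int ret;

    ret = find_virq(virq, cpu, &evtchn);
    BUG_ON(ret < 0);        /* negative means the lookup/hypercall failed */
    /* On success the port is in evtchn; ret no longer doubles as the port. */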
...
 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
         struct evtchn_bind_virq bind_virq;
-        int evtchn, irq, ret;
+        evtchn_port_t evtchn = 0;
+        int irq, ret;

         mutex_lock(&irq_mapping_update_lock);

...
                 evtchn = bind_virq.port;
         else {
                 if (ret == -EEXIST)
-                        ret = find_virq(virq, cpu);
+                        ret = find_virq(virq, cpu, &evtchn);
                 BUG_ON(ret < 0);
-                evtchn = ret;
         }

         ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
...
         return irq;
 }

-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-                                          evtchn_port_t remote_port,
-                                          irq_handler_t handler,
-                                          unsigned long irqflags,
-                                          const char *devname,
-                                          void *dev_id)
-{
-        return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
-                        remote_port, handler, irqflags, devname,
-                        dev_id, &xen_dynamic_chip);
-}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
-
 int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
                                                   evtchn_port_t remote_port,
                                                   irq_handler_t handler,
...
 }
 EXPORT_SYMBOL_GPL(xen_set_irq_priority);

-int evtchn_make_refcounted(unsigned int evtchn)
+int evtchn_make_refcounted(evtchn_port_t evtchn)
 {
         int irq = get_evtchn_to_irq(evtchn);
         struct irq_info *info;
...
 }
 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

-int evtchn_get(unsigned int evtchn)
+int evtchn_get(evtchn_port_t evtchn)
 {
         int irq;
         struct irq_info *info;
...
 }
 EXPORT_SYMBOL_GPL(evtchn_get);

-void evtchn_put(unsigned int evtchn)
+void evtchn_put(evtchn_port_t evtchn)
 {
         int irq = get_evtchn_to_irq(evtchn);
         if (WARN_ON(irq == -1))
...
         generic_handle_irq(irq);
 }

-static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-
 static void __xen_evtchn_do_upcall(void)
 {
         struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-        int cpu = get_cpu();
-        unsigned count;
+        int cpu = smp_processor_id();
         struct evtchn_loop_ctrl ctrl = { 0 };

         read_lock(&evtchn_rwlock);
...
         do {
                 vcpu_info->evtchn_upcall_pending = 0;

-                if (__this_cpu_inc_return(xed_nesting_count) - 1)
-                        goto out;
-
                 xen_evtchn_handle_events(cpu, &ctrl);

                 BUG_ON(!irqs_disabled());

-                count = __this_cpu_read(xed_nesting_count);
-                __this_cpu_write(xed_nesting_count, 0);
-        } while (count != 1 || vcpu_info->evtchn_upcall_pending);
+                virt_rmb(); /* Hypervisor can set upcall pending. */

-out:
+        } while (vcpu_info->evtchn_upcall_pending);
+
         read_unlock(&evtchn_rwlock);

         /*
...
          * above.
          */
         __this_cpu_inc(irq_epoch);
-
-        put_cpu();
 }

 void xen_evtchn_do_upcall(struct pt_regs *regs)
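With the xed_nesting_count bookkeeping gone, the upcall loop above reduces to: clear evtchn_upcall_pending, process the pending events, then re-read the flag after a read barrier, since the hypervisor may set it again at any point while events are being handled. The handler runs with interrupts disabled on the local CPU (see the BUG_ON), which is also why get_cpu()/put_cpu() can become a plain smp_processor_id(). The shape of the loop, reduced to a standalone sketch with a hypothetical event walker:

    extern void handle_pending_events(void);        /* hypothetical */

    static void consume_events(volatile u8 *pending)
    {
            do {
                    *pending = 0;                   /* claim the current batch */
                    handle_pending_events();        /* walk and dispatch ports */
                    virt_rmb();                     /* observe flags the producer
                                                       set while we were handling */
            } while (*pending);
    }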
...
         struct pt_regs *old_regs = set_irq_regs(regs);

         irq_enter();
-#ifdef CONFIG_X86
-        inc_irq_stat(irq_hv_callback_count);
-#endif

         __xen_evtchn_do_upcall();

...
 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

 /* Rebind a new event channel to an existing irq. */
-void rebind_evtchn_irq(int evtchn, int irq)
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
 {
         struct irq_info *info = info_for_irq(irq);

...
 static void restore_cpu_virqs(unsigned int cpu)
 {
         struct evtchn_bind_virq bind_virq;
-        int virq, irq, evtchn;
+        evtchn_port_t evtchn;
+        int virq, irq;

         for (virq = 0; virq < NR_VIRQS; virq++) {
                 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
...
 static void restore_cpu_ipis(unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
-        int ipi, irq, evtchn;
+        evtchn_port_t evtchn;
+        int ipi, irq;

         for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
...
 EXPORT_SYMBOL(xen_clear_irq_pending);
 void xen_set_irq_pending(int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);

         if (VALID_EVTCHN(evtchn))
                 set_evtchn(evtchn);
...

 bool xen_test_irq_pending(int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = evtchn_from_irq(irq);
         bool ret = false;

         if (VALID_EVTCHN(evtchn))
...
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
  * vcpu and we don't need PCI support or APIC interactions. */
-void xen_callback_vector(void)
+void xen_setup_callback_vector(void)
 {
-        int rc;
         uint64_t callback_via;

         if (xen_have_vector_callback) {
                 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
-                rc = xen_set_callback_via(callback_via);
-                if (rc) {
+                if (xen_set_callback_via(callback_via)) {
                         pr_err("Request for Xen HVM callback vector failed\n");
                         xen_have_vector_callback = 0;
-                        return;
                 }
-                pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
-                alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
-                                xen_hvm_callback_vector);
         }
 }
+
+static __init void xen_alloc_callback_vector(void)
+{
+        if (!xen_have_vector_callback)
+                return;
+
+        pr_info("Xen HVM callback vector for event delivery is enabled\n");
+        alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
+}
 #else
-void xen_callback_vector(void) {}
+void xen_setup_callback_vector(void) {}
+static inline void xen_alloc_callback_vector(void) {}
 #endif

-static bool fifo_events = true;
-module_param(fifo_events, bool, 0);
+bool xen_fifo_events = true;
+module_param_named(fifo_events, xen_fifo_events, bool, 0);

 static int xen_evtchn_cpu_prepare(unsigned int cpu)
 {
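Two things happen in the hunk above. First, the old xen_callback_vector() is split: xen_setup_callback_vector() only registers the vector with the hypervisor through xen_set_callback_via(), while the new __init xen_alloc_callback_vector() installs the IDT entry, now via the idtentry machinery (hence the asm_sysvec_xen_hvm_callback stub and the <asm/idtentry.h> include added at the top of the file); keeping them separate presumably allows the hypervisor registration to be repeated later without touching the IDT again. Second, the fifo_events module parameter becomes the global xen_fifo_events and is cleared when FIFO initialization falls back to the 2-level ABI, so other code can test which ABI actually ended up in use, roughly like this (hypothetical caller, sketch only):

    extern bool xen_fifo_events;

    static void report_evtchn_abi(void)
    {
            if (xen_fifo_events)
                    pr_info("event channels: FIFO ABI\n");
            else
                    pr_info("event channels: 2-level ABI\n");
    }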
...
 void __init xen_init_IRQ(void)
 {
         int ret = -EINVAL;
-        unsigned int evtchn;
+        evtchn_port_t evtchn;

-        if (fifo_events)
+        if (xen_fifo_events)
                 ret = xen_evtchn_fifo_init();
-        if (ret < 0)
+        if (ret < 0) {
                 xen_evtchn_2l_init();
+                xen_fifo_events = false;
+        }

         xen_cpu_init_eoi(smp_processor_id());

...

 #ifdef CONFIG_X86
         if (xen_pv_domain()) {
-                irq_ctx_init(smp_processor_id());
                 if (xen_initial_domain())
                         pci_xen_initial_domain();
         }
-        if (xen_feature(XENFEAT_hvm_callback_vector))
-                xen_callback_vector();
+        if (xen_feature(XENFEAT_hvm_callback_vector)) {
+                xen_setup_callback_vector();
+                xen_alloc_callback_vector();
+        }

         if (xen_hvm_domain()) {
                 native_init_IRQ();
---|