2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/arch/powerpc/include/asm/xive.h
+++ b/kernel/arch/powerpc/include/asm/xive.h
@@ -1,13 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Copyright 2016,2017 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 #ifndef _ASM_POWERPC_XIVE_H
 #define _ASM_POWERPC_XIVE_H
+
+#include <asm/opal-api.h>
 
 #define XIVE_INVALID_VP	0xffffffff
 
@@ -23,6 +21,7 @@
  * same offset regardless of where the code is executing
  */
 extern void __iomem *xive_tima;
+extern unsigned long xive_tima_os;
 
 /*
  * Offset in the TM area of our current execution level (provided by
@@ -49,7 +48,15 @@
 
 	/* Setup/used by frontend */
 	int target;
+	/*
+	 * saved_p means that there is a queue entry for this interrupt
+	 * in some CPU's queue (not including guest vcpu queues), even
+	 * if P is not set in the source ESB.
+	 * stale_p means that there is no queue entry for this interrupt
+	 * in some CPU's queue, even if P is set in the source ESB.
+	 */
 	bool saved_p;
+	bool stale_p;
 };
 #define XIVE_IRQ_FLAG_STORE_EOI	0x01
 #define XIVE_IRQ_FLAG_LSI	0x02
@@ -73,6 +80,8 @@
 	u32 esc_irq;
 	atomic_t count;
 	atomic_t pending_count;
+	u64 guest_qaddr;
+	u32 guest_qshift;
 };
 
 /* Global enable flags for the XIVE support */
@@ -80,40 +89,61 @@
 
 static inline bool xive_enabled(void) { return __xive_enabled; }
 
-extern bool xive_spapr_init(void);
-extern bool xive_native_init(void);
-extern void xive_smp_probe(void);
-extern int xive_smp_prepare_cpu(unsigned int cpu);
-extern void xive_smp_setup_cpu(void);
-extern void xive_smp_disable_cpu(void);
-extern void xive_teardown_cpu(void);
-extern void xive_shutdown(void);
-extern void xive_flush_interrupt(void);
+bool xive_spapr_init(void);
+bool xive_native_init(void);
+void xive_smp_probe(void);
+int xive_smp_prepare_cpu(unsigned int cpu);
+void xive_smp_setup_cpu(void);
+void xive_smp_disable_cpu(void);
+void xive_teardown_cpu(void);
+void xive_shutdown(void);
+void xive_flush_interrupt(void);
 
 /* xmon hook */
-extern void xmon_xive_do_dump(int cpu);
+void xmon_xive_do_dump(int cpu);
+int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
 
 /* APIs used by KVM */
-extern u32 xive_native_default_eq_shift(void);
-extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
-extern void xive_native_free_vp_block(u32 vp_base);
-extern int xive_native_populate_irq_data(u32 hw_irq,
-					 struct xive_irq_data *data);
-extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
-extern u32 xive_native_alloc_irq(void);
-extern void xive_native_free_irq(u32 irq);
-extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+u32 xive_native_default_eq_shift(void);
+u32 xive_native_alloc_vp_block(u32 max_vcpus);
+void xive_native_free_vp_block(u32 vp_base);
+int xive_native_populate_irq_data(u32 hw_irq,
+				  struct xive_irq_data *data);
+void xive_cleanup_irq_data(struct xive_irq_data *xd);
+void xive_native_free_irq(u32 irq);
+int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
 
-extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
-				       __be32 *qpage, u32 order, bool can_escalate);
-extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+				__be32 *qpage, u32 order, bool can_escalate);
+void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
 
-extern void xive_native_sync_source(u32 hw_irq);
-extern bool is_xive_irq(struct irq_chip *chip);
-extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
-extern int xive_native_disable_vp(u32 vp_id);
-extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
-extern bool xive_native_has_single_escalation(void);
+void xive_native_sync_source(u32 hw_irq);
+void xive_native_sync_queue(u32 hw_irq);
+bool is_xive_irq(struct irq_chip *chip);
+int xive_native_enable_vp(u32 vp_id, bool single_escalation);
+int xive_native_disable_vp(u32 vp_id);
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+bool xive_native_has_single_escalation(void);
+
+int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
+			       u64 *out_qpage,
+			       u64 *out_qsize,
+			       u64 *out_qeoi_page,
+			       u32 *out_escalate_irq,
+			       u64 *out_qflags);
+
+int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
+				u32 *qindex);
+int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
+				u32 qindex);
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+bool xive_native_has_queue_state_support(void);
+extern u32 xive_native_alloc_irq_on_chip(u32 chip_id);
+
+static inline u32 xive_native_alloc_irq(void)
+{
+	return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP);
+}
 
 #else
 
@@ -125,7 +155,6 @@
 static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
 static inline void xive_smp_setup_cpu(void) { }
 static inline void xive_smp_disable_cpu(void) { }
-static inline void xive_kexec_teardown_cpu(int secondary) { }
 static inline void xive_shutdown(void) { }
 static inline void xive_flush_interrupt(void) { }
 
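
Usage note (not part of the patch above): the allocation helpers introduced here pair as shown in the sketch below. The caller, the fallback logic, and the assumption that a zero return value signals allocation failure are illustrative guesses, not taken from the patch itself.

/* Hypothetical sketch of a native-XIVE caller using the new chip-aware
 * allocator. Per the inline added above, xive_native_alloc_irq() simply
 * forwards to xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP).
 */
#include <linux/errno.h>
#include <asm/xive.h>

static int example_alloc_source(u32 chip_id)
{
	u32 hw_irq;

	/* Prefer an interrupt on the requested chip; fall back to any chip.
	 * Assumption: a return of 0 means the allocation failed.
	 */
	hw_irq = xive_native_alloc_irq_on_chip(chip_id);
	if (!hw_irq)
		hw_irq = xive_native_alloc_irq();
	if (!hw_irq)
		return -ENXIO;

	/* ... configure the source with xive_native_configure_irq() ... */

	xive_native_free_irq(hw_irq);
	return 0;
}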