+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
  */

 #define pr_fmt(fmt) "xive-kvm: " fmt
...
 #define XIVE_Q_GAP	2

 /*
+ * Push a vcpu's context to the XIVE on guest entry.
+ * This assumes we are in virtual mode (MMU on)
+ */
+void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
+{
+	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+	u64 pq;
+
+	/*
+	 * Nothing to do if the platform doesn't have a XIVE
+	 * or this vCPU doesn't have its own XIVE context
+	 * (e.g. because it's not using an in-kernel interrupt controller).
+	 */
+	if (!tima || !vcpu->arch.xive_cam_word)
+		return;
+
+	eieio();
+	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
+	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
+	vcpu->arch.xive_pushed = 1;
+	eieio();
+
+	/*
+	 * We clear the irq_pending flag. There is a small chance of a
+	 * race vs. the escalation interrupt happening on another
+	 * processor setting it again, but the only consequence is to
+	 * cause a spurious wakeup on the next H_CEDE, which is not an
+	 * issue.
+	 */
+	vcpu->arch.irq_pending = 0;
+
+	/*
+	 * In single escalation mode, if the escalation interrupt is
+	 * on, we mask it.
+	 */
+	if (vcpu->arch.xive_esc_on) {
+		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
+						  XIVE_ESB_SET_PQ_01));
+		mb();
+
+		/*
+		 * We have a possible subtle race here: The escalation
+		 * interrupt might have fired and be on its way to the
+		 * host queue while we mask it, and if we unmask it
+		 * early enough (re-cede right away), there is a
+		 * theoretical possibility that it fires again, thus
+		 * landing in the target queue more than once which is
+		 * a big no-no.
+		 *
+		 * Fortunately, solving this is rather easy. If the
+		 * above load setting PQ to 01 returns a previous
+		 * value where P is set, then we know the escalation
+		 * interrupt is somewhere on its way to the host. In
+		 * that case we simply don't clear the xive_esc_on
+		 * flag below. It will be eventually cleared by the
+		 * handler for the escalation interrupt.
+		 *
+		 * Then, when doing a cede, we check that flag again
+		 * before re-enabling the escalation interrupt, and if
+		 * set, we abort the cede.
+		 */
+		if (!(pq & XIVE_ESB_VAL_P))
+			/* Now P is 0, we can clear the flag */
+			vcpu->arch.xive_esc_on = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
+
+/*
  * This is a simple trigger for a generic XIVE IRQ. This must
  * only be called for interrupts that support a trigger page
  */
...
 	 */
 	vcpu->arch.xive_esc_on = false;

+	/* This orders xive_esc_on = false vs. subsequent stale_p = true */
+	smp_wmb();	/* goes with smp_mb() in cleanup_single_escalation */
+
 	return IRQ_HANDLED;
 }

-static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
+int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
+				  bool single_escalation)
 {
 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 	struct xive_q *q = &xc->queues[prio];
...
 		return -EIO;
 	}

-	if (xc->xive->single_escalation)
+	if (single_escalation)
 		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
 				 vcpu->kvm->arch.lpid, xc->server_num);
 	else
...
 	 * interrupt, thus leaving it effectively masked after
 	 * it fires once.
 	 */
-	if (xc->xive->single_escalation) {
+	if (single_escalation) {
 		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
 		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

...
 	return rc;
 }

-/* Called with kvm_lock held */
+/* Called with xive->lock held */
 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
 {
 	struct kvmppc_xive *xive = kvm->arch.xive;
 	struct kvm_vcpu *vcpu;
 	int i, rc;

-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&xive->lock);

 	/* Already provisioned ? */
 	if (xive->qmap & (1 << prio))
...
 			continue;
 		rc = xive_provision_queue(vcpu, prio);
 		if (rc == 0 && !xive->single_escalation)
-			xive_attach_escalation(vcpu, prio);
+			kvmppc_xive_attach_escalation(vcpu, prio,
+						      xive->single_escalation);
 		if (rc)
 			return rc;
 	}
...
 	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
 }

-static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
+int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
 {
 	struct kvm_vcpu *vcpu;
 	int i, rc;
...

 	/* No available target ! */
 	return -EBUSY;
-}
-
-static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
-{
-	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
 }

 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
...
 	 */
 	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 		xive_native_configure_irq(hw_num,
-					  xive_vp(xive, state->act_server),
-					  MASKED, state->number);
+					  kvmppc_xive_vp(xive, state->act_server),
+					  MASKED, state->number);
 		/* set old_p so we can track if an H_EOI was done */
 		state->old_p = true;
 		state->old_q = false;
...
 	kvmppc_xive_select_irq(state, &hw_num, &xd);

 	/*
-	 * See command in xive_lock_and_mask() concerning masking
+	 * See comment in xive_lock_and_mask() concerning masking
 	 * via firmware.
 	 */
 	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 		xive_native_configure_irq(hw_num,
-					  xive_vp(xive, state->act_server),
-					  state->act_priority, state->number);
+					  kvmppc_xive_vp(xive, state->act_server),
+					  state->act_priority, state->number);
 		/* If an EOI is needed, do it here */
 		if (!state->old_p)
 			xive_vm_source_eoi(hw_num, xd);
...
 	 * priority. The count for that new target will have
 	 * already been incremented.
 	 */
-	rc = xive_select_target(kvm, &server, prio);
+	rc = kvmppc_xive_select_target(kvm, &server, prio);

 	/*
 	 * We failed to find a target ? Not much we can do
...
 	kvmppc_xive_select_irq(state, &hw_num, NULL);

 	return xive_native_configure_irq(hw_num,
-					 xive_vp(xive, server),
+					 kvmppc_xive_vp(xive, server),
 					 prio, state->number);
 }

...
 		 irq, server, priority);

 	/* First, check provisioning of queues */
-	if (priority != MASKED)
+	if (priority != MASKED) {
+		mutex_lock(&xive->lock);
 		rc = xive_check_provisioning(xive->kvm,
 			      xive_prio_from_guest(priority));
+		mutex_unlock(&xive->lock);
+	}
 	if (rc) {
 		pr_devel(" provisioning failure %d !\n", rc);
 		return rc;
...

 	/*
 	 * We can't update the state of a "pushed" VCPU, but that
-	 * shouldn't happen.
+	 * shouldn't happen because the vcpu->mutex makes running a
+	 * vcpu mutually exclusive with doing one_reg get/set on it.
 	 */
 	if (WARN_ON(vcpu->arch.xive_pushed))
 		return -EIO;
...
 	/* Turn the IPI hard off */
 	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

+	/*
+	 * Reset ESB guest mapping. Needed when ESB pages are exposed
+	 * to the guest in XIVE native mode
+	 */
+	if (xive->ops && xive->ops->reset_mapped)
+		xive->ops->reset_mapped(kvm, guest_irq);
+
 	/* Grab info about irq */
 	state->pt_number = hw_irq;
 	state->pt_data = irq_data_get_irq_handler_data(host_data);
...
 	 * which is fine for a never started interrupt.
 	 */
 	xive_native_configure_irq(hw_irq,
-				  xive_vp(xive, state->act_server),
+				  kvmppc_xive_vp(xive, state->act_server),
 				  state->act_priority, state->number);

 	/*
...
 	state->pt_number = 0;
 	state->pt_data = NULL;

+	/*
+	 * Reset ESB guest mapping. Needed when ESB pages are exposed
+	 * to the guest in XIVE native mode
+	 */
+	if (xive->ops && xive->ops->reset_mapped) {
+		xive->ops->reset_mapped(kvm, guest_irq);
+	}
+
 	/* Reconfigure the IPI */
 	xive_native_configure_irq(state->ipi_number,
-				  xive_vp(xive, state->act_server),
+				  kvmppc_xive_vp(xive, state->act_server),
 				  state->act_priority, state->number);

 	/*
...
 }
 EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

-static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 	struct kvm *kvm = vcpu->kvm;
...
 			arch_spin_unlock(&sb->lock);
 		}
 	}
+
+	/* Disable vcpu's escalation interrupt */
+	if (vcpu->arch.xive_esc_on) {
+		__raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
+					     XIVE_ESB_SET_PQ_01));
+		vcpu->arch.xive_esc_on = false;
+	}
+
+	/*
+	 * Clear pointers to escalation interrupt ESB.
+	 * This is safe because the vcpu->mutex is held, preventing
+	 * any other CPU from concurrently executing a KVM_RUN ioctl.
+	 */
+	vcpu->arch.xive_esc_vaddr = 0;
+	vcpu->arch.xive_esc_raddr = 0;
+}
+
+/*
+ * In single escalation mode, the escalation interrupt is marked so
+ * that EOI doesn't re-enable it, but just sets the stale_p flag to
+ * indicate that the P bit has already been dealt with. However, the
+ * assembly code that enters the guest sets PQ to 00 without clearing
+ * stale_p (because it has no easy way to address it). Hence we have
+ * to adjust stale_p before shutting down the interrupt.
+ */
+void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
+				    struct kvmppc_xive_vcpu *xc, int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+
+	/*
+	 * This slightly odd sequence gives the right result
+	 * (i.e. stale_p set if xive_esc_on is false) even if
+	 * we race with xive_esc_irq() and xive_irq_eoi().
+	 */
+	xd->stale_p = false;
+	smp_mb();	/* paired with smp_wmb in xive_esc_irq */
+	if (!vcpu->arch.xive_esc_on)
+		xd->stale_p = true;
 }

 void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
-	struct kvmppc_xive *xive = xc->xive;
+	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
 	int i;
+
+	if (!kvmppc_xics_enabled(vcpu))
+		return;
+
+	if (!xc)
+		return;

 	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

...
 	/* Free escalations */
 	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
 		if (xc->esc_virq[i]) {
+			if (xc->xive->single_escalation)
+				xive_cleanup_single_escalation(vcpu, xc,
+							xc->esc_virq[i]);
 			free_irq(xc->esc_virq[i], vcpu);
 			irq_dispose_mapping(xc->esc_virq[i]);
 			kfree(xc->esc_virq_names[i]);
...

 	/* Disable the VP */
 	xive_native_disable_vp(xc->vp_id);
+
+	/* Clear the cam word so guest entry won't try to push context */
+	vcpu->arch.xive_cam_word = 0;

 	/* Free the queues */
 	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
...
 	}
 	/* Free the VP */
 	kfree(xc);
+
+	/* Cleanup the vcpu */
+	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+	vcpu->arch.xive_vcpu = NULL;
+}
+
+static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
+{
+	/* We have a block of xive->nr_servers VPs. We just need to check
+	 * packed vCPU ids are below that.
+	 */
+	return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
+}
+
+int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
+{
+	u32 vp_id;
+
+	if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
+		pr_devel("Out of bounds !\n");
+		return -EINVAL;
+	}
+
+	if (xive->vp_base == XIVE_INVALID_VP) {
+		xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
+		pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
+
+		if (xive->vp_base == XIVE_INVALID_VP)
+			return -ENOSPC;
+	}
+
+	vp_id = kvmppc_xive_vp(xive, cpu);
+	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+		pr_devel("Duplicate !\n");
+		return -EEXIST;
+	}
+
+	*vp = vp_id;
+
+	return 0;
 }

 int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
...
 	struct kvmppc_xive *xive = dev->private;
 	struct kvmppc_xive_vcpu *xc;
 	int i, r = -EBUSY;
+	u32 vp_id;

 	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

...
 	}
 	if (xive->kvm != vcpu->kvm)
 		return -EPERM;
-	if (vcpu->arch.irq_type)
+	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
 		return -EBUSY;
-	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
-		pr_devel("Duplicate !\n");
-		return -EEXIST;
-	}
-	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
-		pr_devel("Out of bounds !\n");
-		return -EINVAL;
-	}
-	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
-	if (!xc)
-		return -ENOMEM;

 	/* We need to synchronize with queue provisioning */
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&xive->lock);
+
+	r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
+	if (r)
+		goto bail;
+
+	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+	if (!xc) {
+		r = -ENOMEM;
+		goto bail;
+	}
+
 	vcpu->arch.xive_vcpu = xc;
 	xc->xive = xive;
 	xc->vcpu = vcpu;
 	xc->server_num = cpu;
-	xc->vp_id = xive_vp(xive, cpu);
+	xc->vp_id = vp_id;
 	xc->mfrr = 0xff;
 	xc->valid = true;

...
 		if (xive->qmap & (1 << i)) {
 			r = xive_provision_queue(vcpu, i);
 			if (r == 0 && !xive->single_escalation)
-				xive_attach_escalation(vcpu, i);
+				kvmppc_xive_attach_escalation(
+					vcpu, i, xive->single_escalation);
 			if (r)
 				goto bail;
 		} else {
...
 	}

 	/* If not done above, attach priority 0 escalation */
-	r = xive_attach_escalation(vcpu, 0);
+	r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
 	if (r)
 		goto bail;

...
 		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

 bail:
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&xive->lock);
 	if (r) {
 		kvmppc_xive_cleanup_vcpu(vcpu);
 		return r;
...
 	return 0;
 }

-static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
-							    int irq)
+struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
+	struct kvmppc_xive *xive, int irq)
 {
-	struct kvm *kvm = xive->kvm;
 	struct kvmppc_xive_src_block *sb;
 	int i, bid;

 	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&xive->lock);

 	/* block already exists - somebody else got here first */
 	if (xive->src_blocks[bid])
...

 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
 		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
+		sb->irq_state[i].eisn = 0;
 		sb->irq_state[i].guest_priority = MASKED;
 		sb->irq_state[i].saved_priority = MASKED;
 		sb->irq_state[i].act_priority = MASKED;
...
 		xive->max_sbid = bid;

 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&xive->lock);
 	return xive->src_blocks[bid];
 }

...
 	sb = kvmppc_xive_find_source(xive, irq, &idx);
 	if (!sb) {
 		pr_devel("No source, creating source block...\n");
-		sb = xive_create_src_block(xive, irq);
+		sb = kvmppc_xive_create_src_block(xive, irq);
 		if (!sb) {
 			pr_devel("Failed to create block...\n");
 			return -ENOMEM;
...
 	/* If we have a priority target the interrupt */
 	if (act_prio != MASKED) {
 		/* First, check provisioning of queues */
-		mutex_lock(&xive->kvm->lock);
+		mutex_lock(&xive->lock);
 		rc = xive_check_provisioning(xive->kvm, act_prio);
-		mutex_unlock(&xive->kvm->lock);
+		mutex_unlock(&xive->lock);

 		/* Target interrupt */
 		if (rc == 0)
...
 	return 0;
 }

+int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
+{
+	u32 __user *ubufp = (u32 __user *) addr;
+	u32 nr_servers;
+	int rc = 0;
+
+	if (get_user(nr_servers, ubufp))
+		return -EFAULT;
+
+	pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
+
+	if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
+		return -EINVAL;
+
+	mutex_lock(&xive->lock);
+	if (xive->vp_base != XIVE_INVALID_VP)
+		/* The VP block is allocated once and freed when the device
+		 * is released. Better not to allow changing its size since
+		 * it's used by connect_vcpu to validate that vCPU ids are
+		 * valid (e.g. setting it back to a higher value could allow
+		 * connect_vcpu to come up with a VP id that goes beyond the
+		 * VP block, which is likely to cause a crash in OPAL).
+		 */
+		rc = -EBUSY;
+	else if (nr_servers > KVM_MAX_VCPUS)
+		/* We don't need more servers. Higher vCPU ids get packed
+		 * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
+		 */
+		xive->nr_servers = KVM_MAX_VCPUS;
+	else
+		xive->nr_servers = nr_servers;
+
+	mutex_unlock(&xive->lock);
+
+	return rc;
+}
+
 static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
 	struct kvmppc_xive *xive = dev->private;
...
 	switch (attr->group) {
 	case KVM_DEV_XICS_GRP_SOURCES:
 		return xive_set_source(xive, attr->attr, attr->addr);
+	case KVM_DEV_XICS_GRP_CTRL:
+		switch (attr->attr) {
+		case KVM_DEV_XICS_NR_SERVERS:
+			return kvmppc_xive_set_nr_servers(xive, attr->addr);
+		}
 	}
 	return -ENXIO;
 }
...
 		    attr->attr < KVMPPC_XICS_NR_IRQS)
 			return 0;
 		break;
+	case KVM_DEV_XICS_GRP_CTRL:
+		switch (attr->attr) {
+		case KVM_DEV_XICS_NR_SERVERS:
+			return 0;
+		}
 	}
 	return -ENXIO;
 }
...
 	xive_native_configure_irq(hw_num, 0, MASKED, 0);
 }

-static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
+void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
 {
 	int i;

...
 	}
 }

-static void kvmppc_xive_free(struct kvm_device *dev)
+/*
+ * Called when device fd is closed. kvm->lock is held.
+ */
+static void kvmppc_xive_release(struct kvm_device *dev)
 {
 	struct kvmppc_xive *xive = dev->private;
 	struct kvm *kvm = xive->kvm;
+	struct kvm_vcpu *vcpu;
 	int i;
+
+	pr_devel("Releasing xive device\n");
+
+	/*
+	 * Since this is the device release function, we know that
+	 * userspace does not have any open fd referring to the
+	 * device. Therefore there can not be any of the device
+	 * attribute set/get functions being executed concurrently,
+	 * and similarly, the connect_vcpu and set/clr_mapped
+	 * functions also cannot be being executed.
+	 */

 	debugfs_remove(xive->dentry);

-	if (kvm)
-		kvm->arch.xive = NULL;
+	/*
+	 * We should clean up the vCPU interrupt presenters first.
+	 */
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		/*
+		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
+		 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
+		 * Holding the vcpu->mutex also means that the vcpu cannot
+		 * be executing the KVM_RUN ioctl, and therefore it cannot
+		 * be executing the XIVE push or pull code or accessing
+		 * the XIVE MMIO regions.
+		 */
+		mutex_lock(&vcpu->mutex);
+		kvmppc_xive_cleanup_vcpu(vcpu);
+		mutex_unlock(&vcpu->mutex);
+	}
+
+	/*
+	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
+	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
+	 * against xive code getting called during vcpu execution or
+	 * set/get one_reg operations.
+	 */
+	kvm->arch.xive = NULL;

 	/* Mask and free interrupts */
 	for (i = 0; i <= xive->max_sbid; i++) {
...
 	if (xive->vp_base != XIVE_INVALID_VP)
 		xive_native_free_vp_block(xive->vp_base);

+	/*
+	 * A reference of the kvmppc_xive pointer is now kept under
+	 * the xive_devices struct of the machine for reuse. It is
+	 * freed when the VM is destroyed for now until we fix all the
+	 * execution paths.
+	 */

-	kfree(xive);
 	kfree(dev);
 }

+/*
+ * When the guest chooses the interrupt mode (XICS legacy or XIVE
+ * native), the VM will switch KVM devices. The previous device will
+ * be "released" before the new one is created.
+ *
+ * Until we are sure all execution paths are well protected, provide a
+ * fail safe (transitional) method for device destruction, in which
+ * the XIVE device pointer is recycled and not directly freed.
+ */
+struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
+{
+	struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
+		&kvm->arch.xive_devices.native :
+		&kvm->arch.xive_devices.xics_on_xive;
+	struct kvmppc_xive *xive = *kvm_xive_device;
+
+	if (!xive) {
+		xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+		*kvm_xive_device = xive;
+	} else {
+		memset(xive, 0, sizeof(*xive));
+	}
+
+	return xive;
+}
+
+/*
+ * Create a XICS device with XIVE backend. kvm->lock is held.
+ */
 static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
 {
 	struct kvmppc_xive *xive;
 	struct kvm *kvm = dev->kvm;
-	int ret = 0;

 	pr_devel("Creating xive for partition\n");

-	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+	/* Already there ? */
+	if (kvm->arch.xive)
+		return -EEXIST;
+
+	xive = kvmppc_xive_get_device(kvm, type);
 	if (!xive)
 		return -ENOMEM;

 	dev->private = xive;
 	xive->dev = dev;
 	xive->kvm = kvm;
-
-	/* Already there ? */
-	if (kvm->arch.xive)
-		ret = -EEXIST;
-	else
-		kvm->arch.xive = xive;
+	mutex_init(&xive->lock);

 	/* We use the default queue size set by the host */
 	xive->q_order = xive_native_default_eq_shift();
...
 	else
 		xive->q_page_order = xive->q_order - PAGE_SHIFT;

-	/* Allocate a bunch of VPs */
-	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
-	pr_devel("VP_Base=%x\n", xive->vp_base);
-
-	if (xive->vp_base == XIVE_INVALID_VP)
-		ret = -ENOMEM;
+	/* VP allocation is delayed to the first call to connect_vcpu */
+	xive->vp_base = XIVE_INVALID_VP;
+	/* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
+	 * on a POWER9 system.
+	 */
+	xive->nr_servers = KVM_MAX_VCPUS;

 	xive->single_escalation = xive_native_has_single_escalation();

-	if (ret) {
-		kfree(xive);
-		return ret;
-	}
-
+	kvm->arch.xive = xive;
 	return 0;
 }

+int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+	unsigned int i;
+
+	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+		struct xive_q *q = &xc->queues[i];
+		u32 i0, i1, idx;
+
+		if (!q->qpage && !xc->esc_virq[i])
+			continue;
+
+		seq_printf(m, " [q%d]: ", i);
+
+		if (q->qpage) {
+			idx = q->idx;
+			i0 = be32_to_cpup(q->qpage + idx);
+			idx = (idx + 1) & q->msk;
+			i1 = be32_to_cpup(q->qpage + idx);
+			seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
+				   i0, i1);
+		}
+		if (xc->esc_virq[i]) {
+			struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
+			struct xive_irq_data *xd =
+				irq_data_get_irq_handler_data(d);
+			u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
+
+			seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
+				   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
+				   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
+				   xc->esc_virq[i], pq, xd->eoi_page);
+			seq_puts(m, "\n");
+		}
+	}
+	return 0;
+}

 static int xive_debug_show(struct seq_file *m, void *private)
 {
...

 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
-		unsigned int i;

 		if (!xc)
 			continue;

-		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
+		seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x"
 			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
-			   xc->server_num, xc->cppr, xc->hw_cppr,
+			   xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr,
 			   xc->mfrr, xc->pending,
 			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
-		for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
-			struct xive_q *q = &xc->queues[i];
-			u32 i0, i1, idx;

-			if (!q->qpage && !xc->esc_virq[i])
-				continue;
-
-			seq_printf(m, " [q%d]: ", i);
-
-			if (q->qpage) {
-				idx = q->idx;
-				i0 = be32_to_cpup(q->qpage + idx);
-				idx = (idx + 1) & q->msk;
-				i1 = be32_to_cpup(q->qpage + idx);
-				seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
-			}
-			if (xc->esc_virq[i]) {
-				struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
-				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
-				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
-				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
-					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
-					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
-					   xc->esc_virq[i], pq, xd->eoi_page);
-				seq_printf(m, "\n");
-			}
-		}
+		kvmppc_xive_debug_show_queues(m, vcpu);

 		t_rm_h_xirr += xc->stat_rm_h_xirr;
 		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
...
 	return 0;
 }

-static int xive_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, xive_debug_show, inode->i_private);
-}
-
-static const struct file_operations xive_debug_fops = {
-	.open = xive_debug_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(xive_debug);

 static void xive_debugfs_init(struct kvmppc_xive *xive)
 {
...
 	.name = "kvm-xive",
 	.create = kvmppc_xive_create,
 	.init = kvmppc_xive_init,
-	.destroy = kvmppc_xive_free,
+	.release = kvmppc_xive_release,
 	.set_attr = xive_set_attr,
 	.get_attr = xive_get_attr,
 	.has_attr = xive_has_attr,
---|