...
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  */
...
         struct work_struct work;
         u32 order;

-        /* These fields protected by kvm->lock */
+        /* These fields protected by kvm->arch.mmu_setup_lock */

         /* Possible values and their usage:
          *  <0 an error occurred during allocation,
.. | .. |
---|
73 | 62 | int error; |
---|
74 | 63 | |
---|
75 | 64 | /* Private to the work thread, until error != -EBUSY, |
---|
76 | | - * then protected by kvm->lock. |
---|
| 65 | + * then protected by kvm->arch.mmu_setup_lock. |
---|
77 | 66 | */ |
---|
78 | 67 | struct kvm_hpt_info hpt; |
---|
79 | 68 | }; |
---|
...
         long err = -EBUSY;
         struct kvm_hpt_info info;

-        mutex_lock(&kvm->lock);
+        mutex_lock(&kvm->arch.mmu_setup_lock);
         if (kvm->arch.mmu_ready) {
                 kvm->arch.mmu_ready = 0;
                 /* order mmu_ready vs. vcpus_running */
...
         /* Ensure that each vcpu will flush its TLB on next entry. */
         cpumask_setall(&kvm->arch.need_tlb_flush);

-        mutex_unlock(&kvm->lock);
+        mutex_unlock(&kvm->arch.mmu_setup_lock);
         return err;
 }

...
 {
         unsigned long host_lpid, rsvd_lpid;

-        if (!cpu_has_feature(CPU_FTR_HVMODE))
-                return -EINVAL;
-
         if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
                 return -EINVAL;

-        /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
-        host_lpid = mfspr(SPRN_LPID);
-        rsvd_lpid = LPID_RSVD;
+        host_lpid = 0;
+        if (cpu_has_feature(CPU_FTR_HVMODE))
+                host_lpid = mfspr(SPRN_LPID);
+
+        /* POWER8 and above have 12-bit LPIDs (10-bit in POWER7) */
+        if (cpu_has_feature(CPU_FTR_ARCH_207S))
+                rsvd_lpid = LPID_RSVD;
+        else
+                rsvd_lpid = LPID_RSVD_POWER7;

         kvmppc_init_lpid(rsvd_lpid + 1);

...
         return 0;
 }

-static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
-{
-        unsigned long msr = vcpu->arch.intr_msr;
-
-        /* If transactional, change to suspend mode on IRQ delivery */
-        if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
-                msr |= MSR_TS_S;
-        else
-                msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
-        kvmppc_set_msr(vcpu, msr);
-}
-
 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
                                        long pte_index, unsigned long pteh,
                                        unsigned long ptel, unsigned long *pte_idx_ret)
 {
         long ret;

-        /* Protect linux PTE lookup from page table destruction */
-        rcu_read_lock_sched();  /* this disables preemption too */
+        preempt_disable();
         ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
-                                current->mm->pgd, false, pte_idx_ret);
-        rcu_read_unlock_sched();
+                                kvm->mm->pgd, false, pte_idx_ret);
+        preempt_enable();
         if (ret == H_TOO_HARD) {
                 /* this can't happen */
                 pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
...
         return (instr & mask) != 0;
 }

-int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
                            unsigned long gpa, gva_t ea, int is_store)
 {
         u32 last_inst;
+
+        /*
+         * Fast path - check if the guest physical address corresponds to a
+         * device on the FAST_MMIO_BUS, if so we can avoid loading the
+         * instruction all together, then we can just handle it and return.
+         */
+        if (is_store) {
+                int idx, ret;
+
+                idx = srcu_read_lock(&vcpu->kvm->srcu);
+                ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
+                                       NULL);
+                srcu_read_unlock(&vcpu->kvm->srcu, idx);
+                if (!ret) {
+                        kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+                        return RESUME_GUEST;
+                }
+        }

         /*
          * If we fail, we just return to the guest and try executing it again.
...

         vcpu->arch.paddr_accessed = gpa;
         vcpu->arch.vaddr_accessed = ea;
-        return kvmppc_emulate_mmio(run, vcpu);
+        return kvmppc_emulate_mmio(vcpu);
 }

-int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
                                 unsigned long ea, unsigned long dsisr)
 {
         struct kvm *kvm = vcpu->kvm;
...
         __be64 *hptep;
         unsigned long mmu_seq, psize, pte_size;
         unsigned long gpa_base, gfn_base;
-        unsigned long gpa, gfn, hva, pfn;
+        unsigned long gpa, gfn, hva, pfn, hpa;
         struct kvm_memory_slot *memslot;
         unsigned long *rmap;
         struct revmap_entry *rev;
-        struct page *page, *pages[1];
-        long index, ret, npages;
+        struct page *page;
+        long index, ret;
         bool is_ci;
-        unsigned int writing, write_ok;
-        struct vm_area_struct *vma;
+        bool writing, write_ok;
+        unsigned int shift;
         unsigned long rcbits;
         long mmio_update;
+        pte_t pte, *ptep;

         if (kvm_is_radix(kvm))
-                return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
+                return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);

         /*
          * Real-mode code has already searched the HPT and found the
...
                         gpa_base = r & HPTE_R_RPN & ~(psize - 1);
                         gfn_base = gpa_base >> PAGE_SHIFT;
                         gpa = gpa_base | (ea & (psize - 1));
-                        return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+                        return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
                                                       dsisr & DSISR_ISSTORE);
                 }
         }
...

         /* No memslot means it's an emulated MMIO region */
         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
-                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+                return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
                                               dsisr & DSISR_ISSTORE);

         /*
...
         smp_rmb();

         ret = -EFAULT;
-        is_ci = false;
-        pfn = 0;
         page = NULL;
-        pte_size = PAGE_SIZE;
         writing = (dsisr & DSISR_ISSTORE) != 0;
         /* If writing != 0, then the HPTE must allow writing, if we get here */
         write_ok = writing;
         hva = gfn_to_hva_memslot(memslot, gfn);
-        npages = get_user_pages_fast(hva, 1, writing, pages);
-        if (npages < 1) {
-                /* Check if it's an I/O mapping */
-                down_read(&current->mm->mmap_sem);
-                vma = find_vma(current->mm, hva);
-                if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
-                    (vma->vm_flags & VM_PFNMAP)) {
-                        pfn = vma->vm_pgoff +
-                                ((hva - vma->vm_start) >> PAGE_SHIFT);
-                        pte_size = psize;
-                        is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
-                        write_ok = vma->vm_flags & VM_WRITE;
-                }
-                up_read(&current->mm->mmap_sem);
-                if (!pfn)
-                        goto out_put;
+
+        /*
+         * Do a fast check first, since __gfn_to_pfn_memslot doesn't
+         * do it with !atomic && !async, which is how we call it.
+         * We always ask for write permission since the common case
+         * is that the page is writable.
+         */
+        if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
+                write_ok = true;
         } else {
-                page = pages[0];
-                pfn = page_to_pfn(page);
-                if (PageHuge(page)) {
-                        page = compound_head(page);
-                        pte_size <<= compound_order(page);
-                }
-                /* if the guest wants write access, see if that is OK */
-                if (!writing && hpte_is_writable(r)) {
-                        pte_t *ptep, pte;
-                        unsigned long flags;
-                        /*
-                         * We need to protect against page table destruction
-                         * hugepage split and collapse.
-                         */
-                        local_irq_save(flags);
-                        ptep = find_current_mm_pte(current->mm->pgd,
-                                                   hva, NULL, NULL);
-                        if (ptep) {
-                                pte = kvmppc_read_update_linux_pte(ptep, 1);
-                                if (__pte_write(pte))
-                                        write_ok = 1;
-                        }
-                        local_irq_restore(flags);
+                /* Call KVM generic code to do the slow-path check */
+                pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+                                           writing, &write_ok);
+                if (is_error_noslot_pfn(pfn))
+                        return -EFAULT;
+                page = NULL;
+                if (pfn_valid(pfn)) {
+                        page = pfn_to_page(pfn);
+                        if (PageReserved(page))
+                                page = NULL;
                 }
         }

+        /*
+         * Read the PTE from the process' radix tree and use that
+         * so we get the shift and attribute bits.
+         */
+        spin_lock(&kvm->mmu_lock);
+        ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+        pte = __pte(0);
+        if (ptep)
+                pte = READ_ONCE(*ptep);
+        spin_unlock(&kvm->mmu_lock);
+        /*
+         * If the PTE disappeared temporarily due to a THP
+         * collapse, just return and let the guest try again.
+         */
+        if (!pte_present(pte)) {
+                if (page)
+                        put_page(page);
+                return RESUME_GUEST;
+        }
+        hpa = pte_pfn(pte) << PAGE_SHIFT;
+        pte_size = PAGE_SIZE;
+        if (shift)
+                pte_size = 1ul << shift;
+        is_ci = pte_ci(pte);
+
         if (psize > pte_size)
                 goto out_put;
+        if (pte_size > psize)
+                hpa |= hva & (pte_size - psize);

         /* Check WIMG vs. the actual page we're accessing */
         if (!hpte_cache_flags_ok(r, is_ci)) {
...
         }

         /*
-         * Set the HPTE to point to pfn.
-         * Since the pfn is at PAGE_SIZE granularity, make sure we
+         * Set the HPTE to point to hpa.
+         * Since the hpa is at PAGE_SIZE granularity, make sure we
          * don't mask out lower-order bits if psize < PAGE_SIZE.
          */
         if (psize < PAGE_SIZE)
                 psize = PAGE_SIZE;
-        r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
-                                        ((pfn << PAGE_SHIFT) & ~(psize - 1));
+        r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
         if (hpte_is_writable(r) && !write_ok)
                 r = hpte_make_readonly(r);
         ret = RESUME_GUEST;
...
         asm volatile("ptesync" : : : "memory");
         preempt_enable();
         if (page && hpte_is_writable(r))
-                SetPageDirty(page);
+                set_page_dirty_lock(page);

 out_put:
         trace_kvm_page_fault_exit(vcpu, hpte, ret);

-        if (page) {
-                /*
-                 * We drop pages[0] here, not page because page might
-                 * have been set to the head page of a compound, but
-                 * we have to drop the reference on the correct tail
-                 * page to match the get inside gup()
-                 */
-                put_page(pages[0]);
-        }
+        if (page)
+                put_page(page);
         return ret;

 out_unlock:
...

         gfn = memslot->base_gfn;
         rmapp = memslot->arch.rmap;
+        if (kvm_is_radix(kvm)) {
+                kvmppc_radix_flush_memslot(kvm, memslot);
+                return;
+        }
+
         for (n = memslot->npages; n; --n, ++gfn) {
-                if (kvm_is_radix(kvm)) {
-                        kvm_unmap_radix(kvm, memslot, gfn);
-                        continue;
-                }
                 /*
                  * Testing the present bit without locking is OK because
                  * the memslot has been marked invalid already, and hence
...
         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                 goto err;
         hva = gfn_to_hva_memslot(memslot, gfn);
-        npages = get_user_pages_fast(hva, 1, 1, pages);
+        npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
         if (npages < 1)
                 goto err;
         page = pages[0];
...

 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-        if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+        if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
                 return;

         if (!resize)
...
         if (WARN_ON(resize->error != -EBUSY))
                 return;

-        mutex_lock(&kvm->lock);
+        mutex_lock(&kvm->arch.mmu_setup_lock);

         /* Request is still current? */
         if (kvm->arch.resize_hpt == resize) {
                 /* We may request large allocations here:
-                 * do not sleep with kvm->lock held for a while.
+                 * do not sleep with kvm->arch.mmu_setup_lock held for a while.
                  */
-                mutex_unlock(&kvm->lock);
+                mutex_unlock(&kvm->arch.mmu_setup_lock);

                 resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
                                  resize->order);
...
                 if (WARN_ON(err == -EBUSY))
                         err = -EINPROGRESS;

-                mutex_lock(&kvm->lock);
+                mutex_lock(&kvm->arch.mmu_setup_lock);
                 /* It is possible that kvm->arch.resize_hpt != resize
-                 * after we grab kvm->lock again.
+                 * after we grab kvm->arch.mmu_setup_lock again.
                  */
         }
1484 | 1479 | |
---|
.. | .. |
---|
1487 | 1482 | if (kvm->arch.resize_hpt != resize) |
---|
1488 | 1483 | resize_hpt_release(kvm, resize); |
---|
1489 | 1484 | |
---|
1490 | | - mutex_unlock(&kvm->lock); |
---|
| 1485 | + mutex_unlock(&kvm->arch.mmu_setup_lock); |
---|
1491 | 1486 | } |
---|
1492 | 1487 | |
---|
1493 | 1488 | long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, |
---|
...
         if (shift && ((shift < 18) || (shift > 46)))
                 return -EINVAL;

-        mutex_lock(&kvm->lock);
+        mutex_lock(&kvm->arch.mmu_setup_lock);

         resize = kvm->arch.resize_hpt;

...
         ret = 100; /* estimated time in ms */

 out:
-        mutex_unlock(&kvm->lock);
+        mutex_unlock(&kvm->arch.mmu_setup_lock);
         return ret;
 }

...
         if (shift && ((shift < 18) || (shift > 46)))
                 return -EINVAL;

-        mutex_lock(&kvm->lock);
+        mutex_lock(&kvm->arch.mmu_setup_lock);

         resize = kvm->arch.resize_hpt;

...
         smp_mb();
 out_no_hpt:
         resize_hpt_release(kvm, resize);
-        mutex_unlock(&kvm->lock);
+        mutex_unlock(&kvm->arch.mmu_setup_lock);
         return ret;
 }

...
         int first_pass;
         unsigned long hpte[2];

-        if (!access_ok(VERIFY_WRITE, buf, count))
+        if (!access_ok(buf, count))
                 return -EFAULT;
         if (kvm_is_radix(kvm))
                 return 0;
...
         int mmu_ready;
         int pshift;

-        if (!access_ok(VERIFY_READ, buf, count))
+        if (!access_ok(buf, count))
                 return -EFAULT;
         if (kvm_is_radix(kvm))
                 return -EINVAL;

         /* lock out vcpus from running while we're doing this */
-        mutex_lock(&kvm->lock);
+        mutex_lock(&kvm->arch.mmu_setup_lock);
         mmu_ready = kvm->arch.mmu_ready;
         if (mmu_ready) {
                 kvm->arch.mmu_ready = 0;        /* temporarily */
...
                 smp_mb();
                 if (atomic_read(&kvm->arch.vcpus_running)) {
                         kvm->arch.mmu_ready = 1;
-                        mutex_unlock(&kvm->lock);
+                        mutex_unlock(&kvm->arch.mmu_setup_lock);
                         return -EBUSY;
                 }
         }
...
         /* Order HPTE updates vs. mmu_ready */
         smp_wmb();
         kvm->arch.mmu_ready = mmu_ready;
-        mutex_unlock(&kvm->lock);
+        mutex_unlock(&kvm->arch.mmu_setup_lock);

         if (err)
                 return err;
...
         ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
         if (ret < 0) {
                 kfree(ctx);
-                kvm_put_kvm(kvm);
+                kvm_put_kvm_no_destroy(kvm);
                 return ret;
         }

...

 void kvmppc_mmu_debugfs_init(struct kvm *kvm)
 {
-        kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
-                                                    kvm->arch.debugfs_dir, kvm,
-                                                    &debugfs_htab_fops);
+        debugfs_create_file("htab", 0400, kvm->arch.debugfs_dir, kvm,
+                            &debugfs_htab_fops);
 }

 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
...
         vcpu->arch.slb_nr = 32;         /* POWER7/POWER8 */

         mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
-        mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

         vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
 }