2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/virt/kvm/async_pf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * kvm asynchronous fault support
  *
@@ -5,19 +6,6 @@
  *
  * Author:
  *      Gleb Natapov <gleb@redhat.com>
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
 
 #include <linux/kvm_host.h>
@@ -28,21 +16,6 @@
 
 #include "async_pf.h"
 #include <trace/events/kvm.h>
-
-static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
-					       struct kvm_async_pf *work)
-{
-#ifdef CONFIG_KVM_ASYNC_PF_SYNC
-	kvm_arch_async_page_present(vcpu, work);
-#endif
-}
-static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
-						struct kvm_async_pf *work)
-{
-#ifndef CONFIG_KVM_ASYNC_PF_SYNC
-	kvm_arch_async_page_present(vcpu, work);
-#endif
-}
 
 static struct kmem_cache *async_pf_cache;
 
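
The two #ifdef wrapper helpers removed above reappear at their call sites as IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) checks in the later hunks. A minimal stand-alone sketch of why that style is generally preferred: both branches are always parsed and type-checked, and the dead branch is folded away as a constant. CONFIG_DEMO_FEATURE and IS_ENABLED_DEMO below are illustrative stand-ins, not the kernel's actual macros.

#include <stdio.h>

/* The kernel's IS_ENABLED() reduces to a 0/1 constant; emulate that. */
#ifdef CONFIG_DEMO_FEATURE
#define IS_ENABLED_DEMO 1
#else
#define IS_ENABLED_DEMO 0
#endif

static void feature_hook(void)
{
	puts("feature hook ran");
}

int main(void)
{
	/*
	 * Unlike an #ifdef block, this call is compiled and checked even
	 * when the feature is off; the branch is simply folded away.
	 */
	if (IS_ENABLED_DEMO)
		feature_hook();
	return 0;
}
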
@@ -78,26 +51,32 @@
 	unsigned long addr = apf->addr;
 	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
 	int locked = 1;
+	bool first;
 
 	might_sleep();
 
 	/*
-	 * This work is run asynchromously to the task which owns
+	 * This work is run asynchronously to the task which owns
 	 * mm and might be done in another context, so we must
 	 * access remotely.
 	 */
-	down_read(&mm->mmap_sem);
-	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
+	mmap_read_lock(mm);
+	get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
 			&locked);
 	if (locked)
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 
-	kvm_async_page_present_sync(vcpu, apf);
+	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+		kvm_arch_async_page_present(vcpu, apf);
 
 	spin_lock(&vcpu->async_pf.lock);
+	first = list_empty(&vcpu->async_pf.done);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
 	apf->vcpu = NULL;
 	spin_unlock(&vcpu->async_pf.lock);
+
+	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
+		kvm_arch_async_page_present_queued(vcpu);
 
 	/*
 	 * apf may be freed by kvm_check_async_pf_completion() after
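
The new 'first' flag captures a common producer pattern: sample list_empty() under the lock before inserting, and let only the producer that takes the list from empty to non-empty fire the one-shot notification, here kvm_arch_async_page_present_queued(). A rough user-space sketch of the same transition test, using illustrative demo_* names rather than kernel APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_node { struct demo_node *next; };

static struct demo_node *demo_head;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_notify(void)
{
	puts("queue went non-empty: notify the consumer once");
}

static void demo_enqueue(struct demo_node *n)
{
	bool first;

	pthread_mutex_lock(&demo_lock);
	first = (demo_head == NULL);	/* was the list empty before us? */
	n->next = demo_head;
	demo_head = n;
	pthread_mutex_unlock(&demo_lock);

	/* Only the producer that made the list non-empty notifies. */
	if (first)
		demo_notify();
}

int main(void)
{
	struct demo_node a = {0}, b = {0};

	demo_enqueue(&a);	/* triggers the notification */
	demo_enqueue(&b);	/* does not */
	return 0;
}
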
@@ -106,8 +85,7 @@
 
 	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 
-	if (swq_has_sleeper(&vcpu->wq))
-		swake_up_one(&vcpu->wq);
+	rcuwait_wake_up(&vcpu->wait);
 
 	mmput(mm);
 	kvm_put_kvm(vcpu->kvm);
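
rcuwait_wake_up() replaces the swait-queue wakeup; an rcuwait has at most one waiter (here the vCPU task), which is all this path needs. The sketch below is only a loose pthread-based analogy for that single-waiter contract; it does not mirror the lockless, RCU-based kernel implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
static bool done;

/* Analog of rcuwait_wake_up(): at most one waiter, so signal once. */
static void demo_wake_up(void)
{
	pthread_mutex_lock(&wait_lock);
	done = true;
	pthread_cond_signal(&wait_cv);
	pthread_mutex_unlock(&wait_lock);
}

/* Analog of the single vCPU task blocking until a completion arrives. */
static void *demo_waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&wait_lock);
	while (!done)
		pthread_cond_wait(&wait_cv, &wait_lock);
	pthread_mutex_unlock(&wait_lock);
	puts("waiter resumed");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, demo_waiter, NULL);
	demo_wake_up();
	pthread_join(t, NULL);
	return 0;
}
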
@@ -161,7 +139,7 @@
 	struct kvm_async_pf *work;
 
 	while (!list_empty_careful(&vcpu->async_pf.done) &&
-	      kvm_arch_can_inject_async_page_present(vcpu)) {
+	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
 		spin_lock(&vcpu->async_pf.lock);
 		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
 					link);
@@ -169,7 +147,8 @@
 		spin_unlock(&vcpu->async_pf.lock);
 
 		kvm_arch_async_page_ready(vcpu, work);
-		kvm_async_page_present_async(vcpu, work);
+		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+			kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
@@ -177,15 +156,21 @@
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-		       unsigned long hva, struct kvm_arch_async_pf *arch)
+/*
+ * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
+ * success, 'false' on failure (page fault has to be handled synchronously).
+ */
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			unsigned long hva, struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
 
 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
-		return 0;
+		return false;
 
-	/* setup delayed work */
+	/* Arch specific code should not do async PF in this case */
+	if (unlikely(kvm_is_error_hva(hva)))
+		return false;
 
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
@@ -193,7 +178,7 @@
 	 */
 	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
 	if (!work)
-		return 0;
+		return false;
 
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
@@ -204,29 +189,21 @@
 	mmget(work->mm);
 	kvm_get_kvm(work->vcpu->kvm);
 
-	/* this can't really happen otherwise gfn_to_pfn_async
-	   would succeed */
-	if (unlikely(kvm_is_error_hva(work->addr)))
-		goto retry_sync;
-
 	INIT_WORK(&work->work, async_pf_execute);
-	if (!schedule_work(&work->work))
-		goto retry_sync;
 
 	list_add_tail(&work->queue, &vcpu->async_pf.queue);
 	vcpu->async_pf.queued++;
-	kvm_arch_async_page_not_present(vcpu, work);
-	return 1;
-retry_sync:
-	kvm_put_kvm(work->vcpu->kvm);
-	mmput(work->mm);
-	kmem_cache_free(async_pf_cache, work);
-	return 0;
+	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
+
+	schedule_work(&work->work);
+
+	return true;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
+	bool first;
 
 	if (!list_empty_careful(&vcpu->async_pf.done))
 		return 0;
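
Two control-flow changes meet in the hunk above: the hva validity check moves ahead of any reference taking, which lets the retry_sync unwind path disappear, and schedule_work() moves after all list bookkeeping because the worker may run, and free the work item, the instant it is queued. A hypothetical skeleton of that "validate early, hand off last" shape, with demo_* stand-ins for the KVM calls:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_work { int payload; };

/* Stand-in for the !kvm_is_error_hva(hva) check. */
static bool demo_addr_valid(unsigned long addr)
{
	return addr != 0;
}

static bool demo_setup_async(unsigned long addr)
{
	struct demo_work *work;

	/* Validate first: nothing has been taken, nothing to unwind. */
	if (!demo_addr_valid(addr))
		return false;

	work = calloc(1, sizeof(*work));
	if (!work)
		return false;

	/*
	 * All bookkeeping happens before the hand-off; once the work is
	 * scheduled, another context may run and free it, so nothing here
	 * may touch it afterwards. free() stands in for that consumer.
	 */
	free(work);
	return true;
}

int main(void)
{
	printf("valid: %d, invalid: %d\n",
	       demo_setup_async(0x1000), demo_setup_async(0));
	return 0;
}
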
@@ -239,9 +216,13 @@
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
+	first = list_empty(&vcpu->async_pf.done);
 	list_add_tail(&work->link, &vcpu->async_pf.done);
 	spin_unlock(&vcpu->async_pf.lock);
 
+	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
+		kvm_arch_async_page_present_queued(vcpu);
+
 	vcpu->async_pf.queued++;
 	return 0;
 }
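
With kvm_setup_async_pf() now returning bool, a caller reads naturally as "try async, otherwise handle the fault synchronously". The following is a hypothetical caller sketch; the demo_* names are illustrative and not the actual arch call sites:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for kvm_setup_async_pf(). */
static bool demo_setup_async_pf(unsigned long hva)
{
	return hva != 0;	/* pretend only a valid hva can go async */
}

static void demo_handle_fault_sync(unsigned long gpa)
{
	printf("handling fault at %#lx synchronously\n", gpa);
}

static void demo_handle_fault(unsigned long gpa, unsigned long hva)
{
	/* 'false' now unambiguously means: handle the fault inline. */
	if (!demo_setup_async_pf(hva))
		demo_handle_fault_sync(gpa);
}

int main(void)
{
	demo_handle_fault(0x5000, 0);	/* falls back to the sync path */
	return 0;
}
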