2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
--- a/kernel/arch/um/kernel/tlb.c
+++ b/kernel/arch/um/kernel/tlb.c
@@ -1,13 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
  */
 
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched/signal.h>
 
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <as-layout.h>
 #include <mem_user.h>
@@ -37,17 +36,19 @@
 			} mprotect;
 		} u;
 	} ops[1];
+	int userspace;
 	int index;
-	struct mm_id *id;
+	struct mm_struct *mm;
 	void *data;
 	int force;
 };
 
-#define INIT_HVC(mm, force) \
+#define INIT_HVC(mm, force, userspace) \
 	((struct host_vm_change) \
 	 { .ops = { { .type = NONE } }, \
-	   .id = &mm->context.id, \
+	   .mm = mm, \
 	   .data = NULL, \
+	   .userspace = userspace, \
 	   .index = 0, \
 	   .force = force })
 
@@ -68,18 +69,40 @@
 		op = &hvc->ops[i];
 		switch (op->type) {
 		case MMAP:
-			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
-				  op->u.mmap.prot, op->u.mmap.fd,
-				  op->u.mmap.offset, finished, &hvc->data);
+			if (hvc->userspace)
+				ret = map(&hvc->mm->context.id, op->u.mmap.addr,
+					  op->u.mmap.len, op->u.mmap.prot,
+					  op->u.mmap.fd,
+					  op->u.mmap.offset, finished,
+					  &hvc->data);
+			else
+				map_memory(op->u.mmap.addr, op->u.mmap.offset,
+					   op->u.mmap.len, 1, 1, 1);
 			break;
 		case MUNMAP:
-			ret = unmap(hvc->id, op->u.munmap.addr,
-				    op->u.munmap.len, finished, &hvc->data);
+			if (hvc->userspace)
+				ret = unmap(&hvc->mm->context.id,
+					    op->u.munmap.addr,
+					    op->u.munmap.len, finished,
+					    &hvc->data);
+			else
+				ret = os_unmap_memory(
+					(void *) op->u.munmap.addr,
+					op->u.munmap.len);
+
 			break;
 		case MPROTECT:
-			ret = protect(hvc->id, op->u.mprotect.addr,
-				      op->u.mprotect.len, op->u.mprotect.prot,
-				      finished, &hvc->data);
+			if (hvc->userspace)
+				ret = protect(&hvc->mm->context.id,
+					      op->u.mprotect.addr,
+					      op->u.mprotect.len,
+					      op->u.mprotect.prot,
+					      finished, &hvc->data);
+			else
+				ret = os_protect_memory(
+					(void *) op->u.mprotect.addr,
+					op->u.mprotect.len,
+					1, 1, 1);
 			break;
 		default:
 			printk(KERN_ERR "Unknown op type %d in do_ops\n",
@@ -100,9 +123,15 @@
 {
 	__u64 offset;
 	struct host_vm_op *last;
-	int fd, ret = 0;
+	int fd = -1, ret = 0;
 
-	fd = phys_mapping(phys, &offset);
+	if (virt + len > STUB_START && virt < STUB_END)
+		return -EINVAL;
+
+	if (hvc->userspace)
+		fd = phys_mapping(phys, &offset);
+	else
+		offset = phys;
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MMAP) &&
@@ -136,7 +165,7 @@
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if ((addr >= STUB_START) && (addr < STUB_END))
+	if (addr + len > STUB_START && addr < STUB_END)
 		return -EINVAL;
 
 	if (hvc->index != 0) {
@@ -165,6 +194,9 @@
 {
 	struct host_vm_op *last;
 	int ret = 0;
+
+	if (addr + len > STUB_START && addr < STUB_END)
+		return -EINVAL;
 
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
@@ -215,10 +247,11 @@
 		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 			(x ? UM_PROT_EXEC : 0));
 		if (hvc->force || pte_newpage(*pte)) {
-			if (pte_present(*pte))
-				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-					       PAGE_SIZE, prot, hvc);
-			else
+			if (pte_present(*pte)) {
+				if (pte_newpage(*pte))
+					ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
+						       PAGE_SIZE, prot, hvc);
+			} else
 				ret = add_munmap(addr, PAGE_SIZE, hvc);
 		} else if (pte_newprot(*pte))
 			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
@@ -249,7 +282,7 @@
 	return ret;
 }
 
-static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
+static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
 				   unsigned long end,
 				   struct host_vm_change *hvc)
 {
@@ -257,7 +290,7 @@
 	unsigned long next;
 	int ret = 0;
 
-	pud = pud_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (!pud_present(*pud)) {
@@ -271,15 +304,37 @@
 	return ret;
 }
 
+static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
+				   unsigned long end,
+				   struct host_vm_change *hvc)
+{
+	p4d_t *p4d;
+	unsigned long next;
+	int ret = 0;
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		if (!p4d_present(*p4d)) {
+			if (hvc->force || p4d_newpage(*p4d)) {
+				ret = add_munmap(addr, next - addr, hvc);
+				p4d_mkuptodate(*p4d);
+			}
+		} else
+			ret = update_pud_range(p4d, addr, next, hvc);
+	} while (p4d++, addr = next, ((addr < end) && !ret));
+	return ret;
+}
+
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force)
 {
 	pgd_t *pgd;
 	struct host_vm_change hvc;
 	unsigned long addr = start_addr, next;
-	int ret = 0;
+	int ret = 0, userspace = 1;
 
-	hvc = INIT_HVC(mm, force);
+	hvc = INIT_HVC(mm, force, userspace);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end_addr);
@@ -288,8 +343,8 @@
 				ret = add_munmap(addr, next - addr, &hvc);
 				pgd_mkuptodate(*pgd);
 			}
-		}
-		else ret = update_pud_range(pgd, addr, next, &hvc);
+		} else
+			ret = update_p4d_range(pgd, addr, next, &hvc);
 	} while (pgd++, addr = next, ((addr < end_addr) && !ret));
 
 	if (!ret)
@@ -297,12 +352,11 @@
 
 	/* This is not an else because ret is modified above */
 	if (ret) {
+		struct mm_id *mm_idp = &current->mm->context.id;
+
 		printk(KERN_ERR "fix_range_common: failed, killing current "
 		       "process: %d\n", task_tgid_vnr(current));
-		/* We are under mmap_sem, release it such that current can terminate */
-		up_write(&current->mm->mmap_sem);
-		force_sig(SIGKILL, current);
-		do_signal(&current->thread.regs);
+		mm_idp->kill = 1;
 	}
 }
 
@@ -310,13 +364,16 @@
 {
 	struct mm_struct *mm;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long addr, last;
-	int updated = 0, err;
+	int updated = 0, err = 0, force = 0, userspace = 0;
+	struct host_vm_change hvc;
 
 	mm = &init_mm;
+	hvc = INIT_HVC(mm, force, userspace);
 	for (addr = start; addr < end;) {
 		pgd = pgd_offset(mm, addr);
 		if (!pgd_present(*pgd)) {
@@ -325,8 +382,7 @@
 			last = end;
 			if (pgd_newpage(*pgd)) {
 				updated = 1;
-				err = os_unmap_memory((void *) addr,
-						      last - addr);
+				err = add_munmap(addr, last - addr, &hvc);
 				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
@@ -335,15 +391,30 @@
 			continue;
 		}
 
-		pud = pud_offset(pgd, addr);
+		p4d = p4d_offset(pgd, addr);
+		if (!p4d_present(*p4d)) {
+			last = ADD_ROUND(addr, P4D_SIZE);
+			if (last > end)
+				last = end;
+			if (p4d_newpage(*p4d)) {
+				updated = 1;
+				err = add_munmap(addr, last - addr, &hvc);
+				if (err < 0)
+					panic("munmap failed, errno = %d\n",
+					      -err);
+			}
+			addr = last;
+			continue;
+		}
+
+		pud = pud_offset(p4d, addr);
 		if (!pud_present(*pud)) {
 			last = ADD_ROUND(addr, PUD_SIZE);
 			if (last > end)
 				last = end;
 			if (pud_newpage(*pud)) {
 				updated = 1;
-				err = os_unmap_memory((void *) addr,
-						      last - addr);
+				err = add_munmap(addr, last - addr, &hvc);
 				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
@@ -359,8 +430,7 @@
 			last = end;
 			if (pmd_newpage(*pmd)) {
 				updated = 1;
-				err = os_unmap_memory((void *) addr,
-						      last - addr);
+				err = add_munmap(addr, last - addr, &hvc);
 				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
@@ -372,28 +442,32 @@
 		pte = pte_offset_kernel(pmd, addr);
 		if (!pte_present(*pte) || pte_newpage(*pte)) {
 			updated = 1;
-			err = os_unmap_memory((void *) addr,
-					      PAGE_SIZE);
+			err = add_munmap(addr, PAGE_SIZE, &hvc);
 			if (err < 0)
 				panic("munmap failed, errno = %d\n",
 				      -err);
 			if (pte_present(*pte))
-				map_memory(addr,
-					   pte_val(*pte) & PAGE_MASK,
-					   PAGE_SIZE, 1, 1, 1);
+				err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
+					       PAGE_SIZE, 0, &hvc);
 		}
 		else if (pte_newprot(*pte)) {
 			updated = 1;
-			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
+			err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
 		}
 		addr += PAGE_SIZE;
 	}
+	if (!err)
+		err = do_ops(&hvc, hvc.index, 1);
+
+	if (err < 0)
+		panic("flush_tlb_kernel failed, errno = %d\n", err);
 	return updated;
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -403,11 +477,19 @@
 	struct mm_id *mm_id;
 
 	address &= PAGE_MASK;
+
+	if (address >= STUB_START && address < STUB_END)
+		goto kill;
+
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		goto kill;
 
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (!p4d_present(*p4d))
+		goto kill;
+
+	pud = pud_offset(p4d, address);
 	if (!pud_present(*pud))
 		goto kill;
 
@@ -457,40 +539,18 @@
 
 kill:
 	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
-	force_sig(SIGKILL, current);
-}
-
-pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
-{
-	return pgd_offset(mm, address);
-}
-
-pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
-{
-	return pud_offset(pgd, address);
-}
-
-pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
-{
-	return pmd_offset(pud, address);
-}
-
-pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
-{
-	return pte_offset_kernel(pmd, address);
-}
-
-pte_t *addr_pte(struct task_struct *task, unsigned long addr)
-{
-	pgd_t *pgd = pgd_offset(task->mm, addr);
-	pud_t *pud = pud_offset(pgd, addr);
-	pmd_t *pmd = pmd_offset(pud, addr);
-
-	return pte_offset_map(pmd, addr);
+	force_sig(SIGKILL);
 }
 
 void flush_tlb_all(void)
 {
+	/*
+	 * Don't bother flushing if this address space is about to be
+	 * destroyed.
+	 */
+	if (atomic_read(&current->mm->mm_users) == 0)
+		return;
+
 	flush_tlb_mm(current->mm);
 }
 
@@ -512,6 +572,13 @@
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force)
 {
+	/*
+	 * Don't bother flushing if this address space is about to be
+	 * destroyed.
+	 */
+	if (atomic_read(&mm->mm_users) == 0)
+		return;
+
 	fix_range_common(mm, start_addr, end_addr, force);
 }
 
@@ -527,13 +594,6 @@
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			unsigned long end)
 {
-	/*
-	 * Don't bother flushing if this address space is about to be
-	 * destroyed.
-	 */
-	if (atomic_read(&mm->mm_users) == 0)
-		return;
-
 	fix_range(mm, start, end, 0);
 }
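
The recurring guard introduced above, addr + len > STUB_START && addr < STUB_END, rejects any range that overlaps the stub pages, not only ranges that start inside them; the pre-patch test in add_munmap (addr >= STUB_START && addr < STUB_END) missed ranges that begin below STUB_START and extend into the stub, and add_mmap/add_mprotect had no guard at all. Below is a minimal standalone sketch of the same half-open interval-overlap test, using hypothetical stand-in values for STUB_START/STUB_END (the real values come from as-layout.h and depend on the UML configuration):

#include <stdio.h>

/* Hypothetical stand-ins for STUB_START/STUB_END; illustrative only. */
#define STUB_START 0x100000UL
#define STUB_END   0x102000UL

/* Nonzero when [addr, addr + len) overlaps [STUB_START, STUB_END).
 * This is the overlap form used by add_mmap/add_munmap/add_mprotect
 * in the patch; the old start-only test missed partial overlaps. */
static int overlaps_stub(unsigned long addr, unsigned long len)
{
	return addr + len > STUB_START && addr < STUB_END;
}

int main(void)
{
	/* Starts below the stub but extends into it: the old start-only
	 * test missed this case, the overlap test catches it. */
	printf("%d\n", overlaps_stub(0xff000UL, 0x2000UL));  /* 1 */
	/* Entirely below the stub: no overlap. */
	printf("%d\n", overlaps_stub(0xfe000UL, 0x1000UL));  /* 0 */
	/* Starts inside the stub: both tests catch this. */
	printf("%d\n", overlaps_stub(0x101000UL, 0x1000UL)); /* 1 */
	return 0;
}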
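
The other structural change is that kernel-range flushes (flush_tlb_kernel_range_common) now go through the same host_vm_change batching that userspace flushes already used: add_mmap/add_munmap/add_mprotect queue operations, coalescing each new one into the previous op when the ranges are adjacent and compatible, and do_ops replays the batch either against the remote mm_id (userspace) or via direct map_memory/os_unmap_memory/os_protect_memory calls (the new userspace == 0 path). Here is a standalone sketch of just the coalescing step, with simplified types that model only the MUNMAP case (illustrative, not the kernel structures):

#include <stdio.h>

/* Simplified stand-in for struct host_vm_op, MUNMAP only. */
struct munmap_op {
	unsigned long addr;
	unsigned long len;
};

/* Mirrors the merge test in add_munmap(): if the new range starts
 * exactly where the last queued op ends, grow that op instead of
 * consuming a new slot. Returns the number of ops in use. */
static int queue_munmap(struct munmap_op *ops, int index,
			unsigned long addr, unsigned long len)
{
	if (index > 0 &&
	    ops[index - 1].addr + ops[index - 1].len == addr) {
		ops[index - 1].len += len;
		return index;
	}
	ops[index].addr = addr;
	ops[index].len = len;
	return index + 1;
}

int main(void)
{
	struct munmap_op ops[4];
	int n = 0;

	n = queue_munmap(ops, n, 0x1000UL, 0x1000UL);
	n = queue_munmap(ops, n, 0x2000UL, 0x1000UL); /* adjacent: merged */
	n = queue_munmap(ops, n, 0x8000UL, 0x1000UL); /* gap: new op */

	for (int i = 0; i < n; i++)
		printf("munmap [0x%lx, +0x%lx)\n", ops[i].addr, ops[i].len);
	/* Two ops: 0x1000..0x3000 merged into one, plus one at 0x8000. */
	return 0;
}

Batching matters here because each replayed op on the userspace path has to be performed in the context of the traced host process, so merging adjacent ranges directly reduces the number of host transactions per flush.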
539599