2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/m68k/kernel/sys_m68k.c
@@ -388,6 +388,8 @@
                 ret = -EPERM;
                 if (!capable(CAP_SYS_ADMIN))
                         goto out;
+
+                mmap_read_lock(current->mm);
         } else {
                 struct vm_area_struct *vma;
 
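The two added lines take the mmap lock for reading in the FLUSH_SCOPE_ALL/CAP_SYS_ADMIN branch as well; as the third hunk below shows, both branches fall through to the shared out_unlock: label, which now calls mmap_read_unlock(), so every path reaching it has to hold the lock. mmap_read_lock()/mmap_read_unlock() are the wrappers from include/linux/mmap_lock.h; a simplified sketch of their shape follows (the real definitions carry extra instrumentation, and the underlying rwsem was renamed from mmap_sem to mmap_lock as part of the same API change):

/* Simplified sketch of the wrappers, not the actual kernel definitions. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
        down_read(&mm->mmap_lock);      /* read side of the per-mm rwsem */
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
        up_read(&mm->mmap_lock);
}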
@@ -399,7 +401,7 @@
                  * Verify that the specified address region actually belongs
                  * to this process.
                  */
-                down_read(&current->mm->mmap_sem);
+                mmap_read_lock(current->mm);
                 vma = find_vma(current->mm, addr);
                 if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
                         goto out_unlock;
@@ -450,7 +452,7 @@
                 }
         }
 out_unlock:
-        up_read(&current->mm->mmap_sem);
+        mmap_read_unlock(current->mm);
 out:
         return ret;
 }
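For reference, the pattern the else branch keeps using after the conversion, validating a user-supplied range against the task's VMAs while holding the mmap lock for reading, in standalone form; the helper name is illustrative, not from the patch:

#include <linux/mm.h>

/* Illustrative helper, not part of sys_m68k.c. */
static bool range_belongs_to_task(struct mm_struct *mm,
                                  unsigned long addr, unsigned long len)
{
        struct vm_area_struct *vma;
        bool ok;

        mmap_read_lock(mm);                     /* keeps the VMA tree stable */
        vma = find_vma(mm, addr);               /* first VMA with vm_end > addr */
        ok = vma && addr >= vma->vm_start && addr + len <= vma->vm_end;
        mmap_read_unlock(mm);
        return ok;
}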
@@ -465,16 +467,24 @@
         for (;;) {
                 struct mm_struct *mm = current->mm;
                 pgd_t *pgd;
+                p4d_t *p4d;
+                pud_t *pud;
                 pmd_t *pmd;
                 pte_t *pte;
                 spinlock_t *ptl;
                 unsigned long mem_value;
 
-                down_read(&mm->mmap_sem);
+                mmap_read_lock(mm);
                 pgd = pgd_offset(mm, (unsigned long)mem);
                 if (!pgd_present(*pgd))
                         goto bad_access;
-                pmd = pmd_offset(pgd, (unsigned long)mem);
+                p4d = p4d_offset(pgd, (unsigned long)mem);
+                if (!p4d_present(*p4d))
+                        goto bad_access;
+                pud = pud_offset(p4d, (unsigned long)mem);
+                if (!pud_present(*pud))
+                        goto bad_access;
+                pmd = pmd_offset(pud, (unsigned long)mem);
                 if (!pmd_present(*pmd))
                         goto bad_access;
                 pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
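This hunk extends the page-table walk to the generic five-level layout: pgd -> p4d -> pud -> pmd -> pte instead of the old pgd -> pmd -> pte. On m68k the p4d and pud levels are folded, so the extra lookups effectively pass the top-level entry through, but the generic helpers still have to be called in order. A condensed standalone version of the walk (helper name is illustrative, not from the patch; the caller is expected to hold mmap_read_lock(mm)):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Illustrative helper: walk to the PTE for addr and lock its page table. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
                          spinlock_t **ptlp)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_present(*pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);
        if (!p4d_present(*p4d))
                return NULL;
        pud = pud_offset(p4d, addr);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return NULL;
        /* Map the PTE and take the page-table spinlock, as the hunk above does. */
        return pte_offset_map_lock(mm, pmd, addr, ptlp);
}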
@@ -493,7 +503,7 @@
                         __put_user(newval, mem);
 
                 pte_unmap_unlock(pte, ptl);
-                up_read(&mm->mmap_sem);
+                mmap_read_unlock(mm);
                 return mem_value;
 
               bad_access:
-                up_read(&mm->mmap_sem);
+                mmap_read_unlock(mm);
                 /* This is not necessarily a bad access, we can get here if
                    a memory we're trying to write to should be copied-on-write.
                    Make the kernel do the necessary page stuff, then re-iterate.
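Both exits from the loop body drop the read lock: the success path right after pte_unmap_unlock(), and bad_access, which releases it before the code (per the comment that follows) lets the fault handler sort out the page, for example a copy-on-write fault, and then re-iterates.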
@@ -537,13 +547,13 @@
         struct mm_struct *mm = current->mm;
         unsigned long mem_value;
 
-        down_read(&mm->mmap_sem);
+        mmap_read_lock(mm);
 
         mem_value = *mem;
         if (mem_value == oldval)
                 *mem = newval;
 
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
         return mem_value;
 }
 
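The final hunk applies the same read-side conversion to the second copy of the compare-and-exchange helper, presumably the build variant that dereferences the user pointer directly instead of walking page tables; here the lock/unlock pair simply brackets the load, compare, and conditional store on *mem.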