```diff
@@ -388,6 +388,8 @@
 		ret = -EPERM;
 		if (!capable(CAP_SYS_ADMIN))
 			goto out;
+
+		mmap_read_lock(current->mm);
 	} else {
 		struct vm_area_struct *vma;
 
```
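The calls being introduced here are thin wrappers around the rwsem operations they replace, so the conversion is behavior-neutral. A minimal sketch of the read-side wrappers, following their initial implementation in include/linux/mmap_lock.h (the field itself was renamed from mmap_sem to mmap_lock later in the same series):

```c
/* Sketch of the wrappers as first added in include/linux/mmap_lock.h;
 * they compile to exactly the calls they replace. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}
```

Routing every call site through one API is what later let the field name, and potentially the lock type itself, change in a single place.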
```diff
@@ -399,7 +401,7 @@
 		 * Verify that the specified address region actually belongs
 		 * to this process.
 		 */
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		vma = find_vma(current->mm, addr);
 		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
 			goto out_unlock;
```
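A detail in the unchanged lines: find_vma() returns the lowest VMA whose vm_end lies above the address, and that VMA may begin above the address, so the addr < vma->vm_start test is what actually rejects an unmapped region. A sketch of the containment check, using a hypothetical helper name for illustration:

```c
/* Hypothetical helper: true only when the whole range
 * [addr, addr + len) lies inside one VMA.  find_vma() alone is not
 * enough, because the VMA it returns (lowest vm_end above addr)
 * may start above addr. */
static bool range_in_one_vma(struct mm_struct *mm, unsigned long addr,
			     unsigned long len)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	return vma && addr >= vma->vm_start && addr + len <= vma->vm_end;
}
```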
```diff
@@ -450,7 +452,7 @@
 		}
 	}
 out_unlock:
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 out:
 	return ret;
 }
```
```diff
@@ -465,16 +467,24 @@
 	for (;;) {
 		struct mm_struct *mm = current->mm;
 		pgd_t *pgd;
+		p4d_t *p4d;
+		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;
 		spinlock_t *ptl;
 		unsigned long mem_value;
 
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		pgd = pgd_offset(mm, (unsigned long)mem);
 		if (!pgd_present(*pgd))
 			goto bad_access;
-		pmd = pmd_offset(pgd, (unsigned long)mem);
+		p4d = p4d_offset(pgd, (unsigned long)mem);
+		if (!p4d_present(*p4d))
+			goto bad_access;
+		pud = pud_offset(p4d, (unsigned long)mem);
+		if (!pud_present(*pud))
+			goto bad_access;
+		pmd = pmd_offset(pud, (unsigned long)mem);
 		if (!pmd_present(*pmd))
 			goto bad_access;
 		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
```
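The added p4d and pud steps bring the walk in line with the kernel's five-level page-table scheme. On architectures such as m68k that use fewer levels, the extra levels are folded: the generic headers make each folded lookup return its input and make the presence check above it constant, so the compiler removes the new steps entirely. A sketch of the folding pattern, simplified from include/asm-generic/pgtable-nop4d.h:

```c
/* Folded-p4d pattern (simplified): a p4d "entry" is just the pgd
 * entry viewed through another type, so the extra walk step is free. */
typedef struct { pgd_t pgd; } p4d_t;

static inline int pgd_present(pgd_t pgd)
{
	return 1;		/* the level above a folded one never fails */
}

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;	/* no extra table to walk */
}
```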
```diff
@@ -493,11 +503,11 @@
 		__put_user(newval, mem);
 
 		pte_unmap_unlock(pte, ptl);
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		return mem_value;
 
 	bad_access:
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		/* This is not necessarily a bad access, we can get here if
 		   a memory we're trying to write to should be copied-on-write.
 		   Make the kernel do the necessary page stuff, then re-iterate.
```
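The success path relies on the pte_offset_map_lock()/pte_unmap_unlock() pairing visible above: the lookup maps the page holding the PTE (it can live in highmem with CONFIG_HIGHPTE) and takes the page-table spinlock, so the mapping cannot change between the compare and the store. A sketch of the idiom with a hypothetical helper; in kernels of this vintage the lookup always succeeds once pmd_present() has been checked:

```c
/* Hypothetical helper showing the idiom only. */
static unsigned long read_word_pte_locked(struct mm_struct *mm, pmd_t *pmd,
					  unsigned long __user *mem)
{
	spinlock_t *ptl;
	unsigned long val = 0;
	pte_t *pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);

	/* While ptl is held the PTE cannot be cleared or remapped,
	 * so the user access below cannot race with an unmap. */
	__get_user(val, mem);
	pte_unmap_unlock(pte, ptl);
	return val;
}
```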
```diff
@@ -537,13 +547,13 @@
 	struct mm_struct *mm = current->mm;
 	unsigned long mem_value;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	mem_value = *mem;
 	if (mem_value == oldval)
 		*mem = newval;
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return mem_value;
 }
 
```
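This last hunk is the no-MMU variant of the same syscall: without an MMU, kernel and user share one flat address space, so the word can be loaded and stored directly instead of being reached through a page-table walk. The two implementations are selected at build time; the file's structure, in outline:

```c
/* Structural outline of sys_m68k.c (not a complete listing): the MMU
 * build walks page tables and retries on fault, while the no-MMU
 * build dereferences the user pointer directly. */
#ifdef CONFIG_MMU
/* ... sys_atomic_cmpxchg_32() with the page-table walk shown above ... */
#else
/* ... sys_atomic_cmpxchg_32() with the direct load/store shown above ... */
#endif
```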