From 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 13 May 2024 10:30:14 +0000
Subject: [PATCH] modify sin led gpio

---
 kernel/arch/microblaze/mm/fault.c | 33 +++++++++++++++------------------
 1 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/kernel/arch/microblaze/mm/fault.c b/kernel/arch/microblaze/mm/fault.c
index 202ad6a..b3fed2c 100644
--- a/kernel/arch/microblaze/mm/fault.c
+++ b/kernel/arch/microblaze/mm/fault.c
@@ -28,9 +28,9 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/perf_event.h>
 
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <linux/mmu_context.h>
 #include <linux/uaccess.h>
@@ -91,7 +91,7 @@
 	int code = SEGV_MAPERR;
 	int is_write = error_code & ESR_S;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	regs->ear = address;
 	regs->esr = error_code;
@@ -122,10 +122,12 @@
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
-	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * erroneous fault occurring in a code path which already holds mmap_lock
 	 * we will deadlock attempting to validate the fault against the
 	 * address space.  Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
@@ -137,12 +139,12 @@
 	 * source.  If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	vma = find_vma(mm, address);
@@ -215,9 +217,9 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -231,16 +233,11 @@
 	}
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (unlikely(fault & VM_FAULT_MAJOR))
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			flags |= FAULT_FLAG_TRIED;
 
 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
@@ -249,7 +246,7 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * keep track of tlb+htab misses that are good addrs but
@@ -260,7 +257,7 @@
 	return;
 
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	pte_errors++;
@@ -279,7 +276,7 @@
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -287,9 +284,9 @@
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (user_mode(regs)) {
-		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
+		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
 		return;
 	}
 	bad_page_fault(regs, address, SIGBUS);
-- 
Gitblit v1.6.2
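
For context, this patch moves the handler onto the generic fault-handling skeleton used across architectures after the mmap locking API rework: mmap_read_trylock()/mmap_read_lock()/mmap_read_unlock() in place of the old mmap_sem rwsem calls, handle_mm_fault() taking a pt_regs argument (which also takes over min_flt/maj_flt accounting), fault_signal_pending() replacing the open-coded VM_FAULT_RETRY/fatal_signal_pending() check, and FAULT_FLAG_DEFAULT as the starting flag set. The following condensed sketch shows that flow in one place. It is illustrative only and compiles only inside the kernel tree; the function name fault_flow_sketch(), its parameter list, and the simplified error paths are placeholders, not the literal microblaze do_page_fault().

/* Condensed sketch of the post-conversion fault flow (in-tree only). */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/sched/signal.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/ptrace.h>

static void fault_flow_sketch(struct pt_regs *regs, struct mm_struct *mm,
			      unsigned long address, bool is_write)
{
	/* FAULT_FLAG_DEFAULT already contains ALLOW_RETRY | KILLABLE | INTERRUPTIBLE. */
	unsigned int flags = FAULT_FLAG_DEFAULT;
	struct vm_area_struct *vma;
	vm_fault_t fault;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;

	/* Software perf accounting, as added by the patch. */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * Try-lock first so a kernel-mode fault taken while mmap_lock is
	 * already held can bail out instead of deadlocking.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			return;		/* stands in for the no-semaphore error path */
retry:
		mmap_read_lock(mm);
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma || vma->vm_start > address))
		goto unlock;		/* the real handler also tries stack expansion */

	/* handle_mm_fault() now takes regs and updates min_flt/maj_flt itself. */
	fault = handle_mm_fault(vma, address, flags, regs);

	/* Replaces the old "(fault & VM_FAULT_RETRY) && fatal_signal_pending()" check. */
	if (fault_signal_pending(fault, regs))
		return;			/* core code already dropped mmap_lock */

	if (fault & VM_FAULT_RETRY) {
		/* FAULT_FLAG_ALLOW_RETRY is no longer cleared; just mark the retry. */
		flags |= FAULT_FLAG_TRIED;
		goto retry;		/* mmap_lock was released before RETRY was returned */
	}

unlock:
	mmap_read_unlock(mm);
}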