```diff
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
 
@@ -89,7 +90,7 @@
 	const struct exception_table_entry *fixup;
 	int si_code = SEGV_MAPERR;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
 	   (or is suppressed by the PALcode). Support that for older CPUs
@@ -116,8 +117,9 @@
 #endif
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -148,9 +150,9 @@
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault. */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -164,14 +166,10 @@
 	}
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 
-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
@@ -180,14 +178,14 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	return;
 
 	/* Something tried to access memory that isn't in our memory map.
 	   Fix it, but check if it's kernel or user first. */
  bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	if (user_mode(regs))
 		goto do_sigsegv;
@@ -206,28 +204,28 @@
 	printk(KERN_ALERT "Unable to handle kernel paging request at "
 	       "virtual address %016lx\n", address);
 	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
-	do_exit(SIGKILL);
+	make_task_dead(SIGKILL);
 
 	/* We ran out of memory, or some other thing happened to us that
 	   made us unable to handle the page fault gracefully. */
  out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
  do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	/* Send a sigbus, regardless of whether we were in kernel
 	   or user mode. */
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0, current);
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
 	if (!user_mode(regs))
 		goto no_context;
 	return;
 
  do_sigsegv:
-	force_sig_fault(SIGSEGV, si_code, (void __user *) address, 0, current);
+	force_sig_fault(SIGSEGV, si_code, (void __user *) address, 0);
 	return;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
```