commit 102a0743326a03cd1a1202ceda21e175b7d3575c
Date: 2024-02-20

--- a/kernel/arch/alpha/mm/fault.c
+++ b/kernel/arch/alpha/mm/fault.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
 
@@ -89,7 +90,7 @@
        const struct exception_table_entry *fixup;
        int si_code = SEGV_MAPERR;
        vm_fault_t fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       unsigned int flags = FAULT_FLAG_DEFAULT;
 
        /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
           (or is suppressed by the PALcode).  Support that for older CPUs
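Note: FAULT_FLAG_DEFAULT replaces the open-coded ALLOW_RETRY | KILLABLE combination with the arch-independent default set of fault flags. In the mainline headers that introduced it (include/linux/mm.h) it expands to roughly the following; the exact set can vary by kernel version:

        #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
                                     FAULT_FLAG_KILLABLE | \
                                     FAULT_FLAG_INTERRUPTIBLE)

With FAULT_FLAG_INTERRUPTIBLE included, user-mode faults can also be aborted by non-fatal pending signals, not just fatal ones.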
@@ -116,8 +117,9 @@
 #endif
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
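Note: mmap_read_lock()/mmap_read_unlock() are the wrapper API that replaced direct down_read()/up_read() calls when mm->mmap_sem was renamed mm->mmap_lock. Ignoring the lock-instrumentation hooks, the mainline wrappers in include/linux/mmap_lock.h amount to:

        static inline void mmap_read_lock(struct mm_struct *mm)
        {
                down_read(&mm->mmap_lock);
        }

        static inline void mmap_read_unlock(struct mm_struct *mm)
        {
                up_read(&mm->mmap_lock);
        }

The perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, ...) call added above counts every entry into the fault handler, matching what other architectures already did, and is why <linux/perf_event.h> is now included.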
@@ -148,9 +150,9 @@
        /* If for any reason at all we couldn't handle the fault,
           make sure we exit gracefully rather than endlessly redo
           the fault.  */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
-       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+       if (fault_signal_pending(fault, regs))
                return;
 
        if (unlikely(fault & VM_FAULT_ERROR)) {
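Note: fault_signal_pending() folds the old open-coded fatal-signal test into a shared helper and additionally lets user-mode faults abort on non-fatal pending signals. Its mainline definition in include/linux/sched/signal.h is approximately:

        static inline bool fault_signal_pending(vm_fault_t fault_flags,
                                                struct pt_regs *regs)
        {
                return unlikely((fault_flags & VM_FAULT_RETRY) &&
                                (fatal_signal_pending(current) ||
                                 (user_mode(regs) && signal_pending(current))));
        }

handle_mm_fault() gains the regs argument at the same time so that the core mm code can do per-task fault accounting centrally (see the next hunk).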
@@ -164,14 +166,10 @@
        }
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
-                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       flags |= FAULT_FLAG_TRIED;
 
-                       /* No need to up_read(&mm->mmap_sem) as we would
+                       /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
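Note: the maj_flt/min_flt bookkeeping deleted above is not lost; with regs passed to handle_mm_fault(), the core mm does the accounting once for all architectures. A heavily simplified sketch of mainline's mm_account_fault() in mm/memory.c:

        /* simplified sketch of mm/memory.c:mm_account_fault() */
        if (major) {
                current->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        } else {
                current->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
        }

Setting FAULT_FLAG_TRIED instead of clearing FAULT_FLAG_ALLOW_RETRY follows the newer retry protocol, under which a fault may be retried more than once; the TRIED flag merely records that at least one attempt has already been made.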
@@ -180,14 +178,14 @@
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return;
 
 /* Something tried to access memory that isn't in our memory map.
    Fix it, but check if it's kernel or user first.  */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        if (user_mode(regs))
                goto do_sigsegv;
@@ -206,28 +204,28 @@
        printk(KERN_ALERT "Unable to handle kernel paging request at "
               "virtual address %016lx\n", address);
        die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
-       do_exit(SIGKILL);
+       make_task_dead(SIGKILL);
 
        /* We ran out of memory, or some other thing happened to us that
           made us unable to handle the page fault gracefully.  */
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        /* Send a sigbus, regardless of whether we were in kernel
           or user mode.  */
-       force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0, current);
+       force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
        if (!user_mode(regs))
                goto no_context;
        return;
 
 do_sigsegv:
-       force_sig_fault(SIGSEGV, si_code, (void __user *) address, 0, current);
+       force_sig_fault(SIGSEGV, si_code, (void __user *) address, 0);
        return;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
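Note: two API changes round out this hunk. force_sig_fault() lost its task parameter and now always targets current (mainline provides a separate force_sig_fault_to_task() for the directed case), and the kernel-oops path calls make_task_dead(), which unlike do_exit() is intended to be safe to call from an already-broken context. The mainline prototypes are, approximately:

        int force_sig_fault(int sig, int code, void __user *addr);
        void __noreturn make_task_dead(int signr);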