2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/microblaze/mm/fault.c
@@ -28,9 +28,9 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/perf_event.h>
 
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <linux/mmu_context.h>
 #include <linux/uaccess.h>
@@ -91,7 +91,7 @@
 	int code = SEGV_MAPERR;
 	int is_write = error_code & ESR_S;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	regs->ear = address;
 	regs->esr = error_code;
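Note on the flags change: FAULT_FLAG_DEFAULT was introduced so that every architecture starts a fault from the same baseline instead of open-coding the flag set. A hedged sketch of the definition this hunk relies on (include/linux/mm.h, circa v5.7); the new name also picks up FAULT_FLAG_INTERRUPTIBLE on top of the two flags it replaces:

/* Sketch of the default fault-flag bundle, as introduced upstream. */
#define FAULT_FLAG_DEFAULT	(FAULT_FLAG_ALLOW_RETRY | \
				 FAULT_FLAG_KILLABLE | \
				 FAULT_FLAG_INTERRUPTIBLE)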
@@ -122,10 +122,12 @@
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
-	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * erroneous fault occurring in a code path which already holds mmap_lock
 	 * we will deadlock attempting to validate the fault against the
 	 * address space. Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
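With the perf_sw_event() hook in place, the generic software page-fault counter fires on this architecture too, so tools like perf stat -e page-faults start reporting. For reference, a rough sketch of the wrapper the hunk calls (include/linux/perf_event.h); the static key keeps the hot-path cost near zero when no software event is armed:

/* Approximate shape of the inline wrapper used above. */
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}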
@@ -137,12 +139,12 @@
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	vma = find_vma(mm, address);
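The trylock/lock conversion is part of the tree-wide mmap locking API change, in which the underlying rwsem was also renamed from mm->mmap_sem to mm->mmap_lock. The new calls are thin wrappers over that rwsem; roughly, as in include/linux/mmap_lock.h after the rename:

/* Sketch of the mmap locking wrappers this diff converts to. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_lock);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_lock) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_lock);
}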
@@ -215,9 +217,9 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
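Two changes meet in this hunk: handle_mm_fault() grows a regs argument so the mm core can account faults centrally (see the note after the next hunk), and the open-coded fatal-signal test becomes fault_signal_pending(), which additionally bails out on non-fatal signals for user-mode faults. A hedged sketch of that helper (include/linux/sched/signal.h):

/* Approximate definition of the signal check used above. */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}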
@@ -231,16 +233,11 @@
 	}
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (unlikely(fault & VM_FAULT_MAJOR))
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			flags |= FAULT_FLAG_TRIED;
 
 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
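The dropped maj_flt/min_flt bookkeeping is not lost: the regs argument added to handle_mm_fault() above lets the mm core account every fault in one place, both per task and via perf. Likewise, FAULT_FLAG_ALLOW_RETRY is no longer cleared here because the core now allows VM_FAULT_RETRY more than once. A simplified sketch, assuming the shape of mm_account_fault() in mm/memory.c:

static inline void mm_account_fault(struct pt_regs *regs, unsigned long address,
				    unsigned int flags, vm_fault_t ret)
{
	bool major;

	/* Only account completed faults, not the first half of a retry. */
	if (!(flags & FAULT_FLAG_ALLOW_RETRY) || (ret & VM_FAULT_RETRY))
		return;

	/* A retried fault counts as major: it had to wait for I/O. */
	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

	if (major)
		current->maj_flt++;
	else
		current->min_flt++;

	/* Kernel-internal faults pass a NULL regs and skip perf. */
	if (!regs)
		return;

	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}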
@@ -249,7 +246,7 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * keep track of tlb+htab misses that are good addrs but
@@ -260,7 +257,7 @@
 	return;
 
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	pte_errors++;
@@ -279,7 +276,7 @@
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -287,9 +284,9 @@
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (user_mode(regs)) {
-		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
+		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
 		return;
 	}
 	bad_page_fault(regs, address, SIGBUS);
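Finally, force_sig_fault() dropped its task parameter: the helper now always signals current, which is the only case fault handlers need, so the explicit argument was redundant. The prototype after the change, as I understand it (include/linux/sched/signal.h):

/* Post-change prototype; the target task is implicitly current. */
int force_sig_fault(int sig, int code, void __user *addr);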