Author: hc
Date:   2024-02-20
Commit: 102a0743326a03cd1a1202ceda21e175b7d3575c
File:   kernel/arch/nds32/mm/fault.c
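This diff folds several upstream core-mm API conversions into the nds32 fault
handler: the five-level page-table walk (p4d), the mmap locking API that
replaced direct mmap_sem use, the VM_ACCESS_FLAGS and FAULT_FLAG_DEFAULT
macros, page-fault accounting moved into handle_mm_fault(), the
fault_signal_pending() helper, and the shorter force_sig_fault() signature.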
--- a/kernel/arch/nds32/mm/fault.c
+++ b/kernel/arch/nds32/mm/fault.c
@@ -9,8 +9,8 @@
 #include <linux/init.h>
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
 extern void die(const char *str, struct pt_regs *regs, long err);
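Note: <linux/perf_event.h> provides the perf_sw_event() call added further
down. Dropping <asm/pgtable.h> matches the v5.8 header consolidation that
moved its generic content into <linux/pgtable.h> (already pulled in via
<linux/mm.h>), so the direct include is no longer needed; if this is applied
to an older baseline, that assumption is worth re-checking.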
@@ -30,6 +30,8 @@
 	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
 
 	do {
+		p4d_t *p4d;
+		pud_t *pud;
 		pmd_t *pmd;
 
 		if (pgd_none(*pgd))
@@ -40,7 +42,9 @@
 			break;
 		}
 
-		pmd = pmd_offset(pgd, addr);
+		p4d = p4d_offset(pgd, addr);
+		pud = pud_offset(p4d, addr);
+		pmd = pmd_offset(pud, addr);
 #if PTRS_PER_PMD != 1
 		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
 #endif
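Note: this is the canonical five-level walk order (pgd -> p4d -> pud -> pmd)
that core mm expects since pud_offset() started taking a p4d_t *. nds32 has
two-level page tables, so both new levels are folded and the extra calls
compile down to pointer casts; the folded helpers in
include/asm-generic/pgtable-nop4d.h and pgtable-nopud.h are essentially:

	static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
	{
		return (p4d_t *)pgd;	/* p4d level folded into pgd */
	}

	static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
	{
		return (pud_t *)p4d;	/* pud level folded into p4d */
	}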
@@ -74,8 +78,8 @@
 	struct vm_area_struct *vma;
 	int si_code;
 	vm_fault_t fault;
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int mask = VM_ACCESS_FLAGS;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
 	tsk = current;
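Note: both substitutions are one-for-one against the upstream macros in
include/linux/mm.h (v5.7-era definitions; re-check against the target tree):

	#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

	#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
				     FAULT_FLAG_KILLABLE | \
				     FAULT_FLAG_INTERRUPTIBLE)

FAULT_FLAG_DEFAULT is a strict superset of the old initializer: the added
FAULT_FLAG_INTERRUPTIBLE bit lets user-mode faults be interrupted by
non-fatal signals as well, which pairs with the fault_signal_pending()
change below.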
@@ -117,17 +121,19 @@
 	if (unlikely(faulthandler_disabled() || !mm))
 		goto no_context;
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (!user_mode(regs) &&
 		    !search_exception_tables(instruction_pointer(regs)))
 			goto no_context;
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in which
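Note: perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, ...) is taken exactly once per
fault, before any retry, matching other architectures. The locking calls are
the mechanical conversion to the v5.8 mmap locking API (mm->mmap_sem was
renamed to mm->mmap_lock in the same series); ignoring later lockdep/tracing
instrumentation, the wrappers in include/linux/mmap_lock.h amount to:

	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);
	}

	static inline bool mmap_read_trylock(struct mm_struct *mm)
	{
		return down_read_trylock(&mm->mmap_lock) != 0;
	}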
@@ -169,8 +175,6 @@
 			mask = VM_EXEC;
 		else {
 			mask = VM_READ | VM_WRITE;
-			if (vma->vm_flags & VM_WRITE)
-				flags |= FAULT_FLAG_WRITE;
 		}
 	} else if (entry == ENTRY_TLB_MISC) {
 		switch (error_code & ITYPE_mskETYPE) {
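Note: the deleted lines set FAULT_FLAG_WRITE from the vma's permissions
(vma->vm_flags & VM_WRITE) rather than from the access type of the fault, so
a read fault in a writable mapping was reported to handle_mm_fault() as a
write. The ENTRY_TLB_MISC switch below derives the flag from error_code
instead, which is the correct source.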
@@ -204,14 +208,14 @@
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(vma, addr, flags);
+	fault = handle_mm_fault(vma, addr, flags, regs);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
-	 * signal first. We do not need to release the mmap_sem because it
+	 * signal first. We do not need to release the mmap_lock because it
 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
 	 */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
 		return;
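Note: handle_mm_fault() grew the pt_regs argument in v5.9 so that core mm can
do the perf and task fault accounting itself (see the next hunk).
fault_signal_pending() replaces the open-coded VM_FAULT_RETRY +
fatal_signal_pending() test and additionally lets user-mode faults bail out
on any pending signal; its definition in include/linux/sched/signal.h is
roughly:

	static inline bool fault_signal_pending(vm_fault_t fault_flags,
						struct pt_regs *regs)
	{
		return unlikely((fault_flags & VM_FAULT_RETRY) &&
				(fatal_signal_pending(current) ||
				 (user_mode(regs) && signal_pending(current))));
	}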
@@ -226,21 +230,11 @@
 		goto bad_area;
 	}
 
-	/*
-	 * Major/minor page fault accounting is only done on the initial
-	 * attempt. If we go through a retry, it is extremely likely that the
-	 * page will be found in page cache at that point.
-	 */
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			tsk->maj_flt++;
-		else
-			tsk->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			flags |= FAULT_FLAG_TRIED;
 
-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
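Note: the maj_flt/min_flt bookkeeping can go because handle_mm_fault(vma,
addr, flags, regs) now accounts the fault in core mm (mm_account_fault() in
mm/memory.c), roughly:

	if (major)
		current->maj_flt++;
	else
		current->min_flt++;

	perf_sw_event(major ? PERF_COUNT_SW_PAGE_FAULTS_MAJ :
			      PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);

Dropping "flags &= ~FAULT_FLAG_ALLOW_RETRY" is a separate v5.7 semantic
change: a fault may now retry more than once, so the retry permission is kept
and only FAULT_FLAG_TRIED is added.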
@@ -248,7 +242,7 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 /*
@@ -256,7 +250,7 @@
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 
@@ -266,7 +260,7 @@
 		tsk->thread.address = addr;
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_no = entry;
-		force_sig_fault(SIGSEGV, si_code, (void __user *)addr, tsk);
+		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
 		return;
 	}
 
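Note: force_sig_fault() lost its task parameter in v5.3 because it always
acts on current; both call sites in this file already passed tsk == current
(tsk is assigned from current near the top of do_page_fault()), so this is a
mechanical conversion to the current signature:

	int force_sig_fault(int sig, int code, void __user *addr);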
@@ -316,14 +310,14 @@
 	 */
 
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
@@ -335,7 +329,7 @@
 	tsk->thread.address = addr;
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = entry;
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, tsk);
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);
 
 	return;
 
@@ -354,6 +348,7 @@
 
 	unsigned int index = pgd_index(addr);
 	pgd_t *pgd, *pgd_k;
+	p4d_t *p4d, *p4d_k;
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 	pte_t *pte_k;
@@ -364,8 +359,13 @@
 	if (!pgd_present(*pgd_k))
 		goto no_context;
 
-	pud = pud_offset(pgd, addr);
-	pud_k = pud_offset(pgd_k, addr);
+	p4d = p4d_offset(pgd, addr);
+	p4d_k = p4d_offset(pgd_k, addr);
+	if (!p4d_present(*p4d_k))
+		goto no_context;
+
+	pud = pud_offset(p4d, addr);
+	pud_k = pud_offset(p4d_k, addr);
 	if (!pud_present(*pud_k))
 		goto no_context;
 
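Note: the kernel-side (init_mm) walk gets the same folded p4d step. The new
p4d_present() check mirrors the pud/pmd checks that follow; with the pud
level folded, pgtable-nopud.h hard-wires p4d_present() to 1, so on nds32 the
branch disappears at compile time and the generated code should be identical
to the old pud_offset(pgd, ...) version.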