commit 102a0743326a03cd1a1202ceda21e175b7d3575c
Date: 2024-02-20
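
Taken together, the hunks below update do_page_fault() to the current
core-MM interfaces:

- take and release the address-space lock via mmap_read_trylock(),
  mmap_read_lock() and mmap_read_unlock() instead of calling
  down_read()/up_read() on mm->mmap_sem directly;
- initialize the fault flags with FAULT_FLAG_DEFAULT rather than the
  open-coded FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE pair;
- emit PERF_COUNT_SW_PAGE_FAULTS and pass regs to handle_mm_fault(),
  which moves major/minor fault accounting into core MM and lets the
  local maj_flt/min_flt bookkeeping go away;
- use fault_signal_pending() instead of the open-coded
  VM_FAULT_RETRY + fatal_signal_pending() check;
- walk the p4d level between pgd and pud when synchronizing the
  vmalloc region, and flush with flush_tlb_kernel_page() instead of
  flush_tlb_one().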
diff --git a/kernel/arch/nios2/mm/fault.c b/kernel/arch/nios2/mm/fault.c
--- a/kernel/arch/nios2/mm/fault.c
+++ b/kernel/arch/nios2/mm/fault.c
@@ -24,7 +24,7 @@
 #include <linux/mm.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
-#include <linux/ptrace.h>
+#include <linux/perf_event.h>
 
 #include <asm/mmu_context.h>
 #include <asm/traps.h>
@@ -48,7 +48,7 @@
 	struct mm_struct *mm = tsk->mm;
 	int code = SEGV_MAPERR;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	cause >>= 2;
 
@@ -84,11 +84,13 @@
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+	if (!mmap_read_trylock(mm)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ea))
 			goto bad_area_nosemaphore;
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	vma = find_vma(mm, address);
@@ -132,9 +134,9 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -147,24 +149,12 @@
 		BUG();
 	}
 
-	/*
-	 * Major/minor page fault accounting is only done on the
-	 * initial attempt. If we go through a retry, it is extremely
-	 * likely that the page will be found in page cache at that point.
-	 */
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
-			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
-			 * of starvation. */
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			flags |= FAULT_FLAG_TRIED;
 
 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
@@ -173,7 +163,7 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 /*
@@ -181,7 +171,7 @@
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -219,14 +209,14 @@
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
@@ -246,6 +236,7 @@
 		 */
 		int offset = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		p4d_t *p4d, *p4d_k;
 		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
@@ -257,8 +248,12 @@
 			goto no_context;
 		set_pgd(pgd, *pgd_k);
 
-		pud = pud_offset(pgd, address);
-		pud_k = pud_offset(pgd_k, address);
+		p4d = p4d_offset(pgd, address);
+		p4d_k = p4d_offset(pgd_k, address);
+		if (!p4d_present(*p4d_k))
+			goto no_context;
+		pud = pud_offset(p4d, address);
+		pud_k = pud_offset(p4d_k, address);
 		if (!pud_present(*pud_k))
 			goto no_context;
 		pmd = pmd_offset(pud, address);
@@ -271,7 +266,7 @@
 		if (!pte_present(*pte_k))
 			goto no_context;
 
-		flush_tlb_one(address);
+		flush_tlb_kernel_page(address);
 		return;
 	}
 }
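
Appendix (not part of the patch): a condensed, illustrative sketch of
the lock/retry shape of do_page_fault() once these hunks are applied.
VMA lookup/validation and the VM_FAULT_ERROR paths are elided; see the
hunks above for the actual code.

	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	/* Count every fault once, before any retry. */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (!mmap_read_trylock(mm)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
retry:
		mmap_read_lock(mm);
	}

	/* ... find_vma() and access checks ... */

	fault = handle_mm_fault(vma, address, flags, regs);

	/*
	 * Core MM releases the mmap lock itself before returning
	 * VM_FAULT_RETRY for a task with a pending fatal signal, so
	 * a plain return is safe here.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			/* The mmap lock was already dropped; retake it. */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;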