commit 102a0743326a03cd1a1202ceda21e175b7d3575c
Date:   2024-02-20

--- a/kernel/arch/xtensa/mm/fault.c
+++ b/kernel/arch/xtensa/mm/fault.c
@@ -20,7 +20,6 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
-#include <asm/pgalloc.h>
 
 DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
 void bad_page_fault(struct pt_regs*, unsigned long, int);
@@ -43,7 +42,7 @@
 
 	int is_write, is_exec;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	code = SEGV_MAPERR;
 
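Note: FAULT_FLAG_DEFAULT replaces the open-coded ALLOW_RETRY|KILLABLE pair
with the common baseline used by all arch fault handlers. For reference,
its upstream definition in include/linux/mm.h reads roughly as below; the
behavioral delta is the extra FAULT_FLAG_INTERRUPTIBLE bit:

    /* Roughly as defined in include/linux/mm.h upstream: */
    #define FAULT_FLAG_DEFAULT	(FAULT_FLAG_ALLOW_RETRY | \
				 FAULT_FLAG_KILLABLE | \
				 FAULT_FLAG_INTERRUPTIBLE)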
@@ -73,8 +72,11 @@
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 
 	if (!vma)
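Note: down_read(&mm->mmap_sem) becomes mmap_read_lock(mm), part of the
mmap locking API conversion that renamed mmap_sem to mmap_lock and hid
the rwsem behind wrappers. A minimal sketch of the two wrappers this
patch relies on (simplified from include/linux/mmap_lock.h, which also
carries lockdep/tracing hooks around the rwsem calls):

    /* Simplified sketch of include/linux/mmap_lock.h: */
    static inline void mmap_read_lock(struct mm_struct *mm)
    {
    	down_read(&mm->mmap_lock);
    }

    static inline void mmap_read_unlock(struct mm_struct *mm)
    {
    	up_read(&mm->mmap_lock);
    }

Hoisting perf_sw_event() above the retry: label counts PAGE_FAULTS once
per trap entry, error paths included, while the MAJ/MIN split moves into
the mm core (see the note after the accounting hunk below).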
@@ -108,10 +110,13 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto bad_page_fault;
 		return;
+	}
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
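Note: fault_signal_pending() folds the old fatal-signal test into one
helper and, together with FAULT_FLAG_INTERRUPTIBLE, also lets user-mode
faults abort on any pending signal. Approximately, from
include/linux/sched/signal.h:

    /* Approximate upstream helper: */
    static inline bool fault_signal_pending(vm_fault_t fault_flags,
    					struct pt_regs *regs)
    {
    	return unlikely((fault_flags & VM_FAULT_RETRY) &&
    			(fatal_signal_pending(current) ||
    			 (user_mode(regs) && signal_pending(current))));
    }

The new !user_mode(regs) branch matters: a fault taken in kernel mode
that is interrupted by a signal must still go through the exception-table
fixup in bad_page_fault() rather than silently returning.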
@@ -123,15 +128,10 @@
 		BUG();
 	}
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			flags |= FAULT_FLAG_TRIED;
 
-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
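Note: the maj_flt/min_flt bookkeeping removed here (and the MAJ/MIN perf
counters removed in the next hunk) is not lost; passing regs into
handle_mm_fault() moves the accounting into the mm core. A simplified
sketch of the core-side logic (mm_account_fault() in mm/memory.c; the
real function also skips error and retry cases before accounting):

    /* Simplified from mm/memory.c:mm_account_fault(): */
    static void mm_account_fault(struct pt_regs *regs, unsigned long address,
    			     unsigned int flags, vm_fault_t ret)
    {
    	bool major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

    	if (major)
    		current->maj_flt++;
    	else
    		current->min_flt++;

    	if (!regs)
    		return;

    	if (major)
    		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
    	else
    		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
    }

Dropping "flags &= ~FAULT_FLAG_ALLOW_RETRY" follows the mm change that
allows VM_FAULT_RETRY to be returned multiple times; FAULT_FLAG_TRIED
alone now marks a retried attempt.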
@@ -140,24 +140,18 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	if (flags & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	else
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-
+	mmap_read_unlock(mm);
 	return;
 
 	/* Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (user_mode(regs)) {
 		current->thread.bad_vaddr = address;
 		current->thread.error_code = is_write;
-		force_sig_fault(SIGSEGV, code, (void *) address, current);
+		force_sig_fault(SIGSEGV, code, (void *) address);
 		return;
 	}
 	bad_page_fault(regs, address, SIGSEGV);
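Note: two independent cleanups land in this hunk. First, the removed
"if (flags & VM_FAULT_MAJOR)" tested the fault-*flags* variable against
a vm_fault_t return bit, so the MAJ/MIN perf split here was never
reliable; the core accounting sketched above replaces it. Second,
force_sig_fault() lost its task argument because it always targeted
current; the current prototype in include/linux/sched/signal.h is:

    int force_sig_fault(int sig, int code, void __user *addr);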
@@ -168,7 +162,7 @@
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -176,13 +170,13 @@
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
 	current->thread.bad_vaddr = address;
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address, current);
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
@@ -197,6 +191,8 @@
 	struct mm_struct *act_mm = current->active_mm;
 	int index = pgd_index(address);
 	pgd_t *pgd, *pgd_k;
+	p4d_t *p4d, *p4d_k;
+	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 	pte_t *pte_k;
 
@@ -211,8 +207,18 @@
 
 	pgd_val(*pgd) = pgd_val(*pgd_k);
 
-	pmd = pmd_offset(pgd, address);
-	pmd_k = pmd_offset(pgd_k, address);
+	p4d = p4d_offset(pgd, address);
+	p4d_k = p4d_offset(pgd_k, address);
+	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+		goto bad_page_fault;
+
+	pud = pud_offset(p4d, address);
+	pud_k = pud_offset(p4d_k, address);
+	if (!pud_present(*pud) || !pud_present(*pud_k))
+		goto bad_page_fault;
+
+	pmd = pmd_offset(pud, address);
+	pmd_k = pmd_offset(pud_k, address);
 	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
 		goto bad_page_fault;
 
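Note: xtensa uses two-level page tables, so the new p4d/pud steps are
folded levels (include/asm-generic/pgtable-nop4d.h and pgtable-nopud.h)
and compile down to the old two-level walk; this is the idiomatic form
for walking page tables since the 5-level conversion. Approximately,
the folded helpers are:

    /* Approximate folded-level helpers from asm-generic: */
    static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
    {
    	return (p4d_t *)pgd;	/* p4d shares the pgd entry */
    }

    static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
    {
    	return (pud_t *)p4d;	/* pud folds away the same way */
    }

With the levels folded, p4d_present() and pud_present() are constant
true, so the added checks cost nothing on this configuration while
keeping the walk correct for deeper layouts.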