2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/arc/mm/fault.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Page Fault Handling for ARC (TLB Miss / ProtV)
  *
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/signal.h>
@@ -16,7 +13,6 @@
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
 #include <linux/mm_types.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
 /*
@@ -33,6 +29,7 @@
  * with the 'reference' page table.
  */
         pgd_t *pgd, *pgd_k;
+        p4d_t *p4d, *p4d_k;
         pud_t *pud, *pud_k;
         pmd_t *pmd, *pmd_k;
 
@@ -42,8 +39,13 @@
         if (!pgd_present(*pgd_k))
                 goto bad_area;
 
-        pud = pud_offset(pgd, address);
-        pud_k = pud_offset(pgd_k, address);
+        p4d = p4d_offset(pgd, address);
+        p4d_k = p4d_offset(pgd_k, address);
+        if (!p4d_present(*p4d_k))
+                goto bad_area;
+
+        pud = pud_offset(p4d, address);
+        pud_k = pud_offset(p4d_k, address);
         if (!pud_present(*pud_k))
                 goto bad_area;
 
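For context on the hunk above: this is the vmalloc-area fault walk being moved to the kernel's 5-level page-table API. p4d_offset() is inserted between the pgd and pud lookups, and on configurations with fewer than five levels (as on ARC) the p4d/pud levels are folded, so the extra lookups simply resolve back to the pgd entry and cost nothing at runtime. Below is a minimal standalone sketch of the resulting walk, syncing one kernel mapping from the reference table (init_mm) into the faulting page table; the helper name and plain error returns are illustrative, not the kernel's code.

/*
 * Illustrative sketch only (hypothetical helper, not part of the patch):
 * walk both page tables through the folded p4d/pud levels and copy the
 * kernel pmd entry from the reference table into the current one.
 */
static int sync_kernel_pmd_sketch(pgd_t *pgd, pgd_t *pgd_k, unsigned long address)
{
        p4d_t *p4d = p4d_offset(pgd, address);
        p4d_t *p4d_k = p4d_offset(pgd_k, address);
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        if (!p4d_present(*p4d_k))
                return -1;

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (!pud_present(*pud_k))
                return -1;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return -1;

        set_pmd(pmd, *pmd_k);   /* copy the kernel mapping entry */
        return 0;
}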
@@ -66,24 +68,19 @@
         struct vm_area_struct *vma = NULL;
         struct task_struct *tsk = current;
         struct mm_struct *mm = tsk->mm;
-        int si_code = SEGV_MAPERR;
-        int ret;
-        vm_fault_t fault;
-        int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
-        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+        int sig, si_code = SEGV_MAPERR;
+        unsigned int write = 0, exec = 0, mask;
+        vm_fault_t fault = VM_FAULT_SIGSEGV;    /* handle_mm_fault() output */
+        unsigned int flags;                     /* handle_mm_fault() input */
 
         /*
-         * We fault-in kernel-space virtual memory on-demand. The
-         * 'reference' page table is init_mm.pgd.
-         *
          * NOTE! We MUST NOT take any locks for this case. We may
          * be in an interrupt or a critical region, and should
          * only copy the information from the master page table,
         * nothing more.
          */
         if (address >= VMALLOC_START && !user_mode(regs)) {
-                ret = handle_kernel_vaddr_fault(address);
-                if (unlikely(ret))
+                if (unlikely(handle_kernel_vaddr_fault(address)))
                         goto no_context;
                 else
                         return;
@@ -96,143 +93,96 @@
         if (faulthandler_disabled() || !mm)
                 goto no_context;
 
+        if (regs->ecr_cause & ECR_C_PROTV_STORE)        /* ST/EX */
+                write = 1;
+        else if ((regs->ecr_vec == ECR_V_PROTV) &&
+                 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+                exec = 1;
+
+        flags = FAULT_FLAG_DEFAULT;
         if (user_mode(regs))
                 flags |= FAULT_FLAG_USER;
+        if (write)
+                flags |= FAULT_FLAG_WRITE;
+
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-        down_read(&mm->mmap_sem);
+        mmap_read_lock(mm);
+
         vma = find_vma(mm, address);
         if (!vma)
                 goto bad_area;
-        if (vma->vm_start <= address)
-                goto good_area;
-        if (!(vma->vm_flags & VM_GROWSDOWN))
-                goto bad_area;
-        if (expand_stack(vma, address))
-                goto bad_area;
-
-        /*
-         * Ok, we have a good vm_area for this memory access, so
-         * we can handle it..
-         */
-good_area:
-        si_code = SEGV_ACCERR;
-
-        /* Handle protection violation, execute on heap or stack */
-
-        if ((regs->ecr_vec == ECR_V_PROTV) &&
-            (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
-                goto bad_area;
-
-        if (write) {
-                if (!(vma->vm_flags & VM_WRITE))
-                        goto bad_area;
-                flags |= FAULT_FLAG_WRITE;
-        } else {
-                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+        if (unlikely(address < vma->vm_start)) {
+                if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
                         goto bad_area;
         }
 
         /*
-         * If for any reason at all we couldn't handle the fault,
-         * make sure we exit gracefully rather than endlessly redo
-         * the fault.
+         * vm_area is good, now check permissions for this memory access
          */
-        fault = handle_mm_fault(vma, address, flags);
+        mask = VM_READ;
+        if (write)
+                mask = VM_WRITE;
+        if (exec)
+                mask = VM_EXEC;
 
-        if (unlikely(fatal_signal_pending(current))) {
-
-                /*
-                 * if fault retry, mmap_sem already relinquished by core mm
-                 * so OK to return to user mode (with signal handled first)
-                 */
-                if (fault & VM_FAULT_RETRY) {
-                        if (!user_mode(regs))
-                                goto no_context;
-                        return;
-                }
+        if (!(vma->vm_flags & mask)) {
+                si_code = SEGV_ACCERR;
+                goto bad_area;
         }
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+        fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (likely(!(fault & VM_FAULT_ERROR))) {
-                if (flags & FAULT_FLAG_ALLOW_RETRY) {
-                        /* To avoid updating stats twice for retry case */
-                        if (fault & VM_FAULT_MAJOR) {
-                                tsk->maj_flt++;
-                                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                                              regs, address);
-                        } else {
-                                tsk->min_flt++;
-                                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                                              regs, address);
-                        }
-
-                        if (fault & VM_FAULT_RETRY) {
-                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
-                                flags |= FAULT_FLAG_TRIED;
-                                goto retry;
-                        }
-                }
-
-                /* Fault Handled Gracefully */
-                up_read(&mm->mmap_sem);
+        /* Quick path to respond to signals */
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return;
         }
 
-        if (fault & VM_FAULT_OOM)
-                goto out_of_memory;
-        else if (fault & VM_FAULT_SIGSEGV)
-                goto bad_area;
-        else if (fault & VM_FAULT_SIGBUS)
-                goto do_sigbus;
-
-        /* no man's land */
-        BUG();
-
         /*
-         * Something tried to access memory that isn't in our memory map..
-         * Fix it, but check if it's kernel or user first..
+         * Fault retry nuances, mmap_lock already relinquished by core mm
          */
+        if (unlikely((fault & VM_FAULT_RETRY) &&
+                     (flags & FAULT_FLAG_ALLOW_RETRY))) {
+                flags |= FAULT_FLAG_TRIED;
+                goto retry;
+        }
+
 bad_area:
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
 
-        /* User mode accesses just cause a SIGSEGV */
-        if (user_mode(regs)) {
-                tsk->thread.fault_address = address;
-                force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
-                return;
-        }
-
-no_context:
-        /* Are we prepared to handle this kernel fault?
-         *
-         * (The kernel has valid exception-points in the source
-         * when it accesses user-memory. When it fails in one
-         * of those points, we find it in a table and do a jump
-         * to some fixup code that loads an appropriate error
-         * code)
+        /*
+         * Major/minor page fault accounting
+         * (in case of retry we only land here once)
          */
-        if (fixup_exception(regs))
+        if (likely(!(fault & VM_FAULT_ERROR)))
+                /* Normal return path: fault Handled Gracefully */
                 return;
-
-        die("Oops", regs, address);
-
-out_of_memory:
-        up_read(&mm->mmap_sem);
-
-        if (user_mode(regs)) {
-                pagefault_out_of_memory();
-                return;
-        }
-
-        goto no_context;
-
-do_sigbus:
-        up_read(&mm->mmap_sem);
 
         if (!user_mode(regs))
                 goto no_context;
 
+        if (fault & VM_FAULT_OOM) {
+                pagefault_out_of_memory();
+                return;
+        }
+
+        if (fault & VM_FAULT_SIGBUS) {
+                sig = SIGBUS;
+                si_code = BUS_ADRERR;
+        }
+        else {
+                sig = SIGSEGV;
+        }
+
         tsk->thread.fault_address = address;
-        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
+        force_sig_fault(sig, si_code, (void __user *)address);
+        return;
+
+no_context:
+        if (fixup_exception(regs))
+                return;
+
+        die("Oops", regs, address);
 }
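A note on the rewritten tail of do_page_fault() above. The rework leans on newer core-mm helpers: mmap_read_lock()/mmap_read_unlock() are the mmap_lock wrappers replacing down_read()/up_read() on the old mmap_sem, fault_signal_pending() replaces the open-coded fatal-signal handling, and handle_mm_fault() now takes regs and performs the maj_flt/min_flt and PERF_COUNT_SW_PAGE_FAULTS_MAJ/MIN accounting itself, which is why the hand-rolled accounting block could be dropped. The access check collapses the old read/write/exec special cases into a single vm_flags mask; a minimal standalone sketch of that idea follows (the helper name is hypothetical, not part of the patch):

/*
 * Illustrative only: pick the vm_flags permission required by this access
 * and test it against the VMA, mirroring the mask logic in the patch above.
 */
static bool fault_access_allowed(struct vm_area_struct *vma,
                                 unsigned int write, unsigned int exec)
{
        unsigned int mask = VM_READ;    /* default: data read */

        if (write)
                mask = VM_WRITE;        /* ST/EX fault */
        if (exec)
                mask = VM_EXEC;         /* instruction-fetch protection violation */

        return !!(vma->vm_flags & mask);
}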