@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Page Fault Handling for ARC (TLB Miss / ProtV)
  *
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/signal.h>
@@ -16,7 +13,6 @@
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
 #include <linux/mm_types.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
 /*
@@ -33,6 +29,7 @@
 	 * with the 'reference' page table.
 	 */
 	pgd_t *pgd, *pgd_k;
+	p4d_t *p4d, *p4d_k;
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
@@ -42,8 +39,13 @@
 	if (!pgd_present(*pgd_k))
 		goto bad_area;
 
-	pud = pud_offset(pgd, address);
-	pud_k = pud_offset(pgd_k, address);
+	p4d = p4d_offset(pgd, address);
+	p4d_k = p4d_offset(pgd_k, address);
+	if (!p4d_present(*p4d_k))
+		goto bad_area;
+
+	pud = pud_offset(p4d, address);
+	pud_k = pud_offset(p4d_k, address);
 	if (!pud_present(*pud_k))
 		goto bad_area;
 
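Note on the hunk above: the extra step tracks the kernel-wide move to a five-level page-table API, where every walker goes pgd -> p4d -> pud -> pmd. On an MMU that uses fewer levels (ARC's two-level tables fold p4d/pud/pmd away), the spare levels are stubbed out by the generic headers, so the added p4d hop is free at runtime. A paraphrased sketch of those folded helpers (from include/asm-generic/pgtable-nop4d.h and pgtable-nopud.h; exact bodies vary by kernel version):

	/* p4d folded into pgd: the "walk" is just a cast ... */
	static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
	{
		return (p4d_t *)pgd;
	}

	/* ... and a folded level is always present, so the added
	 * p4d_present() check above cannot fail on such configs. */
	static inline int p4d_present(p4d_t p4d)
	{
		return 1;
	}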
@@ -66,24 +68,19 @@
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	int si_code = SEGV_MAPERR;
-	int ret;
-	vm_fault_t fault;
-	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	int sig, si_code = SEGV_MAPERR;
+	unsigned int write = 0, exec = 0, mask;
+	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
+	unsigned int flags;			/* handle_mm_fault() input */
 
 	/*
-	 * We fault-in kernel-space virtual memory on-demand. The
-	 * 'reference' page table is init_mm.pgd.
-	 *
 	 * NOTE! We MUST NOT take any locks for this case. We may
 	 * be in an interrupt or a critical region, and should
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
 	if (address >= VMALLOC_START && !user_mode(regs)) {
-		ret = handle_kernel_vaddr_fault(address);
-		if (unlikely(ret))
+		if (unlikely(handle_kernel_vaddr_fault(address)))
 			goto no_context;
 		else
 			return;
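Note: the reworked declarations above separate handle_mm_fault()'s input (flags) from its output (fault), and preinitialize fault to VM_FAULT_SIGSEGV so that paths which bail out before ever calling handle_mm_fault() (e.g. no usable VMA) still take the common signal-delivery exit in the last hunk below. The removed open-coded initializer resurfaces there as FAULT_FLAG_DEFAULT, which include/linux/mm.h defines (paraphrased) as:

	#define FAULT_FLAG_DEFAULT	(FAULT_FLAG_ALLOW_RETRY | \
					 FAULT_FLAG_KILLABLE | \
					 FAULT_FLAG_INTERRUPTIBLE)

so the conversion also picks up FAULT_FLAG_INTERRUPTIBLE, meaning a user-mode fault can now be interrupted by non-fatal signals as well.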
@@ -96,143 +93,96 @@
 	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
+	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
+		write = 1;
+	else if ((regs->ecr_vec == ECR_V_PROTV) &&
+		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+		exec = 1;
+
+	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+	if (write)
+		flags |= FAULT_FLAG_WRITE;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
+
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-	/*
-	 * Ok, we have a good vm_area for this memory access, so
-	 * we can handle it..
-	 */
-good_area:
-	si_code = SEGV_ACCERR;
-
-	/* Handle protection violation, execute on heap or stack */
-
-	if ((regs->ecr_vec == ECR_V_PROTV) &&
-	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
-		goto bad_area;
-
-	if (write) {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-		flags |= FAULT_FLAG_WRITE;
-	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+	if (unlikely(address < vma->vm_start)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
 			goto bad_area;
 	}
 
 	/*
-	 * If for any reason at all we couldn't handle the fault,
-	 * make sure we exit gracefully rather than endlessly redo
-	 * the fault.
+	 * vm_area is good, now check permissions for this memory access
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	mask = VM_READ;
+	if (write)
+		mask = VM_WRITE;
+	if (exec)
+		mask = VM_EXEC;
 
-	if (unlikely(fatal_signal_pending(current))) {
-
-		/*
-		 * if fault retry, mmap_sem already relinquished by core mm
-		 * so OK to return to user mode (with signal handled first)
-		 */
-		if (fault & VM_FAULT_RETRY) {
-			if (!user_mode(regs))
-				goto no_context;
-			return;
-		}
+	if (!(vma->vm_flags & mask)) {
+		si_code = SEGV_ACCERR;
+		goto bad_area;
 	}
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (likely(!(fault & VM_FAULT_ERROR))) {
-		if (flags & FAULT_FLAG_ALLOW_RETRY) {
-			/* To avoid updating stats twice for retry case */
-			if (fault & VM_FAULT_MAJOR) {
-				tsk->maj_flt++;
-				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-					      regs, address);
-			} else {
-				tsk->min_flt++;
-				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-					      regs, address);
-			}
-
-			if (fault & VM_FAULT_RETRY) {
-				flags &= ~FAULT_FLAG_ALLOW_RETRY;
-				flags |= FAULT_FLAG_TRIED;
-				goto retry;
-			}
-		}
-
-		/* Fault Handled Gracefully */
-		up_read(&mm->mmap_sem);
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
 	}
 
-	if (fault & VM_FAULT_OOM)
-		goto out_of_memory;
-	else if (fault & VM_FAULT_SIGSEGV)
-		goto bad_area;
-	else if (fault & VM_FAULT_SIGBUS)
-		goto do_sigbus;
-
-	/* no man's land */
-	BUG();
-
 	/*
-	 * Something tried to access memory that isn't in our memory map..
-	 * Fix it, but check if it's kernel or user first..
+	 * Fault retry nuances, mmap_lock already relinquished by core mm
 	 */
+	if (unlikely((fault & VM_FAULT_RETRY) &&
+		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
+		flags |= FAULT_FLAG_TRIED;
+		goto retry;
+	}
+
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
-	/* User mode accesses just cause a SIGSEGV */
-	if (user_mode(regs)) {
-		tsk->thread.fault_address = address;
-		force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
-		return;
-	}
-
-no_context:
-	/* Are we prepared to handle this kernel fault?
-	 *
-	 * (The kernel has valid exception-points in the source
-	 * when it accesses user-memory. When it fails in one
-	 * of those points, we find it in a table and do a jump
-	 * to some fixup code that loads an appropriate error
-	 * code)
+	/*
+	 * Major/minor page fault accounting
+	 * (in case of retry we only land here once)
 	 */
-	if (fixup_exception(regs))
+	if (likely(!(fault & VM_FAULT_ERROR)))
+		/* Normal return path: fault Handled Gracefully */
 		return;
-
-	die("Oops", regs, address);
-
-out_of_memory:
-	up_read(&mm->mmap_sem);
-
-	if (user_mode(regs)) {
-		pagefault_out_of_memory();
-		return;
-	}
-
-	goto no_context;
-
-do_sigbus:
-	up_read(&mm->mmap_sem);
 
 	if (!user_mode(regs))
 		goto no_context;
 
+	if (fault & VM_FAULT_OOM) {
+		pagefault_out_of_memory();
+		return;
+	}
+
+	if (fault & VM_FAULT_SIGBUS) {
+		sig = SIGBUS;
+		si_code = BUS_ADRERR;
+	}
+	else {
+		sig = SIGSEGV;
+	}
+
 	tsk->thread.fault_address = address;
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
+	force_sig_fault(sig, si_code, (void __user *)address);
+	return;
+
+no_context:
+	if (fixup_exception(regs))
+		return;
+
+	die("Oops", regs, address);
 }
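Note on the rework above: the separate good_area/out_of_memory/do_sigbus exits collapse into a single tail. fault_signal_pending() replaces the open-coded fatal_signal_pending() dance; mmap_read_lock()/mmap_read_unlock() are the upstream wrappers that replaced direct down_read()/up_read() on the renamed mmap_lock; and handle_mm_fault() now takes regs so core mm does the maj_flt/min_flt and perf accounting itself, which is why the caller counts PERF_COUNT_SW_PAGE_FAULTS exactly once, before the retry loop. force_sig_fault() also lost its task parameter upstream (it always acts on current). A reviewer's sketch of the resulting control flow, reconstructed from the hunks above:

	/*
	 *   kernel vaddr in kernel mode -> sync with init_mm tables (no locks)
	 *   decode ECR once -> write/exec; flags = FAULT_FLAG_DEFAULT [|USER][|WRITE]
	 * retry:
	 *   mmap_read_lock(); find_vma(); maybe expand_stack()
	 *   vm_flags vs. access mask -> bad_area with SEGV_ACCERR
	 *   fault = handle_mm_fault(vma, address, flags, regs)
	 *   signal pending  -> return (core mm dropped the lock on retry)
	 *   VM_FAULT_RETRY  -> add FAULT_FLAG_TRIED, goto retry
	 * bad_area / fall-through:
	 *   mmap_read_unlock()
	 *   no error bits   -> return (handled)
	 *   kernel mode     -> no_context: fixup_exception() or die()
	 *   VM_FAULT_OOM    -> pagefault_out_of_memory()
	 *   else            -> force_sig_fault(SIGBUS or SIGSEGV)
	 */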
---|