@@ -25,7 +25,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 #include <asm/setup.h>
@@ -131,7 +130,7 @@
 		show_signal_msg(regs, sig, code,
 				addr, current);
 
-	force_sig_fault(sig, code, (void __user *) addr, 0, current);
+	force_sig_fault(sig, code, (void __user *) addr, 0);
 }
 
 static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
@@ -168,7 +167,7 @@
 	int from_user = !(regs->psr & PSR_PS);
 	int code;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	if (text_fault)
 		address = regs->pc;
@@ -196,7 +195,7 @@
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	if (!from_user && address >= PAGE_OFFSET)
 		goto bad_area;
@@ -235,9 +234,9 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -251,20 +250,10 @@
 	}
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			current->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-				      1, regs, address);
-		} else {
-			current->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-				      1, regs, address);
-		}
 		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			flags |= FAULT_FLAG_TRIED;
 
-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
@@ -273,7 +262,7 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -281,7 +270,7 @@
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -299,8 +288,6 @@
 		if (fixup > 10) {
 			extern const unsigned int __memset_start[];
 			extern const unsigned int __memset_end[];
-			extern const unsigned int __csum_partial_copy_start[];
-			extern const unsigned int __csum_partial_copy_end[];
 
 #ifdef DEBUG_EXCEPTIONS
 			printk("Exception: PC<%08lx> faddr<%08lx>\n",
@@ -309,9 +296,7 @@
 				regs->pc, fixup, g2);
 #endif
 			if ((regs->pc >= (unsigned long)__memset_start &&
-			     regs->pc < (unsigned long)__memset_end) ||
-			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
-			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
+			     regs->pc < (unsigned long)__memset_end)) {
 				regs->u_regs[UREG_I4] = address;
 				regs->u_regs[UREG_I5] = regs->pc;
 			}
@@ -330,7 +315,7 @@
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (from_user) {
 		pagefault_out_of_memory();
 		return;
@@ -338,7 +323,7 @@
 	goto no_context;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
 	if (!from_user)
 		goto no_context;
@@ -351,6 +336,8 @@
 		 */
 		int offset = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		p4d_t *p4d, *p4d_k;
+		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 
 		pgd = tsk->active_mm->pgd + offset;
@@ -363,8 +350,13 @@
 			return;
 		}
 
-		pmd = pmd_offset(pgd, address);
-		pmd_k = pmd_offset(pgd_k, address);
+		p4d = p4d_offset(pgd, address);
+		pud = pud_offset(p4d, address);
+		pmd = pmd_offset(pud, address);
+
+		p4d_k = p4d_offset(pgd_k, address);
+		pud_k = pud_offset(p4d_k, address);
+		pmd_k = pmd_offset(pud_k, address);
 
 		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
 			goto bad_area_nosemaphore;
@@ -385,7 +377,7 @@
 
 	code = SEGV_MAPERR;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -405,27 +397,27 @@
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(vma, address, flags)) {
+	switch (handle_mm_fault(vma, address, flags, NULL)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
 }
 
 static void check_stack_aligned(unsigned long sp)
 {
 	if (sp & 0x7UL)
-		force_sig(SIGILL, current);
+		force_sig(SIGILL);
 }
 
 void window_overflow_fault(void)