@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_PGTABLE_H
 #define __ASM_PGTABLE_H
@@ -20,18 +9,20 @@
 #include <asm/proc-fns.h>
 
 #include <asm/memory.h>
+#include <asm/mte.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable-prot.h>
+#include <asm/tlbflush.h>
 
 /*
  * VMALLOC range.
  *
  * VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space
+ * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
  *	and fixed mappings
  */
 #define VMALLOC_START		(MODULES_END)
-#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
@@ -45,10 +36,23 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 
-extern void __pte_error(const char *file, int line, unsigned long val);
-extern void __pmd_error(const char *file, int line, unsigned long val);
-extern void __pud_error(const char *file, int line, unsigned long val);
-extern void __pgd_error(const char *file, int line, unsigned long val);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/* Set stride and tlb_level in flush_*_tlb_range */
+#define flush_pmd_tlb_range(vma, addr, end)	\
+	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
+#define flush_pud_tlb_range(vma, addr, end)	\
+	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * Outside of a few very special situations (e.g. hibernation), we always
+ * use broadcast TLB invalidation instructions, therefore a spurious page
+ * fault on one CPU which has been handled concurrently by another CPU
+ * does not need to perform additional invalidation.
+ */
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
@@ -57,7 +61,8 @@
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
 
-#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pte_ERROR(e)	\
+	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
 
 /*
  * Macros to convert between a physical address and its placement in a
@@ -95,6 +100,9 @@
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
+#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
+#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
+				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
 
 #define pte_cont_addr_end(addr, end)	\
 ({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
@@ -149,6 +157,18 @@
 {
 	pte_val(pte) |= pgprot_val(prot);
 	return pte;
+}
+
+static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
+{
+	pmd_val(pmd) &= ~pgprot_val(prot);
+	return pmd;
+}
+
+static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
+{
+	pmd_val(pmd) |= pgprot_val(prot);
+	return pmd;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
@@ -226,6 +246,11 @@
 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
 }
 
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
+}
+
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
 	WRITE_ONCE(*ptep, pte);
@@ -257,29 +282,46 @@
  *
  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
+
+static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
+					   pte_t pte)
 {
 	pte_t old_pte;
 
+	if (!IS_ENABLED(CONFIG_DEBUG_VM))
+		return;
+
+	old_pte = READ_ONCE(*ptep);
+
+	if (!pte_valid(old_pte) || !pte_valid(pte))
+		return;
+	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
+		return;
+
+	/*
+	 * Check for potential race with hardware updates of the pte
+	 * (ptep_set_access_flags safely changes valid ptes without going
+	 * through an invalid entry).
+	 */
+	VM_WARN_ONCE(!pte_young(pte),
+		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+		     __func__, pte_val(old_pte), pte_val(pte));
+	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
+		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+		     __func__, pte_val(old_pte), pte_val(pte));
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte)
+{
 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte);
 
-	/*
-	 * If the existing pte is valid, check for potential race with
-	 * hardware updates of the pte (ptep_set_access_flags safely changes
-	 * valid ptes without going through an invalid entry).
-	 */
-	old_pte = READ_ONCE(*ptep);
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
-	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
-		VM_WARN_ONCE(!pte_young(pte),
-			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(old_pte), pte_val(pte));
-		VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
-			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(old_pte), pte_val(pte));
-	}
+	if (system_supports_mte() &&
+	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
+		mte_sync_tags(ptep, pte);
+
+	__check_racy_pte_update(mm, ptep, pte);
 
 	set_pte(ptep, pte);
 }
@@ -287,7 +329,6 @@
 /*
  * Huge pte definitions.
  */
-#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
 #define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
 
 /*
@@ -304,9 +345,19 @@
 	return __pte(pgd_val(pgd));
 }
 
+static inline pte_t p4d_pte(p4d_t p4d)
+{
+	return __pte(p4d_val(p4d));
+}
+
 static inline pte_t pud_pte(pud_t pud)
 {
 	return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+	return __pud(pte_val(pte));
 }
 
 static inline pmd_t pud_pmd(pud_t pud)
@@ -324,14 +375,19 @@
 	return __pmd(pte_val(pte));
 }
 
-static inline pgprot_t mk_sect_prot(pgprot_t prot)
+static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
 {
-	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
+	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
+}
+
+static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
 /*
- * See the comment in include/asm-generic/pgtable.h
+ * See the comment in include/linux/pgtable.h
  */
 static inline int pte_protnone(pte_t pte)
 {
@@ -344,24 +400,41 @@
 }
 #endif
 
+#define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
+
+static inline int pmd_present(pmd_t pmd)
+{
+	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
+}
+
 /*
  * THP definitions.
  */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
+	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
+
+	return pmd;
+}
 
 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
 
@@ -369,13 +442,25 @@
 
 #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
+#endif
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
+}
+
 #define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
 #define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
 #define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
 #define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
+#define pud_young(pud)		pte_young(pud_pte(pud))
+#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
 #define pud_write(pud)		pte_write(pud_pte(pud))
+
+#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))
 
 #define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
 #define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
@@ -383,12 +468,19 @@
 #define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+#define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))
+
+#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
+#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)
 
 #define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
 #define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)
 
 #define __pgprot_modify(prot,mask,bits) \
 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define pgprot_nx(prot) \
+	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
 
 /*
  * Mark the prot value as uncacheable and unbufferable.
@@ -399,6 +491,30 @@
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define pgprot_device(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_tagged(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
+#define pgprot_mhp	pgprot_tagged
+/*
+ * DMA allocations for non-coherent devices use what the Arm architecture calls
+ * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
+ * and merging of writes. This is different from "Device-nGnR[nE]" memory which
+ * is intended for MMIO and thus forbids speculation, preserves access size,
+ * requires strict alignment and can also force write responses to come from the
+ * endpoint.
+ */
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
+			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+
+/*
+ * Mark the prot value as outer cacheable and inner non-cacheable. Non-coherent
+ * devices on a system with support for a system or last level cache use these
+ * attributes to cache allocations in the system cache.
+ */
+#define pgprot_syscached(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
+			PTE_ATTRINDX(MT_NORMAL_iNC_oWB) | PTE_PXN | PTE_UXN)
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -406,12 +522,12 @@
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 
-#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))
-
 #define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_TABLE)
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_SECT)
+#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
+#define pmd_bad(pmd)		(!pmd_table(pmd))
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
 static inline bool pud_sect(pud_t pud) { return false; }
@@ -423,11 +539,44 @@
 				 PUD_TYPE_TABLE)
 #endif
 
+extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_end[];
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t idmap_pg_end[];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
+
+extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern int populate_range_driver_managed(u64 start, u64 size,
+					 const char *resource_name);
+extern int depopulate_range_driver_managed(u64 start, u64 size,
+					   const char *resource_name);
+#endif
+
+static inline bool in_swapper_pgdir(void *addr)
+{
+	return ((unsigned long)addr & PAGE_MASK) ==
+		((unsigned long)swapper_pg_dir & PAGE_MASK);
+}
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+#ifdef __PAGETABLE_PMD_FOLDED
+	if (in_swapper_pgdir(pmdp)) {
+		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
+		return;
+	}
+#endif /* __PAGETABLE_PMD_FOLDED */
+
 	WRITE_ONCE(*pmdp, pmd);
-	dsb(ishst);
-	isb();
+
+	if (pmd_valid(pmd)) {
+		dsb(ishst);
+		isb();
+	}
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -440,23 +589,19 @@
 	return __pmd_to_phys(pmd);
 }
 
-static inline void pte_unmap(pte_t *pte) { }
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long)__va(pmd_page_paddr(pmd));
+}
 
 /* Find an entry in the third-level page table. */
-#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
 #define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
-#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_unmap_nested(pte)		do { } while (0)
 
 #define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
 #define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
 #define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
 
-#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))
+#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))
 
 /* use ONLY for statically allocated translation tables */
 #define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
@@ -469,17 +614,30 @@
 
 #if CONFIG_PGTABLE_LEVELS > 2
 
-#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
+#define pmd_ERROR(e)	\
+	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
 
 #define pud_none(pud)		(!pud_val(pud))
-#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
+#define pud_bad(pud)		(!pud_table(pud))
 #define pud_present(pud)	pte_present(pud_pte(pud))
+#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
+#define pud_valid(pud)		pte_valid(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
+#ifdef __PAGETABLE_PUD_FOLDED
+	if (in_swapper_pgdir(pudp)) {
+		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
+		return;
+	}
+#endif /* __PAGETABLE_PUD_FOLDED */
+
 	WRITE_ONCE(*pudp, pud);
-	dsb(ishst);
-	isb();
+
+	if (pud_valid(pud)) {
+		dsb(ishst);
+		isb();
+	}
 }
 
 static inline void pud_clear(pud_t *pudp)
@@ -492,17 +650,19 @@
 	return __pud_to_phys(pud);
 }
 
-/* Find an entry in the second-level page table. */
-#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	return (unsigned long)__va(pud_page_paddr(pud));
+}
 
+/* Find an entry in the second-level page table. */
 #define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
-#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
 
 #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
 #define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
 #define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
 
-#define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))
+#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))
 
 /* use ONLY for statically allocated translation tables */
 #define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
@@ -522,45 +682,55 @@
 
 #if CONFIG_PGTABLE_LEVELS > 3
 
-#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))
+#define pud_ERROR(e)	\
+	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
 
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
-#define pgd_present(pgd)	(pgd_val(pgd))
+#define p4d_none(p4d)		(!p4d_val(p4d))
+#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
+#define p4d_present(p4d)	(p4d_val(p4d))
 
-static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
-	WRITE_ONCE(*pgdp, pgd);
+	if (in_swapper_pgdir(p4dp)) {
+		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
+		return;
+	}
+
+	WRITE_ONCE(*p4dp, p4d);
 	dsb(ishst);
+	isb();
 }
 
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
 {
-	set_pgd(pgdp, __pgd(0));
+	set_p4d(p4dp, __p4d(0));
 }
 
-static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
+static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
 {
-	return __pgd_to_phys(pgd);
+	return __p4d_to_phys(p4d);
+}
+
+static inline unsigned long p4d_page_vaddr(p4d_t p4d)
+{
+	return (unsigned long)__va(p4d_page_paddr(p4d));
 }
 
 /* Find an entry in the first-level page table. */
-#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-
-#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
-#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
+#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
 
 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
-#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
 
-#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
+#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
 
 /* use ONLY for statically allocated translation tables */
 #define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
 
 #else
 
+#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
 #define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})
 
 /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
@@ -572,25 +742,21 @@
 
 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
-#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))
-
-#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+#define pgd_ERROR(e)	\
+	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
 
 #define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
 #define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
+	/*
+	 * Normal and Normal-Tagged are two different memory types and indices
+	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
+	 */
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
+			      PTE_ATTRINDX_MASK;
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
 		pte = pte_mkdirty(pte);
@@ -615,6 +781,16 @@
 					pmd_t entry, int dirty)
 {
 	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+
+static inline int pud_devmap(pud_t pud)
+{
+	return 0;
+}
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+	return 0;
 }
 #endif
 
@@ -642,6 +818,27 @@
 					    pte_t *ptep)
 {
 	return __ptep_test_and_clear_young(ptep);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+					 unsigned long address, pte_t *ptep)
+{
+	int young = ptep_test_and_clear_young(vma, address, ptep);
+
+	if (young) {
+		/*
+		 * We can elide the trailing DSB here since the worst that can
+		 * happen is that a CPU continues to use the young entry in its
+		 * TLB and we mistakenly reclaim the associated page. The
+		 * window for such an event is bounded by the next
+		 * context-switch, which provides a DSB to complete the TLB
+		 * invalidation.
+		 */
+		flush_tlb_page_nosync(vma, address);
+	}
+
+	return young;
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -704,11 +901,6 @@
 }
 #endif
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern pgd_t swapper_pg_end[];
-extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
-
 /*
  * Encode and decode a swap entry:
  *	bits 0-1:	present (must be zero)
@@ -730,6 +922,11 @@
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
 /*
  * Ensure that there are not more swap files than can be encoded in the kernel
  * PTEs.
@@ -738,10 +935,37 @@
 
 extern int kern_addr_valid(unsigned long addr);
 
-#include <asm-generic/pgtable.h>
+#ifdef CONFIG_ARM64_MTE
 
-void pgd_cache_init(void);
-#define pgtable_cache_init	pgd_cache_init
+#define __HAVE_ARCH_PREPARE_TO_SWAP
+static inline int arch_prepare_to_swap(struct page *page)
+{
+	if (system_supports_mte())
+		return mte_save_tags(page);
+	return 0;
+}
+
+#define __HAVE_ARCH_SWAP_INVALIDATE
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+	if (system_supports_mte())
+		mte_invalidate_tags(type, offset);
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+	if (system_supports_mte())
+		mte_invalidate_tags_area(type);
+}
+
+#define __HAVE_ARCH_SWAP_RESTORE
+static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+{
+	if (system_supports_mte() && mte_restore_tags(entry, page))
+		set_bit(PG_mte_tagged, &page->flags);
+}
+
+#endif /* CONFIG_ARM64_MTE */
 
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
@@ -758,15 +982,37 @@
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
-#define kc_offset_to_vaddr(o)	((o) | VA_START)
-
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
 #else
 #define phys_to_ttbr(addr)	(addr)
 #endif
 
+/*
+ * On arm64 without hardware Access Flag, copying from user will fail because
+ * the pte is old and cannot be marked young. So we always end up with zeroed
+ * page after fork() + CoW for pfn mappings. We don't always have a
+ * hardware-managed access flag on arm64.
+ */
+static inline bool arch_faults_on_old_pte(void)
+{
+	/* The register read below requires a stable CPU to make any sense */
+	cant_migrate();
+
+	return !cpu_has_hw_af();
+}
+#define arch_faults_on_old_pte		arch_faults_on_old_pte
+
+/*
+ * Experimentally, it's cheap to set the access flag in hardware and we
+ * benefit from prefaulting mappings as 'old' to start with.
+ */
+static inline bool arch_wants_old_prefaulted_pte(void)
+{
+	return !arch_faults_on_old_pte();
+}
+#define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
---|